linux-2.6.git: drivers/isdn/hisax/hfc_pci.c
1 /* $Id: hfc_pci.c,v 1.48.2.4 2004/02/11 13:21:33 keil Exp $
3 * low-level driver for CCD's HFC-PCI based cards
5 * Author Werner Cornelius
6 * based on existing driver for CCD hfc ISA cards
7 * Copyright by Werner Cornelius <werner@isdn4linux.de>
8 * by Karsten Keil <keil@isdn4linux.de>
10 * This software may be used and distributed according to the terms
11 * of the GNU General Public License, incorporated herein by reference.
13 * For changes and modifications please read
14 * Documentation/isdn/HiSax.cert
18 #include <linux/init.h>
19 #include "hisax.h"
20 #include "hfc_pci.h"
21 #include "isdnl1.h"
22 #include <linux/pci.h>
23 #include <linux/interrupt.h>
25 static const char *hfcpci_revision = "$Revision: 1.48.2.4 $";
27 /* table entry in the PCI devices list */
28 typedef struct {
29 int vendor_id;
30 int device_id;
31 char *vendor_name;
32 char *card_name;
33 } PCI_ENTRY;
35 #define NT_T1_COUNT 20 /* number of 3.125ms interrupts for G2 timeout */
36 #define CLKDEL_TE 0x0e /* CLKDEL in TE mode */
37 #define CLKDEL_NT 0x6c /* CLKDEL in NT mode */
39 static const PCI_ENTRY id_list[] =
41 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_2BD0, "CCD/Billion/Asuscom", "2BD0"},
42 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B000, "Billion", "B000"},
43 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B006, "Billion", "B006"},
44 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B007, "Billion", "B007"},
45 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B008, "Billion", "B008"},
46 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B009, "Billion", "B009"},
47 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00A, "Billion", "B00A"},
48 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B, "Billion", "B00B"},
49 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C, "Billion", "B00C"},
50 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100, "Seyeon", "B100"},
51 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B700, "Primux II S0", "B700"},
52 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B701, "Primux II S0 NT", "B701"},
53 {PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1, "Abocom/Magitek", "2BD1"},
54 {PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675, "Asuscom/Askey", "675"},
55 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT, "German telekom", "T-Concept"},
56 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_A1T, "German telekom", "A1T"},
57 {PCI_VENDOR_ID_ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575, "Motorola MC145575", "MC145575"},
58 {PCI_VENDOR_ID_ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0, "Zoltrix", "2BD0"},
59 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E,"Digi International", "Digi DataFire Micro V IOM2 (Europe)"},
60 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E,"Digi International", "Digi DataFire Micro V (Europe)"},
61 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A,"Digi International", "Digi DataFire Micro V IOM2 (North America)"},
62 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A,"Digi International", "Digi DataFire Micro V (North America)"},
63 {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2, "Sitecom Europe", "DC-105 ISDN PCI"},
64 {0, 0, NULL, NULL},
68 /******************************************/
69 /* free hardware resources used by driver */
70 /******************************************/
71 static void
72 release_io_hfcpci(struct IsdnCardState *cs)
74 printk(KERN_INFO "HiSax: release hfcpci at %p\n",
75 cs->hw.hfcpci.pci_io);
76 cs->hw.hfcpci.int_m2 = 0; /* interrupt output off ! */
77 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
78 Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET); /* Reset On */
79 mdelay(10);
80 Write_hfc(cs, HFCPCI_CIRM, 0); /* Reset Off */
81 mdelay(10);
82 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
83 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, 0); /* disable memory mapped ports + busmaster */
84 del_timer(&cs->hw.hfcpci.timer);
85 kfree(cs->hw.hfcpci.share_start);
86 cs->hw.hfcpci.share_start = NULL;
87 iounmap((void *)cs->hw.hfcpci.pci_io);
90 /********************************************************************************/
91 /* function called to reset the HFC PCI chip. A complete software reset of chip */
92 /* and fifos is done. */
93 /********************************************************************************/
94 static void
95 reset_hfcpci(struct IsdnCardState *cs)
97 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
98 cs->hw.hfcpci.int_m2 = 0; /* interrupt output off ! */
99 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
101 printk(KERN_INFO "HFC_PCI: resetting card\n");
102 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO + PCI_ENA_MASTER); /* enable memory ports + busmaster */
103 Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET); /* Reset On */
104 mdelay(10);
105 Write_hfc(cs, HFCPCI_CIRM, 0); /* Reset Off */
106 mdelay(10);
107 if (Read_hfc(cs, HFCPCI_STATUS) & 2)
108 printk(KERN_WARNING "HFC-PCI init bit busy\n");
110 cs->hw.hfcpci.fifo_en = 0x30; /* only D fifos enabled */
111 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
113 cs->hw.hfcpci.trm = 0 + HFCPCI_BTRANS_THRESMASK; /* no echo connect, threshold */
114 Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
116 Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_TE); /* ST-Bit delay for TE-Mode */
117 cs->hw.hfcpci.sctrl_e = HFCPCI_AUTO_AWAKE;
118 Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e); /* S/T Auto awake */
119 cs->hw.hfcpci.bswapped = 0; /* no exchange */
120 cs->hw.hfcpci.nt_mode = 0; /* we are in TE mode */
121 cs->hw.hfcpci.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
122 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
124 cs->hw.hfcpci.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
125 HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
126 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
128 /* Clear already pending ints */
129 if (Read_hfc(cs, HFCPCI_INT_S1));
131 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 2); /* HFC ST 2 */
132 udelay(10);
133 Write_hfc(cs, HFCPCI_STATES, 2); /* HFC ST 2 */
134 cs->hw.hfcpci.mst_m = HFCPCI_MASTER; /* HFC Master Mode */
136 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
137 cs->hw.hfcpci.sctrl = 0x40; /* set tx_lo mode, error in datasheet ! */
138 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
139 cs->hw.hfcpci.sctrl_r = 0;
140 Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
142 /* Init GCI/IOM2 in master mode */
143 /* Slots 0 and 1 are set for B-chan 1 and 2 */
144 /* D- and monitor/CI channel are not enabled */
145 /* STIO1 is used as output for data, B1+B2 from ST->IOM+HFC */
146 /* STIO2 is used as data input, B1+B2 from IOM->ST */
147 /* ST B-channel send disabled -> continuous 1s */
148 /* The IOM slots are always enabled */
149 cs->hw.hfcpci.conn = 0x36; /* set data flow directions */
150 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
151 Write_hfc(cs, HFCPCI_B1_SSL, 0x80); /* B1-Slot 0 STIO1 out enabled */
152 Write_hfc(cs, HFCPCI_B2_SSL, 0x81); /* B2-Slot 1 STIO1 out enabled */
153 Write_hfc(cs, HFCPCI_B1_RSL, 0x80); /* B1-Slot 0 STIO2 in enabled */
154 Write_hfc(cs, HFCPCI_B2_RSL, 0x81); /* B2-Slot 1 STIO2 in enabled */
156 /* Finally enable IRQ output */
157 cs->hw.hfcpci.int_m2 = HFCPCI_IRQ_ENABLE;
158 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
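/* read INT_S1 once more to clear interrupts that became pending during the reset */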
159 if (Read_hfc(cs, HFCPCI_INT_S1));
162 /***************************************************/
163 /* Timer function called when kernel timer expires */
164 /***************************************************/
165 static void
166 hfcpci_Timer(struct IsdnCardState *cs)
168 cs->hw.hfcpci.timer.expires = jiffies + 75;
169 /* WD RESET */
170 /* WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcpci.ctmt | 0x80);
171 add_timer(&cs->hw.hfcpci.timer);
176 /*********************************/
177 /* schedule a new D-channel task */
178 /*********************************/
179 static void
180 sched_event_D_pci(struct IsdnCardState *cs, int event)
182 test_and_set_bit(event, &cs->event);
183 schedule_work(&cs->tqueue);
186 /*********************************/
187 /* schedule a new b_channel task */
188 /*********************************/
189 static void
190 hfcpci_sched_event(struct BCState *bcs, int event)
192 test_and_set_bit(event, &bcs->event);
193 schedule_work(&bcs->tqueue);
196 /************************************************/
197 /* select the active B-channel entry for a channel */
198 /************************************************/
199 static
200 struct BCState *
201 Sel_BCS(struct IsdnCardState *cs, int channel)
203 if (cs->bcs[0].mode && (cs->bcs[0].channel == channel))
204 return (&cs->bcs[0]);
205 else if (cs->bcs[1].mode && (cs->bcs[1].channel == channel))
206 return (&cs->bcs[1]);
207 else
208 return (NULL);
211 /***************************************/
212 /* clear the desired B-channel rx fifo */
213 /***************************************/
214 static void hfcpci_clear_fifo_rx(struct IsdnCardState *cs, int fifo)
215 { u_char fifo_state;
216 bzfifo_type *bzr;
218 if (fifo) {
219 bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
220 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2RX;
221 } else {
222 bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1;
223 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1RX;
225 if (fifo_state)
226 cs->hw.hfcpci.fifo_en ^= fifo_state;
227 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
228 cs->hw.hfcpci.last_bfifo_cnt[fifo] = 0;
229 bzr->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1;
230 bzr->za[MAX_B_FRAMES].z2 = bzr->za[MAX_B_FRAMES].z1;
231 bzr->f1 = MAX_B_FRAMES;
232 bzr->f2 = bzr->f1; /* init F pointers to remain constant */
233 if (fifo_state)
234 cs->hw.hfcpci.fifo_en |= fifo_state;
235 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
238 /***************************************/
239 /* clear the desired B-channel tx fifo */
240 /***************************************/
241 static void hfcpci_clear_fifo_tx(struct IsdnCardState *cs, int fifo)
242 { u_char fifo_state;
243 bzfifo_type *bzt;
245 if (fifo) {
246 bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2;
247 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2TX;
248 } else {
249 bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1;
250 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1TX;
252 if (fifo_state)
253 cs->hw.hfcpci.fifo_en ^= fifo_state;
254 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
255 bzt->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1;
256 bzt->za[MAX_B_FRAMES].z2 = bzt->za[MAX_B_FRAMES].z1;
257 bzt->f1 = MAX_B_FRAMES;
258 bzt->f2 = bzt->f1; /* init F pointers to remain constant */
259 if (fifo_state)
260 cs->hw.hfcpci.fifo_en |= fifo_state;
261 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
264 /*********************************************/
265 /* read a complete B-frame out of the buffer */
266 /*********************************************/
267 static struct sk_buff *
269 hfcpci_empty_fifo(struct BCState *bcs, bzfifo_type * bz, u_char * bdata, int count)
271 u_char *ptr, *ptr1, new_f2;
272 struct sk_buff *skb;
273 struct IsdnCardState *cs = bcs->cs;
274 int total, maxlen, new_z2;
275 z_type *zp;
277 if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
278 debugl1(cs, "hfcpci_empty_fifo");
279 zp = &bz->za[bz->f2]; /* point to Z-Regs */
280 new_z2 = zp->z2 + count; /* new position in fifo */
281 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
282 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
283 new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
284 if ((count > HSCX_BUFMAX + 3) || (count < 4) ||
285 (*(bdata + (zp->z1 - B_SUB_VAL)))) {
286 if (cs->debug & L1_DEB_WARN)
287 debugl1(cs, "hfcpci_empty_fifo: incoming packet invalid length %d or crc", count);
288 #ifdef ERROR_STATISTIC
289 bcs->err_inv++;
290 #endif
291 bz->za[new_f2].z2 = new_z2;
292 bz->f2 = new_f2; /* next buffer */
293 skb = NULL;
294 } else if (!(skb = dev_alloc_skb(count - 3)))
295 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
296 else {
297 total = count;
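/* the HFC appends 2 CRC bytes and a status byte to each HDLC frame; the status byte at z1 was checked above, so drop all 3 here */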
298 count -= 3;
299 ptr = skb_put(skb, count);
301 if (zp->z2 + count <= B_FIFO_SIZE + B_SUB_VAL)
302 maxlen = count; /* complete transfer */
303 else
304 maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2; /* maximum */
306 ptr1 = bdata + (zp->z2 - B_SUB_VAL); /* start of data */
307 memcpy(ptr, ptr1, maxlen); /* copy data */
308 count -= maxlen;
310 if (count) { /* rest remaining */
311 ptr += maxlen;
312 ptr1 = bdata; /* start of buffer */
313 memcpy(ptr, ptr1, count); /* rest */
315 bz->za[new_f2].z2 = new_z2;
316 bz->f2 = new_f2; /* next buffer */
319 return (skb);
322 /*******************************/
323 /* D-channel receive procedure */
324 /*******************************/
325 static int
327 receive_dmsg(struct IsdnCardState *cs)
329 struct sk_buff *skb;
330 int maxlen;
331 int rcnt, total;
332 int count = 5;
333 u_char *ptr, *ptr1;
334 dfifo_type *df;
335 z_type *zp;
337 df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_rx;
338 if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
339 debugl1(cs, "rec_dmsg blocked");
340 return (1);
342 while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
343 zp = &df->za[df->f2 & D_FREG_MASK];
344 rcnt = zp->z1 - zp->z2;
345 if (rcnt < 0)
346 rcnt += D_FIFO_SIZE;
347 rcnt++;
348 if (cs->debug & L1_DEB_ISAC)
349 debugl1(cs, "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)",
350 df->f1, df->f2, zp->z1, zp->z2, rcnt);
352 if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
353 (df->data[zp->z1])) {
354 if (cs->debug & L1_DEB_WARN)
355 debugl1(cs, "empty_fifo hfcpci packet inv. len %d or crc %d", rcnt, df->data[zp->z1]);
356 #ifdef ERROR_STATISTIC
357 cs->err_rx++;
358 #endif
359 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1); /* next buffer */
360 df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + rcnt) & (D_FIFO_SIZE - 1);
361 } else if ((skb = dev_alloc_skb(rcnt - 3))) {
362 total = rcnt;
363 rcnt -= 3;
364 ptr = skb_put(skb, rcnt);
366 if (zp->z2 + rcnt <= D_FIFO_SIZE)
367 maxlen = rcnt; /* complete transfer */
368 else
369 maxlen = D_FIFO_SIZE - zp->z2; /* maximum */
371 ptr1 = df->data + zp->z2; /* start of data */
372 memcpy(ptr, ptr1, maxlen); /* copy data */
373 rcnt -= maxlen;
375 if (rcnt) { /* rest remaining */
376 ptr += maxlen;
377 ptr1 = df->data; /* start of buffer */
378 memcpy(ptr, ptr1, rcnt); /* rest */
380 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1); /* next buffer */
381 df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + total) & (D_FIFO_SIZE - 1);
383 skb_queue_tail(&cs->rq, skb);
384 sched_event_D_pci(cs, D_RCVBUFREADY);
385 } else
386 printk(KERN_WARNING "HFC-PCI: D receive out of memory\n");
388 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
389 return (1);
392 /*******************************************************************************/
393 /* check for transparent receive data and read at most one threshold-sized block if available */
394 /*******************************************************************************/
395 static int
396 hfcpci_empty_fifo_trans(struct BCState *bcs, bzfifo_type * bz, u_char * bdata)
398 unsigned short *z1r, *z2r;
399 int new_z2, fcnt, maxlen;
400 struct sk_buff *skb;
401 u_char *ptr, *ptr1;
403 z1r = &bz->za[MAX_B_FRAMES].z1; /* pointer to z reg */
404 z2r = z1r + 1;
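/* transparent mode uses the Z1/Z2 counters of the extra descriptor (index MAX_B_FRAMES) as ring buffer fill pointers */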
406 if (!(fcnt = *z1r - *z2r))
407 return (0); /* no data avail */
409 if (fcnt <= 0)
410 fcnt += B_FIFO_SIZE; /* bytes actually buffered */
411 if (fcnt > HFCPCI_BTRANS_THRESHOLD)
412 fcnt = HFCPCI_BTRANS_THRESHOLD; /* limit size */
414 new_z2 = *z2r + fcnt; /* new position in fifo */
415 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
416 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
418 if (!(skb = dev_alloc_skb(fcnt)))
419 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
420 else {
421 ptr = skb_put(skb, fcnt);
422 if (*z2r + fcnt <= B_FIFO_SIZE + B_SUB_VAL)
423 maxlen = fcnt; /* complete transfer */
424 else
425 maxlen = B_FIFO_SIZE + B_SUB_VAL - *z2r; /* maximum */
427 ptr1 = bdata + (*z2r - B_SUB_VAL); /* start of data */
428 memcpy(ptr, ptr1, maxlen); /* copy data */
429 fcnt -= maxlen;
431 if (fcnt) { /* rest remaining */
432 ptr += maxlen;
433 ptr1 = bdata; /* start of buffer */
434 memcpy(ptr, ptr1, fcnt); /* rest */
436 skb_queue_tail(&bcs->rqueue, skb);
437 hfcpci_sched_event(bcs, B_RCVBUFREADY);
440 *z2r = new_z2; /* new position */
441 return (1);
442 } /* hfcpci_empty_fifo_trans */
444 /**********************************/
445 /* B-channel main receive routine */
446 /**********************************/
447 static void
448 main_rec_hfcpci(struct BCState *bcs)
450 struct IsdnCardState *cs = bcs->cs;
451 int rcnt, real_fifo;
452 int receive, count = 5;
453 struct sk_buff *skb;
454 bzfifo_type *bz;
455 u_char *bdata;
456 z_type *zp;
459 if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) {
460 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
461 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2;
462 real_fifo = 1;
463 } else {
464 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1;
465 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b1;
466 real_fifo = 0;
468 Begin:
469 count--;
470 if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
471 debugl1(cs, "rec_data %d blocked", bcs->channel);
472 return;
474 if (bz->f1 != bz->f2) {
475 if (cs->debug & L1_DEB_HSCX)
476 debugl1(cs, "hfcpci rec %d f1(%d) f2(%d)",
477 bcs->channel, bz->f1, bz->f2);
478 zp = &bz->za[bz->f2];
480 rcnt = zp->z1 - zp->z2;
481 if (rcnt < 0)
482 rcnt += B_FIFO_SIZE;
483 rcnt++;
484 if (cs->debug & L1_DEB_HSCX)
485 debugl1(cs, "hfcpci rec %d z1(%x) z2(%x) cnt(%d)",
486 bcs->channel, zp->z1, zp->z2, rcnt);
487 if ((skb = hfcpci_empty_fifo(bcs, bz, bdata, rcnt))) {
488 skb_queue_tail(&bcs->rqueue, skb);
489 hfcpci_sched_event(bcs, B_RCVBUFREADY);
491 rcnt = bz->f1 - bz->f2;
492 if (rcnt < 0)
493 rcnt += MAX_B_FRAMES + 1;
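/* if more than the one frame just read disappeared from the FIFO, the counters are out of sync: clear the receive FIFO */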
494 if (cs->hw.hfcpci.last_bfifo_cnt[real_fifo] > rcnt + 1) {
495 rcnt = 0;
496 hfcpci_clear_fifo_rx(cs, real_fifo);
498 cs->hw.hfcpci.last_bfifo_cnt[real_fifo] = rcnt;
499 if (rcnt > 1)
500 receive = 1;
501 else
502 receive = 0;
503 } else if (bcs->mode == L1_MODE_TRANS)
504 receive = hfcpci_empty_fifo_trans(bcs, bz, bdata);
505 else
506 receive = 0;
507 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
508 if (count && receive)
509 goto Begin;
512 /**************************/
513 /* D-channel send routine */
514 /**************************/
515 static void
516 hfcpci_fill_dfifo(struct IsdnCardState *cs)
518 int fcnt;
519 int count, new_z1, maxlen;
520 dfifo_type *df;
521 u_char *src, *dst, new_f1;
523 if (!cs->tx_skb)
524 return;
525 if (cs->tx_skb->len <= 0)
526 return;
528 df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_tx;
530 if (cs->debug & L1_DEB_ISAC)
531 debugl1(cs, "hfcpci_fill_Dfifo f1(%d) f2(%d) z1(f1)(%x)",
532 df->f1, df->f2,
533 df->za[df->f1 & D_FREG_MASK].z1);
534 fcnt = df->f1 - df->f2; /* frame count actually buffered */
535 if (fcnt < 0)
536 fcnt += (MAX_D_FRAMES + 1); /* if wrap around */
537 if (fcnt > (MAX_D_FRAMES - 1)) {
538 if (cs->debug & L1_DEB_ISAC)
539 debugl1(cs, "hfcpci_fill_Dfifo more than 14 frames");
540 #ifdef ERROR_STATISTIC
541 cs->err_tx++;
542 #endif
543 return;
545 /* now determine free bytes in FIFO buffer */
546 count = df->za[df->f2 & D_FREG_MASK].z2 - df->za[df->f1 & D_FREG_MASK].z1 - 1;
547 if (count <= 0)
548 count += D_FIFO_SIZE; /* count now contains available bytes */
550 if (cs->debug & L1_DEB_ISAC)
551 debugl1(cs, "hfcpci_fill_Dfifo count(%ld/%d)",
552 cs->tx_skb->len, count);
553 if (count < cs->tx_skb->len) {
554 if (cs->debug & L1_DEB_ISAC)
555 debugl1(cs, "hfcpci_fill_Dfifo no fifo mem");
556 return;
558 count = cs->tx_skb->len; /* get frame len */
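/* compute the new Z1 write position (wrapping at D_FIFO_SIZE) and the next F1 frame counter */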
559 new_z1 = (df->za[df->f1 & D_FREG_MASK].z1 + count) & (D_FIFO_SIZE - 1);
560 new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
561 src = cs->tx_skb->data; /* source pointer */
562 dst = df->data + df->za[df->f1 & D_FREG_MASK].z1;
563 maxlen = D_FIFO_SIZE - df->za[df->f1 & D_FREG_MASK].z1; /* end fifo */
564 if (maxlen > count)
565 maxlen = count; /* limit size */
566 memcpy(dst, src, maxlen); /* first copy */
568 count -= maxlen; /* remaining bytes */
569 if (count) {
570 dst = df->data; /* start of buffer */
571 src += maxlen; /* new position */
572 memcpy(dst, src, count);
574 df->za[new_f1 & D_FREG_MASK].z1 = new_z1; /* for next buffer */
575 df->za[df->f1 & D_FREG_MASK].z1 = new_z1; /* new pos actual buffer */
576 df->f1 = new_f1; /* next frame */
578 dev_kfree_skb_any(cs->tx_skb);
579 cs->tx_skb = NULL;
582 /**************************/
583 /* B-channel send routine */
584 /**************************/
585 static void
586 hfcpci_fill_fifo(struct BCState *bcs)
588 struct IsdnCardState *cs = bcs->cs;
589 int maxlen, fcnt;
590 int count, new_z1;
591 bzfifo_type *bz;
592 u_char *bdata;
593 u_char new_f1, *src, *dst;
594 unsigned short *z1t, *z2t;
596 if (!bcs->tx_skb)
597 return;
598 if (bcs->tx_skb->len <= 0)
599 return;
601 if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) {
602 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2;
603 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b2;
604 } else {
605 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1;
606 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b1;
609 if (bcs->mode == L1_MODE_TRANS) {
610 z1t = &bz->za[MAX_B_FRAMES].z1;
611 z2t = z1t + 1;
612 if (cs->debug & L1_DEB_HSCX)
613 debugl1(cs, "hfcpci_fill_fifo_trans %d z1(%x) z2(%x)",
614 bcs->channel, *z1t, *z2t);
615 fcnt = *z2t - *z1t;
616 if (fcnt <= 0)
617 fcnt += B_FIFO_SIZE; /* fcnt contains available bytes in fifo */
618 fcnt = B_FIFO_SIZE - fcnt; /* remaining bytes to send */
620 while ((fcnt < 2 * HFCPCI_BTRANS_THRESHOLD) && (bcs->tx_skb)) {
621 if (bcs->tx_skb->len < B_FIFO_SIZE - fcnt) {
622 /* data is suitable for fifo */
623 count = bcs->tx_skb->len;
625 new_z1 = *z1t + count; /* new buffer Position */
626 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
627 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
628 src = bcs->tx_skb->data; /* source pointer */
629 dst = bdata + (*z1t - B_SUB_VAL);
630 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - *z1t; /* end of fifo */
631 if (maxlen > count)
632 maxlen = count; /* limit size */
633 memcpy(dst, src, maxlen); /* first copy */
635 count -= maxlen; /* remaining bytes */
636 if (count) {
637 dst = bdata; /* start of buffer */
638 src += maxlen; /* new position */
639 memcpy(dst, src, count);
641 bcs->tx_cnt -= bcs->tx_skb->len;
642 fcnt += bcs->tx_skb->len;
643 *z1t = new_z1; /* now send data */
644 } else if (cs->debug & L1_DEB_HSCX)
645 debugl1(cs, "hfcpci_fill_fifo_trans %d frame length %d discarded",
646 bcs->channel, bcs->tx_skb->len);
648 if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
649 (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
650 u_long flags;
651 spin_lock_irqsave(&bcs->aclock, flags);
652 bcs->ackcnt += bcs->tx_skb->len;
653 spin_unlock_irqrestore(&bcs->aclock, flags);
654 schedule_event(bcs, B_ACKPENDING);
657 dev_kfree_skb_any(bcs->tx_skb);
658 bcs->tx_skb = skb_dequeue(&bcs->squeue); /* fetch next data */
660 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
661 return;
663 if (cs->debug & L1_DEB_HSCX)
664 debugl1(cs, "hfcpci_fill_fifo_hdlc %d f1(%d) f2(%d) z1(f1)(%x)",
665 bcs->channel, bz->f1, bz->f2,
666 bz->za[bz->f1].z1);
668 fcnt = bz->f1 - bz->f2; /* frame count actually buffered */
669 if (fcnt < 0)
670 fcnt += (MAX_B_FRAMES + 1); /* if wrap around */
671 if (fcnt > (MAX_B_FRAMES - 1)) {
672 if (cs->debug & L1_DEB_HSCX)
673 debugl1(cs, "hfcpci_fill_Bfifo more than 14 frames");
674 return;
676 /* now determine free bytes in FIFO buffer */
677 count = bz->za[bz->f2].z2 - bz->za[bz->f1].z1 - 1;
678 if (count <= 0)
679 count += B_FIFO_SIZE; /* count now contains available bytes */
681 if (cs->debug & L1_DEB_HSCX)
682 debugl1(cs, "hfcpci_fill_fifo %d count(%ld/%d),%lx",
683 bcs->channel, bcs->tx_skb->len,
684 count, current->state);
686 if (count < bcs->tx_skb->len) {
687 if (cs->debug & L1_DEB_HSCX)
688 debugl1(cs, "hfcpci_fill_fifo no fifo mem");
689 return;
691 count = bcs->tx_skb->len; /* get frame len */
692 new_z1 = bz->za[bz->f1].z1 + count; /* new buffer Position */
693 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
694 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
696 new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
697 src = bcs->tx_skb->data; /* source pointer */
698 dst = bdata + (bz->za[bz->f1].z1 - B_SUB_VAL);
699 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - bz->za[bz->f1].z1; /* end fifo */
700 if (maxlen > count)
701 maxlen = count; /* limit size */
702 memcpy(dst, src, maxlen); /* first copy */
704 count -= maxlen; /* remaining bytes */
705 if (count) {
706 dst = bdata; /* start of buffer */
707 src += maxlen; /* new position */
708 memcpy(dst, src, count);
710 bcs->tx_cnt -= bcs->tx_skb->len;
711 if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
712 (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
713 u_long flags;
714 spin_lock_irqsave(&bcs->aclock, flags);
715 bcs->ackcnt += bcs->tx_skb->len;
716 spin_unlock_irqrestore(&bcs->aclock, flags);
717 schedule_event(bcs, B_ACKPENDING);
720 bz->za[new_f1].z1 = new_z1; /* for next buffer */
721 bz->f1 = new_f1; /* next frame */
723 dev_kfree_skb_any(bcs->tx_skb);
724 bcs->tx_skb = NULL;
725 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
728 /**********************************************/
729 /* D-channel l1 state call for leased NT-mode */
730 /**********************************************/
731 static void
732 dch_nt_l2l1(struct PStack *st, int pr, void *arg)
734 struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
736 switch (pr) {
737 case (PH_DATA | REQUEST):
738 case (PH_PULL | REQUEST):
739 case (PH_PULL | INDICATION):
740 st->l1.l1hw(st, pr, arg);
741 break;
742 case (PH_ACTIVATE | REQUEST):
743 st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
744 break;
745 case (PH_TESTLOOP | REQUEST):
746 if (1 & (long) arg)
747 debugl1(cs, "PH_TEST_LOOP B1");
748 if (2 & (long) arg)
749 debugl1(cs, "PH_TEST_LOOP B2");
750 if (!(3 & (long) arg))
751 debugl1(cs, "PH_TEST_LOOP DISABLED");
752 st->l1.l1hw(st, HW_TESTLOOP | REQUEST, arg);
753 break;
754 default:
755 if (cs->debug)
756 debugl1(cs, "dch_nt_l2l1 msg %04X unhandled", pr);
757 break;
763 /***********************/
764 /* set/reset echo mode */
765 /***********************/
766 static int
767 hfcpci_auxcmd(struct IsdnCardState *cs, isdn_ctrl * ic)
769 u_long flags;
770 int i = *(unsigned int *) ic->parm.num;
772 if ((ic->arg == 98) &&
773 (!(cs->hw.hfcpci.int_m1 & (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC + HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC)))) {
774 spin_lock_irqsave(&cs->lock, flags);
775 Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_NT); /* ST-Bit delay for NT-Mode */
776 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 0); /* HFC ST G0 */
777 udelay(10);
778 cs->hw.hfcpci.sctrl |= SCTRL_MODE_NT;
779 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl); /* set NT-mode */
780 udelay(10);
781 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 1); /* HFC ST G1 */
782 udelay(10);
783 Write_hfc(cs, HFCPCI_STATES, 1 | HFCPCI_ACTIVATE | HFCPCI_DO_ACTION);
784 cs->dc.hfcpci.ph_state = 1;
785 cs->hw.hfcpci.nt_mode = 1;
786 cs->hw.hfcpci.nt_timer = 0;
787 cs->stlist->l2.l2l1 = dch_nt_l2l1;
788 spin_unlock_irqrestore(&cs->lock, flags);
789 debugl1(cs, "NT mode activated");
790 return (0);
792 if ((cs->chanlimit > 1) || (cs->hw.hfcpci.bswapped) ||
793 (cs->hw.hfcpci.nt_mode) || (ic->arg != 12))
794 return (-EINVAL);
796 spin_lock_irqsave(&cs->lock, flags);
797 if (i) {
798 cs->logecho = 1;
799 cs->hw.hfcpci.trm |= 0x20; /* enable echo chan */
800 cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_B2REC;
801 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2RX;
802 } else {
803 cs->logecho = 0;
804 cs->hw.hfcpci.trm &= ~0x20; /* disable echo chan */
805 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_B2REC;
806 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2RX;
808 cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA;
809 cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA;
810 cs->hw.hfcpci.conn |= 0x10; /* B2-IOM -> B2-ST */
811 cs->hw.hfcpci.ctmt &= ~2;
812 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
813 Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
814 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
815 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
816 Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
817 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
818 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
819 spin_unlock_irqrestore(&cs->lock, flags);
820 return (0);
821 } /* hfcpci_auxcmd */
823 /*****************************/
824 /* E-channel receive routine */
825 /*****************************/
826 static void
827 receive_emsg(struct IsdnCardState *cs)
829 int rcnt;
830 int receive, count = 5;
831 bzfifo_type *bz;
832 u_char *bdata;
833 z_type *zp;
834 u_char *ptr, *ptr1, new_f2;
835 int total, maxlen, new_z2;
836 u_char e_buffer[256];
838 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
839 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2;
840 Begin:
841 count--;
842 if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
843 debugl1(cs, "echo_rec_data blocked");
844 return;
846 if (bz->f1 != bz->f2) {
847 if (cs->debug & L1_DEB_ISAC)
848 debugl1(cs, "hfcpci e_rec f1(%d) f2(%d)",
849 bz->f1, bz->f2);
850 zp = &bz->za[bz->f2];
852 rcnt = zp->z1 - zp->z2;
853 if (rcnt < 0)
854 rcnt += B_FIFO_SIZE;
855 rcnt++;
856 if (cs->debug & L1_DEB_ISAC)
857 debugl1(cs, "hfcpci e_rec z1(%x) z2(%x) cnt(%d)",
858 zp->z1, zp->z2, rcnt);
859 new_z2 = zp->z2 + rcnt; /* new position in fifo */
860 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
861 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
862 new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
863 if ((rcnt > 256 + 3) || (count < 4) ||
864 (*(bdata + (zp->z1 - B_SUB_VAL)))) {
865 if (cs->debug & L1_DEB_WARN)
866 debugl1(cs, "hfcpci_empty_echan: incoming packet invalid length %d or crc", rcnt);
867 bz->za[new_f2].z2 = new_z2;
868 bz->f2 = new_f2; /* next buffer */
869 } else {
870 total = rcnt;
871 rcnt -= 3;
872 ptr = e_buffer;
874 if (zp->z2 <= B_FIFO_SIZE + B_SUB_VAL)
875 maxlen = rcnt; /* complete transfer */
876 else
877 maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2; /* maximum */
879 ptr1 = bdata + (zp->z2 - B_SUB_VAL); /* start of data */
880 memcpy(ptr, ptr1, maxlen); /* copy data */
881 rcnt -= maxlen;
883 if (rcnt) { /* rest remaining */
884 ptr += maxlen;
885 ptr1 = bdata; /* start of buffer */
886 memcpy(ptr, ptr1, rcnt); /* rest */
888 bz->za[new_f2].z2 = new_z2;
889 bz->f2 = new_f2; /* next buffer */
890 if (cs->debug & DEB_DLOG_HEX) {
891 ptr = cs->dlog;
892 if ((total - 3) < MAX_DLOG_SPACE / 3 - 10) {
893 *ptr++ = 'E';
894 *ptr++ = 'C';
895 *ptr++ = 'H';
896 *ptr++ = 'O';
897 *ptr++ = ':';
898 ptr += QuickHex(ptr, e_buffer, total - 3);
899 ptr--;
900 *ptr++ = '\n';
901 *ptr = 0;
902 HiSax_putstatus(cs, NULL, cs->dlog);
903 } else
904 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
908 rcnt = bz->f1 - bz->f2;
909 if (rcnt < 0)
910 rcnt += MAX_B_FRAMES + 1;
911 if (rcnt > 1)
912 receive = 1;
913 else
914 receive = 0;
915 } else
916 receive = 0;
917 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
918 if (count && receive)
919 goto Begin;
920 } /* receive_emsg */
922 /*********************/
923 /* Interrupt handler */
924 /*********************/
925 static irqreturn_t
926 hfcpci_interrupt(int intno, void *dev_id)
928 u_long flags;
929 struct IsdnCardState *cs = dev_id;
930 u_char exval;
931 struct BCState *bcs;
932 int count = 15;
933 u_char val, stat;
935 if (!(cs->hw.hfcpci.int_m2 & 0x08)) {
936 debugl1(cs, "HFC-PCI: int_m2 %x not initialised", cs->hw.hfcpci.int_m2);
937 return IRQ_NONE; /* not initialised */
939 spin_lock_irqsave(&cs->lock, flags);
940 if (HFCPCI_ANYINT & (stat = Read_hfc(cs, HFCPCI_STATUS))) {
941 val = Read_hfc(cs, HFCPCI_INT_S1);
942 if (cs->debug & L1_DEB_ISAC)
943 debugl1(cs, "HFC-PCI: stat(%02x) s1(%02x)", stat, val);
944 } else {
945 spin_unlock_irqrestore(&cs->lock, flags);
946 return IRQ_NONE;
948 if (cs->debug & L1_DEB_ISAC)
949 debugl1(cs, "HFC-PCI irq %x %s", val,
950 test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags) ?
951 "locked" : "unlocked");
952 val &= cs->hw.hfcpci.int_m1;
953 if (val & 0x40) { /* state machine irq */
954 exval = Read_hfc(cs, HFCPCI_STATES) & 0xf;
955 if (cs->debug & L1_DEB_ISAC)
956 debugl1(cs, "ph_state chg %d->%d", cs->dc.hfcpci.ph_state,
957 exval);
958 cs->dc.hfcpci.ph_state = exval;
959 sched_event_D_pci(cs, D_L1STATECHANGE);
960 val &= ~0x40;
962 if (val & 0x80) { /* timer irq */
963 if (cs->hw.hfcpci.nt_mode) {
964 if ((--cs->hw.hfcpci.nt_timer) < 0)
965 sched_event_D_pci(cs, D_L1STATECHANGE);
967 val &= ~0x80;
968 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
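/* work through the remaining interrupt bits; if hardware access is locked, the bits are parked in int_s1 and handled on a later pass */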
970 while (val) {
971 if (test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
972 cs->hw.hfcpci.int_s1 |= val;
973 spin_unlock_irqrestore(&cs->lock, flags);
974 return IRQ_HANDLED;
976 if (cs->hw.hfcpci.int_s1 & 0x18) {
977 exval = val;
978 val = cs->hw.hfcpci.int_s1;
979 cs->hw.hfcpci.int_s1 = exval;
981 if (val & 0x08) {
982 if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 1 : 0))) {
983 if (cs->debug)
984 debugl1(cs, "hfcpci spurious 0x08 IRQ");
985 } else
986 main_rec_hfcpci(bcs);
988 if (val & 0x10) {
989 if (cs->logecho)
990 receive_emsg(cs);
991 else if (!(bcs = Sel_BCS(cs, 1))) {
992 if (cs->debug)
993 debugl1(cs, "hfcpci spurious 0x10 IRQ");
994 } else
995 main_rec_hfcpci(bcs);
997 if (val & 0x01) {
998 if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 1 : 0))) {
999 if (cs->debug)
1000 debugl1(cs, "hfcpci spurious 0x01 IRQ");
1001 } else {
1002 if (bcs->tx_skb) {
1003 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1004 hfcpci_fill_fifo(bcs);
1005 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1006 } else
1007 debugl1(cs, "fill_data %d blocked", bcs->channel);
1008 } else {
1009 if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
1010 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1011 hfcpci_fill_fifo(bcs);
1012 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1013 } else
1014 debugl1(cs, "fill_data %d blocked", bcs->channel);
1015 } else {
1016 hfcpci_sched_event(bcs, B_XMTBUFREADY);
1021 if (val & 0x02) {
1022 if (!(bcs = Sel_BCS(cs, 1))) {
1023 if (cs->debug)
1024 debugl1(cs, "hfcpci spurious 0x02 IRQ");
1025 } else {
1026 if (bcs->tx_skb) {
1027 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1028 hfcpci_fill_fifo(bcs);
1029 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1030 } else
1031 debugl1(cs, "fill_data %d blocked", bcs->channel);
1032 } else {
1033 if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
1034 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1035 hfcpci_fill_fifo(bcs);
1036 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1037 } else
1038 debugl1(cs, "fill_data %d blocked", bcs->channel);
1039 } else {
1040 hfcpci_sched_event(bcs, B_XMTBUFREADY);
1045 if (val & 0x20) { /* receive dframe */
1046 receive_dmsg(cs);
1048 if (val & 0x04) { /* dframe transmitted */
1049 if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
1050 del_timer(&cs->dbusytimer);
1051 if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
1052 sched_event_D_pci(cs, D_CLEARBUSY);
1053 if (cs->tx_skb) {
1054 if (cs->tx_skb->len) {
1055 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1056 hfcpci_fill_dfifo(cs);
1057 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1058 } else {
1059 debugl1(cs, "hfcpci_fill_dfifo irq blocked");
1061 goto afterXPR;
1062 } else {
1063 dev_kfree_skb_irq(cs->tx_skb);
1064 cs->tx_cnt = 0;
1065 cs->tx_skb = NULL;
1068 if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
1069 cs->tx_cnt = 0;
1070 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1071 hfcpci_fill_dfifo(cs);
1072 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1073 } else {
1074 debugl1(cs, "hfcpci_fill_dfifo irq blocked");
1076 } else
1077 sched_event_D_pci(cs, D_XMTBUFREADY);
1079 afterXPR:
1080 if (cs->hw.hfcpci.int_s1 && count--) {
1081 val = cs->hw.hfcpci.int_s1;
1082 cs->hw.hfcpci.int_s1 = 0;
1083 if (cs->debug & L1_DEB_ISAC)
1084 debugl1(cs, "HFC-PCI irq %x loop %d", val, 15 - count);
1085 } else
1086 val = 0;
1088 spin_unlock_irqrestore(&cs->lock, flags);
1089 return IRQ_HANDLED;
1092 /********************************************************************/
1093 /* timer callback for D-chan busy resolution. Currently a no-op */
1094 /********************************************************************/
1095 static void
1096 hfcpci_dbusy_timer(struct IsdnCardState *cs)
1100 /*************************************/
1101 /* Layer 1 D-channel hardware access */
1102 /*************************************/
1103 static void
1104 HFCPCI_l1hw(struct PStack *st, int pr, void *arg)
1106 u_long flags;
1107 struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
1108 struct sk_buff *skb = arg;
1110 switch (pr) {
1111 case (PH_DATA | REQUEST):
1112 if (cs->debug & DEB_DLOG_HEX)
1113 LogFrame(cs, skb->data, skb->len);
1114 if (cs->debug & DEB_DLOG_VERBOSE)
1115 dlogframe(cs, skb, 0);
1116 spin_lock_irqsave(&cs->lock, flags);
1117 if (cs->tx_skb) {
1118 skb_queue_tail(&cs->sq, skb);
1119 #ifdef L2FRAME_DEBUG /* psa */
1120 if (cs->debug & L1_DEB_LAPD)
1121 Logl2Frame(cs, skb, "PH_DATA Queued", 0);
1122 #endif
1123 } else {
1124 cs->tx_skb = skb;
1125 cs->tx_cnt = 0;
1126 #ifdef L2FRAME_DEBUG /* psa */
1127 if (cs->debug & L1_DEB_LAPD)
1128 Logl2Frame(cs, skb, "PH_DATA", 0);
1129 #endif
1130 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1131 hfcpci_fill_dfifo(cs);
1132 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1133 } else
1134 debugl1(cs, "hfcpci_fill_dfifo blocked");
1137 spin_unlock_irqrestore(&cs->lock, flags);
1138 break;
1139 case (PH_PULL | INDICATION):
1140 spin_lock_irqsave(&cs->lock, flags);
1141 if (cs->tx_skb) {
1142 if (cs->debug & L1_DEB_WARN)
1143 debugl1(cs, " l2l1 tx_skb exists, this shouldn't happen");
1144 skb_queue_tail(&cs->sq, skb);
1145 spin_unlock_irqrestore(&cs->lock, flags);
1146 break;
1148 if (cs->debug & DEB_DLOG_HEX)
1149 LogFrame(cs, skb->data, skb->len);
1150 if (cs->debug & DEB_DLOG_VERBOSE)
1151 dlogframe(cs, skb, 0);
1152 cs->tx_skb = skb;
1153 cs->tx_cnt = 0;
1154 #ifdef L2FRAME_DEBUG /* psa */
1155 if (cs->debug & L1_DEB_LAPD)
1156 Logl2Frame(cs, skb, "PH_DATA_PULLED", 0);
1157 #endif
1158 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1159 hfcpci_fill_dfifo(cs);
1160 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1161 } else
1162 debugl1(cs, "hfcpci_fill_dfifo blocked");
1163 spin_unlock_irqrestore(&cs->lock, flags);
1164 break;
1165 case (PH_PULL | REQUEST):
1166 #ifdef L2FRAME_DEBUG /* psa */
1167 if (cs->debug & L1_DEB_LAPD)
1168 debugl1(cs, "-> PH_REQUEST_PULL");
1169 #endif
1170 if (!cs->tx_skb) {
1171 test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1172 st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
1173 } else
1174 test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1175 break;
1176 case (HW_RESET | REQUEST):
1177 spin_lock_irqsave(&cs->lock, flags);
1178 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3); /* HFC ST 3 */
1179 udelay(6);
1180 Write_hfc(cs, HFCPCI_STATES, 3); /* HFC ST 3 */
1181 cs->hw.hfcpci.mst_m |= HFCPCI_MASTER;
1182 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1183 Write_hfc(cs, HFCPCI_STATES, HFCPCI_ACTIVATE | HFCPCI_DO_ACTION);
1184 spin_unlock_irqrestore(&cs->lock, flags);
1185 l1_msg(cs, HW_POWERUP | CONFIRM, NULL);
1186 break;
1187 case (HW_ENABLE | REQUEST):
1188 spin_lock_irqsave(&cs->lock, flags);
1189 Write_hfc(cs, HFCPCI_STATES, HFCPCI_DO_ACTION);
1190 spin_unlock_irqrestore(&cs->lock, flags);
1191 break;
1192 case (HW_DEACTIVATE | REQUEST):
1193 spin_lock_irqsave(&cs->lock, flags);
1194 cs->hw.hfcpci.mst_m &= ~HFCPCI_MASTER;
1195 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1196 spin_unlock_irqrestore(&cs->lock, flags);
1197 break;
1198 case (HW_INFO3 | REQUEST):
1199 spin_lock_irqsave(&cs->lock, flags);
1200 cs->hw.hfcpci.mst_m |= HFCPCI_MASTER;
1201 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1202 spin_unlock_irqrestore(&cs->lock, flags);
1203 break;
1204 case (HW_TESTLOOP | REQUEST):
1205 spin_lock_irqsave(&cs->lock, flags);
1206 switch ((long) arg) {
1207 case (1):
1208 Write_hfc(cs, HFCPCI_B1_SSL, 0x80); /* tx slot */
1209 Write_hfc(cs, HFCPCI_B1_RSL, 0x80); /* rx slot */
1210 cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~7) | 1;
1211 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
1212 break;
1214 case (2):
1215 Write_hfc(cs, HFCPCI_B2_SSL, 0x81); /* tx slot */
1216 Write_hfc(cs, HFCPCI_B2_RSL, 0x81); /* rx slot */
1217 cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~0x38) | 0x08;
1218 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
1219 break;
1221 default:
1222 spin_unlock_irqrestore(&cs->lock, flags);
1223 if (cs->debug & L1_DEB_WARN)
1224 debugl1(cs, "hfcpci_l1hw loop invalid %4lx", (long) arg);
1225 return;
1227 cs->hw.hfcpci.trm |= 0x80; /* enable IOM-loop */
1228 Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
1229 spin_unlock_irqrestore(&cs->lock, flags);
1230 break;
1231 default:
1232 if (cs->debug & L1_DEB_WARN)
1233 debugl1(cs, "hfcpci_l1hw unknown pr %4x", pr);
1234 break;
1238 /***********************************************/
1239 /* called during init setting l1 stack pointer */
1240 /***********************************************/
1241 static void
1242 setstack_hfcpci(struct PStack *st, struct IsdnCardState *cs)
1244 st->l1.l1hw = HFCPCI_l1hw;
1247 /**************************************/
1248 /* send B-channel data if not blocked */
1249 /**************************************/
1250 static void
1251 hfcpci_send_data(struct BCState *bcs)
1253 struct IsdnCardState *cs = bcs->cs;
1255 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1256 hfcpci_fill_fifo(bcs);
1257 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1258 } else
1259 debugl1(cs, "send_data %d blocked", bcs->channel);
1262 /***************************************************************/
1263 /* activate/deactivate hardware for selected channels and mode */
1264 /***************************************************************/
1265 static void
1266 mode_hfcpci(struct BCState *bcs, int mode, int bc)
1268 struct IsdnCardState *cs = bcs->cs;
1269 int fifo2;
1271 if (cs->debug & L1_DEB_HSCX)
1272 debugl1(cs, "HFCPCI bchannel mode %d bchan %d/%d",
1273 mode, bc, bcs->channel);
1274 bcs->mode = mode;
1275 bcs->channel = bc;
1276 fifo2 = bc;
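/* cards limited to a single B-channel serve B2 through the B1 FIFO by swapping the channels (bswapped) */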
1277 if (cs->chanlimit > 1) {
1278 cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
1279 cs->hw.hfcpci.sctrl_e &= ~0x80;
1280 } else {
1281 if (bc) {
1282 if (mode != L1_MODE_NULL) {
1283 cs->hw.hfcpci.bswapped = 1; /* B1 and B2 exchanged */
1284 cs->hw.hfcpci.sctrl_e |= 0x80;
1285 } else {
1286 cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
1287 cs->hw.hfcpci.sctrl_e &= ~0x80;
1289 fifo2 = 0;
1290 } else {
1291 cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
1292 cs->hw.hfcpci.sctrl_e &= ~0x80;
1295 switch (mode) {
1296 case (L1_MODE_NULL):
1297 if (bc) {
1298 cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA;
1299 cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA;
1300 } else {
1301 cs->hw.hfcpci.sctrl &= ~SCTRL_B1_ENA;
1302 cs->hw.hfcpci.sctrl_r &= ~SCTRL_B1_ENA;
1304 if (fifo2) {
1305 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2;
1306 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1307 } else {
1308 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1;
1309 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1311 break;
1312 case (L1_MODE_TRANS):
1313 hfcpci_clear_fifo_rx(cs, fifo2);
1314 hfcpci_clear_fifo_tx(cs, fifo2);
1315 if (bc) {
1316 cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
1317 cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
1318 } else {
1319 cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
1320 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1322 if (fifo2) {
1323 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
1324 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1325 cs->hw.hfcpci.ctmt |= 2;
1326 cs->hw.hfcpci.conn &= ~0x18;
1327 } else {
1328 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
1329 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1330 cs->hw.hfcpci.ctmt |= 1;
1331 cs->hw.hfcpci.conn &= ~0x03;
1333 break;
1334 case (L1_MODE_HDLC):
1335 hfcpci_clear_fifo_rx(cs, fifo2);
1336 hfcpci_clear_fifo_tx(cs, fifo2);
1337 if (bc) {
1338 cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
1339 cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
1340 } else {
1341 cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
1342 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1344 if (fifo2) {
1345 cs->hw.hfcpci.last_bfifo_cnt[1] = 0;
1346 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
1347 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1348 cs->hw.hfcpci.ctmt &= ~2;
1349 cs->hw.hfcpci.conn &= ~0x18;
1350 } else {
1351 cs->hw.hfcpci.last_bfifo_cnt[0] = 0;
1352 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
1353 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1354 cs->hw.hfcpci.ctmt &= ~1;
1355 cs->hw.hfcpci.conn &= ~0x03;
1357 break;
1358 case (L1_MODE_EXTRN):
1359 if (bc) {
1360 cs->hw.hfcpci.conn |= 0x10;
1361 cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
1362 cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
1363 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2;
1364 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1365 } else {
1366 cs->hw.hfcpci.conn |= 0x02;
1367 cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
1368 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1369 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1;
1370 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1372 break;
1374 Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e);
1375 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1376 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
1377 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
1378 Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
1379 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
1380 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
1383 /******************************/
1384 /* Layer2 -> Layer 1 Transfer */
1385 /******************************/
1386 static void
1387 hfcpci_l2l1(struct PStack *st, int pr, void *arg)
1389 struct BCState *bcs = st->l1.bcs;
1390 u_long flags;
1391 struct sk_buff *skb = arg;
1393 switch (pr) {
1394 case (PH_DATA | REQUEST):
1395 spin_lock_irqsave(&bcs->cs->lock, flags);
1396 if (bcs->tx_skb) {
1397 skb_queue_tail(&bcs->squeue, skb);
1398 } else {
1399 bcs->tx_skb = skb;
1400 // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
1401 bcs->cs->BC_Send_Data(bcs);
1403 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1404 break;
1405 case (PH_PULL | INDICATION):
1406 spin_lock_irqsave(&bcs->cs->lock, flags);
1407 if (bcs->tx_skb) {
1408 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1409 printk(KERN_WARNING "hfc_l2l1: this shouldn't happen\n");
1410 break;
1412 // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
1413 bcs->tx_skb = skb;
1414 bcs->cs->BC_Send_Data(bcs);
1415 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1416 break;
1417 case (PH_PULL | REQUEST):
1418 if (!bcs->tx_skb) {
1419 test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1420 st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
1421 } else
1422 test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1423 break;
1424 case (PH_ACTIVATE | REQUEST):
1425 spin_lock_irqsave(&bcs->cs->lock, flags);
1426 test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
1427 mode_hfcpci(bcs, st->l1.mode, st->l1.bc);
1428 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1429 l1_msg_b(st, pr, arg);
1430 break;
1431 case (PH_DEACTIVATE | REQUEST):
1432 l1_msg_b(st, pr, arg);
1433 break;
1434 case (PH_DEACTIVATE | CONFIRM):
1435 spin_lock_irqsave(&bcs->cs->lock, flags);
1436 test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
1437 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1438 mode_hfcpci(bcs, 0, st->l1.bc);
1439 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1440 st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
1441 break;
1445 /******************************************/
1446 /* deactivate B-channel access and queues */
1447 /******************************************/
1448 static void
1449 close_hfcpci(struct BCState *bcs)
1451 mode_hfcpci(bcs, 0, bcs->channel);
1452 if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
1453 skb_queue_purge(&bcs->rqueue);
1454 skb_queue_purge(&bcs->squeue);
1455 if (bcs->tx_skb) {
1456 dev_kfree_skb_any(bcs->tx_skb);
1457 bcs->tx_skb = NULL;
1458 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1463 /*************************************/
1464 /* init B-channel queues and control */
1465 /*************************************/
1466 static int
1467 open_hfcpcistate(struct IsdnCardState *cs, struct BCState *bcs)
1469 if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
1470 skb_queue_head_init(&bcs->rqueue);
1471 skb_queue_head_init(&bcs->squeue);
1473 bcs->tx_skb = NULL;
1474 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1475 bcs->event = 0;
1476 bcs->tx_cnt = 0;
1477 return (0);
1480 /*********************************/
1481 /* inits the stack for B-channel */
1482 /*********************************/
1483 static int
1484 setstack_2b(struct PStack *st, struct BCState *bcs)
1486 bcs->channel = st->l1.bc;
1487 if (open_hfcpcistate(st->l1.hardware, bcs))
1488 return (-1);
1489 st->l1.bcs = bcs;
1490 st->l2.l2l1 = hfcpci_l2l1;
1491 setstack_manager(st);
1492 bcs->st = st;
1493 setstack_l1_B(st);
1494 return (0);
1497 /***************************/
1498 /* handle L1 state changes */
1499 /***************************/
1500 static void
1501 hfcpci_bh(struct work_struct *work)
1503 struct IsdnCardState *cs =
1504 container_of(work, struct IsdnCardState, tqueue);
1505 u_long flags;
1506 // struct PStack *stptr;
1508 if (!cs)
1509 return;
1510 if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
1511 if (!cs->hw.hfcpci.nt_mode)
1512 switch (cs->dc.hfcpci.ph_state) {
1513 case (0):
1514 l1_msg(cs, HW_RESET | INDICATION, NULL);
1515 break;
1516 case (3):
1517 l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL);
1518 break;
1519 case (8):
1520 l1_msg(cs, HW_RSYNC | INDICATION, NULL);
1521 break;
1522 case (6):
1523 l1_msg(cs, HW_INFO2 | INDICATION, NULL);
1524 break;
1525 case (7):
1526 l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL);
1527 break;
1528 default:
1529 break;
1530 } else {
1531 spin_lock_irqsave(&cs->lock, flags);
1532 switch (cs->dc.hfcpci.ph_state) {
1533 case (2):
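/* state G2: if the guard timer already expired force state 4, otherwise start the T1 timer and allow the G2 -> G3 transition */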
1534 if (cs->hw.hfcpci.nt_timer < 0) {
1535 cs->hw.hfcpci.nt_timer = 0;
1536 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
1537 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1538 /* Clear already pending ints */
1539 if (Read_hfc(cs, HFCPCI_INT_S1));
1540 Write_hfc(cs, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
1541 udelay(10);
1542 Write_hfc(cs, HFCPCI_STATES, 4);
1543 cs->dc.hfcpci.ph_state = 4;
1544 } else {
1545 cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_TIMER;
1546 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1547 cs->hw.hfcpci.ctmt &= ~HFCPCI_AUTO_TIMER;
1548 cs->hw.hfcpci.ctmt |= HFCPCI_TIM3_125;
1549 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
1550 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
1551 cs->hw.hfcpci.nt_timer = NT_T1_COUNT;
1552 Write_hfc(cs, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3); /* allow G2 -> G3 transition */
1554 break;
1555 case (1):
1556 case (3):
1557 case (4):
1558 cs->hw.hfcpci.nt_timer = 0;
1559 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
1560 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1561 break;
1562 default:
1563 break;
1565 spin_unlock_irqrestore(&cs->lock, flags);
1568 if (test_and_clear_bit(D_RCVBUFREADY, &cs->event))
1569 DChannel_proc_rcv(cs);
1570 if (test_and_clear_bit(D_XMTBUFREADY, &cs->event))
1571 DChannel_proc_xmt(cs);
1575 /********************************/
1576 /* called for card init message */
1577 /********************************/
1578 static void
1579 inithfcpci(struct IsdnCardState *cs)
1581 cs->bcs[0].BC_SetStack = setstack_2b;
1582 cs->bcs[1].BC_SetStack = setstack_2b;
1583 cs->bcs[0].BC_Close = close_hfcpci;
1584 cs->bcs[1].BC_Close = close_hfcpci;
1585 cs->dbusytimer.function = (void *) hfcpci_dbusy_timer;
1586 cs->dbusytimer.data = (long) cs;
1587 init_timer(&cs->dbusytimer);
1588 mode_hfcpci(cs->bcs, 0, 0);
1589 mode_hfcpci(cs->bcs + 1, 0, 1);
1594 /*******************************************/
1595 /* handle card messages from control layer */
1596 /*******************************************/
1597 static int
1598 hfcpci_card_msg(struct IsdnCardState *cs, int mt, void *arg)
1600 u_long flags;
1602 if (cs->debug & L1_DEB_ISAC)
1603 debugl1(cs, "HFCPCI: card_msg %x", mt);
1604 switch (mt) {
1605 case CARD_RESET:
1606 spin_lock_irqsave(&cs->lock, flags);
1607 reset_hfcpci(cs);
1608 spin_unlock_irqrestore(&cs->lock, flags);
1609 return (0);
1610 case CARD_RELEASE:
1611 release_io_hfcpci(cs);
1612 return (0);
1613 case CARD_INIT:
1614 spin_lock_irqsave(&cs->lock, flags);
1615 inithfcpci(cs);
1616 reset_hfcpci(cs);
1617 spin_unlock_irqrestore(&cs->lock, flags);
1618 msleep(80); /* Timeout 80ms */
1619 /* now switch timer interrupt off */
1620 spin_lock_irqsave(&cs->lock, flags);
1621 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
1622 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1623 /* reinit mode reg */
1624 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1625 spin_unlock_irqrestore(&cs->lock, flags);
1626 return (0);
1627 case CARD_TEST:
1628 return (0);
1630 return (0);
1634 /* this variable is used as card index when more than one card is present */
1635 static struct pci_dev *dev_hfcpci __devinitdata = NULL;
1637 int __devinit
1638 setup_hfcpci(struct IsdnCard *card)
1640 u_long flags;
1641 struct IsdnCardState *cs = card->cs;
1642 char tmp[64];
1643 int i;
1644 struct pci_dev *tmp_hfcpci = NULL;
1646 #ifdef __BIG_ENDIAN
1647 #error "not running on big endian machines now"
1648 #endif
1650 strcpy(tmp, hfcpci_revision);
1651 printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
1653 cs->hw.hfcpci.int_s1 = 0;
1654 cs->dc.hfcpci.ph_state = 0;
1655 cs->hw.hfcpci.fifo = 255;
1656 if (cs->typ != ISDN_CTYPE_HFC_PCI)
1657 return(0);
1659 i = 0;
1660 while (id_list[i].vendor_id) {
1661 tmp_hfcpci = pci_find_device(id_list[i].vendor_id,
1662 id_list[i].device_id,
1663 dev_hfcpci);
1664 i++;
1665 if (tmp_hfcpci) {
1666 if (pci_enable_device(tmp_hfcpci))
1667 continue;
1668 pci_set_master(tmp_hfcpci);
1669 if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[0].start & PCI_BASE_ADDRESS_IO_MASK)))
1670 continue;
1671 else
1672 break;
1676 if (!tmp_hfcpci) {
1677 printk(KERN_WARNING "HFC-PCI: No PCI card found\n");
1678 return (0);
1681 i--;
1682 dev_hfcpci = tmp_hfcpci; /* old device */
1683 cs->hw.hfcpci.dev = dev_hfcpci;
1684 cs->irq = dev_hfcpci->irq;
1685 if (!cs->irq) {
1686 printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
1687 return (0);
1689 cs->hw.hfcpci.pci_io = (char *)(unsigned long)dev_hfcpci->resource[1].start;
1690 printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);
1692 if (!cs->hw.hfcpci.pci_io) {
1693 printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
1694 return (0);
1696 /* Allocate memory for FIFOS */
1697 /* Because the HFC-PCI needs a 32K physical alignment, we */
1698 /* need to allocate twice the memory and align the address */
1699 if (!(cs->hw.hfcpci.share_start = kmalloc(65536, GFP_KERNEL))) {
1700 printk(KERN_WARNING "HFC-PCI: Error allocating memory for FIFO!\n");
1701 return 0;
1703 cs->hw.hfcpci.fifos = (void *)
1704 (((ulong) cs->hw.hfcpci.share_start) & ~0x7FFF) + 0x8000;
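/* program the chip's FIFO base address: the bus address of the aligned FIFO block goes into PCI config dword 0x80 */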
1705 pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u_int) virt_to_bus(cs->hw.hfcpci.fifos));
1706 cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256);
1707 printk(KERN_INFO
1708 "HFC-PCI: defined at mem %p fifo %p(%#x) IRQ %d HZ %d\n",
1709 cs->hw.hfcpci.pci_io,
1710 cs->hw.hfcpci.fifos,
1711 (u_int) virt_to_bus(cs->hw.hfcpci.fifos),
1712 cs->irq, HZ);
1714 spin_lock_irqsave(&cs->lock, flags);
1716 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
1717 cs->hw.hfcpci.int_m2 = 0; /* disable all interrupts */
1718 cs->hw.hfcpci.int_m1 = 0;
1719 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1720 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
1721 /* At this point the needed PCI config is done */
1722 /* fifos are still not enabled */
1724 INIT_WORK(&cs->tqueue, hfcpci_bh);
1725 cs->setstack_d = setstack_hfcpci;
1726 cs->BC_Send_Data = &hfcpci_send_data;
1727 cs->readisac = NULL;
1728 cs->writeisac = NULL;
1729 cs->readisacfifo = NULL;
1730 cs->writeisacfifo = NULL;
1731 cs->BC_Read_Reg = NULL;
1732 cs->BC_Write_Reg = NULL;
1733 cs->irq_func = &hfcpci_interrupt;
1734 cs->irq_flags |= IRQF_SHARED;
1735 cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer;
1736 cs->hw.hfcpci.timer.data = (long) cs;
1737 init_timer(&cs->hw.hfcpci.timer);
1738 cs->cardmsg = &hfcpci_card_msg;
1739 cs->auxcmd = &hfcpci_auxcmd;
1741 spin_unlock_irqrestore(&cs->lock, flags);
1743 return (1);