/******************************************************************************
         iphase.c: Device driver for Interphase ATM PCI adapter cards
                   Author: Peter Wang  <pwang@iphase.com>
                   Interphase Corporation  <www.iphase.com>
*******************************************************************************
      This software may be used and distributed according to the terms
      of the GNU Public License (GPL), incorporated herein by reference.
      Drivers based on this skeleton fall under the GPL and must retain
      the authorship (implicit copyright) notice.

      This program is distributed in the hope that it will be useful, but
      WITHOUT ANY WARRANTY; without even the implied warranty of
      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
      General Public License for more details.

      Modified from an incomplete driver for the Interphase 5575 1KVC 1M card
      which was originally written by Monalisa Agrawal at UNH. Now this driver
      supports a variety of variants of the Interphase ATM PCI (i)Chip adapter
      card family (see www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
      in terms of PHY type, the size of control memory and the size of
      packet memory. The following is the change log and history:

          Bugfix of Mona's UBR driver.
          Modify the basic memory allocation and DMA logic.
          Port the driver to the latest kernel from 2.0.46.
          Complete the ABR logic of the driver, and add the ABR workaround
              for the hardware anomalies.
          Add the flow control logic to the driver to allow rate-limited VCs.
          Add 4K VC support to the board with 512K control memory.
          Add support for all the variants of the Interphase ATM PCI
          (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
          (25M UTP25) and x531 (DS3 and E3).

          Support and updates available at: ftp://ftp.iphase.com/pub/atm

*******************************************************************************/
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/sonet.h>
#include <linux/skbuff.h>
#include <linux/time.h>
#include <linux/sched.h> /* for xtime */
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/init.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>
#include <linux/vmalloc.h>
#define swap(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))
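/*
 * For example, swap(0x1234) yields 0x3412; it is used below to convert the
 * AAL5 CPCS trailer length field, which arrives in network byte order, to
 * host order on little-endian machines.
 */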
struct k_sonet_stats sonet_stats;	/* link diagnostics */
unsigned char loop_mode;		/* loopback mode */
struct atm_dev *dev;			/* device back-pointer */
struct suni_priv *next;			/* next SUNI */

#define PRIV(dev) ((struct suni_priv *) dev->phy_data)
static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);

static IADEV *ia_dev[8] = {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL};
static struct atm_dev *_ia_dev[8] = {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL};
static int iadev_count = 0;
static void ia_led_timer(unsigned long arg);
static struct timer_list ia_timer = { function: ia_led_timer };
struct atm_vcc *vcc_close_que[100];
static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
static u32 IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
            |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;

MODULE_PARM(IA_TX_BUF, "i");
MODULE_PARM(IA_TX_BUF_SZ, "i");
MODULE_PARM(IA_RX_BUF, "i");
MODULE_PARM(IA_RX_BUF_SZ, "i");
MODULE_PARM(IADebugFlag, "i");
/**************************** IA_LIB **********************************/

static void ia_init_rtn_q (IARTN_Q *que)

static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
   if (que->next == NULL)
      que->next = que->tail = data;
   data->next = que->next;

static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
   entry = (IARTN_Q *)kmalloc(sizeof(IARTN_Q), GFP_KERNEL);
   if (!entry) return -1;
   if (que->next == NULL)
      que->next = que->tail = entry;
      que->tail->next = entry;
      que->tail = que->tail->next;

static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
   if (que->next == NULL)
   if ( que->next == que->tail)
      que->next = que->tail = NULL;
      que->next = que->next->next;
static void ia_hack_tcq(IADEV *dev) {
  struct ia_vcc *iavcc_r = NULL;
  extern void desc_dbg(IADEV *iadev);

  tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
  while (dev->host_tcq_wr != tcq_wr) {
     desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
     else if (!dev->desc_tbl[desc1 -1].timestamp) {
        IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
        *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
     else if (dev->desc_tbl[desc1 -1].timestamp) {
        if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
           printk("IA: Fatal err in get_desc\n");
        iavcc_r->vc_desc_cnt--;
        dev->desc_tbl[desc1 -1].timestamp = 0;
        IF_EVENT(printk("ia_hack: return_q skb = 0x%x desc = %d\n",
                                  (u32)dev->desc_tbl[desc1 -1].txskb, desc1);)
        if (iavcc_r->pcr < dev->rate_limit) {
           IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
           if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
              printk("ia_hack_tcq: No memory available\n");
        dev->desc_tbl[desc1 -1].iavcc = NULL;
        dev->desc_tbl[desc1 -1].txskb = NULL;
     dev->host_tcq_wr += 2;
     if (dev->host_tcq_wr > dev->ffL.tcq_ed)
        dev->host_tcq_wr = dev->ffL.tcq_st;
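  /* The TCQ holds 16-bit descriptor numbers, so host_tcq_wr above advances
     in 2-byte steps and wraps from ffL.tcq_ed back to ffL.tcq_st. */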
static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
  struct ia_vcc *iavcc_r = NULL;
  static unsigned long timer = 0;
  extern void desc_dbg(IADEV *iadev);

  if(((jiffies - timer)>50)||((dev->ffL.tcq_rd==dev->host_tcq_wr))){
     while (i < dev->num_tx_desc) {
        if (!dev->desc_tbl[i].timestamp) {
        ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
        delta = jiffies - dev->desc_tbl[i].timestamp;
        if (delta >= ltimeout) {
           IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n",
                         i, dev->desc_tbl[i].timestamp, delta, jiffies);)
           if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
              dev->ffL.tcq_rd = dev->ffL.tcq_ed;
              dev->ffL.tcq_rd -= 2;
           *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
           if (!(skb = dev->desc_tbl[i].txskb) ||
                          !(iavcc_r = dev->desc_tbl[i].iavcc))
              printk("Fatal err, desc table vcc or skb is NULL\n");
              iavcc_r->vc_desc_cnt--;
           dev->desc_tbl[i].timestamp = 0;
           dev->desc_tbl[i].iavcc = NULL;
           dev->desc_tbl[i].txskb = NULL;
  if (dev->ffL.tcq_rd == dev->host_tcq_wr)
  /* Get the next available descriptor number from TCQ */
  desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
  while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
     dev->ffL.tcq_rd += 2;
     if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
        dev->ffL.tcq_rd = dev->ffL.tcq_st;
     if (dev->ffL.tcq_rd == dev->host_tcq_wr)
     desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);

  /* get system time */
  dev->desc_tbl[desc_num -1].timestamp = jiffies;
static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
  vcstatus_t *vcstatus;
  u_short tempCellSlot, tempFract;
  struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
  struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;

  if (vcc->qos.txtp.traffic_class == ATM_ABR) {
     vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
     if( vcstatus->cnt == 0x05 ) {
        if( eabr_vc->last_desc ) {
           if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
              /* Wait for 10 Micro sec */
              if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
           tempCellSlot = abr_vc->last_cell_slot;
           tempFract    = abr_vc->fraction;
           if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
                     && (tempFract == dev->testTable[vcc->vci]->fract))
           dev->testTable[vcc->vci]->lastTime = tempCellSlot;
           dev->testTable[vcc->vci]->fract = tempFract;
        } /* last descriptor */
     } /* vcstatus->cnt */

     IF_ABR(printk("LOCK UP found\n");)
     writew(0xFFFD, dev->seg_reg+MODE_REG_0);
     /* Wait for 10 Micro sec */
     abr_vc->status &= 0xFFF8;
     abr_vc->status |= 0x0001;  /* state is idle */
     shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
     for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
        shd_tbl[i] = vcc->vci;
        IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
     writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
     writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
     writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
**
**  +----+----+------------------+-------------------------------+
**  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
**  +----+----+------------------+-------------------------------+
**
**    R = reserved (written as 0)
**    NZ = 0 if 0 cells/sec; 1 otherwise
**
**    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec

cellrate_to_float(u32 cr)
#define M_BITS	9		/* Number of bits in mantissa */
#define E_BITS	5		/* Number of bits in exponent */
  u32 tmp = cr & 0x00ffffff;
     flot = NZ | (i << M_BITS) | (cr & M_MASK);
     flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
     flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).

float_to_cellrate(u16 rate)
  u32   exp, mantissa, cps;

  if ((rate & NZ) == 0)
  exp = (rate >> M_BITS) & E_MASK;
  mantissa = rate & M_MASK;
     cps = (1 << M_BITS) | mantissa;
  else if (exp > M_BITS)
     cps <<= (exp - M_BITS);
     cps >>= (M_BITS - exp);
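/*
** Worked example of the two conversions above (assuming NZ is bit 14, per
** the bit layout sketched earlier): 100,000 cells/sec = 0x186A0
** = 1.100001101b x 2^16, which encodes as (1 << 14) | (16 << M_BITS) | 0x10D
** = 0x610D.  Decoding 0x610D gives ((1 << M_BITS) | 0x10D) << (16 - M_BITS)
** = 0x30D << 7 = 99,968 cells/sec; precision below the 9-bit mantissa is lost.
*/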
static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
  srv_p->class_type = ATM_ABR;
  srv_p->pcr  = dev->LineRate;
  srv_p->icr  = 0x055cb7;
  srv_p->tbe  = 0xffffff;
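  /* For scale: the default icr of 0x055cb7 is 351,415 cells/sec, roughly an
     OC-3 line rate; open_tx() below clamps icr down to the VC's pcr when the
     requested pcr is smaller. */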
ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
                                struct atm_vcc *vcc, u8 flag)
  f_vc_abr_entry  *f_abr_vc;
  r_vc_abr_entry  *r_abr_vc;
  u16 adtf, air, *ptr16;
  f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
  f_abr_vc += vcc->vci;
    case 1: /* FFRED initialization */
#if 0  /* sanity check */
       if (srv_p->pcr > dev->LineRate)
          srv_p->pcr = dev->LineRate;
       if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
          return MCR_UNAVAILABLE;
       if (srv_p->mcr > srv_p->pcr)
          srv_p->icr = srv_p->pcr;
       if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
       if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
       if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
       if (srv_p->nrm > MAX_NRM)
       if (srv_p->trm > MAX_TRM)
       if (srv_p->adtf > MAX_ADTF)
       else if (srv_p->adtf == 0)
       if (srv_p->cdf > MAX_CDF)
       if (srv_p->rif > MAX_RIF)
       if (srv_p->rdf > MAX_RDF)
       memset ((caddr_t)f_abr_vc, 0, sizeof(f_vc_abr_entry));
       f_abr_vc->f_vc_type = ABR;
       nrm = 2 << srv_p->nrm;        /* (2 ** (srv_p->nrm +1)) */
                                     /* i.e 2**n = 2 << (n-1) */
       f_abr_vc->f_nrm = nrm << 8 | nrm;
       trm = 100000/(2 << (16 - srv_p->trm));
       if ( trm == 0) trm = 1;
       f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
       crm = srv_p->tbe / nrm;
       if (crm == 0) crm = 1;
       f_abr_vc->f_crm = crm & 0xff;
       f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
       icr = MIN( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
                               ((srv_p->tbe/srv_p->frtt)*1000000) :
                               (1000000/(srv_p->frtt/srv_p->tbe)));
       f_abr_vc->f_icr = cellrate_to_float(icr);
       adtf = (10000 * srv_p->adtf)/8192;
       if (adtf == 0) adtf = 1;
       f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
       f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
       f_abr_vc->f_acr = f_abr_vc->f_icr;
       f_abr_vc->f_status = 0x0042;
    case 0: /* RFRED initialization */
       ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
       *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
       r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
       r_abr_vc += vcc->vci;
       r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
       air = srv_p->pcr << (15 - srv_p->rif);
       if (air == 0) air = 1;
       r_abr_vc->r_air = cellrate_to_float(air);
       dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
       dev->sum_mcr += srv_p->mcr;
static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
   u32 rateLow=0, rateHigh, rate;
   struct ia_vcc *ia_vcc;
   int idealSlot =0, testSlot, toBeAssigned, inc;
   u16 *SchedTbl, *TstSchedTbl;

   /* IpAdjustTrafficParams */
   if (vcc->qos.txtp.max_pcr <= 0) {
      IF_ERR(printk("PCR for CBR not defined\n");)
   rate = vcc->qos.txtp.max_pcr;
   entries = rate / dev->Granularity;
   IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
                               entries, rate, dev->Granularity);)
      IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
   rateLow  = entries * dev->Granularity;
   rateHigh = (entries + 1) * dev->Granularity;
   if (3*(rate - rateLow) > (rateHigh - rate))
   if (entries > dev->CbrRemEntries) {
      IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
      IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
                                      entries, dev->CbrRemEntries);)

   ia_vcc = INPH_IA_VCC(vcc);
   ia_vcc->NumCbrEntry = entries;
   dev->sum_mcr += entries * dev->Granularity;
   /* IaFFrednInsertCbrSched */
   // Starting at an arbitrary location, place the entries into the table
   // as smoothly as possible
   spacing = dev->CbrTotEntries / entries;
   sp_mod  = dev->CbrTotEntries % entries; // get modulo
   toBeAssigned = entries;
   IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
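   // Example: with CbrTotEntries = 5000 and entries = 3, spacing = 1666 and
   // sp_mod = 2; the fractional carry kept in fracSlot/sp_mod2 below spreads
   // the remainder so successive slots stay about 1666.67 apart on average.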
      // If this is the first time, start the table loading for this connection
      // as close to entryPoint as possible.
      if (toBeAssigned == entries)
         idealSlot = dev->CbrEntryPt;
         dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
         if (dev->CbrEntryPt >= dev->CbrTotEntries)
            dev->CbrEntryPt -= dev->CbrTotEntries;  // Wrap if necessary
         idealSlot += (u32)(spacing + fracSlot); // Point to the next location
                                                 // in the table that would be smoothest
         fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
         sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
      if (idealSlot >= (int)dev->CbrTotEntries)
         idealSlot -= dev->CbrTotEntries;
      // Continuously check around this ideal value until a null
      // location is encountered.
      SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
      testSlot = idealSlot;
      TstSchedTbl = (u16*)(SchedTbl+testSlot);  // set index and read in value
      IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%x, NumToAssign=%d\n",
                testSlot, (u32)TstSchedTbl,toBeAssigned);)
      memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(u16));
      while (cbrVC)  // If another VC at this location, we have to keep looking
          testSlot = idealSlot - inc;
          if (testSlot < 0) { // Wrap if necessary
             testSlot += dev->CbrTotEntries;
             IF_CBR(printk("Testslot Wrap. STable Start=0x%x,Testslot=%d\n",
                                                       (u32)SchedTbl,testSlot);)
          TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(u16));
          testSlot = idealSlot + inc;
          if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
             testSlot -= dev->CbrTotEntries;
             IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
             IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
                                            testSlot, toBeAssigned);)
          // set table index and read in value
          TstSchedTbl = (u16*)(SchedTbl + testSlot);
          IF_CBR(printk("Reading CBR Tbl from 0x%x, CbrVal=0x%x Iteration %d\n",
                          (u32)TstSchedTbl,cbrVC,inc);)
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(u16));
      // Move this VCI number into this location of the CBR Sched table.
      memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex,sizeof(u16));
      dev->CbrRemEntries--;

   /* IaFFrednCbrEnable */
   dev->NumEnabledCBR++;
   if (dev->NumEnabledCBR == 1) {
       writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
       IF_CBR(printk("CBR is enabled\n");)
static void ia_cbrVc_close (struct atm_vcc *vcc) {
   u16 *SchedTbl, NullVci = 0;

   iadev = INPH_IA_DEV(vcc->dev);
   iadev->NumEnabledCBR--;
   SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
   if (iadev->NumEnabledCBR == 0) {
      writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
      IF_CBR (printk("CBR support disabled\n");)
   for (i=0; i < iadev->CbrTotEntries; i++)
      if (*SchedTbl == vcc->vci) {
         iadev->CbrRemEntries++;
   IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
static int ia_avail_descs(IADEV *iadev) {
   if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
      tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
      tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
                   iadev->ffL.tcq_st) / 2;
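   /* Both branches count 2-byte TCQ slots between the host read pointer and
      the adapter write pointer; the second branch handles the wrapped case,
      counting from tcq_rd up to tcq_ed and then from tcq_st up to
      host_tcq_wr. */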
static int ia_que_tx (IADEV *iadev) {
   struct ia_vcc *iavcc;
   static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
   num_desc = ia_avail_descs(iadev);
   while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
      if (!(vcc = ATM_SKB(skb)->vcc)) {
         dev_kfree_skb_any(skb);
         printk("ia_que_tx: Null vcc\n");
      if (!test_bit(ATM_VF_READY,&vcc->flags)) {
         dev_kfree_skb_any(skb);
         printk("Free the SKB on closed vci %d \n", vcc->vci);
      iavcc = INPH_IA_VCC(vcc);
      if (ia_pkt_tx (vcc, skb)) {
         skb_queue_head(&iadev->tx_backlog, skb);
void ia_tx_poll (IADEV *iadev) {
   struct atm_vcc *vcc = NULL;
   struct sk_buff *skb = NULL, *skb1 = NULL;
   struct ia_vcc *iavcc;

   while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
       skb = rtne->data.txskb;
          printk("ia_tx_poll: skb is null\n");
       vcc = ATM_SKB(skb)->vcc;
          printk("ia_tx_poll: vcc is null\n");
          dev_kfree_skb_any(skb);
       iavcc = INPH_IA_VCC(vcc);
          printk("ia_tx_poll: iavcc is null\n");
          dev_kfree_skb_any(skb);
       skb1 = skb_dequeue(&iavcc->txing_skb);
       while (skb1 && (skb1 != skb)) {
          if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
             printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
          IF_ERR(printk("Release the SKB not match\n");)
          if (vcc && (vcc->pop) && (skb1->len != 0))
          IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
          dev_kfree_skb_any(skb1);
          skb1 = skb_dequeue(&iavcc->txing_skb);
          IF_EVENT(printk("IA: Vci %d - skb not found requeued\n",vcc->vci);)
          ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
       if (vcc && (vcc->pop) && (skb->len != 0))
       IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
       dev_kfree_skb_any(skb);
static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
	 * Issue a command to enable writes to the NOVRAM
	NVRAM_CMD (EXTEND + EWEN);
	 * issue the write command
	NVRAM_CMD(IAWRITE + addr);
	 * Send the data, starting with D15, then D14, and so on for 16 bits
	for (i=15; i>=0; i--) {
		NVRAM_CLKOUT (val & 0x8000);
	t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
	t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
	 * disable writes again
	NVRAM_CMD(EXTEND + EWDS)

static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
	 * Read the first bit that was clocked with the falling edge of
	 * the last command data clock
	NVRAM_CMD(IAREAD + addr);
	 * Now read the rest of the bits, the next bit read is D14, then D13,
	for (i=15; i>=0; i--) {
static void ia_hw_type(IADEV *iadev) {
   u_short memType = ia_eeprom_get(iadev, 25);
   iadev->memType = memType;
   if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
      iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
      if (IA_TX_BUF == DFL_TX_BUFFERS)
         iadev->num_tx_desc = IA_TX_BUF / 2;
         iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
         iadev->num_rx_desc = IA_RX_BUF / 2;
         iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
      if (IA_TX_BUF == DFL_TX_BUFFERS)
         iadev->num_tx_desc = IA_TX_BUF / 8;
         iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
         iadev->num_rx_desc = IA_RX_BUF / 8;
         iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
   IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
         iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
         iadev->rx_buf_sz, iadev->rx_pkt_ram);)

   if ((memType & FE_MASK) == FE_SINGLE_MODE) {
      iadev->phy_type = PHY_OC3C_S;
   else if ((memType & FE_MASK) == FE_UTP_OPTION)
      iadev->phy_type = PHY_UTP155;
      iadev->phy_type = PHY_OC3C_M;
   iadev->phy_type = memType & FE_MASK;
   IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
                                         memType,iadev->phy_type);)
   if (iadev->phy_type == FE_25MBIT_PHY)
      iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_DS3_PHY)
      iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_E3_PHY)
      iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
      iadev->LineRate = (u32)(ATM_OC3_PCR);
   IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
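   /* The LineRate formulas convert the PHY bit rate into cells/sec: divide by
      8 for bytes/sec and by 53 for cells, scaled by 26/27 (presumably
      physical-layer framing overhead).  For the 25 Mbit PHY this evaluates to
      ((25600000/8)*26)/(27*53) = 58141 cells/sec. */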
static void IaFrontEndIntr(IADEV *iadev) {
  volatile IA_SUNI *suni;
  volatile ia_mb25_t *mb25;
  volatile suni_pm7345_t *suni_pm7345;

  if(iadev->phy_type & FE_25MBIT_PHY) {
     mb25 = (ia_mb25_t*)iadev->phy;
     iadev->carrier_detect = Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
  } else if (iadev->phy_type & FE_DS3_PHY) {
     suni_pm7345 = (suni_pm7345_t *)iadev->phy;
     /* clear FRMR interrupts */
     frmr_intr = suni_pm7345->suni_ds3_frm_intr_stat;
     iadev->carrier_detect =
           Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
  } else if (iadev->phy_type & FE_E3_PHY) {
     suni_pm7345 = (suni_pm7345_t *)iadev->phy;
     frmr_intr = suni_pm7345->suni_e3_frm_maint_intr_ind;
     iadev->carrier_detect =
           Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
     suni = (IA_SUNI *)iadev->phy;
     intr_status = suni->suni_rsop_status & 0xff;
     iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
  if (iadev->carrier_detect)
     printk("IA: SUNI carrier detected\n");
     printk("IA: SUNI carrier lost signal\n");
void ia_mb25_init (IADEV *iadev)
   volatile ia_mb25_t *mb25 = (ia_mb25_t*)iadev->phy;

   mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
   mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
   mb25->mb25_diag_control = 0;
    * Initialize carrier detect state
   iadev->carrier_detect = Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
void ia_suni_pm7345_init (IADEV *iadev)
   volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
   if (iadev->phy_type & FE_DS3_PHY)
      iadev->carrier_detect =
             Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
      suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
      suni_pm7345->suni_ds3_frm_cfg = 1;
      suni_pm7345->suni_ds3_tran_cfg = 1;
      suni_pm7345->suni_config = 0;
      suni_pm7345->suni_splr_cfg = 0;
      suni_pm7345->suni_splt_cfg = 0;
      iadev->carrier_detect =
             Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
      suni_pm7345->suni_e3_frm_fram_options = 0x4;
      suni_pm7345->suni_e3_frm_maint_options = 0x20;
      suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
      suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
      suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
      suni_pm7345->suni_e3_tran_fram_options = 0x1;
      suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
      suni_pm7345->suni_splr_cfg = 0x41;
      suni_pm7345->suni_splt_cfg = 0x41;
    * Enable RSOP loss of signal interrupt.
   suni_pm7345->suni_intr_enbl = 0x28;
    * Clear error counters
   suni_pm7345->suni_id_reset = 0;
    * Clear "PMCTST" in master test register.
   suni_pm7345->suni_master_test = 0;

   suni_pm7345->suni_rxcp_ctrl = 0x2c;
   suni_pm7345->suni_rxcp_fctrl = 0x81;

   suni_pm7345->suni_rxcp_idle_pat_h1 = 0;
   suni_pm7345->suni_rxcp_idle_pat_h2 = 0;
   suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
   suni_pm7345->suni_rxcp_idle_pat_h4 = 1;

   suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
   suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
   suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
   suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;

   suni_pm7345->suni_rxcp_cell_pat_h1 = 0;
   suni_pm7345->suni_rxcp_cell_pat_h2 = 0;
   suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
   suni_pm7345->suni_rxcp_cell_pat_h4 = 1;

   suni_pm7345->suni_rxcp_cell_mask_h1 = 0xff;
   suni_pm7345->suni_rxcp_cell_mask_h2 = 0xff;
   suni_pm7345->suni_rxcp_cell_mask_h3 = 0xff;
   suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;

   suni_pm7345->suni_txcp_ctrl = 0xa4;
   suni_pm7345->suni_txcp_intr_en_sts = 0x10;
   suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;

   suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
   suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;

/***************************** IA_LIB END *****************************/
/* pwang_test debug utility */
int tcnter = 0, rcnter = 0;
void xdump( u_char* cp, int length, char* prefix )
    u_char* pBuf = prntBuf;

    while(count < length){
        pBuf += sprintf( pBuf, "%s", prefix );
        for(col = 0;count + col < length && col < 16; col++){
            if (col != 0 && (col % 4) == 0)
                pBuf += sprintf( pBuf, " " );
            pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
        while(col++ < 16){      /* pad end of buffer with blanks */
                sprintf( pBuf, " " );
            pBuf += sprintf( pBuf, " " );
        pBuf += sprintf( pBuf, " " );
        for(col = 0;count + col < length && col < 16; col++){
            if (isprint((int)cp[count + col]))
                pBuf += sprintf( pBuf, "%c", cp[count + col] );
                pBuf += sprintf( pBuf, "." );
        sprintf( pBuf, "\n" );
} /* close xdump(... */
static struct atm_dev *ia_boards = NULL;

#define ACTUAL_RAM_BASE \
	RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_SEG_RAM_BASE \
	IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_REASS_RAM_BASE \
	IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
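/* These macros scale the base offsets by the installed memory size expressed
   in 128K units: e.g. with iadev->mem == 0x100000 (1M) the multiplier is
   1048576/(128*1024) = 8. */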
/*-- some utilities and memory allocation stuff will come here -------------*/

void desc_dbg(IADEV *iadev) {
  u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
  // regval = readl((u32)ia_cmds->maddr);
  tcq_wr_ptr = readw(iadev->seg_reg+TCQ_WR_PTR);
  printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
                     tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
                     readw(iadev->seg_ram+tcq_wr_ptr-2));
  printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n", iadev->host_tcq_wr,
  tcq_st_ptr = readw(iadev->seg_reg+TCQ_ST_ADR);
  tcq_ed_ptr = readw(iadev->seg_reg+TCQ_ED_ADR);
  printk("tcq_st_ptr = 0x%x  tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
  while (tcq_st_ptr != tcq_ed_ptr) {
      tmp = iadev->seg_ram+tcq_st_ptr;
      printk("TCQ slot %d desc = %d  Addr = 0x%x\n", i++, readw(tmp), tmp);
  for(i=0; i<iadev->num_tx_desc; i++)
      printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
/*----------------------------- Receiving side stuff --------------------------*/

static void rx_excp_rcvd(struct atm_dev *dev)
#if 0 /* closing the receiving side will cause too many excp int */
  u_short excpq_rd_ptr;

  iadev = INPH_IA_DEV(dev);
  state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
  { printk("state = %x \n", state);
        excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
        printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
        if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
            IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
        // TODO: update exception stat
        vci = readw(iadev->reass_ram+excpq_rd_ptr);
        error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
        if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
            excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
        writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
static void free_desc(struct atm_dev *dev, int desc)
	iadev = INPH_IA_DEV(dev);
	writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
	iadev->rfL.fdq_wr +=2;
	if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
		iadev->rfL.fdq_wr = iadev->rfL.fdq_st;
	writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
static int rx_pkt(struct atm_dev *dev)
	struct atm_vcc *vcc;
	unsigned short status;
	struct rx_buf_desc *buf_desc_ptr;
	struct sk_buff *skb;
	u_int buf_addr, dma_addr;
	iadev = INPH_IA_DEV(dev);
	if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
	    printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);

	/* mask 1st 3 bits to get the actual descno. */
	desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
	IF_RX(printk("reass_ram = 0x%x iadev->rfL.pcq_rd = 0x%x desc = %d\n",
	                             iadev->reass_ram, iadev->rfL.pcq_rd, desc);
	      printk(" pcq_wr_ptr = 0x%x\n",
	                             readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
	/* update the read pointer - maybe we should do this in the end */
	if ( iadev->rfL.pcq_rd == iadev->rfL.pcq_ed)
	    iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
	    iadev->rfL.pcq_rd += 2;
	writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);

	/* get the buffer desc entry.
		update stuff. - doesn't seem to be any update necessary
	buf_desc_ptr = (struct rx_buf_desc *)iadev->RX_DESC_BASE_ADDR;
	/* make the ptr point to the corresponding buffer desc entry */
	buf_desc_ptr += desc;
	if (!desc || (desc > iadev->num_rx_desc) ||
	              ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
	    free_desc(dev, desc);
	    IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
	vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
	    free_desc(dev, desc);
	    printk("IA: null vcc, drop PDU\n");

	/* might want to check the status bits for errors */
	status = (u_short) (buf_desc_ptr->desc_mode);
	if (status & (RX_CER | RX_PTE | RX_OFL))
	    atomic_inc(&vcc->stats->rx_err);
	    IF_ERR(printk("IA: bad packet, dropping it");)
	    if (status & RX_CER) {
	        IF_ERR(printk(" cause: packet CRC error\n");)
	    else if (status & RX_PTE) {
	        IF_ERR(printk(" cause: packet time out\n");)
	        IF_ERR(printk(" cause: buffer overflow\n");)
	    free_desc(dev, desc);

	buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
	dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
	len = dma_addr - buf_addr;
	if (len > iadev->rx_buf_sz) {
	   printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
	   atomic_inc(&vcc->stats->rx_err);
	   free_desc(dev, desc);
#if LINUX_VERSION_CODE >= 0x20312
	if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
	if (atm_charge(vcc, atm_pdu2truesize(len))) {
	    /* let's allocate an skb for now */
	    skb = alloc_skb(len, GFP_ATOMIC);
	        IF_ERR(printk("can't allocate memory for recv, drop pkt!\n");)
	        atomic_inc(&vcc->stats->rx_drop);
	        atm_return(vcc, atm_pdu2truesize(len));
	        free_desc(dev, desc);
	    IF_EVENT(printk("IA: Rx over the rx_quota %ld\n", vcc->rx_quota);)
	        printk("Drop control packets\n");
	        free_desc(dev, desc);
	ATM_SKB(skb)->vcc = vcc;
	ATM_SKB(skb)->iovcnt = 0;
	ATM_DESC(skb) = desc;
	skb_queue_tail(&iadev->rx_dma_q, skb);

	/* Build the DLE structure */
	wr_ptr = iadev->rx_dle_q.write;
	wr_ptr->sys_pkt_addr = virt_to_bus(skb->data);
	wr_ptr->local_pkt_addr = buf_addr;
	wr_ptr->bytes = len;	/* We don't know this do we ?? */
	wr_ptr->mode = DMA_INT_ENABLE;

	/* should take care of wrap around here too. */
	if(++wr_ptr == iadev->rx_dle_q.end)
	     wr_ptr = iadev->rx_dle_q.start;
	iadev->rx_dle_q.write = wr_ptr;

	/* Increment transaction counter */
	writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
static void rx_intr(struct atm_dev *dev)
  iadev = INPH_IA_DEV(dev);
  status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
  IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
  if (status & RX_PKT_RCVD)
      /* Basically received an interrupt for receiving a packet.
	A descriptor would have been written to the packet complete
	queue. Get all the descriptors and set up dma to move the
	packets till the packet complete queue is empty..
      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
      IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
      while(!(state & PCQ_EMPTY))
	state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  if (status & RX_FREEQ_EMPT)
         iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
         iadev->rx_tmp_jif = jiffies;
     else if (((jiffies - iadev->rx_tmp_jif) > 50) &&
               ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
         for (i = 1; i <= iadev->num_rx_desc; i++)
         printk("Test logic RUN!!!!\n");
         writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
     IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
  if (status & RX_EXCP_RCVD)
	/* probably need to handle the exception queue also. */
	IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
  if (status & RX_RAW_RCVD)
	/* need to handle the raw incoming cells. This depends on
	   whether we have programmed to receive the raw cells or not.
	IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);)
static void rx_dle_intr(struct atm_dev *dev)
  struct atm_vcc *vcc;
  struct sk_buff *skb;
  struct dle *dle, *cur_dle;

  iadev = INPH_IA_DEV(dev);

  /* free all the dles done, that is just update our own dle read pointer
	- do we really need to do this. Think not. */
  /* DMA is done, just get all the receive buffers from the rx dma queue
	and push them up to the higher layer protocol. Also free the desc
	associated with the buffer. */
  dle = iadev->rx_dle_q.read;
  dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
  cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
  while(dle != cur_dle)
      /* free the DMAed skb */
      skb = skb_dequeue(&iadev->rx_dma_q);
      desc = ATM_DESC(skb);
      free_desc(dev, desc);

          printk("rx_dle_intr: skb len 0\n");
	  dev_kfree_skb_any(skb);
          struct cpcs_trailer *trailer;
          struct ia_vcc *ia_vcc;
	  /* no VCC related housekeeping done as yet. lets see */
	  vcc = ATM_SKB(skb)->vcc;
	      printk("IA: null vcc\n");
	      atomic_inc(&vcc->stats->rx_err);
	      dev_kfree_skb_any(skb);
	  ia_vcc = INPH_IA_VCC(vcc);
	      atomic_inc(&vcc->stats->rx_err);
	      dev_kfree_skb_any(skb);
#if LINUX_VERSION_CODE >= 0x20312
              atm_return(vcc, atm_guess_pdu2truesize(skb->len));
              atm_return(vcc, atm_pdu2truesize(skb->len));

          // get real pkt length  pwang_test
          trailer = (struct cpcs_trailer*)((u_char *)skb->data +
                                 skb->len - sizeof(struct cpcs_trailer));
	  length = swap(trailer->length);
	  if ((length > iadev->rx_buf_sz) || (length >
                              (skb->len - sizeof(struct cpcs_trailer))))
	      atomic_inc(&vcc->stats->rx_err);
	      dev_kfree_skb_any(skb);
	      IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
#if LINUX_VERSION_CODE >= 0x20312
              atm_return(vcc, atm_guess_pdu2truesize(skb->len));
              atm_return(vcc, atm_pdu2truesize(skb->len));
	  skb_trim(skb, length);

	  /* Display the packet */
	  IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
          xdump(skb->data, skb->len, "RX: ");
	  IF_RX(printk("rx_dle_intr: skb push");)
	  atomic_inc(&vcc->stats->rx);
	  iadev->rx_pkt_cnt++;
      if (++dle == iadev->rx_dle_q.end)
	  dle = iadev->rx_dle_q.start;
  iadev->rx_dle_q.read = dle;

  /* if the interrupts are masked because there were no free desc available,
  if (!iadev->rxing) {
     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
     if (!(state & FREEQ_EMPTY)) {
        state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
        writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
               iadev->reass_reg+REASS_MASK_REG);
static int open_rx(struct atm_vcc *vcc)
	IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)

	if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
	iadev = INPH_IA_DEV(vcc->dev);
	if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
	   if (iadev->phy_type & FE_25MBIT_PHY) {
	       printk("IA: ABR not supported\n");
	/* Make only this VCI in the vc table valid and let all
	   others be invalid entries */
	vc_table = (u_short *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
	vc_table += vcc->vci;
	/* mask the last 6 bits and OR it with 3 for 1K VCs */
	*vc_table = vcc->vci << 6;
	/* Also keep a list of open rx vcs so that we can attach them with
	   incoming PDUs later. */
	if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
	                (vcc->qos.txtp.traffic_class == ATM_ABR))
	   srv_cls_param_t srv_p;
	   init_abr_vc(iadev, &srv_p);
	   ia_open_abr_vc(iadev, &srv_p, vcc, 0);
	else {  /* for UBR  later may need to add CBR logic */
	   reass_ptr = (u_short *)
	                  (iadev->reass_ram+REASS_TABLE*iadev->memSize);
	   reass_ptr += vcc->vci;
	   *reass_ptr = NO_AAL5_PKT;

	if (iadev->rx_open[vcc->vci])
	   printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
	                          vcc->dev->number, vcc->vci);
	iadev->rx_open[vcc->vci] = vcc;
static int rx_init(struct atm_dev *dev)
	struct rx_buf_desc *buf_desc_ptr;
	unsigned long rx_pkt_start = 0;
	struct abr_vc_table *abr_vc_table;
	int i,j, vcsize_sel;
	u_short freeq_st_adr;
	u_short *freeq_start;

	iadev = INPH_IA_DEV(dev);
	//    spin_lock_init(&iadev->rx_lock);

	/* I need to initialize the DLEs somewhere. Lets see what I
	   need to do for this, hmmm...
	   - allocate memory for 256 DLEs. make sure that it starts
	     on a 4k byte address boundary. Program the start address
	     in Receive List address register. ..... to do for TX also
	   To make sure that it is a 4k byte boundary - allocate 8k and find
	     4k byte boundary within.
	     ( (addr + (4k-1)) & ~(4k-1) )

	/* allocate 8k bytes */
	dle_addr = (u32*)kmalloc(2*sizeof(struct dle)*DLE_ENTRIES, GFP_KERNEL);
	    printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
	/* find 4k byte boundary within the 8k allocated */
	dle_addr = (u32*)( ((u32)dle_addr+(4096-1)) & ~(4096-1) );
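	/* Example of the rounding above: a kmalloc() return of 0xc4801234
	   becomes (0xc4801234 + 0xfff) & ~0xfff = 0xc4802000, which is
	   4k-aligned and still within the 8k allocation. */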
	iadev->rx_dle_q.start = (struct dle*)dle_addr;
	iadev->rx_dle_q.read = iadev->rx_dle_q.start;
	iadev->rx_dle_q.write = iadev->rx_dle_q.start;
	iadev->rx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
	/* the end of the dle q points to the entry after the last
	   DLE that can be used. */

	/* write the upper 20 bits of the start address to rx list address register */
	writel(virt_to_bus(dle_addr) & 0xfffff000, iadev->dma+IPHASE5575_RX_LIST_ADDR);
	IF_INIT(printk("Tx Dle list addr: 0x%08x value: 0x%0x\n",
	               (u32)(iadev->dma+IPHASE5575_TX_LIST_ADDR),
	               *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));
	printk("Rx Dle list addr: 0x%08x value: 0x%0x\n",
	               (u32)(iadev->dma+IPHASE5575_RX_LIST_ADDR),
	               *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)

	writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
	writew(0, iadev->reass_reg+MODE_REG);
	writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);

	/* Receive side control memory map
	   -------------------------------
		Buffer descr	0x0000 (736 - 23K)
		VP Table	0x5c00 (256 - 512)
		Except q	0x5e00 (128 - 512)
		Free buffer q	0x6000 (1K - 2K)
		Packet comp q	0x6800 (1K - 2K)
		Reass Table	0x7000 (1K - 2K)
		VC Table	0x7800 (1K - 2K)
		ABR VC Table	0x8000 (1K - 32K)

	/* Base address for Buffer Descriptor Table */
	writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
	/* Set the buffer size register */
	writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);

	/* Initialize each entry in the Buffer Descriptor Table */
	iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
	buf_desc_ptr =(struct rx_buf_desc *)iadev->RX_DESC_BASE_ADDR;
	memset((caddr_t)buf_desc_ptr, 0, sizeof(struct rx_buf_desc));
	rx_pkt_start = iadev->rx_pkt_ram;
	for(i=1; i<=iadev->num_rx_desc; i++)
		memset((caddr_t)buf_desc_ptr, 0, sizeof(struct rx_buf_desc));
		buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
		buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
		rx_pkt_start += iadev->rx_buf_sz;
	IF_INIT(printk("Rx Buffer desc ptr: 0x%0x\n", (u32)(buf_desc_ptr));)
	i = FREE_BUF_DESC_Q*iadev->memSize;
	writew(i >> 16, iadev->reass_reg+REASS_QUEUE_BASE);
	writew(i, iadev->reass_reg+FREEQ_ST_ADR);
	writew(i+iadev->num_rx_desc*sizeof(u_short),
	                                      iadev->reass_reg+FREEQ_ED_ADR);
	writew(i, iadev->reass_reg+FREEQ_RD_PTR);
	writew(i+iadev->num_rx_desc*sizeof(u_short),
	                                      iadev->reass_reg+FREEQ_WR_PTR);
	/* Fill the FREEQ with all the free descriptors. */
	freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
	freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
	for(i=1; i<=iadev->num_rx_desc; i++)
		*freeq_start = (u_short)i;
	IF_INIT(printk("freeq_start: 0x%0x\n", (u32)freeq_start);)
	/* Packet Complete Queue */
	i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
	writew(i, iadev->reass_reg+PCQ_ST_ADR);
	writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
	writew(i, iadev->reass_reg+PCQ_RD_PTR);
	writew(i, iadev->reass_reg+PCQ_WR_PTR);

	/* Exception Queue */
	i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
	writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
	writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
	                                      iadev->reass_reg+EXCP_Q_ED_ADR);
	writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
	writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);

	/* Load local copy of FREEQ and PCQ ptrs */
	iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
	iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
	iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
	iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
	iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
	iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
	iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
	iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;

	IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
	        iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
	        iadev->rfL.pcq_wr);)
	/* just for check - no VP TBL */
	/* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
	/* initialize VP Table for invalid VPIs
	   - I guess we can write all 1s or 0x000f in the entire memory
	     space or something similar.

	/* This seems to work and looks right to me too !!! */
	i = REASS_TABLE * iadev->memSize;
	writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
	/* initialize Reassembly table to I don't know what ???? */
	reass_table = (u16 *)(iadev->reass_ram+i);
	j = REASS_TABLE_SZ * iadev->memSize;
	for(i=0; i < j; i++)
		*reass_table++ = NO_AAL5_PKT;

	while (i != iadev->num_vc) {
	i = RX_VC_TABLE * iadev->memSize;
	writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
	vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
	j = RX_VC_TABLE_SZ * iadev->memSize;
	for(i = 0; i < j; i++)
		/* shift the reassembly pointer by 3 + lower 3 bits of
		   vc_lkup_base register (=3 for 1K VCs) and the last byte
		   is those low 3 bits.
		   Shall program this later.
		*vc_table = (i << 6) | 15;	/* for invalid VCI */
	i = ABR_VC_TABLE * iadev->memSize;
	writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
	i = ABR_VC_TABLE * iadev->memSize;
	abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
	j = REASS_TABLE_SZ * iadev->memSize;
	memset ((char*)abr_vc_table, 0, j * sizeof(struct abr_vc_table) );
	for(i = 0; i < j; i++) {
		abr_vc_table->rdf = 0x0003;
		abr_vc_table->air = 0x5eb1;

	/* Initialize other registers */

	/* VP Filter Register set for VC Reassembly only */
	writew(0xff00, iadev->reass_reg+VP_FILTER);
	writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
	writew(0x1, iadev->reass_reg+PROTOCOL_ID);

	/* Packet Timeout Count related Registers :
	   Set packet timeout to occur in about 3 seconds
	   Set Packet Aging Interval count register to overflow in about 4 us
	writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
	i = ((u32)ptr16 >> 6) & 0xff;
	i |=(((u32)ptr16 << 2) & 0xff00);
	writew(i, iadev->reass_reg+TMOUT_RANGE);
	/* initialize the desc_tbl */
	for(i=0; i<iadev->num_tx_desc;i++)
		iadev->desc_tbl[i].timestamp = 0;

	/* to clear the interrupt status register - read it */
	readw(iadev->reass_reg+REASS_INTR_STATUS_REG);

	/* Mask Register - clear it */
	writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);

	skb_queue_head_init(&iadev->rx_dma_q);
	iadev->rx_free_desc_qhead = NULL;
	iadev->rx_open = (struct atm_vcc **)kmalloc(4*iadev->num_vc,GFP_KERNEL);
	if (!iadev->rx_open)
		printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
	memset(iadev->rx_open, 0, 4*iadev->num_vc);
	iadev->rx_pkt_cnt = 0;
	writew(R_ONLINE, iadev->reass_reg+MODE_REG);
	The memory map suggested in appendix A and the coding for it.
	Keeping it around just in case we change our mind later.

		Buffer descr	0x0000 (128 - 4K)
		UBR sched	0x1000 (1K - 4K)
		UBR Wait q	0x2000 (1K - 4K)
		Commn queues	0x3000 Packet Ready, Transmit comp(0x3100)
		extended VC	0x4000 (1K - 8K)
		ABR sched	0x6000 and ABR wait queue (1K - 2K) each
		CBR sched	0x7000 (as needed)
		VC table	0x8000 (1K - 32K)
static void tx_intr(struct atm_dev *dev)
	unsigned short status;
	unsigned long flags;

	iadev = INPH_IA_DEV(dev);

	status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
	if (status & TRANSMIT_DONE){
	   IF_EVENT(printk("Transmit Done Intr logic run\n");)
	   spin_lock_irqsave(&iadev->tx_lock, flags);
	   spin_unlock_irqrestore(&iadev->tx_lock, flags);
	   writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
	   if (iadev->close_pending)
	       wake_up(&iadev->close_wait);
	if (status & TCQ_NOT_EMPTY)
	   IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
static void tx_dle_intr(struct atm_dev *dev)
	struct dle *dle, *cur_dle;
	struct sk_buff *skb;
	struct atm_vcc *vcc;
	struct ia_vcc *iavcc;
	unsigned long flags;

	iadev = INPH_IA_DEV(dev);
	spin_lock_irqsave(&iadev->tx_lock, flags);
	dle = iadev->tx_dle_q.read;
	dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
	                                (sizeof(struct dle)*DLE_ENTRIES - 1);
	cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
	while (dle != cur_dle)
	    /* free the DMAed skb */
	    skb = skb_dequeue(&iadev->tx_dma_q);
	    vcc = ATM_SKB(skb)->vcc;
	        printk("tx_dle_intr: vcc is null\n");
	        dev_kfree_skb_any(skb);
	    iavcc = INPH_IA_VCC(vcc);
	        printk("tx_dle_intr: iavcc is null\n");
	        dev_kfree_skb_any(skb);
	    if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
	       if ((vcc->pop) && (skb->len != 0))
	          dev_kfree_skb_any(skb);
	    else { /* Hold the rate-limited skb for flow control */
	       IA_SKB_STATE(skb) |= IA_DLED;
	       skb_queue_tail(&iavcc->txing_skb, skb);
	    IF_EVENT(printk("tx_dle_intr: enque skb = 0x%x \n", (u32)skb);)
	    if (++dle == iadev->tx_dle_q.end)
	         dle = iadev->tx_dle_q.start;
	iadev->tx_dle_q.read = dle;
	spin_unlock_irqrestore(&iadev->tx_lock, flags);
static int open_tx(struct atm_vcc *vcc)
	struct ia_vcc *ia_vcc;

	IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
	if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
	iadev = INPH_IA_DEV(vcc->dev);

	if (iadev->phy_type & FE_25MBIT_PHY) {
	   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
	       printk("IA: ABR not supported\n");
	   if (vcc->qos.txtp.traffic_class == ATM_CBR) {
	       printk("IA: CBR not supported\n");
	ia_vcc = INPH_IA_VCC(vcc);
	memset((caddr_t)ia_vcc, 0, sizeof(struct ia_vcc));
	if (vcc->qos.txtp.max_sdu >
	                 (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
	   printk("IA: SDU size over the configured SDU size %d\n",
	ia_vcc->vc_desc_cnt = 0;

	if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
	   vcc->qos.txtp.pcr = iadev->LineRate;
	else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
	   vcc->qos.txtp.pcr = iadev->LineRate;
	else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr > 0))
	   vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
	if (vcc->qos.txtp.pcr > iadev->LineRate)
	    vcc->qos.txtp.pcr = iadev->LineRate;
	ia_vcc->pcr = vcc->qos.txtp.pcr;

	if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
	else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
	else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
	else ia_vcc->ltimeout = 2700 * HZ / ia_vcc->pcr;
	if (ia_vcc->pcr < iadev->rate_limit)
	   skb_queue_head_init (&ia_vcc->txing_skb);
	if (ia_vcc->pcr < iadev->rate_limit) {
	   if (vcc->qos.txtp.max_sdu != 0) {
	      if (ia_vcc->pcr > 60000)
	         vcc->sk->sndbuf = vcc->qos.txtp.max_sdu * 5;
	      else if (ia_vcc->pcr > 2000)
	         vcc->sk->sndbuf = vcc->qos.txtp.max_sdu * 4;
	         vcc->sk->sndbuf = 3*vcc->qos.txtp.max_sdu;
	      vcc->sk->sndbuf = 24576;
	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;

	memset((caddr_t)vc, 0, sizeof(struct main_vc));
	memset((caddr_t)evc, 0, sizeof(struct ext_vc));

	/* store the most significant 4 bits of vci as the last 4 bits
	   of first part of atm header.
	   store the last 12 bits of vci as first 12 bits of the second
	   part of the atm header.
	evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
	evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;
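	/* For example, vci = 0x1234 gives atm_hdr1 = 0x1 (top 4 bits) and
	   atm_hdr2 = 0x234 << 4 = 0x2340 (low 12 bits, left-justified). */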
	/* check the following for different traffic classes */
	if (vcc->qos.txtp.traffic_class == ATM_UBR)
		vc->status = CRC_APPEND;
		vc->acr = cellrate_to_float(iadev->LineRate);
		if (vcc->qos.txtp.pcr > 0)
			vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
		IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
				vcc->qos.txtp.max_pcr,vc->acr);)
	else if (vcc->qos.txtp.traffic_class == ATM_ABR)
	{	srv_cls_param_t srv_p;
		IF_ABR(printk("Tx ABR VCC\n");)
		init_abr_vc(iadev, &srv_p);
		if (vcc->qos.txtp.pcr > 0)
			srv_p.pcr = vcc->qos.txtp.pcr;
		if (vcc->qos.txtp.min_pcr > 0) {
			int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
			if (tmpsum > iadev->LineRate)
			srv_p.mcr = vcc->qos.txtp.min_pcr;
			iadev->sum_mcr += vcc->qos.txtp.min_pcr;
		if (vcc->qos.txtp.icr)
			srv_p.icr = vcc->qos.txtp.icr;
		if (vcc->qos.txtp.tbe)
			srv_p.tbe = vcc->qos.txtp.tbe;
		if (vcc->qos.txtp.frtt)
			srv_p.frtt = vcc->qos.txtp.frtt;
		if (vcc->qos.txtp.rif)
			srv_p.rif = vcc->qos.txtp.rif;
		if (vcc->qos.txtp.rdf)
			srv_p.rdf = vcc->qos.txtp.rdf;
		if (vcc->qos.txtp.nrm_pres)
			srv_p.nrm = vcc->qos.txtp.nrm;
		if (vcc->qos.txtp.trm_pres)
			srv_p.trm = vcc->qos.txtp.trm;
		if (vcc->qos.txtp.adtf_pres)
			srv_p.adtf = vcc->qos.txtp.adtf;
		if (vcc->qos.txtp.cdf_pres)
			srv_p.cdf = vcc->qos.txtp.cdf;
		if (srv_p.icr > srv_p.pcr)
			srv_p.icr = srv_p.pcr;
		IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d  mcr = %d\n",
				srv_p.pcr, srv_p.mcr);)
		ia_open_abr_vc(iadev, &srv_p, vcc, 1);
	} else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
		if (iadev->phy_type & FE_25MBIT_PHY) {
			printk("IA: CBR not supported\n");
		if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
			IF_CBR(printk("PCR is not available\n");)
		vc->status = CRC_APPEND;
		if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
		printk("iadev: Non UBR, ABR and CBR traffic not supported\n");

	iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
	IF_EVENT(printk("ia open_tx returning \n");)
1895 static int tx_init(struct atm_dev
*dev
)
1898 struct tx_buf_desc
*buf_desc_ptr
;
1899 unsigned int tx_pkt_start
;
1911 iadev
= INPH_IA_DEV(dev
);
1912 spin_lock_init(&iadev
->tx_lock
);
1914 IF_INIT(printk("Tx MASK REG: 0x%0x\n",
1915 readw(iadev
->seg_reg
+SEG_MASK_REG
));)
1916 /*---------- Initializing Transmit DLEs ----------*/
1917 /* allocating 8k memory for transmit DLEs */
1918 dle_addr
= (u32
*)kmalloc(2*sizeof(struct dle
)*DLE_ENTRIES
, GFP_KERNEL
);
1921 printk(KERN_ERR DEV_LABEL
"can't allocate TX DLEs\n");
1924 /* find 4k byte boundary within the 8k allocated */
1925 dle_addr
= (u32
*)(((u32
)dle_addr
+(4096-1)) & ~(4096-1));
1926 iadev
->tx_dle_q
.start
= (struct dle
*)dle_addr
;
1927 iadev
->tx_dle_q
.read
= iadev
->tx_dle_q
.start
;
1928 iadev
->tx_dle_q
.write
= iadev
->tx_dle_q
.start
;
1929 iadev
->tx_dle_q
.end
= (struct dle
*)((u32
)dle_addr
+sizeof(struct dle
)*DLE_ENTRIES
);
1931 /* write the upper 20 bits of the start address to tx list address register */
1932 writel(virt_to_bus(dle_addr
) & 0xfffff000, iadev
->dma
+IPHASE5575_TX_LIST_ADDR
);
1933 writew(0xffff, iadev
->seg_reg
+SEG_MASK_REG
);
1934 writew(0, iadev
->seg_reg
+MODE_REG_0
);
1935 writew(RESET_SEG
, iadev
->seg_reg
+SEG_COMMAND_REG
);
1936 iadev
->MAIN_VC_TABLE_ADDR
= iadev
->seg_ram
+MAIN_VC_TABLE
*iadev
->memSize
;
1937 iadev
->EXT_VC_TABLE_ADDR
= iadev
->seg_ram
+EXT_VC_TABLE
*iadev
->memSize
;
1938 iadev
->ABR_SCHED_TABLE_ADDR
=iadev
->seg_ram
+ABR_SCHED_TABLE
*iadev
->memSize
;
    /*
       Transmit side control memory map
       --------------------------------
       Buffer descr     0x0000 (128 - 4K)
       Commn queues     0x1000 Transmit comp, Packet ready(0x1400)
       CBR Table        0x1800 (as needed) - 6K
       UBR Table        0x3000 (1K - 4K) - 12K
       UBR Wait queue   0x4000 (1K - 4K) - 16K
       ABR sched        0x5000 and ABR wait queue (1K - 2K) each
                        ABR Tbl - 20K, ABR Wq - 22K
       extended VC      0x6000 (1K - 8K) - 24K
       VC Table         0x8000 (1K - 32K) - 32K

       Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
       and Wait q, which can be allotted later.
    */
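    /* Note: the table bases used below are computed as <TABLE_CONSTANT> * iadev->memSize
     * (see MAIN_VC_TABLE, EXT_VC_TABLE, ABR_SCHED_TABLE above and TX_COMP_Q, PKT_RDY_Q
     * further down), so the offsets in the map scale with the size of the board's
     * control memory rather than being fixed addresses. */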
    /* Buffer Descriptor Table Base address */
    writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);
    /* initialize each entry in the buffer descriptor table */
    buf_desc_ptr = (struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
    memset((caddr_t)buf_desc_ptr, 0, sizeof(struct tx_buf_desc));
    tx_pkt_start = TX_PACKET_RAM;
    for(i=1; i<=iadev->num_tx_desc; i++)
        memset((caddr_t)buf_desc_ptr, 0, sizeof(struct tx_buf_desc));
        buf_desc_ptr->desc_mode = AAL5;
        buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
        buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
        tx_pkt_start += iadev->tx_buf_sz;
    iadev->tx_buf = (caddr_t *)kmalloc(iadev->num_tx_desc*sizeof(caddr_t),
    if (!iadev->tx_buf) {
        printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
    for (i = 0; i < iadev->num_tx_desc; i++)
        iadev->tx_buf[i] = (caddr_t)kmalloc(sizeof(struct cpcs_trailer),
                                            GFP_KERNEL|GFP_DMA);
        if(!iadev->tx_buf[i]) {
            printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
    iadev->desc_tbl = (struct desc_tbl_t *)kmalloc(iadev->num_tx_desc *
                                            sizeof(struct desc_tbl_t), GFP_KERNEL);
    /* Communication Queues base address */
    i = TX_COMP_Q * iadev->memSize;
    writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);
    /* Transmit Complete Queue */
    writew(i, iadev->seg_reg+TCQ_ST_ADR);
    writew(i, iadev->seg_reg+TCQ_RD_PTR);
    writew(i+iadev->num_tx_desc*sizeof(u_short), iadev->seg_reg+TCQ_WR_PTR);
    iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
    writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
           iadev->seg_reg+TCQ_ED_ADR);
    /* Fill the TCQ with all the free descriptors. */
    tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
    tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
    for(i=1; i<=iadev->num_tx_desc; i++)
        *tcq_start = (u_short)i;
    /* Packet Ready Queue */
    i = PKT_RDY_Q * iadev->memSize;
    writew(i, iadev->seg_reg+PRQ_ST_ADR);
    writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
           iadev->seg_reg+PRQ_ED_ADR);
    writew(i, iadev->seg_reg+PRQ_RD_PTR);
    writew(i, iadev->seg_reg+PRQ_WR_PTR);
    /* Load local copy of PRQ and TCQ ptrs */
    iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
    iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
    iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
    iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
    iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
    iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
    /* Just for safety initializing the queue to have desc 1 always */
    /* Fill the PRQ with all the free descriptors. */
    prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
    prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
    for(i=1; i<=iadev->num_tx_desc; i++)
        *prq_start = (u_short)0;        /* desc 1 in all entries */
    IF_INIT(printk("Start CBR Init\n");)
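    /* The TCQ doubles as the free-descriptor pool: it is pre-filled above with
     * descriptor numbers 1..num_tx_desc, and ia_pkt_tx() later pops its next entry
     * (via ffL.tcq_rd) to pick a free buffer, with completed descriptors apparently
     * returned to it by the SAR.  The PRQ is the producer side: ia_pkt_tx() writes
     * the chosen descriptor number at ffL.prq_wr to hand a packet to the hardware. */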
#if 1   /* for 1K VC board, CBR_PTR_BASE is 0 */
    writew(0,iadev->seg_reg+CBR_PTR_BASE);
#else   /* Charlie's logic is wrong ? */
    tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
    IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
    writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
    IF_INIT(printk("value in register = 0x%x\n",
                   readw(iadev->seg_reg+CBR_PTR_BASE));)
    tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
    writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
    IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
                   readw(iadev->seg_reg+CBR_TAB_BEG));)
    writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
    tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
    writew(tmp16, iadev->seg_reg+CBR_TAB_END);
    IF_INIT(printk("iadev->seg_reg = 0x%x CBR_PTR_BASE = 0x%x\n",
                   (u32)iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
    IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
                   readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
                   readw(iadev->seg_reg+CBR_TAB_END+1));)
    tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
    /* Initialize the CBR Scheduling Table */
    memset((caddr_t)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize),
           0, iadev->num_vc*6);
    iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
    iadev->CbrEntryPt = 0;
    iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
    iadev->NumEnabledCBR = 0;
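    /* CBR bookkeeping: the schedule table has num_vc*3 slots (CbrTotEntries), so each
     * slot stands for roughly MAX_ATM_155 / CbrTotEntries cells/sec of reservable
     * bandwidth (iadev->Granularity).  ia_cbr_setup() and ia_cbrVc_close(), called from
     * open_tx()/ia_close(), presumably claim and release these slots at VC setup and
     * teardown time. */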
    /* UBR scheduling Table and wait queue */
    /* initialize all bytes of UBR scheduler table and wait queue to 0
       - SCHEDSZ is 1K (# of entries).
       - UBR Table size is 4K
       - UBR wait queue is 4K
       since the table and wait queues are contiguous, all the bytes
       can be initialized by one memset. */
    while (i != iadev->num_vc) {
    i = MAIN_VC_TABLE * iadev->memSize;
    writew(vcsize_sel | ((i >> 8) & 0xfff8), iadev->seg_reg+VCT_BASE);
    i = EXT_VC_TABLE * iadev->memSize;
    writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
    i = UBR_SCHED_TABLE * iadev->memSize;
    writew((i & 0xffff) >> 11, iadev->seg_reg+UBR_SBPTR_BASE);
    i = UBR_WAIT_Q * iadev->memSize;
    writew((i >> 7) & 0xffff, iadev->seg_reg+UBRWQ_BASE);
    memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
           0, iadev->num_vc*8);
    /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
    /* initialize all bytes of ABR scheduler table and wait queue to 0
       - SCHEDSZ is 1K (# of entries).
       - ABR Table size is 2K
       - ABR wait queue is 2K
       since the table and wait queues are contiguous, all the bytes
       can be initialized by one memset. */
    i = ABR_SCHED_TABLE * iadev->memSize;
    writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
    i = ABR_WAIT_Q * iadev->memSize;
    writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
    i = ABR_SCHED_TABLE*iadev->memSize;
    memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
    vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
    evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
    iadev->testTable = (struct testTable_t **)
                       kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL);
    if (!iadev->testTable) {
        printk("Get freepage failed\n");
    for(i=0; i<iadev->num_vc; i++)
        memset((caddr_t)vc, 0, sizeof(struct main_vc));
        memset((caddr_t)evc, 0, sizeof(struct ext_vc));
        iadev->testTable[i] = (struct testTable_t *)
                              kmalloc(sizeof(struct testTable_t), GFP_KERNEL);
        iadev->testTable[i]->lastTime = 0;
        iadev->testTable[i]->fract = 0;
        iadev->testTable[i]->vc_status = VC_UBR;
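    /* Each testTable[] entry carries per-VC transmit bookkeeping (lastTime, fract,
     * vc_status): open_tx() ORs VC_ACTIVE into vc_status when a VC is brought up and
     * ia_close() resets lastTime/fract and puts the entry back to VC_UBR, so VC_UBR
     * is the idle/default state established here. */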
    /* Other Initialization */
    /* Max Rate Register */
    if (iadev->phy_type & FE_25MBIT_PHY) {
        writew(RATE25, iadev->seg_reg+MAXRATE);
        writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
        writew(cellrate_to_float(iadev->LineRate), iadev->seg_reg+MAXRATE);
        writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
    /* Set Idle Header Registers to be sure */
    writew(0, iadev->seg_reg+IDLEHEADHI);
    writew(0, iadev->seg_reg+IDLEHEADLO);
    /* Program ABR UBR Priority Register as PRI_ABR_UBR_EQUAL */
    writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);
    iadev->close_pending = 0;
#if LINUX_VERSION_CODE >= 0x20303
    init_waitqueue_head(&iadev->close_wait);
    init_waitqueue_head(&iadev->timeout_wait);
    iadev->close_wait = NULL;
    iadev->timeout_wait = NULL;
    skb_queue_head_init(&iadev->tx_dma_q);
    ia_init_rtn_q(&iadev->tx_return_q);
    /* RM Cell Protocol ID and Message Type */
    writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
    skb_queue_head_init (&iadev->tx_backlog);
    /* Mode Register 1 */
    writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);
    /* Mode Register 0 */
    writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);
    /* Interrupt Status Register - read to clear */
    readw(iadev->seg_reg+SEG_INTR_STATUS_REG);
    /* Interrupt Mask Reg - don't mask TCQ_NOT_EMPTY interrupt generation */
    writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
    writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
    iadev->tx_pkt_cnt = 0;
    iadev->rate_limit = iadev->LineRate / 3;
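/* ia_int() is the shared interrupt handler registered in ia_start(): it spins on the
 * bus status register and dispatches reassembly, segmentation, DLE and front-end
 * events; the DLE status bits are write-one-to-clear, which is what the raw stores
 * to IPHASE5575_BUS_STATUS_REG below do. */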
static void ia_int(int irq, void *dev_id, struct pt_regs *regs)
    struct atm_dev *dev;
    unsigned int status;
    iadev = INPH_IA_DEV(dev);
    while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
        IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
        if (status & STAT_REASSINT)
            IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
        if (status & STAT_DLERINT)
            /* Clear this bit by writing a 1 to it. */
            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
        if (status & STAT_SEGINT)
            IF_EVENT(printk("IA: tx_intr \n");)
        if (status & STAT_DLETINT)
            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;
        if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
            if (status & STAT_FEINT)
                IaFrontEndIntr(iadev);
/*----------------------------- entries --------------------------------*/
static int get_esi(struct atm_dev *dev)
    iadev = INPH_IA_DEV(dev);
    mac1 = cpu_to_be32(le32_to_cpu(readl(
                       iadev->reg+IPHASE5575_MAC1)));
    mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
    IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
    for (i=0; i<MAC1_LEN; i++)
        dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));
    for (i=0; i<MAC2_LEN; i++)
        dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
static int reset_sar(struct atm_dev *dev)
    unsigned int pci[64];
    iadev = INPH_IA_DEV(dev);
    if ((error = pci_read_config_dword(iadev->pci,
                                       i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
    writel(0, iadev->reg+IPHASE5575_EXT_RESET);
    if ((error = pci_write_config_dword(iadev->pci,
                                        i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
#if LINUX_VERSION_CODE >= 0x20312
static int __init ia_init(struct atm_dev *dev)
__initfunc(static int ia_init(struct atm_dev *dev))
    unsigned long real_base, base;
    unsigned short command;
    unsigned char revision;
    /* The device has been identified and registered. Now we read
       necessary configuration info like memory base address,
       interrupt number etc */
    IF_INIT(printk(">ia_init\n");)
    dev->ci_range.vpi_bits = 0;
    dev->ci_range.vci_bits = NR_VCI_LD;
    iadev = INPH_IA_DEV(dev);
    real_base = pci_resource_start (iadev->pci, 0);
    iadev->irq = iadev->pci->irq;
    if ((error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command))
        || (error = pci_read_config_byte(iadev->pci,
                                         PCI_REVISION_ID, &revision)))
        printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
    IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
                   dev->number, revision, real_base, iadev->irq);)
    /* find mapping size of board */
    iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
    if (iadev->pci_map_size == 0x100000){
        iadev->num_vc = 4096;
        dev->ci_range.vci_bits = NR_VCI_4K_LD;
    else if (iadev->pci_map_size == 0x40000) {
        iadev->num_vc = 1024;
        printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
    IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)
    /* enable bus mastering */
    pci_set_master(iadev->pci);
    /* Delay at least 1us before doing any mem accesses (how 'bout 10?) */
    /* mapping the physical address to a virtual address in address space */
    base = (unsigned long)ioremap((unsigned long)real_base, iadev->pci_map_size); /* ioremap is not resolved ??? */
        printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
    IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=0x%lx,irq=%d\n",
                   dev->number, revision, base, iadev->irq);)
    /* filling the iphase dev structure */
    iadev->mem = iadev->pci_map_size / 2;
    iadev->base_diff = real_base - base;
    iadev->real_base = real_base;
    /* Bus Interface Control Registers */
    iadev->reg = (u32 *) (base + REG_BASE);
    /* Segmentation Control Registers */
    iadev->seg_reg = (u32 *) (base + SEG_BASE);
    /* Reassembly Control Registers */
    iadev->reass_reg = (u32 *) (base + REASS_BASE);
    /* Front end/ DMA control registers */
    iadev->phy = (u32 *) (base + PHY_BASE);
    iadev->dma = (u32 *) (base + PHY_BASE);
    /* RAM - Segmentation RAM and Reassembly RAM */
    iadev->ram = (u32 *) (base + ACTUAL_RAM_BASE);
    iadev->seg_ram = (base + ACTUAL_SEG_RAM_BASE);
    iadev->reass_ram = (base + ACTUAL_REASS_RAM_BASE);
    /* let's print out the above */
    IF_INIT(printk("Base addrs: %08x %08x %08x \n %08x %08x %08x %08x\n",
                   (u32)iadev->reg, (u32)iadev->seg_reg, (u32)iadev->reass_reg,
                   (u32)iadev->phy, (u32)iadev->ram, (u32)iadev->seg_ram,
                   (u32)iadev->reass_ram);)
    /* let's try reading the MAC address */
    error = get_esi(dev);
    if (error) return error;
    for (i=0; i < ESI_LEN; i++)
        printk("%s%02X", i ? "-" : "", dev->esi[i]);
    if (reset_sar(dev)) {
        printk("IA: reset SAR failed, please try again\n");
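/* ia_update_stats() folds the adapter's 16-bit counters into the 32-bit software
 * totals (the rx/tx cell counts are split across a low and a high register, hence the
 * "& 0xffff" and "<< 16").  It is driven from ia_led_timer(), which re-arms itself
 * every HZ/4 jiffies and walks all registered boards. */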
static void ia_update_stats(IADEV *iadev) {
    if (!iadev->carrier_detect)
    iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
    iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
    iadev->drop_rxpkt += readw(iadev->reass_reg + DRP_PKT_CNTR) & 0xffff;
    iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
    iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
    iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
static void ia_led_timer(unsigned long arg) {
    unsigned long flags;
    static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
    static u32 ctrl_reg;
    for (i = 0; i < iadev_count; i++) {
        ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
        if (blinking[i] == 0) {
            ctrl_reg &= (~CTRL_LED);
            writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
            ia_update_stats(ia_dev[i]);
            ctrl_reg |= CTRL_LED;
            writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
            spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
            if (ia_dev[i]->close_pending)
                wake_up(&ia_dev[i]->close_wait);
            ia_tx_poll(ia_dev[i]);
            spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
    mod_timer(&ia_timer, jiffies + HZ / 4);
static void ia_phy_put(struct atm_dev *dev, unsigned char value,
    writel(value, INPH_IA_DEV(dev)->phy+addr);
static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
    return readl(INPH_IA_DEV(dev)->phy+addr);
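/* ia_start() brings a probed board on line: hook the shared IRQ, enable PCI memory
 * space and bus mastering, program the bus control register, run tx_init() and
 * rx_init(), and finally initialize whichever front end is fitted (25 Mbit,
 * DS3/E3 PM7345, or the SUNI path otherwise).  On any failure the IRQ is released
 * again before returning. */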
#if LINUX_VERSION_CODE >= 0x20312
static int __init ia_start(struct atm_dev *dev)
__initfunc(static int ia_start(struct atm_dev *dev))
    IF_EVENT(printk(">ia_start\n");)
    iadev = INPH_IA_DEV(dev);
    if (request_irq(iadev->irq, &ia_int, SA_SHIRQ, DEV_LABEL, dev)) {
        printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
               dev->number, iadev->irq);
    /* @@@ should release IRQ on error */
    /* enabling memory + master */
    if ((error = pci_write_config_word(iadev->pci,
                                       PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER)))
        printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
               "master (0x%x)\n", dev->number, error);
        free_irq(iadev->irq, dev);
    /* Maybe we should reset the front end, initialize Bus Interface Control
       Registers and see. */
    IF_INIT(printk("Bus ctrl reg: %08x\n",
                   readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
    ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
    ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
               | CTRL_DLETMASK          /* should be removed later */
    writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
    IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
                   readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
            printk("Bus status reg after init: %08x\n",
                   readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)
    error = tx_init(dev);
        free_irq(iadev->irq, dev);
    error = rx_init(dev);
        free_irq(iadev->irq, dev);
    ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
    writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
    IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
                   readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
    phy = 0; /* resolve compiler complaint */
    if ((phy = ia_phy_get(dev,0)) == 0x30)
        printk("IA: pm5346,rev.%d\n", phy&0x0f);
        printk("IA: utopia,rev.%0x\n", phy);)
    if (iadev->phy_type & FE_25MBIT_PHY) {
        ia_mb25_init(iadev);
    if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY)) {
        ia_suni_pm7345_init(iadev);
    error = suni_init(dev);
        free_irq(iadev->irq, dev);
    /* Enable interrupt on loss of signal SUNI_RSOP_CIE 0x10
       SUNI_RSOP_CIE_LOSE - 0x04 */
    ia_phy_put(dev, ia_phy_get(dev, 0x10) | 0x04, 0x10);
    error = dev->phy->start(dev);
        free_irq(iadev->irq, dev);
    /* Get iadev->carrier_detect status */
    IaFrontEndIntr(iadev);
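/* ia_close() tears a VC down in roughly the reverse order of open: flush this VC's
 * packets out of tx_backlog, wait (bounded by ctimeout = 300000 / pcr) for the
 * outstanding transmit descriptors to drain, give back any ABR/CBR bandwidth that
 * open_tx() reserved, and finally point the reassembly-side tables away from this
 * VCI so further cells on it are ignored. */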
static void ia_close(struct atm_vcc *vcc)
    struct ia_vcc *ia_vcc;
    struct sk_buff *skb = NULL;
    struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
    unsigned long closetime, flags;
    iadev = INPH_IA_DEV(vcc->dev);
    ia_vcc = INPH_IA_VCC(vcc);
    if (!ia_vcc) return;
    IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d vci = %d\n",
                    ia_vcc->vc_desc_cnt, vcc->vci);)
    clear_bit(ATM_VF_READY, &vcc->flags);
    skb_queue_head_init (&tmp_tx_backlog);
    skb_queue_head_init (&tmp_vcc_backlog);
    if (vcc->qos.txtp.traffic_class != ATM_NONE) {
        iadev->close_pending++;
        sleep_on_timeout(&iadev->timeout_wait, 50);
        spin_lock_irqsave(&iadev->tx_lock, flags);
        while((skb = skb_dequeue(&iadev->tx_backlog))) {
            if (ATM_SKB(skb)->vcc == vcc){
                if (vcc->pop) vcc->pop(vcc, skb);
                else dev_kfree_skb_any(skb);
                skb_queue_tail(&tmp_tx_backlog, skb);
        while((skb = skb_dequeue(&tmp_tx_backlog)))
            skb_queue_tail(&iadev->tx_backlog, skb);
        IF_EVENT(printk("IA TX Done desc_cnt = %d\n", ia_vcc->vc_desc_cnt);)
        closetime = jiffies;
        ctimeout = 300000 / ia_vcc->pcr;
        while (ia_vcc->vc_desc_cnt > 0){
            if ((jiffies - closetime) >= ctimeout)
            spin_unlock_irqrestore(&iadev->tx_lock, flags);
            sleep_on(&iadev->close_wait);
            spin_lock_irqsave(&iadev->tx_lock, flags);
        iadev->close_pending--;
        iadev->testTable[vcc->vci]->lastTime = 0;
        iadev->testTable[vcc->vci]->fract = 0;
        iadev->testTable[vcc->vci]->vc_status = VC_UBR;
        if (vcc->qos.txtp.traffic_class == ATM_ABR) {
            if (vcc->qos.txtp.min_pcr > 0)
                iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
        if (vcc->qos.txtp.traffic_class == ATM_CBR) {
            ia_vcc = INPH_IA_VCC(vcc);
            iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
            ia_cbrVc_close (vcc);
        spin_unlock_irqrestore(&iadev->tx_lock, flags);
    if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
        // reset reass table
        vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
        vc_table += vcc->vci;
        *vc_table = NO_AAL5_PKT;
        vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
        vc_table += vcc->vci;
        *vc_table = (vcc->vci << 6) | 15;
        if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
            struct abr_vc_table *abr_vc_table = (struct abr_vc_table *)
                          (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
            abr_vc_table += vcc->vci;
            abr_vc_table->rdf = 0x0003;
            abr_vc_table->air = 0x5eb1;
        // Drain the packets
        rx_dle_intr(vcc->dev);
        iadev->rx_open[vcc->vci] = 0;
    kfree(INPH_IA_VCC(vcc));
    INPH_IA_VCC(vcc) = NULL;
    clear_bit(ATM_VF_ADDR, &vcc->flags);
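/* ia_open() is the atmdev_ops entry point for opening a VC: it resolves the VPI/VCI
 * pair with atm_find_ci(), checks that the requested AAL is AAL5, allocates the
 * per-VC ia_vcc, then calls open_rx() and open_tx(); the LED/housekeeping timer is
 * armed on the first successful open. */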
static int ia_open(struct atm_vcc *vcc, short vpi, int vci)
    struct ia_vcc *ia_vcc;
    if (!test_bit(ATM_VF_PARTIAL, &vcc->flags))
        IF_EVENT(printk("ia: not partially allocated resources\n");)
        INPH_IA_VCC(vcc) = NULL;
    iadev = INPH_IA_DEV(vcc->dev);
    error = atm_find_ci(vcc, &vpi, &vci);
        printk("iadev: atm_find_ci returned error %d\n", error);
    if (vci != ATM_VCI_UNSPEC && vpi != ATM_VPI_UNSPEC)
        IF_EVENT(printk("iphase open: unspec part\n");)
        set_bit(ATM_VF_ADDR, &vcc->flags);
    if (vcc->qos.aal != ATM_AAL5)
    IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
                    vcc->dev->number, vcc->vpi, vcc->vci);)
    /* Device dependent initialization */
    ia_vcc = kmalloc(sizeof(struct ia_vcc), GFP_KERNEL);
    if (!ia_vcc) return -ENOMEM;
    INPH_IA_VCC(vcc) = ia_vcc;
    if ((error = open_rx(vcc)))
        IF_EVENT(printk("iadev: error in open_rx, closing\n");)
    if ((error = open_tx(vcc)))
        IF_EVENT(printk("iadev: error in open_tx, closing\n");)
    set_bit(ATM_VF_READY, &vcc->flags);
    static u8 first = 1;
        ia_timer.expires = jiffies + 3*HZ;
        add_timer(&ia_timer);
    IF_EVENT(printk("ia open returning\n");)
static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
    IF_EVENT(printk(">ia_change_qos\n");)
static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void *arg)
    IF_EVENT(printk(">ia_ioctl\n");)
    if (cmd != IA_CMD) {
        if (!dev->phy->ioctl) return -EINVAL;
        return dev->phy->ioctl(dev, cmd, arg);
    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
    board = ia_cmds.status;
    if ((board < 0) || (board > iadev_count))
    iadev = ia_dev[board];
    switch (ia_cmds.cmd) {
        switch (ia_cmds.sub_cmd) {
            if (!capable(CAP_NET_ADMIN)) return -EPERM;
            if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
        case MEMDUMP_SEGREG:
            if (!capable(CAP_NET_ADMIN)) return -EPERM;
            tmps = (u16 *)ia_cmds.buf;
            for(i=0; i<0x80; i+=2, tmps++)
                if(put_user(*(u16 *)(iadev->seg_reg+i), tmps)) return -EFAULT;
        case MEMDUMP_REASSREG:
            if (!capable(CAP_NET_ADMIN)) return -EPERM;
            tmps = (u16 *)ia_cmds.buf;
            for(i=0; i<0x80; i+=2, tmps++)
                if(put_user(*(u16 *)(iadev->reass_reg+i), tmps)) return -EFAULT;
            ia_regs_t regs_local;
            ffredn_t *ffL = &regs_local.ffredn;
            rfredn_t *rfL = &regs_local.rfredn;
            if (!capable(CAP_NET_ADMIN)) return -EPERM;
            /* Copy real rfred registers into the local copy */
            for (i=0; i<(sizeof (rfredn_t))/4; i++)
                ((u_int *)rfL)[i] = ((u_int *)iadev->reass_reg)[i] & 0xffff;
            /* Copy real ffred registers into the local copy */
            for (i=0; i<(sizeof (ffredn_t))/4; i++)
                ((u_int *)ffL)[i] = ((u_int *)iadev->seg_reg)[i] & 0xffff;
            if (copy_to_user(ia_cmds.buf, &regs_local, sizeof(ia_regs_t)))
            printk("Board %d registers dumped\n", board);
            if (!capable(CAP_NET_ADMIN)) return -EPERM;
            printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
            printk("rtn_q: 0x%lx\n", (long)ia_deque_rtn_q(&iadev->tx_return_q));
            struct k_sonet_stats *stats;
            stats = &PRIV(_ia_dev[board])->sonet_stats;
            printk("section_bip: %d\n", atomic_read(&stats->section_bip));
            printk("line_bip : %d\n", atomic_read(&stats->line_bip));
            printk("path_bip : %d\n", atomic_read(&stats->path_bip));
            printk("line_febe : %d\n", atomic_read(&stats->line_febe));
            printk("path_febe : %d\n", atomic_read(&stats->path_febe));
            printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
            printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
            printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
            printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
            if (!capable(CAP_NET_ADMIN)) return -EPERM;
            for (i = 1; i <= iadev->num_rx_desc; i++)
                free_desc(_ia_dev[board], i);
            writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
                    iadev->reass_reg+REASS_MASK_REG);
            if (!capable(CAP_NET_ADMIN)) return -EPERM;
            IaFrontEndIntr(iadev);
            if (!capable(CAP_NET_ADMIN)) return -EPERM;
            IADebugFlag = ia_cmds.maddr;
            printk("New debug option loaded\n");
static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,
                         void *optval, int optlen)
    IF_EVENT(printk(">ia_getsockopt\n");)
static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,
                         void *optval, int optlen)
    IF_EVENT(printk(">ia_setsockopt\n");)
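/* ia_pkt_tx() does the real work for a transmit: pull a free descriptor from the
 * TCQ, post the descriptor number to the PRQ, and queue two DLEs - one for the
 * payload and one for the 8-byte CPCS trailer.  total_len is rounded up to a
 * multiple of 48 so the AAL5 PDU fills whole cell payloads; e.g. a 100-byte skb
 * plus the trailer gives 108 bytes, padded to 144 (three cells). */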
static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
    struct tx_buf_desc *buf_desc_ptr;
    int total_len, pad, last;
    struct cpcs_trailer *trailer;
    struct ia_vcc *iavcc;
    iadev = INPH_IA_DEV(vcc->dev);
    iavcc = INPH_IA_VCC(vcc);
    if (!iavcc->txing) {
        printk("discard packet on closed VC\n");
        if (vcc->pop) vcc->pop(vcc, skb);
        else dev_kfree_skb_any(skb);
    if (skb->len > iadev->tx_buf_sz - 8) {
        printk("Transmit size over tx buffer size\n");
        dev_kfree_skb_any(skb);
    if ((u32)skb->data & 3) {
        printk("Misaligned SKB\n");
        dev_kfree_skb_any(skb);
    /* Get a descriptor number from our free descriptor queue
       We get the descr number from the TCQ now, since I am using
       the TCQ as a free buffer queue. Initially TCQ will be
       initialized with all the descriptors and is hence, full. */
    desc = get_desc (iadev, iavcc);
    comp_code = desc >> 13;
    if ((desc == 0) || (desc > iadev->num_tx_desc))
        IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
        atomic_inc(&vcc->stats->tx);
        dev_kfree_skb_any(skb);
        return 0;       /* return SUCCESS */
        IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
    /* remember the desc and vcc mapping */
    iavcc->vc_desc_cnt++;
    iadev->desc_tbl[desc-1].iavcc = iavcc;
    iadev->desc_tbl[desc-1].txskb = skb;
    IA_SKB_STATE(skb) = 0;
    iadev->ffL.tcq_rd += 2;
    if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
        iadev->ffL.tcq_rd = iadev->ffL.tcq_st;
    writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
    /* Put the descriptor number in the packet ready queue
       and put the updated write pointer in the DLE field */
    *(u16 *)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;
    iadev->ffL.prq_wr += 2;
    if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
        iadev->ffL.prq_wr = iadev->ffL.prq_st;
    /* Figure out the exact length of the packet and padding required to
       make it aligned on a 48 byte boundary. */
    total_len = skb->len + sizeof(struct cpcs_trailer);
    last = total_len - (total_len/48)*48;
    total_len = pad + total_len;
    IF_TX(printk("ia packet len:%d padding:%d\n", total_len, pad);)
    /* Put the packet in a tx buffer */
    if (!iadev->tx_buf[desc-1])
        printk("couldn't get free page\n");
    IF_TX(printk("Sent: skb = 0x%x skb->data: 0x%x len: %d, desc: %d\n",
                 (u32)skb, (u32)skb->data, skb->len, desc);)
    addr = virt_to_bus(skb->data);
    trailer = (struct cpcs_trailer *)iadev->tx_buf[desc-1];
    trailer->control = 0;
    trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
    trailer->crc32 = 0;     /* not needed - dummy bytes */
    /* Display the packet */
    IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
                    skb->len, tcnter++);
             xdump(skb->data, skb->len, "TX: ");
    /* Build the buffer descriptor */
    buf_desc_ptr = (struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
    buf_desc_ptr += desc;       /* points to the corresponding entry */
    buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
    writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
    buf_desc_ptr->vc_index = vcc->vci;
    buf_desc_ptr->bytes = total_len;
    if (vcc->qos.txtp.traffic_class == ATM_ABR)
        clear_lockup (vcc, iadev);
    /* Build the DLE structure */
    wr_ptr = iadev->tx_dle_q.write;
    memset((caddr_t)wr_ptr, 0, sizeof(struct dle));
    wr_ptr->sys_pkt_addr = addr;
    wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
                             buf_desc_ptr->buf_start_lo;
    /* wr_ptr->bytes = swap(total_len); didn't seem to affect ?? */
    wr_ptr->bytes = skb->len;
    /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
    if ((wr_ptr->bytes >> 2) == 0xb)
        wr_ptr->bytes = 0x30;
    wr_ptr->mode = TX_DLE_PSI;
    wr_ptr->prq_wr_ptr_data = 0;
    /* end is not to be used for the DLE q */
    if (++wr_ptr == iadev->tx_dle_q.end)
        wr_ptr = iadev->tx_dle_q.start;
    /* Build trailer dle */
    wr_ptr->sys_pkt_addr = virt_to_bus(iadev->tx_buf[desc-1]);
    wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
                 buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
    wr_ptr->bytes = sizeof(struct cpcs_trailer);
    wr_ptr->mode = DMA_INT_ENABLE;
    wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
    /* end is not to be used for the DLE q */
    if (++wr_ptr == iadev->tx_dle_q.end)
        wr_ptr = iadev->tx_dle_q.start;
    iadev->tx_dle_q.write = wr_ptr;
    ATM_DESC(skb) = vcc->vci;
    skb_queue_tail(&iadev->tx_dma_q, skb);
    atomic_inc(&vcc->stats->tx);
    iadev->tx_pkt_cnt++;
    /* Increment transaction counter */
    writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
    /* add flow control logic */
    if (atomic_read(&vcc->stats->tx) % 20 == 0) {
        if (iavcc->vc_desc_cnt > 10) {
            vcc->tx_quota = vcc->tx_quota * 3 / 4;
            printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota);
            iavcc->flow_inc = -1;
            iavcc->saved_tx_quota = vcc->tx_quota;
        } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
            // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
            printk("Tx2: vcc->tx_quota = %d \n", (u32)vcc->tx_quota);
            iavcc->flow_inc = 0;
    IF_TX(printk("ia send done\n");)
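/* ia_send() is the thin atmdev "send" hook: it validates the skb and, if packets are
 * already waiting in tx_backlog, queues behind them to preserve ordering; otherwise it
 * calls ia_pkt_tx() directly and falls back to the backlog if that fails.  Everything
 * runs under iadev->tx_lock, the same lock used by ia_close() and the LED timer. */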
static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
    struct ia_vcc *iavcc;
    unsigned long flags;
    iadev = INPH_IA_DEV(vcc->dev);
    iavcc = INPH_IA_VCC(vcc);
    if ((!skb)||(skb->len > (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))))
        printk(KERN_CRIT "null skb in ia_send\n");
        else dev_kfree_skb_any(skb);
    spin_lock_irqsave(&iadev->tx_lock, flags);
    if (!test_bit(ATM_VF_READY, &vcc->flags)){
        dev_kfree_skb_any(skb);
        spin_unlock_irqrestore(&iadev->tx_lock, flags);
    ATM_SKB(skb)->vcc = vcc;
    if (skb_peek(&iadev->tx_backlog)) {
        skb_queue_tail(&iadev->tx_backlog, skb);
    if (ia_pkt_tx (vcc, skb)) {
        skb_queue_tail(&iadev->tx_backlog, skb);
    spin_unlock_irqrestore(&iadev->tx_lock, flags);
static int ia_sg_send(struct atm_vcc *vcc, unsigned long start,
    IF_EVENT(printk(">ia_sg_send\n");)
static int ia_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
    IADEV *iadev = INPH_IA_DEV(dev);
    if (iadev->phy_type == FE_25MBIT_PHY) {
        n = sprintf(page, " Board Type : Iphase5525-1KVC-128K\n");
    if (iadev->phy_type == FE_DS3_PHY)
        n = sprintf(page, " Board Type : Iphase-ATM-DS3");
    else if (iadev->phy_type == FE_E3_PHY)
        n = sprintf(page, " Board Type : Iphase-ATM-E3");
    else if (iadev->phy_type == FE_UTP_OPTION)
        n = sprintf(page, " Board Type : Iphase-ATM-UTP155");
        n = sprintf(page, " Board Type : Iphase-ATM-OC3");
    if (iadev->pci_map_size == 0x40000)
        n += sprintf(tmpPtr, "-1KVC-");
        n += sprintf(tmpPtr, "-4KVC-");
    if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
        n += sprintf(tmpPtr, "1M \n");
    else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
        n += sprintf(tmpPtr, "512K\n");
        n += sprintf(tmpPtr, "128K\n");
    return sprintf(page, " Number of Tx Buffer: %u\n"
                         " Size of Tx Buffer  : %u\n"
                         " Number of Rx Buffer: %u\n"
                         " Size of Rx Buffer  : %u\n"
                         " Packets Received   : %u\n"
                         " Packets Transmitted: %u\n"
                         " Cells Received     : %u\n"
                         " Cells Transmitted  : %u\n"
                         " Board Dropped Cells: %u\n"
                         " Board Dropped Pkts : %u\n",
                   iadev->num_tx_desc, iadev->tx_buf_sz,
                   iadev->num_rx_desc, iadev->rx_buf_sz,
                   iadev->rx_pkt_cnt, iadev->tx_pkt_cnt,
                   iadev->rx_cell_cnt, iadev->tx_cell_cnt,
                   iadev->drop_rxcell, iadev->drop_rxpkt);
static const struct atmdev_ops ops = {
    getsockopt:     ia_getsockopt,
    setsockopt:     ia_setsockopt,
    sg_send:        ia_sg_send,
    phy_put:        ia_phy_put,
    phy_get:        ia_phy_get,
    change_qos:     ia_change_qos,
    proc_read:      ia_proc_read
#if LINUX_VERSION_CODE >= 0x20312
int __init ia_detect(void)
__initfunc(int ia_detect(void))
    struct atm_dev *dev;
    unsigned long flags;
    struct pci_dev *prev_dev;
    if (!pci_present()) {
        printk(KERN_ERR DEV_LABEL " driver but no PCI BIOS ?\n");
    iadev = (IADEV *)kmalloc(sizeof(IADEV), GFP_KERNEL);
    if (!iadev) return -ENOMEM;
    memset((char*)iadev, 0, sizeof(IADEV));
    while((iadev->pci = pci_find_device(PCI_VENDOR_ID_IPHASE,
                                        PCI_DEVICE_ID_IPHASE_5575, prev_dev))) {
        IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
                       iadev->pci->bus->number, PCI_SLOT(iadev->pci->devfn),
                       PCI_FUNC(iadev->pci->devfn));)
        if (pci_enable_device(iadev->pci)) break;
        dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
        IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n",
        INPH_IA_DEV(dev) = iadev;
        // TODO: multi_board using ia_boards logic in cleanup_module
        ia_dev[index] = iadev;
        _ia_dev[index] = dev;
        IF_INIT(printk("dev_id = 0x%x iadev->LineRate = %d \n",
                       (u32)dev, iadev->LineRate);)
        spin_lock_init(&iadev->misc_lock);
        spin_lock_irqsave(&iadev->misc_lock, flags);
        if (ia_init(dev) || ia_start(dev)) {
            atm_dev_deregister(dev);
            IF_INIT(printk("IA register failed!\n");)
            ia_dev[index] = NULL;
            _ia_dev[index] = NULL;
            spin_unlock_irqrestore(&iadev->misc_lock, flags);
        spin_unlock_irqrestore(&iadev->misc_lock, flags);
        IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
        prev_dev = iadev->pci;
        iadev->next_board = ia_boards;
        iadev = (IADEV *)kmalloc(
                         sizeof(IADEV), GFP_KERNEL);
        memset((char*)iadev, 0, sizeof(IADEV));
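/* ia_detect() keeps allocating a fresh IADEV for every 5575-family function that
 * pci_find_device() returns, registering each with atm_dev_register() and chaining
 * the boards through iadev->next_board for cleanup_module() to walk later. */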
int init_module(void)
    IF_EVENT(printk(">ia init_module\n");)
        printk(KERN_ERR DEV_LABEL ": no adapter found\n");
    // MOD_INC_USE_COUNT;
    ia_timer.expires = jiffies + 3*HZ;
    add_timer(&ia_timer);
void cleanup_module(void)
    struct atm_dev *dev;
    unsigned short command;
    IF_EVENT(printk(">ia cleanup_module\n");)
    // MOD_DEC_USE_COUNT;
        printk("ia: module in use\n");
    del_timer(&ia_timer);
        iadev = INPH_IA_DEV(dev);
        ia_boards = iadev->next_board;
        /* disable interrupt of lost signal */
        ia_phy_put(dev, ia_phy_get(dev, 0x10) & ~(0x4), 0x10);
        /* De-register device */
        atm_dev_deregister(dev);
        IF_EVENT(printk("iav deregistered at (itf:%d)\n", dev->number);)
        for (i = 0; i < iadev->num_tx_desc; i++)
            kfree(iadev->tx_buf[i]);
        kfree(iadev->tx_buf);
        /* Disable memory mapping and busmastering */
        if (pci_read_config_word(iadev->pci,
                                 PCI_COMMAND, &command) != 0)
            printk("ia: can't read PCI_COMMAND.\n");
        command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
        if (pci_write_config_word(iadev->pci,
                                  PCI_COMMAND, command) != 0)
            printk("ia: can't write PCI_COMMAND.\n");
        free_irq(iadev->irq, dev);
        iounmap((void *) iadev->base);
    /* and voila whatever we tried seems to work. I don't know if it will
       fix suni errors though. Really doubt that. */
    for (i = 0; i<8; i++) {