/* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */

/*

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <chas@cmf.nrl.navy.mil>
	eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from ulrich.u.muller@nokia.com)

 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>

#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
#undef USE_RBPS_POOL			/* if memory is tight try this */
#undef USE_RBPL_POOL			/* if memory is tight try this */
/* #undef CONFIG_ATM_HE_USE_SUNI */

#include <linux/atm_he.h>
#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */

/* version definition */

static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";
/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static int disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static int irq_coalesce = 1;
static int sdh = 0;
/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CLK_HIGH | SI_HIGH,	/* 1 */
	CLK_HIGH | SI_HIGH	/* 1 */
};

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
};
static struct atmdev_ops he_ops =
{
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
};
#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))
/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);	/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}

#define he_writel_rcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)
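/*
 * usage sketch (illustrative, not part of the original source): the mbox
 * accessors above are intended for read-modify-write sequences against the
 * cs block registers, as in the transmit-enable path of he_start():
 *
 *	reg = he_readl_mbox(he_dev, CS_ERCTL0);
 *	reg |= TX_ENABLE|ER_ENABLE;
 *	he_writel_mbox(he_dev, reg, CS_ERCTL0);
 *
 * every access spins on CON_CTL_BUSY, so callers serialize through the
 * single connection-memory window instead of mapping tcm/rcm/mbox directly.
 */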
/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
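/*
 * worked example (illustrative): the 4096 supported connections imply
 * vpibits + vcibits == 12; if vcibits is 10, a vcc on vpi 1, vci 32
 * yields cid = ((1 << 10) | 32) & 0x1fff = 0x420.
 */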
/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/*
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)

#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)

#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)

#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)
/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}
static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "he: %s\n", version);

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = kzalloc(sizeof(struct he_dev),
							GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}
static void __devexit
he_remove_one (struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}
static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
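/*
 * worked example (illustrative): rate_to_atmf(10) computes 10 << 9 = 5120,
 * then halves 5120 -> 2560 -> 1280 -> 640 while bumping exp to 3, and
 * returns NONZERO | (3 << 9) | (640 & 0x1ff)
 *	  = 0x4000 | 0x600 | 0x80 = 0x4680,
 * i.e. the atm forum exponent/mantissa float encoding of 10 cps.
 */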
static void __devinit
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}
static void __devinit
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}
static void __devinit
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}
static int __devinit
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}
static void __devinit
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
}
static int __devinit
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connection */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (j < 4)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
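	/*
	 * illustration of the grid shape (values are never written anywhere,
	 * this is just the arithmetic): on an OC-3 link of 353207 cps,
	 * delta = 353207 / 32 = 11037, so row 0 steps 353207, 342170, ...
	 * down to roughly half the link rate; each later row holds the
	 * previous row divided by 4 in columns 0-3 and by 2 in columns 4-15.
	 */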
	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on the rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
			instead of '/ 512', use '>> 9' to prevent a call
			to divdu3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */
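		/*
		 * worked example (illustrative): rate_atmf = 0x68 gives
		 * exp = 0x68 >> 5 = 3 and man = (0x68 & 0x1f) << 4 = 128, so
		 * rate_cps = (1 << 3) * (128 + 512) >> 9 = 8 * 640 / 512 = 10,
		 * exactly the 10 cps floor enforced above.
		 */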
		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	/* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */
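		/*
		 * illustration: the even rate_atmf entry ends up in the upper
		 * halfword (it is shifted up 16 on the following pass) and the
		 * odd one in the lower halfword; e.g. grid index i = 0x42 with
		 * buf = 2 contributes (0x42 << 8) | 2 = 0x4202 to its half of
		 * the word at RTGTBL_OFFSET + (rate_atmf >> 1).
		 */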
#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}
static int __devinit
he_init_group(struct he_dev *he_dev, int group)
{
	int i;

#ifdef USE_RBPS
	/* small buffer pool */
#ifdef USE_RBPS_POOL
	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
			CONFIG_RBPS_BUFSIZE, 8, 0);
	if (he_dev->rbps_pool == NULL) {
		hprintk("unable to create rbps pages\n");
		return -ENOMEM;
	}
#else /* !USE_RBPS_POOL */
	he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
	if (he_dev->rbps_pages == NULL) {
		hprintk("unable to create rbps page pool\n");
		return -ENOMEM;
	}
#endif /* USE_RBPS_POOL */

	he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
	if (he_dev->rbps_base == NULL) {
		hprintk("failed to alloc rbps\n");
		return -ENOMEM;
	}
	memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);

	for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPS_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
		dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
#endif

		he_dev->rbps_virt[i].virt = cpuaddr;
		he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
		he_dev->rbps_base[i].phys = dma_handle;
	}
	he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];

	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
						G0_RBPS_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
						G0_RBPS_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPS_THRESH) |
			RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPS_QI + (group * 32));
#else /* !USE_RBPS */
	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));
#endif /* USE_RBPS */

	/* large buffer pool */
#ifdef USE_RBPL_POOL
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
			CONFIG_RBPL_BUFSIZE, 8, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		return -ENOMEM;
	}
#else /* !USE_RBPL_POOL */
	he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
	if (he_dev->rbpl_pages == NULL) {
		hprintk("unable to create rbpl pages\n");
		return -ENOMEM;
	}
#endif /* USE_RBPL_POOL */

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl\n");
		return -ENOMEM;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPL_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
		dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
#endif

		he_dev->rbpl_virt[i].virt = cpuaddr;
		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
		he_dev->rbpl_base[i].phys = dma_handle;
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;
}
static int __devinit
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_DISABLED|IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}
static int __devinit
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/*
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
#define LAT_TIMER 209
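/*
 * worked example of the formula above (illustrative): with the worst-case
 * average latency of 16 clocks and a 1536-byte read burst on a 64-bit
 * (8-byte) bus, BURST_SIZE/BUS_SIZE = 192 clocks, so
 * 1 + 16 + 192 = 209; a 768-byte burst on a 32-bit bus gives the same 192.
 */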
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
				he_dev->prod_id,
				he_dev->media & 0x40 ? "SM" : "MM",
				dev->esi[0], dev->esi[1], dev->esi[2],
				dev->esi[3], dev->esi[4], dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianess */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
	spin_lock_init(&he_dev->global_lock);

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *		    HE155			     HE622
	 *
	 *	  0 ____________1023 bytes  0 _______________________2047 bytes
	 *	   |            |	     |                   |   |
	 *	   |  utility   |	     |        rx0        |   |
	 *	  5|____________|	  255|___________________| u |
	 *	  6|            |	  256|                   | t |
	 *	   |            |	     |                   | i |
	 *	   |    rx0     |      row   |        tx         | l |
	 *	   |            |	     |                   | i |
	 *	   |            |	  767|___________________| t |
	 *	517|____________|	  768|                   | y |
	 * row	518|            |	     |        rx1        |   |
	 *	   |            |	 1023|___________________|___|
	 *	   |     tx     |
	 *	   |            |
	 *     1535|____________|
	 *     1536|            |
	 *	   |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}
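	/*
	 * illustration: vpibits + vcibits always totals HE_MAXCIDBITS (12),
	 * matching the 4096 supported connections; e.g. loading the module
	 * with nvpibits=2 yields vpibits = 2 and vcibits = 10, i.e. 4 vpis
	 * of 1024 vcis each.
	 */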
	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits), RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *	    0x0 ___________________
	 *	       |                   |
	 *	       |       tsra        |
	 *	       |                   |
	 *	 0x8000|___________________|
	 *	       |       tsrb        |
	 *	 0xc000|___________________|
	 *	       |       tsrc        |
	 *	 0xe000|___________________|
	 *	       |       tsrd        |
	 *	 0xf000|___________________|
	 *	       |       tmabr       |
	 *	0x10000|___________________|
	 *	       |                   |
	 *	       |        tpd        |
	 *	       |___________________|
	 *	       |                   |
	 *	0x1ffff|___________________|
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);

	/*
	 *	receive connection memory map
	 *
	 *	    0x0 ___________________
	 *	       |                   |
	 *	       |       rsra        |
	 *	       |                   |
	 *	 0x8000|___________________|
	 *	       |                   |
	 *	       |        LBM        |	link lists of local
	 *	       |        tx         |	buffer memory
	 *	       |                   |
	 *	 0xd000|___________________|
	 *	       |       rabr        |
	 *	 0xe000|___________________|
	 *	       |       rsrb        |
	 *	       |___________________|
	 *	       |                   |
	 *	 0xffff|___________________|
	 *
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH);	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

#ifdef USE_TPD_POOL
	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
#else
	he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
	if (!he_dev->tpd_base)
		return -ENOMEM;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
		he_dev->tpd_base[i].inuse = 0;
	}

	he_dev->tpd_head = he_dev->tpd_base;
	he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
#endif

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;

	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}
static void
he_stop(struct he_dev *he_dev)
{
	u16 command;
	u32 gen_cntl_0, reg;
	struct pci_dev *pci_dev;
	int i;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

		tasklet_disable(&he_dev->tasklet);

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
#ifdef USE_RBPL_POOL
		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
			void *cpuaddr = he_dev->rbpl_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;

			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

#ifdef USE_RBPL_POOL
	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);
#endif

#ifdef USE_RBPS
	if (he_dev->rbps_base) {
#ifdef USE_RBPS_POOL
		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
			void *cpuaddr = he_dev->rbps_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;

			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
	}

#ifdef USE_RBPS_POOL
	if (he_dev->rbps_pool)
		pci_pool_destroy(he_dev->rbps_pool);
#endif

#endif /* USE_RBPS */

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

#ifdef USE_TPD_POOL
	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);
#else
	if (he_dev->tpd_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
							he_dev->tpd_base, he_dev->tpd_base_phys);
#endif

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}
static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
#ifdef USE_TPD_POOL
	struct he_tpd *tpd;
	dma_addr_t dma_handle;

	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(dma_handle);
	tpd->reserved = 0;
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
#else
	int i;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		++he_dev->tpd_head;
		if (he_dev->tpd_head > he_dev->tpd_end) {
			he_dev->tpd_head = he_dev->tpd_base;
		}

		if (!he_dev->tpd_head->inuse) {
			he_dev->tpd_head->inuse = 1;
			he_dev->tpd_head->status &= TPD_MASK;
			he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
			he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
			he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
			return he_dev->tpd_head;
		}
	}
	hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
	return NULL;
#endif
}
#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
			(((unsigned char *)(buf))[(len)-5]))
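/*
 * worked example (illustrative): the last 8 bytes of an aal5 pdu are the
 * trailer -- UU, CPI, a 2-byte length and a 4-byte crc-32 -- so the length
 * field sits at buf[len-6] and buf[len-5]; for a pdu carrying 40 payload
 * bytes AAL5_LEN() returns (0x00 << 8) | 0x28 = 40.
 */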
/*
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len) 						\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
			(((unsigned char *)(buf))[(len-1)]))
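/*
 * illustration: with RSR0_TCP_CKSUM enabled the adapter folds the tcp
 * checksum into the low 16 bits of the trailing crc word, so TCP_CKSUM()
 * simply reads the last two bytes of the pdu as a big-endian 16-bit value,
 * which the receive path below stores in skb->csum for CHECKSUM_COMPLETE.
 */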
static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	struct he_rbp *rbp = NULL;
	unsigned cid, lastcid = -1;
	unsigned buf_len = 0;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_iovec *iov;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

#ifdef USE_RBPS
		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
		else
#endif
			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];

		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		cid = RBRQ_CID(he_dev->rbrq_head);

		if (vcc == NULL || cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL) {
			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;

			goto next_rbrq_entry;
		}

		he_vcc = HE_VCC(vcc);
		if (he_vcc == NULL) {
			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;
			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
		he_vcc->iov_tail->iov_len = buf_len;
		he_vcc->pdu_len += buf_len;
		++he_vcc->iov_tail;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

#ifdef notdef
		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
			hprintk("iovec full!  cid 0x%x\n", cid);
			goto return_host_buffers;
		}
#endif
		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		__net_timestamp(skb);

		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if (iov->iov_base & RBP_SMALLBUF)
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
			else
#endif
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
		}

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb_set_tail_pointer(skb, skb->len);
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */

				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb_set_tail_pointer(skb, skb->len);
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_COMPLETE;
					skb->csum = TCP_CKSUM(skb->data,
							he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

		ATM_SKB(skb)->vcc = vcc;

		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if (iov->iov_base & RBP_SMALLBUF)
				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
			else
#endif
				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];

			rbp->status &= ~RBP_LOANED;
		}

		he_vcc->iov_tail = he_vcc->iov_head;
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(++he_dev->rbrq_head));

	}
	read_unlock(&vcc_sklist_lock);

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
						G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}
static void
he_service_tbrq(struct he_dev *he_dev, int group)
{
	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
				((unsigned long)he_dev->tbrq_base |
					he_dev->hsp->group[group].tbrq_tail);
	struct he_tpd *tpd;
	int slot, updated = 0;
#ifdef USE_TPD_POOL
	struct he_tpd *__tpd;
#endif

	/* 2.1.6 transmit buffer return queue */

	while (he_dev->tbrq_head != tbrq_tail) {
		++updated;

		HPRINTK("tbrq%d 0x%x%s%s\n",
			group,
			TBRQ_TPD(he_dev->tbrq_head),
			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
#ifdef USE_TPD_POOL
		tpd = NULL;
		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
				tpd = __tpd;
				list_del(&__tpd->entry);
				break;
			}
		}

		if (tpd == NULL) {
			hprintk("unable to locate tpd for dma buffer %x\n",
						TBRQ_TPD(he_dev->tbrq_head));
			goto next_tbrq_entry;
		}
#else
		tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
#endif

		if (TBRQ_EOS(he_dev->tbrq_head)) {
			HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
				he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
			if (tpd->vcc)
				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);

			goto next_tbrq_entry;
		}

		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
			if (tpd->iovec[slot].addr)
				pci_unmap_single(he_dev->pci_dev,
					tpd->iovec[slot].addr,
					tpd->iovec[slot].len & TPD_LEN_MASK,
							PCI_DMA_TODEVICE);
			if (tpd->iovec[slot].len & TPD_LST)
				break;
		}

		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
			if (tpd->vcc && tpd->vcc->pop)
				tpd->vcc->pop(tpd->vcc, tpd->skb);
			else
				dev_kfree_skb_any(tpd->skb);
		}

next_tbrq_entry:
#ifdef USE_TPD_POOL
		if (tpd)
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
#else
		tpd->inuse = 0;
#endif
		he_dev->tbrq_head = (struct he_tbrq *)
				((unsigned long) he_dev->tbrq_base |
					TBRQ_MASK(++he_dev->tbrq_head));
	}

	if (updated) {
		if (updated > he_dev->tbrq_peak)
			he_dev->tbrq_peak = updated;

		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
						G0_TBRQ_H + (group * 16));
	}
}
static void
he_service_rbpl(struct he_dev *he_dev, int group)
{
	struct he_rbp *newtail;
	struct he_rbp *rbpl_head;
	int moved = 0;

	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));

	for (;;) {
		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
						RBPL_MASK(he_dev->rbpl_tail+1));

		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
		if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
			break;

		newtail->status |= RBP_LOANED;
		he_dev->rbpl_tail = newtail;
		++moved;
	}

	if (moved)
		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
}
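/*
 * illustration of the ownership protocol above: RBP_LOANED marks buffers
 * currently on loan to the adapter.  he_service_rbrq() clears the bit as
 * pdus are assembled (or dropped), and the loop above re-loans buffers by
 * advancing rbpl_tail over each cleared entry, stopping one short of the
 * head per table 3.42.
 */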
#ifdef USE_RBPS
static void
he_service_rbps(struct he_dev *he_dev, int group)
{
	struct he_rbp *newtail;
	struct he_rbp *rbps_head;
	int moved = 0;

	rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
					RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));

	for (;;) {
		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
						RBPS_MASK(he_dev->rbps_tail+1));

		/* table 3.42 -- rbps_tail should never be set to rbps_head */
		if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
			break;

		newtail->status |= RBP_LOANED;
		he_dev->rbps_tail = newtail;
		++moved;
	}

	if (moved)
		he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
}
#endif /* USE_RBPS */
static void
he_tasklet(unsigned long data)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) data;
	int group, type;
	int updated = 0;

	HPRINTK("tasklet (0x%lx)\n", data);
	spin_lock_irqsave(&he_dev->global_lock, flags);

	while (he_dev->irq_head != he_dev->irq_tail) {
		++updated;

		type = ITYPE_TYPE(he_dev->irq_head->isw);
		group = ITYPE_GROUP(he_dev->irq_head->isw);

		switch (type) {
			case ITYPE_RBRQ_THRESH:
				HPRINTK("rbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_RBRQ_TIMER:
				if (he_service_rbrq(he_dev, group)) {
					he_service_rbpl(he_dev, group);
#ifdef USE_RBPS
					he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
				}
				break;
			case ITYPE_TBRQ_THRESH:
				HPRINTK("tbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_TPD_COMPLETE:
				he_service_tbrq(he_dev, group);
				break;
			case ITYPE_RBPL_THRESH:
				he_service_rbpl(he_dev, group);
				break;
			case ITYPE_RBPS_THRESH:
#ifdef USE_RBPS
				he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
				break;
			case ITYPE_PHY:
				HPRINTK("phy interrupt\n");
#ifdef CONFIG_ATM_HE_USE_SUNI
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
				spin_lock_irqsave(&he_dev->global_lock, flags);
#endif
				break;
			case ITYPE_OTHER:
				switch (type|group) {
					case ITYPE_PARITY:
						hprintk("parity error\n");
						break;
					case ITYPE_ABORT:
						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
						break;
				}
				break;
			case ITYPE_TYPE(ITYPE_INVALID):
				/* see 8.1.1 -- check all queues */

				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);

				he_service_rbrq(he_dev, 0);
				he_service_rbpl(he_dev, 0);
#ifdef USE_RBPS
				he_service_rbps(he_dev, 0);
#endif /* USE_RBPS */
				he_service_tbrq(he_dev, 0);
				break;
			default:
				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
		}

		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		he_writel(he_dev,
			IRQ_SIZE(CONFIG_IRQ_SIZE) |
			IRQ_THRESH(CONFIG_IRQ_THRESH) |
			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata; flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}
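
/*
 * Note on the interrupt handshake below: the adapter DMAs the current
 * IRQ ring tail offset into host memory (*he_dev->irq_tailoffset), so
 * the handler normally learns the new tail without a PCI read.  When
 * that word looks stale (tail == head) the handler falls back to
 * reading IRQ0_BASE directly, and per the 8.1.2 errata every write to
 * INT_FIFO is followed by a read to flush posted writes.
 */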
static irqreturn_t
he_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) dev_id;
	int handled = 0;

	if (he_dev == NULL)
		return IRQ_NONE;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		HPRINTK("tailoffset not updated?\n");
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk("spurious (or shared) interrupt?\n");

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
#ifdef USE_TASKLET
		tasklet_schedule(&he_dev->tasklet);
#else
		he_tasklet((unsigned long) he_dev);
#endif
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);
}
static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
					tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			int slot;

			hprintk("tpdrq full (cid 0x%x)\n", cid);
			/*
			 * FIXME
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */
			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
				if (tpd->iovec[slot].addr)
					pci_unmap_single(he_dev->pci_dev,
						tpd->iovec[slot].addr,
						tpd->iovec[slot].len & TPD_LEN_MASK,
							PCI_DMA_TODEVICE);
			}
			if (tpd->skb) {
				if (tpd->vcc && tpd->vcc->pop)
					tpd->vcc->pop(tpd->vcc, tpd->skb);
				else
					dev_kfree_skb_any(tpd->skb);
				atomic_inc(&tpd->vcc->stats->tx_err);
			}
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */
#ifdef USE_TPD_POOL
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
#else
	he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
				(TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
#endif
	he_dev->tpdrq_tail->cid = cid;
	wmb();

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T);	/* flush posted writes */
}
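
/*
 * Worked example of the lazy full-queue check in __enqueue_tpd()
 * above: the cached tpdrq_head is only refreshed from TPDRQ_B_H when
 * tail+1 appears to collide with it.  If they still collide after the
 * refresh, the ring really is full and the pdu is dropped:
 *
 *	new_tail == cached head?  ->  re-read head from the adapter
 *	new_tail == fresh head?   ->  tpdrq full; unmap and drop
 *
 * This avoids a (slow) PCI read of the queue head on every enqueue.
 */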
static int
he_open(struct atm_vcc *vcc)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_vcc *he_vcc;
	int err = 0;
	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
		return 0;

	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);

	set_bit(ATM_VF_ADDR, &vcc->flags);

	cid = he_mkcid(he_dev, vpi, vci);

	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
	if (he_vcc == NULL) {
		hprintk("unable to allocate he_vcc during open\n");
		return -ENOMEM;
	}

	he_vcc->iov_tail = he_vcc->iov_head;
	he_vcc->pdu_len = 0;
	he_vcc->rc_index = -1;

	init_waitqueue_head(&he_vcc->rx_waitq);
	init_waitqueue_head(&he_vcc->tx_waitq);

	vcc->dev_data = he_vcc;

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		int pcr_goal;

		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
		if (pcr_goal == 0)
			pcr_goal = he_dev->atm_dev->link_rate;
		if (pcr_goal < 0)	/* means round down, technically */
			pcr_goal = -pcr_goal;

		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				tsr0_aal = TSR0_AAL5;
				tsr4 = TSR4_AAL5;
				break;
			case ATM_AAL0:
				tsr0_aal = TSR0_AAL0_SDU;
				tsr4 = TSR4_AAL0_SDU;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);
		tsr0 = he_readl_tsr0(he_dev, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		if (TSR0_CONN_STATE(tsr0) != 0) {
			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
			err = -EBUSY;
			goto open_failed;
		}

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				/* 2.3.3.1 open connection ubr */

				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
					TSR0_USE_WMIN | TSR0_UPDATE_GER;
				break;

			case ATM_CBR:
				/* 2.3.3.2 open connection cbr */

				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
				if ((he_dev->total_bw + pcr_goal)
					> (he_dev->atm_dev->link_rate * 9 / 10))
				{
					err = -EBUSY;
					goto open_failed;
				}

				spin_lock_irqsave(&he_dev->global_lock, flags);	/* also protects he_dev->cs_stper[] */

				/* find an unused cs_stper register */
				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
					if (he_dev->cs_stper[reg].inuse == 0 ||
					    he_dev->cs_stper[reg].pcr == pcr_goal)
						break;

				if (reg == HE_NUM_CS_STPER) {
					err = -EBUSY;
					spin_unlock_irqrestore(&he_dev->global_lock, flags);
					goto open_failed;
				}

				he_dev->total_bw += pcr_goal;

				he_vcc->rc_index = reg;
				++he_dev->cs_stper[reg].inuse;
				he_dev->cs_stper[reg].pcr = pcr_goal;

				clock = he_is622(he_dev) ? 66667000 : 50000000;
				period = clock / pcr_goal;

				HPRINTK("rc_index = %d period = %d\n",
								reg, period);

				he_writel_mbox(he_dev, rate_to_atmf(period/2),
							CS_STPER0 + reg);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);

				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
							TSR0_RC_INDEX(reg);
				break;

			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		he_writel_tsr0(he_dev, tsr0, cid);
		he_writel_tsr4(he_dev, tsr4 | 1, cid);
		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);

		he_writel_tsr3(he_dev, 0x0, cid);
		he_writel_tsr5(he_dev, 0x0, cid);
		he_writel_tsr6(he_dev, 0x0, cid);
		he_writel_tsr7(he_dev, 0x0, cid);
		he_writel_tsr8(he_dev, 0x0, cid);
		he_writel_tsr10(he_dev, 0x0, cid);
		he_writel_tsr11(he_dev, 0x0, cid);
		he_writel_tsr12(he_dev, 0x0, cid);
		he_writel_tsr13(he_dev, 0x0, cid);
		he_writel_tsr14(he_dev, 0x0, cid);
		(void) he_readl_tsr0(he_dev, cid);	/* flush posted writes */
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		unsigned aal;

		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
						&HE_VCC(vcc)->rx_waitq);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				aal = RSR0_AAL5;
				break;
			case ATM_AAL0:
				aal = RSR0_RAWCELL;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		rsr0 = he_readl_rsr0(he_dev, cid);
		if (rsr0 & RSR0_OPEN_CONN) {
			spin_unlock_irqrestore(&he_dev->global_lock, flags);

			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
			err = -EBUSY;
			goto open_failed;
		}

#ifdef USE_RBPS
		rsr1 = RSR1_GROUP(0);
		rsr4 = RSR4_GROUP(0);
#else /* !USE_RBPS */
		rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
		rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
#endif /* USE_RBPS */
		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;

#ifdef USE_CHECKSUM_HW
		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
			rsr0 |= RSR0_TCP_CKSUM;
#endif

		he_writel_rsr4(he_dev, rsr4, cid);
		he_writel_rsr1(he_dev, rsr1, cid);
		/* 5.1.11 last parameter initialized should be
			  the open/closed indication in rsr0 */
		he_writel_rsr0(he_dev,
			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
		(void) he_readl_rsr0(he_dev, cid);	/* flush posted writes */

		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

open_failed:

	if (err) {
		kfree(he_vcc);
		clear_bit(ATM_VF_ADDR, &vcc->flags);
	}
	else
		set_bit(ATM_VF_READY, &vcc->flags);

	return err;
}
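
/*
 * CBR arithmetic in he_open() above, with a worked (illustrative)
 * example: the cell scheduler clock is 66.667 MHz on a 622 Mb/s card
 * and 50 MHz otherwise, so on a 155 Mb/s card a pcr_goal of 100,000
 * cells/sec gives
 *
 *	period = 50000000 / 100000 = 500 clocks per cell
 *
 * and rate_to_atmf(period/2) is what lands in the shared CS_STPER
 * register -- which is also why connections with the same pcr can
 * share one of the HE_NUM_CS_STPER rate registers.
 */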
static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;

	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		int timeout;

		HPRINTK("close rx cid 0x%x\n", cid);

		/* 2.7.2.2 close receive operation */

		/* wait for previous close (if any) to finish */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->rx_waitq, &wait);

		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
		(void) he_readl_rsr0(he_dev, cid);	/* flush posted writes */
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);

		HPRINTK("close rx cid 0x%x complete\n", cid);
	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		volatile unsigned tsr4, tsr0;
		int timeout;

		HPRINTK("close tx cid 0x%x\n", cid);

		/* 2.1.2
		 *
		 * ... the host must first stop queueing packets to the TPDRQ
		 * on the connection to be closed, then wait for all outstanding
		 * packets to be transmitted and their buffers returned to the
		 * TBRQ. When the last packet on the connection arrives in the
		 * TBRQ, the host issues the close command to the adapter.
		 */

		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 0) &&
		       (retry < MAX_RETRY)) {
			msleep(sleep);
			if (sleep < 250)
				sleep = sleep * 2;

			++retry;
		}

		if (tx_inuse)
			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);

		/* 2.3.1.1 generic close operations with flush */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
					/* also clears TSR4_SESSION_ENDED */

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				he_writel_tsr1(he_dev,
					TSR1_MCR(rate_to_atmf(200000))
					| TSR1_PCR(0), cid);
				break;
			case ATM_CBR:
				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
				break;
		}
		(void) he_readl_tsr4(he_dev, cid);	/* flush posted writes */

		tpd = __alloc_tpd(he_dev);
		if (tpd == NULL) {
			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}
		tpd->status |= TPD_EOS | TPD_INT;
		tpd->skb = NULL;
		tpd->vcc = vcc;
		wmb();

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->tx_waitq, &wait);
		__enqueue_tpd(he_dev, tpd, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		spin_lock_irqsave(&he_dev->global_lock, flags);

		if (timeout == 0) {
			hprintk("close tx timeout cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}

		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
			udelay(250);
		}

		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
			udelay(250);
		}

close_tx_incomplete:

		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			int reg = he_vcc->rc_index;

			HPRINTK("cs_stper reg = %d\n", reg);

			if (he_dev->cs_stper[reg].inuse == 0)
				hprintk("cs_stper[%d].inuse = 0!\n", reg);
			else
				--he_dev->cs_stper[reg].inuse;

			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
		}
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		HPRINTK("close tx cid 0x%x complete\n", cid);
	}

	kfree(he_vcc);

	clear_bit(ATM_VF_ADDR, &vcc->flags);
}
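
/*
 * Summary of the transmit close handshake above (2.1.2/2.3.1.1): after
 * draining sk_wmem_alloc, the connection is flushed (TSR4_FLUSH_CONN)
 * and either rate-limited (UBR) or deleted (CBR), then a final dummy
 * TPD marked TPD_EOS | TPD_INT is queued.  When that descriptor comes
 * back through the TBRQ, he_service_tbrq() wakes tx_waitq, and the
 * close path then polls for TSR4_SESSION_ENDED and an idle
 * TSR0_CONN_STATE before releasing any CBR bandwidth.
 */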
static int
he_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
	struct he_tpd *tpd;
#ifdef USE_SCATTERGATHER
	int i, slot = 0;
#endif
#define HE_TPD_BUFSIZE 0xffff

	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);

	if ((skb->len > HE_TPD_BUFSIZE) ||
	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}

#ifndef USE_SCATTERGATHER
	if (skb_shinfo(skb)->nr_frags) {
		hprintk("no scatter/gather support\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}
#endif
	spin_lock_irqsave(&he_dev->global_lock, flags);

	tpd = __alloc_tpd(he_dev);
	if (tpd == NULL) {
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
		return -ENOMEM;
	}

	if (vcc->qos.aal == ATM_AAL5)
		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
	else {
		char *pti_clp = (void *) (skb->data + 3);
		int pti, clp;

		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
		clp = (*pti_clp & ATM_HDR_CLP);
		tpd->status |= TPD_CELLTYPE(pti);
		if (clp)
			tpd->status |= TPD_CLP;

		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
	}

#ifdef USE_SCATTERGATHER
	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
				skb->len - skb->data_len, PCI_DMA_TODEVICE);
	tpd->iovec[slot].len = skb->len - skb->data_len;
	++slot;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
			tpd->vcc = vcc;
			tpd->skb = NULL;	/* not the last fragment
						   so don't ->push() yet */
			wmb();

			__enqueue_tpd(he_dev, tpd, cid);
			tpd = __alloc_tpd(he_dev);
			if (tpd == NULL) {
				if (vcc->pop)
					vcc->pop(vcc, skb);
				else
					dev_kfree_skb_any(skb);
				atomic_inc(&vcc->stats->tx_err);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				return -ENOMEM;
			}
			tpd->status |= TPD_USERCELL;
			slot = 0;
		}

		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
			(void *) page_address(frag->page) + frag->page_offset,
				frag->size, PCI_DMA_TODEVICE);
		tpd->iovec[slot].len = frag->size;
		++slot;
	}

	tpd->iovec[slot - 1].len |= TPD_LST;
#else
	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	tpd->length0 = skb->len | TPD_LST;
#endif
	tpd->status |= TPD_INT;

	tpd->vcc = vcc;
	tpd->skb = skb;
	ATM_SKB(skb)->vcc = vcc;
	wmb();

	__enqueue_tpd(he_dev, tpd, cid);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	atomic_inc(&vcc->stats->tx);

	return 0;
}
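
/*
 * AAL0 handling in he_send() above, as a worked example: an AAL0 SDU
 * is ATM_AAL0_SDU (52) bytes -- a 4 byte cell header (no HEC) in front
 * of the 48 byte payload.  PTI and CLP live in the low bits of the
 * fourth header byte, hence skb->data + 3:
 *
 *	pti = (hdr[3] & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;  -- bits 3..1
 *	clp =  hdr[3] & ATM_HDR_CLP;                             -- bit 0
 *
 * skb_pull(skb, 52 - 48) then strips the header, leaving the payload
 * for the adapter, which rebuilds the header from the connection id
 * and the TPD_CELLTYPE/TPD_CLP bits.
 */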
static int
he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);
	struct he_ioctl_reg reg;
	int err = 0;

	switch (cmd) {
		case HE_GET_REG:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;

			if (copy_from_user(&reg, arg,
					sizeof(struct he_ioctl_reg)))
				return -EFAULT;

			spin_lock_irqsave(&he_dev->global_lock, flags);
			switch (reg.type) {
				case HE_REGTYPE_PCI:
					reg.val = he_readl(he_dev, reg.addr);
					break;
				case HE_REGTYPE_RCM:
					reg.val =
						he_readl_rcm(he_dev, reg.addr);
					break;
				case HE_REGTYPE_TCM:
					reg.val =
						he_readl_tcm(he_dev, reg.addr);
					break;
				case HE_REGTYPE_MBOX:
					reg.val =
						he_readl_mbox(he_dev, reg.addr);
					break;
				default:
					err = -EINVAL;
					break;
			}
			spin_unlock_irqrestore(&he_dev->global_lock, flags);
			if (copy_to_user(arg, &reg,
					sizeof(struct he_ioctl_reg)))
				return -EFAULT;
			break;
		default:
#ifdef CONFIG_ATM_HE_USE_SUNI
			if (atm_dev->phy && atm_dev->phy->ioctl)
				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
#else /* CONFIG_ATM_HE_USE_SUNI */
			err = -EINVAL;
#endif /* CONFIG_ATM_HE_USE_SUNI */
			break;
	}

	return err;
}
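
/*
 * HE_GET_REG above is a debugging hook: the caller passes a struct
 * he_ioctl_reg (assuming the layout in linux/atm_he.h) holding an
 * address and a type selecting the register space -- PCI registers,
 * the receive or transmit connection memories (RCM/TCM), or the
 * mailbox -- and gets the value back in the same structure.  It
 * requires CAP_NET_ADMIN.
 */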
static void
he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);

	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);

	spin_lock_irqsave(&he_dev->global_lock, flags);
	he_writel(he_dev, val, FRAMER + (addr*4));
	(void) he_readl(he_dev, FRAMER + (addr*4));	/* flush posted writes */
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}

static unsigned char
he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);
	unsigned reg;

	spin_lock_irqsave(&he_dev->global_lock, flags);
	reg = he_readl(he_dev, FRAMER + (addr*4));
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	HPRINTK("phy_get(addr 0x%lx) = 0x%x\n", addr, reg);

	return reg;
}
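
/*
 * The SUNI framer registers sit behind the FRAMER window one register
 * per 32-bit word, hence the addr*4 scaling above; the extra read in
 * he_phy_put() is the usual posted-write flush.
 */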
static int
he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(dev);
	int left, i;
#ifdef notdef
	struct he_rbrq *rbrq_tail;
	struct he_tpdrq *tpdrq_head;
	int rbpl_head, rbpl_tail;
	int inuse;
#endif
	static long mcc = 0, oec = 0, dcc = 0, cec = 0;

	left = *pos;
	if (!left--)
		return sprintf(page, "%s\n", version);

	if (!left--)
		return sprintf(page, "%s%s\n\n",
			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");

	if (!left--)
		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");

	spin_lock_irqsave(&he_dev->global_lock, flags);
	mcc += he_readl(he_dev, MCC);
	oec += he_readl(he_dev, OEC);
	dcc += he_readl(he_dev, DCC);
	cec += he_readl(he_dev, CEC);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	if (!left--)
		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
							mcc, oec, dcc, cec);

	if (!left--)
		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_IRQ_SIZE, he_dev->irq_peak);

	if (!left--)
		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
						CONFIG_TPDRQ_SIZE);

	if (!left--)
		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);

	if (!left--)
		return sprintf(page, "tbrq_size = %d  peak = %d\n",
					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);

#ifdef notdef
	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));

	inuse = rbpl_head - rbpl_tail;
	if (inuse < 0)
		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
	inuse /= sizeof(struct he_rbp);

	if (!left--)
		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
						CONFIG_RBPL_SIZE, inuse);
#endif

	if (!left--)
		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");

	for (i = 0; i < HE_NUM_CS_STPER; ++i)
		if (!left--)
			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
						he_dev->cs_stper[i].pcr,
						he_dev->cs_stper[i].inuse);

	if (!left--)
		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);

	return 0;
}
/* eeprom routines -- see 4.7 */

static u8
read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	u8 byte_read = 0;
	int i, j = 0;

	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;

	/* Turn on write enable */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);

	/* Send READ instruction */
	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* Next, we need to send the byte address to read from */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	j = 0;

	val &= 0xFFFFF7FF;	/* Turn off write enable */
	he_writel(he_dev, val, HOST_CNTL);

	/* Now, we can read data from the EEPROM by clocking it in */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		tmp_read = he_readl(he_dev, HOST_CNTL);
		byte_read |= (unsigned char)
			((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);

	return byte_read;
}
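
/*
 * read_prom_byte() above bit-bangs the adapter's serial EEPROM through
 * HOST_CNTL: the READ opcode comes from readtab[], each address bit is
 * presented twice (once per clock phase from clocktab[], shifted to
 * the data-in position at bit 9), and the eight data bits are clocked
 * back out msb first, sampled from ID_DOUT:
 *
 *	byte_read |= ((tmp_read & ID_DOUT) >> ID_DOFFSET) << i;
 */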
MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
module_param(disable64, bool, 0);
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
module_param(rx_skb_reserve, short, 0);
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
module_param(irq_coalesce, bool, 0);
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");

static struct pci_device_id he_pci_tbl[] = {
	{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, he_pci_tbl);

static struct pci_driver he_driver = {
	.name =		DEV_LABEL,
	.probe =	he_init_one,
	.remove =	__devexit_p(he_remove_one),
	.id_table =	he_pci_tbl,
};

static int __init he_init(void)
{
	return pci_register_driver(&he_driver);
}

static void __exit he_cleanup(void)
{
	pci_unregister_driver(&he_driver);
}

module_init(he_init);
module_exit(he_cleanup);
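
/*
 * Module parameter usage, as a hypothetical example:
 *
 *	modprobe he nvcibits=10 sdh=1
 *
 * would select 10 vci address bits and SDH framing; the defaults noted
 * in the parameter descriptions above apply when a parameter is
 * omitted.
 */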