/*
  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <chas@cmf.nrl.navy.mil>
	eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from ulrich.u.muller@nokia.com)

 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>
#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
#define USE_RBPS
#undef USE_RBPS_POOL			/* if memory is tight try this */
#undef USE_RBPL_POOL			/* if memory is tight try this */
#define USE_TPD_POOL
/* #undef CONFIG_ATM_HE_USE_SUNI */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>
#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */
/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static int disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static int irq_coalesce = 1;
static int sdh = 0;
/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH | SI_HIGH,	/* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH	/* 1 */
};

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};
static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};
#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))
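/*
 * an illustrative note (not from the manual): he_writel() posts the
 * write, and the wmb() only orders it against later stores; when a
 * register update must actually reach the adapter before the driver
 * continues, the pattern used throughout this file is a dummy
 * read-back, e.g.
 *
 *	he_writel(he_dev, val, CON_DAT);
 *	(void) he_readl(he_dev, CON_DAT);	(flush posted write)
 */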
/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);	/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}

#define he_writel_rcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)
/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)	(((vpi << (dev)->vcibits) | vci) & 0x1fff)
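/*
 * worked example (values assumed for illustration): with vcibits = 10,
 * vpi = 2 and vci = 40,
 *
 *	cid = ((2 << 10) | 40) & 0x1fff = 0x828
 *
 * i.e. the vpi occupies the bits above the vci, clipped into the
 * connection id space.
 */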
/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/*
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)

#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)

#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)

#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)
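/*
 * sketch of the byte-enable trick above: with CON_BYTE_DISABLE_2..0
 * set, only the most significant byte (bits 31:24) of the TSR is
 * written, so the host can poke a flush/close request into TSR4 or
 * TSR14 without touching bits 23:0 of an active connection, e.g.
 *
 *	he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
 *
 * (TSR4_FLUSH_CONN is assumed here for illustration; see he.h for the
 * actual bit names.)
 */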
/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}
static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "ATM he driver\n");

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = kzalloc(sizeof(struct he_dev),
							GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
#ifdef USE_TASKLET
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
#endif
	spin_lock_init(&he_dev->global_lock);

	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}
static void __devexit
he_remove_one (struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}
static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
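/*
 * worked example (assuming ATM_OC3_PCR == 353207 cps):
 *
 *	353207 << 9 = 180841984; eighteen right shifts leave 689 (<= 0x3ff)
 *	return (1 << 14) | (18 << 9) | (689 & 0x1ff) = 0x64b1
 *
 * which matches the 0x64b1 rate constants (CS_WCRMAX et al.) written
 * for 155 Mbps cards in he_init_cs_block() below.
 */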
static void __devinit
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}
static void __devinit
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}
static void __devinit
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}
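/*
 * buffer geometry, worked for the 155 Mbps parameters set up in
 * he_start() (illustration only):
 *
 *	cells_per_row = 20, cells_per_lbuf = 4  ->  lbufs_per_row = 5
 *	lbuf_bufsize = 4 * ATM_CELL_PAYLOAD (48) = 192 bytes
 *
 * so each 1024-byte row holds five 192-byte lbufs, and lbuf_addr above
 * advances in 32-byte units: (row_offset + lbuf_count * 192) / 32.
 */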
static int __devinit
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}
static void __devinit
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
}
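/*
 * timer reload arithmetic, worked for a 155 Mbps card (illustration):
 * clock = 50000000 and rate starts at the link rate (353207 cps), so
 * the first rate grid entry is period = 50000000 / 353207 ~= 141
 * clock cycles per cell; each of the 16 entries then steps rate down
 * by delta = rate / 16 / 2.
 */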
static int __devinit
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connection */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on the rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
			instead of '/ 512', use '>> 9' to prevent a call
			to divdu3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	/* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}
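/*
 * rate-to-group lookup, worked example (illustration): for table index
 * rate_atmf = 0x24b,
 *
 *	man = (0x24b & 0x1f) << 4 = 176,  exp = 0x24b >> 5 = 18
 *	rate_cps = (1 << 18) * (176 + 512) >> 9 = 352256 cps
 *
 * roughly OC-3 line rate, so the backwards scan over (*rategrid)
 * selects an index near the top row for this entry.
 */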
static int __devinit
he_init_group(struct he_dev *he_dev, int group)
{
	int i;

#ifdef USE_RBPS
	/* small buffer pool */

#ifdef USE_RBPS_POOL
	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
			CONFIG_RBPS_BUFSIZE, 8, 0);
	if (he_dev->rbps_pool == NULL) {
		hprintk("unable to create rbps pages\n");
		return -ENOMEM;
	}
#else /* !USE_RBPS_POOL */
	he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
	if (he_dev->rbps_pages == NULL) {
		hprintk("unable to create rbps page pool\n");
		return -ENOMEM;
	}
#endif /* USE_RBPS_POOL */

	he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
	if (he_dev->rbps_base == NULL) {
		hprintk("failed to alloc rbps\n");
		return -ENOMEM;
	}
	memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);

	for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPS_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
		dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
#endif

		he_dev->rbps_virt[i].virt = cpuaddr;
		he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
		he_dev->rbps_base[i].phys = dma_handle;
	}
	he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];

	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
						G0_RBPS_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
						G0_RBPS_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPS_THRESH) |
			RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPS_QI + (group * 32));
#else /* !USE_RBPS */
	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));
#endif /* USE_RBPS */
	/* large buffer pool */

#ifdef USE_RBPL_POOL
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
			CONFIG_RBPL_BUFSIZE, 8, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		return -ENOMEM;
	}
#else /* !USE_RBPL_POOL */
	he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
	if (he_dev->rbpl_pages == NULL) {
		hprintk("unable to create rbpl pages\n");
		return -ENOMEM;
	}
#endif /* USE_RBPL_POOL */

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl\n");
		return -ENOMEM;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPL_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
		dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
#endif

		he_dev->rbpl_virt[i].virt = cpuaddr;
		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
		he_dev->rbpl_base[i].phys = dma_handle;
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));
	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));
	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;
}
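/*
 * each rbp entry initialized above encodes its pool index and
 * ownership in the status word, e.g.
 *
 *	status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF)
 *
 * RBP_LOANED is cleared when the host takes the buffer back in
 * he_service_rbrq() and set again when the buffer is re-loaned to the
 * adapter in he_service_rbpl()/he_service_rbps() below.
 */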
static int __devinit
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_DISABLED|IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}
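/*
 * layout note (see 2.9.3.5 above): the adapter writes the queue tail
 * offset into the extra slot allocated past the end of the
 * CONFIG_IRQ_SIZE entries, which is why irq_tailoffset points at
 * &irq_base[CONFIG_IRQ_SIZE]; he_irq_handler() below turns that
 * offset back into the irq_tail pointer.
 */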
static int __devinit
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/*
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 */
#define LAT_TIMER 209
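	/* i.e. 1 + 16 + 192 = 209 pci clocks, hence LAT_TIMER 209 */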
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}
	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
				he_dev->prod_id,
					he_dev->media & 0x40 ? "SM" : "MM",
						dev->esi[0],
						dev->esi[1],
						dev->esi[2],
						dev->esi[3],
						dev->esi[4],
						dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianess */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);
	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023       bytes  0 _______________________2047 bytes
	 *         |            |                  |                   |   |
	 *         |  utility   |                  |        rx0        |   |
	 *       5 |____________|                255|___________________| u |
	 *       6 |            |                256|                   | t |
	 *         |            |                   |                   | i |
	 *         |    rx0     |               row |        tx         | l |
	 *         |            |                   |                   | i |
	 *         |            |                767|___________________| t |
	 *      517|____________|                768|                   | y |
	 * row  518|            |                   |        rx1        |   |
	 *         |            |               1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}

	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;
	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits), RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);
	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 */
	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);

	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |             rx0/1 |
	 *             |       LBM         |   link lists of local
	 *             |             tx    |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);
	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH);	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}
	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;
	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

#ifdef USE_TPD_POOL
	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
#else /* !USE_TPD_POOL */
	he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
			CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
	if (!he_dev->tpd_base)
		return -ENOMEM;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
		he_dev->tpd_base[i].inuse = 0;
	}

	he_dev->tpd_head = he_dev->tpd_base;
	he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
#endif /* USE_TPD_POOL */
	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}
	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
	}
	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;

	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}
static void
he_stop(struct he_dev *he_dev)
{
	u16 command;
	u32 gen_cntl_0, reg;
	struct pci_dev *pci_dev;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

#ifdef USE_TASKLET
		tasklet_disable(&he_dev->tasklet);
#endif

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
#ifdef USE_RBPL_POOL
		int i;

		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
			void *cpuaddr = he_dev->rbpl_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;

			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

#ifdef USE_RBPL_POOL
	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);
#endif

#ifdef USE_RBPS
	if (he_dev->rbps_base) {
#ifdef USE_RBPS_POOL
		int i;

		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
			void *cpuaddr = he_dev->rbps_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;

			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
	}

#ifdef USE_RBPS_POOL
	if (he_dev->rbps_pool)
		pci_pool_destroy(he_dev->rbps_pool);
#endif

#endif /* USE_RBPS */

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

#ifdef USE_TPD_POOL
	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);
#else
	if (he_dev->tpd_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
							he_dev->tpd_base, he_dev->tpd_base_phys);
#endif

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}
static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
#ifdef USE_TPD_POOL
	struct he_tpd *tpd;
	dma_addr_t dma_handle;

	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(dma_handle);

	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
#else
	int i;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		++he_dev->tpd_head;
		if (he_dev->tpd_head > he_dev->tpd_end) {
			he_dev->tpd_head = he_dev->tpd_base;
		}

		if (!he_dev->tpd_head->inuse) {
			he_dev->tpd_head->inuse = 1;
			he_dev->tpd_head->status &= TPD_MASK;
			he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
			he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
			he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
			return he_dev->tpd_head;
		}
	}
	hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
	return NULL;
#endif
}
#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))

/*
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len) 						\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len-1)]))
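/*
 * sketch of the aal5 cpcs-pdu trailer these macros decode (the last 8
 * bytes of the padded pdu):
 *
 *	UU | CPI | length (2 bytes) | crc-32 (4 bytes)
 *
 * so buf[len-6]/buf[len-5] carry the pdu length, and when the adapter
 * substitutes the tcp checksum into the low 16 bits of the crc
 * (RSR0_TCP_CKSUM), buf[len-2]/buf[len-1] carry that checksum.
 */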
static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	struct he_rbp *rbp = NULL;
	unsigned cid, lastcid = -1;
	unsigned buf_len = 0;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_iovec *iov;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

#ifdef USE_RBPS
		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
		else
#endif
			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];

		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		cid = RBRQ_CID(he_dev->rbrq_head);

		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL) {
			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;

			goto next_rbrq_entry;
		}

		he_vcc = HE_VCC(vcc);
		if (he_vcc == NULL) {
			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;
			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
		he_vcc->iov_tail->iov_len = buf_len;
		he_vcc->pdu_len += buf_len;
		++he_vcc->iov_tail;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

#ifdef notdef
		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
			hprintk("iovec full!  cid 0x%x\n", cid);
			goto return_host_buffers;
		}
#endif
		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		__net_timestamp(skb);

		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if (iov->iov_base & RBP_SMALLBUF)
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
			else
#endif
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
		}

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb_set_tail_pointer(skb, skb->len);
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */

				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb_set_tail_pointer(skb, skb->len);
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_COMPLETE;
					skb->csum = TCP_CKSUM(skb->data,
								he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

		ATM_SKB(skb)->vcc = vcc;

		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if (iov->iov_base & RBP_SMALLBUF)
				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
			else
#endif
				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];

			rbp->status &= ~RBP_LOANED;
		}

		he_vcc->iov_tail = he_vcc->iov_head;
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(++he_dev->rbrq_head));

	}
	read_unlock(&vcc_sklist_lock);

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
						G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}
static void
he_service_tbrq(struct he_dev *he_dev, int group)
{
	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
				((unsigned long)he_dev->tbrq_base |
					he_dev->hsp->group[group].tbrq_tail);
	struct he_tpd *tpd;
	int slot, updated = 0;
#ifdef USE_TPD_POOL
	struct he_tpd *__tpd;
#endif

	/* 2.1.6 transmit buffer return queue */

	while (he_dev->tbrq_head != tbrq_tail) {
		++updated;

		HPRINTK("tbrq%d 0x%x%s%s\n",
			group,
			TBRQ_TPD(he_dev->tbrq_head),
			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
#ifdef USE_TPD_POOL
		tpd = NULL;
		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
				tpd = __tpd;
				list_del(&__tpd->entry);
				break;
			}
		}

		if (tpd == NULL) {
			hprintk("unable to locate tpd for dma buffer %x\n",
						TBRQ_TPD(he_dev->tbrq_head));
			goto next_tbrq_entry;
		}
#else
		tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
#endif

		if (TBRQ_EOS(he_dev->tbrq_head)) {
			HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
				he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
			if (tpd->vcc)
				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);

			goto next_tbrq_entry;
		}

		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
			if (tpd->iovec[slot].addr)
				pci_unmap_single(he_dev->pci_dev,
					tpd->iovec[slot].addr,
					tpd->iovec[slot].len & TPD_LEN_MASK,
							PCI_DMA_TODEVICE);
			if (tpd->iovec[slot].len & TPD_LST)
				break;
		}

		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
			if (tpd->vcc && tpd->vcc->pop)
				tpd->vcc->pop(tpd->vcc, tpd->skb);
			else
				dev_kfree_skb_any(tpd->skb);
		}

next_tbrq_entry:
#ifdef USE_TPD_POOL
		if (tpd)
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
#else
		tpd->inuse = 0;
#endif
		he_dev->tbrq_head = (struct he_tbrq *)
				((unsigned long) he_dev->tbrq_base |
					TBRQ_MASK(++he_dev->tbrq_head));
	}

	if (updated) {
		if (updated > he_dev->tbrq_peak)
			he_dev->tbrq_peak = updated;

		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
						G0_TBRQ_H + (group * 16));
	}
}
static void
he_service_rbpl(struct he_dev *he_dev, int group)
{
	struct he_rbp *newtail;
	struct he_rbp *rbpl_head;
	int moved = 0;

	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));

	for (;;) {
		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
						RBPL_MASK(he_dev->rbpl_tail+1));

		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
		if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
			break;

		newtail->status |= RBP_LOANED;
		he_dev->rbpl_tail = newtail;
		++moved;
	}

	if (moved)
		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
}
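/*
 * a worked pass through the loop above (illustration): if rbpl_tail
 * points at entry k, newtail is entry (k+1) masked back into the ring;
 * the loop stops when that entry is still loaned out (RBP_LOANED set)
 * or when advancing would collide with the adapter's head pointer
 * (table 3.42), and G0_RBPL_T is rewritten only if at least one buffer
 * was returned to the adapter.
 */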
#ifdef USE_RBPS
static void
he_service_rbps(struct he_dev *he_dev, int group)
{
	struct he_rbp *newtail;
	struct he_rbp *rbps_head;
	int moved = 0;

	rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
					RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));

	for (;;) {
		newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
						RBPS_MASK(he_dev->rbps_tail+1));

		/* table 3.42 -- rbps_tail should never be set to rbps_head */
		if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
			break;

		newtail->status |= RBP_LOANED;
		he_dev->rbps_tail = newtail;
		++moved;
	}

	if (moved)
		he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
}
#endif /* USE_RBPS */
static void
he_tasklet(unsigned long data)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) data;
	int group, type;
	int updated = 0;

	HPRINTK("tasklet (0x%lx)\n", data);
#ifdef USE_TASKLET
	spin_lock_irqsave(&he_dev->global_lock, flags);
#endif

	while (he_dev->irq_head != he_dev->irq_tail) {
		++updated;

		type = ITYPE_TYPE(he_dev->irq_head->isw);
		group = ITYPE_GROUP(he_dev->irq_head->isw);

		switch (type) {
			case ITYPE_RBRQ_THRESH:
				HPRINTK("rbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_RBRQ_TIMER:
				if (he_service_rbrq(he_dev, group)) {
					he_service_rbpl(he_dev, group);
#ifdef USE_RBPS
					he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
				}
				break;
			case ITYPE_TBRQ_THRESH:
				HPRINTK("tbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_TPD_COMPLETE:
				he_service_tbrq(he_dev, group);
				break;
			case ITYPE_RBPL_THRESH:
				he_service_rbpl(he_dev, group);
				break;
			case ITYPE_RBPS_THRESH:
#ifdef USE_RBPS
				he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
				break;
			case ITYPE_PHY:
				HPRINTK("phy interrupt\n");
#ifdef CONFIG_ATM_HE_USE_SUNI
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
				spin_lock_irqsave(&he_dev->global_lock, flags);
#endif
				break;
			case ITYPE_OTHER:
				switch (type|group) {
					case ITYPE_PARITY:
						hprintk("parity error\n");
						break;
					case ITYPE_ABORT:
						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
						break;
				}
				break;
			case ITYPE_TYPE(ITYPE_INVALID):
				/* see 8.1.1 -- check all queues */

				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);

				he_service_rbrq(he_dev, 0);
				he_service_rbpl(he_dev, 0);
#ifdef USE_RBPS
				he_service_rbps(he_dev, 0);
#endif /* USE_RBPS */
				he_service_tbrq(he_dev, 0);
				break;
			default:
				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
		}

		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		he_writel(he_dev,
			IRQ_SIZE(CONFIG_IRQ_SIZE) |
			IRQ_THRESH(CONFIG_IRQ_THRESH) |
			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata; flush posted writes */
	}
#ifdef USE_TASKLET
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
#endif
}
static irqreturn_t
he_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *)dev_id;
	int handled = 0;

	if (he_dev == NULL)
		return IRQ_NONE;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		HPRINTK("tailoffset not updated?\n");
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk("spurious (or shared) interrupt?\n");

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
#ifdef USE_TASKLET
		tasklet_schedule(&he_dev->tasklet);
#else
		he_tasklet((unsigned long) he_dev);
#endif
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);
}
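
/*
 * Editorial note: the handler above is a classic top-half/bottom-half
 * split -- the hard-irq path only snapshots the adapter's tail pointer
 * and acknowledges the interrupt, while the event ring is drained in
 * he_tasklet().  A minimal stand-alone sketch of the same split, using
 * the old-style tasklet API this driver already uses (hypothetical
 * names, not part of the driver):
 */
#if 0
static void example_bh(unsigned long data)
{
	/* drain the event ring here, in softirq context */
}

static DECLARE_TASKLET(example_tasklet, example_bh, 0);

static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* acknowledge the device, then defer the real work */
	tasklet_schedule(&example_tasklet);
	return IRQ_HANDLED;
}
#endif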
static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
					tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			int slot;

			hprintk("tpdrq full (cid 0x%x)\n", cid);
			/*
			 * FIXME
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */
			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
				if (tpd->iovec[slot].addr)
					pci_unmap_single(he_dev->pci_dev,
						tpd->iovec[slot].addr,
						tpd->iovec[slot].len & TPD_LEN_MASK,
							PCI_DMA_TODEVICE);
			}
			if (tpd->skb) {
				if (tpd->vcc->pop)
					tpd->vcc->pop(tpd->vcc, tpd->skb);
				else
					dev_kfree_skb_any(tpd->skb);
				atomic_inc(&tpd->vcc->stats->tx_err);
			}
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */

#ifdef USE_TPD_POOL
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
#else
	he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
			(TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
#endif
	he_dev->tpdrq_tail->cid = cid;
	wmb();

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T);	/* flush posted writes */
}
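
/*
 * Editorial note: the double test above is a common producer-side
 * optimization -- keep a cached copy of the consumer's head pointer and
 * refresh it with an (expensive) MMIO read only when the cached copy
 * claims the ring is full.  A minimal stand-alone sketch, with
 * hypothetical names (not part of the driver):
 */
#if 0
struct tx_ring {
	unsigned tail;		/* producer index */
	unsigned cached_head;	/* stale copy of consumer index */
	unsigned mask;		/* ring size - 1, size a power of two */
};

extern unsigned example_read_hw_head(struct tx_ring *r);	/* MMIO read */

static int tx_ring_full(struct tx_ring *r)
{
	unsigned next = (r->tail + 1) & r->mask;

	if (next != r->cached_head)
		return 0;				/* definitely room */
	r->cached_head = example_read_hw_head(r);	/* refresh and retest */
	return next == r->cached_head;
}
#endif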
static int
he_open(struct atm_vcc *vcc)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_vcc *he_vcc;
	int err = 0;
	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
		return 0;

	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);

	set_bit(ATM_VF_ADDR, &vcc->flags);

	cid = he_mkcid(he_dev, vpi, vci);

	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
	if (he_vcc == NULL) {
		hprintk("unable to allocate he_vcc during open\n");
		return -ENOMEM;
	}

	he_vcc->iov_tail = he_vcc->iov_head;
	he_vcc->pdu_len = 0;
	he_vcc->rc_index = -1;

	init_waitqueue_head(&he_vcc->rx_waitq);
	init_waitqueue_head(&he_vcc->tx_waitq);

	vcc->dev_data = he_vcc;

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		int pcr_goal;

		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
		if (pcr_goal == 0)
			pcr_goal = he_dev->atm_dev->link_rate;
		if (pcr_goal < 0)	/* means round down, technically */
			pcr_goal = -pcr_goal;

		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				tsr0_aal = TSR0_AAL5;
				tsr4 = TSR4_AAL5;
				break;
			case ATM_AAL0:
				tsr0_aal = TSR0_AAL0_SDU;
				tsr4 = TSR4_AAL0_SDU;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);
		tsr0 = he_readl_tsr0(he_dev, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		if (TSR0_CONN_STATE(tsr0) != 0) {
			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
			err = -EBUSY;
			goto open_failed;
		}

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				/* 2.3.3.1 open connection ubr */

				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
					TSR0_USE_WMIN | TSR0_UPDATE_GER;
				break;

			case ATM_CBR:
				/* 2.3.3.2 open connection cbr */

				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
				if ((he_dev->total_bw + pcr_goal)
					> (he_dev->atm_dev->link_rate * 9 / 10)) {
					err = -EBUSY;
					goto open_failed;
				}

				spin_lock_irqsave(&he_dev->global_lock, flags);	/* also protects he_dev->cs_stper[] */

				/* find an unused cs_stper register */
				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
					if (he_dev->cs_stper[reg].inuse == 0 ||
					    he_dev->cs_stper[reg].pcr == pcr_goal)
						break;

				if (reg == HE_NUM_CS_STPER) {
					err = -EBUSY;
					spin_unlock_irqrestore(&he_dev->global_lock, flags);
					goto open_failed;
				}

				he_dev->total_bw += pcr_goal;

				he_vcc->rc_index = reg;
				++he_dev->cs_stper[reg].inuse;
				he_dev->cs_stper[reg].pcr = pcr_goal;

				clock = he_is622(he_dev) ? 66667000 : 50000000;
				period = clock / pcr_goal;
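
				/*
				 * Editorial example (assuming the 50000000
				 * figure is the cell-scheduler clock of the
				 * non-622 boards): a CBR vcc asking for
				 * pcr_goal = 100000 cells/sec on such a
				 * board gets period = 50000000 / 100000 =
				 * 500 clocks per cell slot, and the mailbox
				 * write below programs rate_to_atmf(250).
				 */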
2444 HPRINTK("rc_index = %d period = %d\n",
2447 he_writel_mbox(he_dev
, rate_to_atmf(period
/2),
2449 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2451 tsr0
= TSR0_CBR
| TSR0_GROUP(0) | tsr0_aal
|
2460 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2462 he_writel_tsr0(he_dev
, tsr0
, cid
);
2463 he_writel_tsr4(he_dev
, tsr4
| 1, cid
);
2464 he_writel_tsr1(he_dev
, TSR1_MCR(rate_to_atmf(0)) |
2465 TSR1_PCR(rate_to_atmf(pcr_goal
)), cid
);
2466 he_writel_tsr2(he_dev
, TSR2_ACR(rate_to_atmf(pcr_goal
)), cid
);
2467 he_writel_tsr9(he_dev
, TSR9_OPEN_CONN
, cid
);
2469 he_writel_tsr3(he_dev
, 0x0, cid
);
2470 he_writel_tsr5(he_dev
, 0x0, cid
);
2471 he_writel_tsr6(he_dev
, 0x0, cid
);
2472 he_writel_tsr7(he_dev
, 0x0, cid
);
2473 he_writel_tsr8(he_dev
, 0x0, cid
);
2474 he_writel_tsr10(he_dev
, 0x0, cid
);
2475 he_writel_tsr11(he_dev
, 0x0, cid
);
2476 he_writel_tsr12(he_dev
, 0x0, cid
);
2477 he_writel_tsr13(he_dev
, 0x0, cid
);
2478 he_writel_tsr14(he_dev
, 0x0, cid
);
2479 (void) he_readl_tsr0(he_dev
, cid
); /* flush posted writes */
2480 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2483 if (vcc
->qos
.rxtp
.traffic_class
!= ATM_NONE
) {
2486 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid
,
2487 &HE_VCC(vcc
)->rx_waitq
);
2489 switch (vcc
->qos
.aal
) {
2501 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2503 rsr0
= he_readl_rsr0(he_dev
, cid
);
2504 if (rsr0
& RSR0_OPEN_CONN
) {
2505 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2507 hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid
, rsr0
);
2513 rsr1
= RSR1_GROUP(0);
2514 rsr4
= RSR4_GROUP(0);
2515 #else /* !USE_RBPS */
2516 rsr1
= RSR1_GROUP(0)|RSR1_RBPL_ONLY
;
2517 rsr4
= RSR4_GROUP(0)|RSR4_RBPL_ONLY
;
2518 #endif /* USE_RBPS */
2519 rsr0
= vcc
->qos
.rxtp
.traffic_class
== ATM_UBR
?
2520 (RSR0_EPD_ENABLE
|RSR0_PPD_ENABLE
) : 0;
2522 #ifdef USE_CHECKSUM_HW
2523 if (vpi
== 0 && vci
>= ATM_NOT_RSV_VCI
)
2524 rsr0
|= RSR0_TCP_CKSUM
;
2527 he_writel_rsr4(he_dev
, rsr4
, cid
);
2528 he_writel_rsr1(he_dev
, rsr1
, cid
);
2529 /* 5.1.11 last parameter initialized should be
2530 the open/closed indication in rsr0 */
2531 he_writel_rsr0(he_dev
,
2532 rsr0
| RSR0_START_PDU
| RSR0_OPEN_CONN
| aal
, cid
);
2533 (void) he_readl_rsr0(he_dev
, cid
); /* flush posted writes */
2535 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2542 clear_bit(ATM_VF_ADDR
, &vcc
->flags
);
2545 set_bit(ATM_VF_READY
, &vcc
->flags
);
static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;

	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		int timeout;

		HPRINTK("close rx cid 0x%x\n", cid);

		/* 2.7.2.2 close receive operation */

		/* wait for previous close (if any) to finish */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->rx_waitq, &wait);

		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
		(void) he_readl_rsr0(he_dev, cid);	/* flush posted writes */
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);

		HPRINTK("close rx cid 0x%x complete\n", cid);
	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		volatile unsigned tsr4, tsr0;
		int timeout;

		HPRINTK("close tx cid 0x%x\n", cid);

		/* 2.1.2
		 *
		 * ... the host must first stop queueing packets to the TPDRQ
		 * on the connection to be closed, then wait for all outstanding
		 * packets to be transmitted and their buffers returned to the
		 * TBRQ. When the last packet on the connection arrives in the
		 * TBRQ, the host issues the close command to the adapter.
		 */

		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 0) &&
		       (retry < MAX_RETRY)) {
			msleep(sleep);
			if (sleep < 250)
				sleep = sleep * 2;
			++retry;
		}

		if (tx_inuse)
			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);

		/* 2.3.1.1 generic close operations with flush */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
					/* also clears TSR4_SESSION_ENDED */

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				he_writel_tsr1(he_dev,
					TSR1_MCR(rate_to_atmf(200000))
					| TSR1_PCR(0), cid);
				break;
			case ATM_CBR:
				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
				break;
		}
		(void) he_readl_tsr4(he_dev, cid);	/* flush posted writes */

		tpd = __alloc_tpd(he_dev);
		if (tpd == NULL) {
			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}
		tpd->status |= TPD_EOS | TPD_INT;
		tpd->skb = NULL;
		tpd->vcc = vcc;
		wmb();

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->tx_waitq, &wait);
		__enqueue_tpd(he_dev, tpd, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		spin_lock_irqsave(&he_dev->global_lock, flags);

		if (timeout == 0) {
			hprintk("close tx timeout cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}

		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
			udelay(250);
		}

		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
			udelay(250);
		}

close_tx_incomplete:

		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			int reg = he_vcc->rc_index;

			HPRINTK("cs_stper reg = %d\n", reg);

			if (he_dev->cs_stper[reg].inuse == 0)
				hprintk("cs_stper[%d].inuse = 0!\n", reg);
			else
				--he_dev->cs_stper[reg].inuse;

			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
		}
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		HPRINTK("close tx cid 0x%x complete\n", cid);
	}

	kfree(he_vcc);

	clear_bit(ATM_VF_ADDR, &vcc->flags);
}
static int
he_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
	struct he_tpd *tpd;
#ifdef USE_SCATTERGATHER
	int i, slot = 0;
#endif

#define HE_TPD_BUFSIZE 0xffff

	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);

	if ((skb->len > HE_TPD_BUFSIZE) ||
	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}

#ifndef USE_SCATTERGATHER
	if (skb_shinfo(skb)->nr_frags) {
		hprintk("no scatter/gather support\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}
#endif
	spin_lock_irqsave(&he_dev->global_lock, flags);

	tpd = __alloc_tpd(he_dev);
	if (tpd == NULL) {
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
		return -ENOMEM;
	}

	if (vcc->qos.aal == ATM_AAL5)
		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
	else {
		char *pti_clp = (void *) (skb->data + 3);
		int pti, clp;

		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
		clp = (*pti_clp & ATM_HDR_CLP);
		tpd->status |= TPD_CELLTYPE(pti);
		if (clp)
			tpd->status |= TPD_CLP;

		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
	}
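
	/*
	 * Editorial note: an AAL0 SDU handed down from the ATM layer is
	 * ATM_AAL0_SDU (52) bytes -- a 4-byte cell header (no HEC)
	 * followed by ATM_CELL_PAYLOAD (48) bytes.  PTI and CLP sit in
	 * the low bits of header byte 3, which is why pti_clp points at
	 * skb->data + 3, and the skb_pull() above strips the 52 - 48 = 4
	 * header bytes so that only the payload gets DMA-mapped.
	 */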
#ifdef USE_SCATTERGATHER
	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
				skb->len - skb->data_len, PCI_DMA_TODEVICE);
	tpd->iovec[slot].len = skb->len - skb->data_len;
	++slot;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
			tpd->vcc = vcc;
			tpd->skb = NULL;	/* not the last fragment
						   so dont ->push() yet */
			wmb();

			__enqueue_tpd(he_dev, tpd, cid);
			tpd = __alloc_tpd(he_dev);
			if (tpd == NULL) {
				if (vcc->pop)
					vcc->pop(vcc, skb);
				else
					dev_kfree_skb_any(skb);
				atomic_inc(&vcc->stats->tx_err);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				return -ENOMEM;
			}
			tpd->status |= TPD_USERCELL;
			slot = 0;
		}

		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
			(void *) page_address(frag->page) + frag->page_offset,
				frag->size, PCI_DMA_TODEVICE);
		tpd->iovec[slot].len = frag->size;
		++slot;
	}

	tpd->iovec[slot - 1].len |= TPD_LST;
#else
	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	tpd->length0 = skb->len | TPD_LST;
#endif
	tpd->status |= TPD_INT;

	tpd->vcc = vcc;
	tpd->skb = skb;
	wmb();
	ATM_SKB(skb)->vcc = vcc;

	__enqueue_tpd(he_dev, tpd, cid);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	atomic_inc(&vcc->stats->tx);

	return 0;
}
static int
he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);
	struct he_ioctl_reg reg;
	int err = 0;

	switch (cmd) {
		case HE_GET_REG:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;

			if (copy_from_user(&reg, arg,
					   sizeof(struct he_ioctl_reg)))
				return -EFAULT;

			spin_lock_irqsave(&he_dev->global_lock, flags);
			switch (reg.type) {
				case HE_REGTYPE_PCI:
					reg.val = he_readl(he_dev, reg.addr);
					break;
				case HE_REGTYPE_RCM:
					reg.val =
						he_readl_rcm(he_dev, reg.addr);
					break;
				case HE_REGTYPE_TCM:
					reg.val =
						he_readl_tcm(he_dev, reg.addr);
					break;
				case HE_REGTYPE_MBOX:
					reg.val =
						he_readl_mbox(he_dev, reg.addr);
					break;
				default:
					err = -EINVAL;
					break;
			}
			spin_unlock_irqrestore(&he_dev->global_lock, flags);
			if (err == 0)
				if (copy_to_user(arg, &reg,
						 sizeof(struct he_ioctl_reg)))
					return -EFAULT;
			break;
		default:
#ifdef CONFIG_ATM_HE_USE_SUNI
			if (atm_dev->phy && atm_dev->phy->ioctl)
				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
#else /* CONFIG_ATM_HE_USE_SUNI */
			err = -EINVAL;
#endif /* CONFIG_ATM_HE_USE_SUNI */
			break;
	}

	return err;
}
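
/*
 * Editorial note: a userspace sketch of driving HE_GET_REG above, on the
 * assumption that the command reaches he_ioctl() through the usual ATM
 * device-ioctl path with a struct atmif_sioc wrapper (itf being the
 * interface number and s an ATM socket); illustrative only:
 */
#if 0
struct he_ioctl_reg ioc;
struct atmif_sioc sioc;

ioc.addr = 0;			/* register offset to read */
ioc.type = HE_REGTYPE_PCI;	/* PCI register space */

sioc.number = itf;
sioc.length = sizeof(ioc);
sioc.arg = &ioc;

if (ioctl(s, HE_GET_REG, &sioc) == 0)
	printf("reg 0x%x = 0x%x\n", ioc.addr, ioc.val);
#endif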
static void
he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);

	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);

	spin_lock_irqsave(&he_dev->global_lock, flags);
	he_writel(he_dev, val, FRAMER + (addr*4));
	(void) he_readl(he_dev, FRAMER + (addr*4));	/* flush posted writes */
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}

static unsigned char
he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);
	unsigned reg;

	spin_lock_irqsave(&he_dev->global_lock, flags);
	reg = he_readl(he_dev, FRAMER + (addr*4));
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	HPRINTK("phy_get(addr 0x%lx) = 0x%x\n", addr, reg);
	return reg;
}
static int
he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(dev);
	int left, i;
#ifdef notdef
	struct he_rbrq *rbrq_tail;
	struct he_tpdrq *tpdrq_head;
	int rbpl_head, rbpl_tail, inuse;
#endif
	static long mcc = 0, oec = 0, dcc = 0, cec = 0;

	left = *pos;
	if (!left--)
		return sprintf(page, "ATM he driver\n");

	if (!left--)
		return sprintf(page, "%s%s\n\n",
			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");

	if (!left--)
		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");

	spin_lock_irqsave(&he_dev->global_lock, flags);
	mcc += he_readl(he_dev, MCC);
	oec += he_readl(he_dev, OEC);
	dcc += he_readl(he_dev, DCC);
	cec += he_readl(he_dev, CEC);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	if (!left--)
		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
							mcc, oec, dcc, cec);

	if (!left--)
		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_IRQ_SIZE, he_dev->irq_peak);

	if (!left--)
		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
						CONFIG_TPDRQ_SIZE);

	if (!left--)
		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);

	if (!left--)
		return sprintf(page, "tbrq_size = %d  peak = %d\n",
					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);

#ifdef notdef
	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));

	inuse = rbpl_head - rbpl_tail;
	if (inuse < 0)
		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
	inuse /= sizeof(struct he_rbp);

	if (!left--)
		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
						CONFIG_RBPL_SIZE, inuse);
#endif
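
	/*
	 * Editorial example for the occupancy arithmetic above: head and
	 * tail are byte offsets into the size-aligned buffer-pool ring,
	 * so occupancy is modular subtraction.  Assuming
	 * sizeof(struct he_rbp) == 8 and CONFIG_RBPL_SIZE == 512 (a
	 * 0x1000-byte ring), head = 0x080 and tail = 0xf80 give
	 * inuse = 0x080 - 0xf80 = -0xf00, plus 0x1000 is 0x100 bytes,
	 * divided by 8 is 32 entries in use.
	 */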
	if (!left--)
		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");

	for (i = 0; i < HE_NUM_CS_STPER; ++i)
		if (!left--)
			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
						he_dev->cs_stper[i].pcr,
						he_dev->cs_stper[i].inuse);

	if (!left--)
		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
			he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);

	return 0;
}
/* eeprom routines -- see 4.7 */

static u8
read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	u8 byte_read = 0;
	int i, j = 0;

	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;

	/* Turn on write enable */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);

	/* Send READ instruction */
	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* Next, we need to send the byte address to read from */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	j = 0;

	val &= 0xFFFFF7FF;	/* Turn off write enable */
	he_writel(he_dev, val, HOST_CNTL);

	/* Now, we can read data from the EEPROM by clocking it in */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		tmp_read = he_readl(he_dev, HOST_CNTL);
		byte_read |= (unsigned char)
			((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);

	return byte_read;
}
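
/*
 * Editorial note: a usage sketch for the bit-banged reader above --
 * pulling the adapter's ESI out of the serial EEPROM one byte at a time
 * (MAC_ADDR_OFFSET is a hypothetical name for the ESI's offset in the
 * EEPROM; illustrative only):
 */
#if 0
static void example_read_esi(struct he_dev *he_dev, unsigned char *esi)
{
	int i;

	for (i = 0; i < 6; ++i)	/* an ESI, like a MAC address, is 6 bytes */
		esi[i] = read_prom_byte(he_dev, MAC_ADDR_OFFSET + i);
}
#endif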
MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
module_param(disable64, bool, 0);
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
module_param(rx_skb_reserve, short, 0);
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
module_param(irq_coalesce, bool, 0);
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");

static struct pci_device_id he_pci_tbl[] = {
	{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, he_pci_tbl);

static struct pci_driver he_driver = {
	.name =		DEV_LABEL,
	.probe =	he_init_one,
	.remove =	__devexit_p(he_remove_one),
	.id_table =	he_pci_tbl,
};

static int __init he_init(void)
{
	return pci_register_driver(&he_driver);
}

static void __exit he_cleanup(void)
{
	pci_unregister_driver(&he_driver);
}

module_init(he_init);
module_exit(he_cleanup);