/* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
        chas williams <chas@cmf.nrl.navy.mil>
        eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
        4096 supported 'connections'
        group 0 is used for all traffic
        interrupt queue 0 is used for all interrupts
        aal0 support (based on work from ulrich.u.muller@nokia.com)

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#define USE_TASKLET
#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
#define USE_RBPS
#undef USE_RBPS_POOL			/* if memory is tight try this */
#undef USE_RBPL_POOL			/* if memory is tight try this */
#define USE_TPD_POOL
/* #undef CONFIG_ATM_HE_USE_SUNI */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */

/* version definition */

static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static int disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static int irq_coalesce = 1;
static int sdh = 0;

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,     /* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH      /* 1 */
};
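
/*
 * Reading the table above as a waveform (a sketch inferred from the table
 * itself, not from the manual): after the chip-select toggle in the first
 * two entries, each opcode bit is presented on SI and clocked by a CLK
 * low-to-high transition, so the six plain CLK_HIGH entries shift out the
 * '0' bits and the two entries with SI_HIGH shift out the trailing '1'
 * bits of the READ opcode 0000 0011b.
 */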

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};

static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};

#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))
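
/*
 * A note on ordering (an observation about this code, not the manual):
 * he_writel pairs every MMIO write with wmb() so that back-to-back
 * register writes reach the card in program order, and he_writel_internal
 * below additionally reads CON_DAT back to flush posted writes before
 * issuing the CON_CTL command.
 */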

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);	/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}

#define he_writel_rcm(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)

/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)	(((vpi << (dev)->vcibits) | vci) & 0x1fff)
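
/*
 * Worked example (illustrative values only): with he_dev->vcibits = 10,
 * vpi = 1 and vci = 32 give cid = ((1 << 10) | 32) & 0x1fff = 0x420.  The
 * vpi field occupies the bits above vcibits, which is how __find_vcc()
 * splits a cid back apart below.
 */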

/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}

static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "he: %s\n", version);

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = kzalloc(sizeof(struct he_dev), GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
#ifdef USE_TASKLET
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
#endif
	spin_lock_init(&he_dev->global_lock);

	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}

static void __devexit
he_remove_one(struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}

static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
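
/*
 * Worked example (arithmetic only, not from the manual): rate = 10 cps.
 * 10 << 9 = 5120; three right shifts bring it to 640 <= 0x3ff, so exp = 3
 * and the stored 9-bit mantissa is 640 & 0x1ff = 128.  Decoding as
 * 2^exp * (1 + mantissa/512) = 8 * 1.25 recovers 10 cps, matching the
 * inverse computation in he_init_cs_block_rcm() below.
 */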

static void __devinit
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}
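
/*
 * An inference from the three lbfp init routines (not a quote from the
 * manual): each free-pool descriptor is two RCM words -- the buffer
 * address in 32-byte units, then the index of the next descriptor.  The
 * two rx pools interleave their descriptors (pool 0 on even indices,
 * pool 1 on odd indices, started at lbufd_index 1 below), which is why
 * the rx loops step lbufd_index by 2 and lbm_offset by 4 while the tx
 * loop steps them by 1 and 2.
 */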

static void __devinit
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void __devinit
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}

static int __devinit
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}

static void __devinit
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}
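
	/*
	 * Rough numbers for the loop above (arithmetic only): on a 155 Mbps
	 * card, clock = 50000000 and link_rate = ATM_OC3_PCR = 353207 cps,
	 * so the first reload value is 50000000 / 353207 ~= 141 clock
	 * cycles, with each later entry computed at a rate reduced by
	 * delta = link_rate / 32.
	 */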

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
}

static int __devinit
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connection */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
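
	/*
	 * Shape of the grid built above (an inference from the code): row 0
	 * runs from link_rate down to roughly link_rate / 2 in steps of
	 * link_rate / 32, rows 1 through 14 each halve the row above, and
	 * row 15 quarters row 14, so the 256 entries span a wide dynamic
	 * range of cell rates for the lookup loop below.
	 */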

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
		   instead of '/ 512', use '>> 9' to prevent a call
		   to divdu3 on x86 platforms
		 */
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	/* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}

static int __devinit
he_init_group(struct he_dev *he_dev, int group)
{
	int i;

#ifdef USE_RBPS
	/* small buffer pool */
#ifdef USE_RBPS_POOL
	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
			CONFIG_RBPS_BUFSIZE, 8, 0);
	if (he_dev->rbps_pool == NULL) {
		hprintk("unable to create rbps pages\n");
		return -ENOMEM;
	}
#else /* !USE_RBPS_POOL */
	he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
	if (he_dev->rbps_pages == NULL) {
		hprintk("unable to create rbps page pool\n");
		return -ENOMEM;
	}
#endif /* USE_RBPS_POOL */

	he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
	if (he_dev->rbps_base == NULL) {
		hprintk("failed to alloc rbps\n");
		return -ENOMEM;
	}
	memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbps_virt == NULL) {
		hprintk("failed to alloc rbps_virt\n");
		return -ENOMEM;
	}

	for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPS_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
		dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
#endif

		he_dev->rbps_virt[i].virt = cpuaddr;
		he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
		he_dev->rbps_base[i].phys = dma_handle;
	}
	he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];

	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
						G0_RBPS_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
						G0_RBPS_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPS_THRESH) |
			RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPS_QI + (group * 32));
#else /* !USE_RBPS */
	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));
#endif /* USE_RBPS */

	/* large buffer pool */
#ifdef USE_RBPL_POOL
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
			CONFIG_RBPL_BUFSIZE, 8, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		return -ENOMEM;
	}
#else /* !USE_RBPL_POOL */
	he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
	if (he_dev->rbpl_pages == NULL) {
		hprintk("unable to create rbpl pages\n");
		return -ENOMEM;
	}
#endif /* USE_RBPL_POOL */

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl\n");
		return -ENOMEM;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbpl_virt == NULL) {
		hprintk("failed to alloc rbpl_virt\n");
		return -ENOMEM;
	}

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPL_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
		dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
#endif

		he_dev->rbpl_virt[i].virt = cpuaddr;
		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
		he_dev->rbpl_base[i].phys = dma_handle;
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;
}

static int __devinit
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_DISABLED|IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}

static int __devinit
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
#define LAT_TIMER 209
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
				he_dev->prod_id,
				he_dev->media & 0x40 ? "SM" : "MM",
				dev->esi[0],
				dev->esi[1],
				dev->esi[2],
				dev->esi[3],
				dev->esi[4],
				dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianess */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023 bytes  0 _______________________2047 bytes
	 *         |            |            |                   |   |
	 *         |  utility   |            |        rx0        |   |
	 *        5|____________|         255|___________________| u |
	 *        6|            |         256|                   | t |
	 *         |            |            |                   | i |
	 *         |    rx0     |     row    |        tx         | l |
	 *         |            |            |                   | i |
	 *         |            |         767|___________________| t |
	 *      517|____________|         768|                   | y |
	 * row  518|            |            |        rx1        |   |
	 *         |            |        1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}

	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;
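
	/*
	 * Sanity arithmetic for the clamps above (computed from the values
	 * just set): an HE155 yields r0_numbuffs = 512 * 20 / 4 = 2560 and
	 * tx_numbuffs = 1018 * 20 / 4 = 5090, while an HE622 yields
	 * 256 * 40 / 4 = 2560 and 512 * 40 / 4 = 5120, so 2560/5120 are
	 * exactly the largest values either card produces.
	 */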

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits), RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);

	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |      rx0/1        |
	 *             |       LBM         |   link lists of local
	 *             |       tx          |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH);	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

#ifdef USE_TPD_POOL
	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
#else
	he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
			CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
	if (!he_dev->tpd_base)
		return -ENOMEM;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
		he_dev->tpd_base[i].inuse = 0;
	}

	he_dev->tpd_head = he_dev->tpd_base;
	he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
#endif

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;

	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

static void
he_stop(struct he_dev *he_dev)
{
	u16 command;
	u32 gen_cntl_0, reg;
	struct pci_dev *pci_dev;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

#ifdef USE_TASKLET
		tasklet_disable(&he_dev->tasklet);
#endif

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
#ifdef USE_RBPL_POOL
		int i;

		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
			void *cpuaddr = he_dev->rbpl_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;

			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

#ifdef USE_RBPL_POOL
	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);
#endif

#ifdef USE_RBPS
	if (he_dev->rbps_base) {
#ifdef USE_RBPS_POOL
		int i;

		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
			void *cpuaddr = he_dev->rbps_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;

			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
	}

#ifdef USE_RBPS_POOL
	if (he_dev->rbps_pool)
		pci_pool_destroy(he_dev->rbps_pool);
#endif

#endif /* USE_RBPS */

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

#ifdef USE_TPD_POOL
	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);
#else
	if (he_dev->tpd_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
							he_dev->tpd_base, he_dev->tpd_base_phys);
#endif

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}

static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
#ifdef USE_TPD_POOL
	struct he_tpd *tpd;
	dma_addr_t dma_handle;

	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(dma_handle);
	tpd->reserved = 0;
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
#else
	int i;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		++he_dev->tpd_head;
		if (he_dev->tpd_head > he_dev->tpd_end) {
			he_dev->tpd_head = he_dev->tpd_base;
		}

		if (!he_dev->tpd_head->inuse) {
			he_dev->tpd_head->inuse = 1;
			he_dev->tpd_head->status &= TPD_MASK;
			he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
			he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
			he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
			return he_dev->tpd_head;
		}
	}
	hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
	return NULL;
#endif
}

#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))
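
/*
 * Background for the offsets above: the last eight bytes of an AAL5 PDU
 * are the trailer -- UU, CPI, a two-byte length field, and a four-byte
 * CRC-32 -- so bytes (len)-6 and (len)-5 hold the big-endian PDU length.
 */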

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len) 					\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len)-1]))

static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	struct he_rbp *rbp = NULL;
	unsigned cid, lastcid = -1;
	unsigned buf_len = 0;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_iovec *iov;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

#ifdef USE_RBPS
		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
		else
#endif
			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];

		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		cid = RBRQ_CID(he_dev->rbrq_head);

		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL) {
			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;

			goto next_rbrq_entry;
		}

		he_vcc = HE_VCC(vcc);
		if (he_vcc == NULL) {
			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;
			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
		he_vcc->iov_tail->iov_len = buf_len;
		he_vcc->pdu_len += buf_len;
		++he_vcc->iov_tail;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

#ifdef notdef
		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
			hprintk("iovec full!  cid 0x%x\n", cid);
			goto return_host_buffers;
		}
#endif
		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		__net_timestamp(skb);

		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if (iov->iov_base & RBP_SMALLBUF)
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
			else
#endif
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
		}

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb_set_tail_pointer(skb, skb->len);
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */

				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb_set_tail_pointer(skb, skb->len);
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_COMPLETE;
					skb->csum = TCP_CKSUM(skb->data,
								he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if (iov->iov_base & RBP_SMALLBUF)
				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
			else
#endif
				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];

			rbp->status &= ~RBP_LOANED;
		}

		he_vcc->iov_tail = he_vcc->iov_head;
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(++he_dev->rbrq_head));

	}
	read_unlock(&vcc_sklist_lock);

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
						G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}
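
/*
 * A note on the head/tail arithmetic used throughout the service routines
 * (an inference from the code, not the manual): the queue bases are
 * allocated so the ring fits in a naturally aligned block, letting
 * RBRQ_MASK() and friends reduce an incremented pointer to its byte
 * offset within the ring; OR-ing that offset back onto the base pointer
 * wraps the head without a divide or a compare.
 */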
1972 static void
1973 he_service_tbrq(struct he_dev *he_dev, int group)
1975 struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1976 ((unsigned long)he_dev->tbrq_base |
1977 he_dev->hsp->group[group].tbrq_tail);
1978 struct he_tpd *tpd;
1979 int slot, updated = 0;
1980 #ifdef USE_TPD_POOL
1981 struct he_tpd *__tpd;
1982 #endif
1984 /* 2.1.6 transmit buffer return queue */
1986 while (he_dev->tbrq_head != tbrq_tail) {
1987 ++updated;
1989 HPRINTK("tbrq%d 0x%x%s%s\n",
1990 group,
1991 TBRQ_TPD(he_dev->tbrq_head),
1992 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1993 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1994 #ifdef USE_TPD_POOL
1995 tpd = NULL;
1996 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1997 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1998 tpd = __tpd;
1999 list_del(&__tpd->entry);
2000 break;
2004 if (tpd == NULL) {
2005 hprintk("unable to locate tpd for dma buffer %x\n",
2006 TBRQ_TPD(he_dev->tbrq_head));
2007 goto next_tbrq_entry;
2009 #else
2010 tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
2011 #endif
2013 if (TBRQ_EOS(he_dev->tbrq_head)) {
2014 if (tpd->vcc) {
2015 HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
2016 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
2017 wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
2018 }
2019 goto next_tbrq_entry;
2022 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2023 if (tpd->iovec[slot].addr)
2024 pci_unmap_single(he_dev->pci_dev,
2025 tpd->iovec[slot].addr,
2026 tpd->iovec[slot].len & TPD_LEN_MASK,
2027 PCI_DMA_TODEVICE);
2028 if (tpd->iovec[slot].len & TPD_LST)
2029 break;
2033 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
2034 if (tpd->vcc && tpd->vcc->pop)
2035 tpd->vcc->pop(tpd->vcc, tpd->skb);
2036 else
2037 dev_kfree_skb_any(tpd->skb);
2040 next_tbrq_entry:
2041 #ifdef USE_TPD_POOL
2042 if (tpd)
2043 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2044 #else
2045 tpd->inuse = 0;
2046 #endif
2047 he_dev->tbrq_head = (struct he_tbrq *)
2048 ((unsigned long) he_dev->tbrq_base |
2049 TBRQ_MASK(++he_dev->tbrq_head));
2052 if (updated) {
2053 if (updated > he_dev->tbrq_peak)
2054 he_dev->tbrq_peak = updated;
2056 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
2057 G0_TBRQ_H + (group * 16));
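/*
 * With USE_TPD_POOL the TBRQ hands back only the TPD's dma address, so the
 * loop above matches completions by walking he_dev->outstanding_tpds.  The
 * producer side stores that same address in tpd->status at allocation time;
 * a minimal sketch of the pairing, modeled on __alloc_tpd() elsewhere in
 * this file (illustrative only):
 */
#ifdef notdef
static struct he_tpd *
alloc_tpd_sketch(struct he_dev *he_dev)
{
	struct he_tpd *tpd;
	dma_addr_t dma_handle;

	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &dma_handle);
	if (tpd == NULL)
		return NULL;

	memset(tpd, 0, sizeof(struct he_tpd));
	tpd->status = TPD_ADDR(dma_handle);	/* the key he_service_tbrq searches for */
	return tpd;
}
#endif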
2062 static void
2063 he_service_rbpl(struct he_dev *he_dev, int group)
2065 struct he_rbp *newtail;
2066 struct he_rbp *rbpl_head;
2067 int moved = 0;
2069 rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2070 RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
2072 for (;;) {
2073 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2074 RBPL_MASK(he_dev->rbpl_tail+1));
2076 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
2077 if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
2078 break;
2080 newtail->status |= RBP_LOANED;
2081 he_dev->rbpl_tail = newtail;
2082 ++moved;
2085 if (moved)
2086 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
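/*
 * Buffer ownership is tracked with the single RBP_LOANED status bit: the
 * refill loop above sets it before publishing a buffer to the adapter, and
 * he_service_rbrq clears it when the buffer comes back.  The stop condition
 * encodes table 3.42's rule plus the loan check; as a predicate
 * (illustrative only):
 */
#ifdef notdef
static int
rbp_may_loan(struct he_rbp *head, struct he_rbp *next)
{
	/* tail must never catch head, and the slot must be owned by the host */
	return (next != head) && !(next->status & RBP_LOANED);
}
#endif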
2089 #ifdef USE_RBPS
2090 static void
2091 he_service_rbps(struct he_dev *he_dev, int group)
2093 struct he_rbp *newtail;
2094 struct he_rbp *rbps_head;
2095 int moved = 0;
2097 rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2098 RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));
2100 for (;;) {
2101 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2102 RBPS_MASK(he_dev->rbps_tail+1));
2104 /* table 3.42 -- rbps_tail should never be set to rbps_head */
2105 if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
2106 break;
2108 newtail->status |= RBP_LOANED;
2109 he_dev->rbps_tail = newtail;
2110 ++moved;
2113 if (moved)
2114 he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
2116 #endif /* USE_RBPS */
2118 static void
2119 he_tasklet(unsigned long data)
2121 unsigned long flags;
2122 struct he_dev *he_dev = (struct he_dev *) data;
2123 int group, type;
2124 int updated = 0;
2126 HPRINTK("tasklet (0x%lx)\n", data);
2127 #ifdef USE_TASKLET
2128 spin_lock_irqsave(&he_dev->global_lock, flags);
2129 #endif
2131 while (he_dev->irq_head != he_dev->irq_tail) {
2132 ++updated;
2134 type = ITYPE_TYPE(he_dev->irq_head->isw);
2135 group = ITYPE_GROUP(he_dev->irq_head->isw);
2137 switch (type) {
2138 case ITYPE_RBRQ_THRESH:
2139 HPRINTK("rbrq%d threshold\n", group);
2140 /* fall through */
2141 case ITYPE_RBRQ_TIMER:
2142 if (he_service_rbrq(he_dev, group)) {
2143 he_service_rbpl(he_dev, group);
2144 #ifdef USE_RBPS
2145 he_service_rbps(he_dev, group);
2146 #endif /* USE_RBPS */
2148 break;
2149 case ITYPE_TBRQ_THRESH:
2150 HPRINTK("tbrq%d threshold\n", group);
2151 /* fall through */
2152 case ITYPE_TPD_COMPLETE:
2153 he_service_tbrq(he_dev, group);
2154 break;
2155 case ITYPE_RBPL_THRESH:
2156 he_service_rbpl(he_dev, group);
2157 break;
2158 case ITYPE_RBPS_THRESH:
2159 #ifdef USE_RBPS
2160 he_service_rbps(he_dev, group);
2161 #endif /* USE_RBPS */
2162 break;
2163 case ITYPE_PHY:
2164 HPRINTK("phy interrupt\n");
2165 #ifdef CONFIG_ATM_HE_USE_SUNI
2166 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2167 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
2168 he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
2169 spin_lock_irqsave(&he_dev->global_lock, flags);
2170 #endif
2171 break;
2172 case ITYPE_OTHER:
2173 switch (type|group) {
2174 case ITYPE_PARITY:
2175 hprintk("parity error\n");
2176 break;
2177 case ITYPE_ABORT:
2178 hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
2179 break;
2181 break;
2182 case ITYPE_TYPE(ITYPE_INVALID):
2183 /* see 8.1.1 -- check all queues */
2185 HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2187 he_service_rbrq(he_dev, 0);
2188 he_service_rbpl(he_dev, 0);
2189 #ifdef USE_RBPS
2190 he_service_rbps(he_dev, 0);
2191 #endif /* USE_RBPS */
2192 he_service_tbrq(he_dev, 0);
2193 break;
2194 default:
2195 hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2198 he_dev->irq_head->isw = ITYPE_INVALID;
2200 he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2203 if (updated) {
2204 if (updated > he_dev->irq_peak)
2205 he_dev->irq_peak = updated;
2207 he_writel(he_dev,
2208 IRQ_SIZE(CONFIG_IRQ_SIZE) |
2209 IRQ_THRESH(CONFIG_IRQ_THRESH) |
2210 IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2211 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2213 #ifdef USE_TASKLET
2214 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2215 #endif
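/*
 * Under USE_TASKLET the hard irq handler below only latches the new queue
 * tail and defers all servicing to this tasklet.  The binding is done once
 * at init time (outside this excerpt) with the standard call; sketch:
 */
#ifdef notdef
tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
#endif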
2218 static irqreturn_t
2219 he_irq_handler(int irq, void *dev_id)
2221 unsigned long flags;
2222 struct he_dev *he_dev = (struct he_dev * )dev_id;
2223 int handled = 0;
2225 if (he_dev == NULL)
2226 return IRQ_NONE;
2228 spin_lock_irqsave(&he_dev->global_lock, flags);
2230 he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2231 (*he_dev->irq_tailoffset << 2));
2233 if (he_dev->irq_tail == he_dev->irq_head) {
2234 HPRINTK("tailoffset not updated?\n");
2235 he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2236 ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2237 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */
2240 #ifdef DEBUG
2241 if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2242 hprintk("spurious (or shared) interrupt?\n");
2243 #endif
2245 if (he_dev->irq_head != he_dev->irq_tail) {
2246 handled = 1;
2247 #ifdef USE_TASKLET
2248 tasklet_schedule(&he_dev->tasklet);
2249 #else
2250 he_tasklet((unsigned long) he_dev);
2251 #endif
2252 he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* clear interrupt */
2253 (void) he_readl(he_dev, INT_FIFO); /* flush posted writes */
2255 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2256 return IRQ_RETVAL(handled);
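/*
 * Because the handler bails out with IRQ_NONE when its queue is empty, the
 * interrupt line can safely be shared.  A sketch of the probe-time
 * registration this implies (the flag and label choices here are
 * assumptions for illustration, not copied from the probe code):
 */
#ifdef notdef
if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_SHARED,
		DEV_LABEL, he_dev)) {
	hprintk("irq %d: request failed\n", he_dev->pci_dev->irq);
	/* probe must fail here */
}
#endif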
2260 static __inline__ void
2261 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2263 struct he_tpdrq *new_tail;
2265 HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2266 tpd, cid, he_dev->tpdrq_tail);
2268 /* new_tail = he_dev->tpdrq_tail; */
2269 new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2270 TPDRQ_MASK(he_dev->tpdrq_tail+1));
2272 /*
2273 * check to see if we are about to set the tail == head
2274 * if true, update the head pointer from the adapter
2275 * to see if this is really the case (reading the queue
2276 * head for every enqueue would be unnecessarily slow)
2277 */
2279 if (new_tail == he_dev->tpdrq_head) {
2280 he_dev->tpdrq_head = (struct he_tpdrq *)
2281 (((unsigned long)he_dev->tpdrq_base) |
2282 TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2284 if (new_tail == he_dev->tpdrq_head) {
2285 int slot;
2287 hprintk("tpdrq full (cid 0x%x)\n", cid);
2288 /*
2289 * FIXME
2290 * push tpd onto a transmit backlog queue
2291 * after service_tbrq, service the backlog
2292 * for now, we just drop the pdu
2293 */
2294 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2295 if (tpd->iovec[slot].addr)
2296 pci_unmap_single(he_dev->pci_dev,
2297 tpd->iovec[slot].addr,
2298 tpd->iovec[slot].len & TPD_LEN_MASK,
2299 PCI_DMA_TODEVICE);
2301 if (tpd->skb) {
2302 if (tpd->vcc->pop)
2303 tpd->vcc->pop(tpd->vcc, tpd->skb);
2304 else
2305 dev_kfree_skb_any(tpd->skb);
2306 atomic_inc(&tpd->vcc->stats->tx_err);
2308 #ifdef USE_TPD_POOL
2309 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2310 #else
2311 tpd->inuse = 0;
2312 #endif
2313 return;
2317 /* 2.1.5 transmit packet descriptor ready queue */
2318 #ifdef USE_TPD_POOL
2319 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2320 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2321 #else
2322 he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
2323 (TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
2324 #endif
2325 he_dev->tpdrq_tail->cid = cid;
2326 wmb();
2328 he_dev->tpdrq_tail = new_tail;
2330 he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2331 (void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */
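/*
 * Note the pattern that ends __enqueue_tpd(): PCI posts memory writes, so
 * every doorbell write in this driver is followed by a dummy read of the
 * same register to force the write out to the adapter before the lock is
 * released.  The bare idiom (REG is a placeholder name):
 */
#ifdef notdef
he_writel(he_dev, val, REG);
(void) he_readl(he_dev, REG);	/* flush posted writes */
#endif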
2334 static int
2335 he_open(struct atm_vcc *vcc)
2337 unsigned long flags;
2338 struct he_dev *he_dev = HE_DEV(vcc->dev);
2339 struct he_vcc *he_vcc;
2340 int err = 0;
2341 unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2342 short vpi = vcc->vpi;
2343 int vci = vcc->vci;
2345 if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2346 return 0;
2348 HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2350 set_bit(ATM_VF_ADDR, &vcc->flags);
2352 cid = he_mkcid(he_dev, vpi, vci);
2354 he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2355 if (he_vcc == NULL) {
2356 hprintk("unable to allocate he_vcc during open\n");
2357 return -ENOMEM;
2360 he_vcc->iov_tail = he_vcc->iov_head;
2361 he_vcc->pdu_len = 0;
2362 he_vcc->rc_index = -1;
2364 init_waitqueue_head(&he_vcc->rx_waitq);
2365 init_waitqueue_head(&he_vcc->tx_waitq);
2367 vcc->dev_data = he_vcc;
2369 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2370 int pcr_goal;
2372 pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2373 if (pcr_goal == 0)
2374 pcr_goal = he_dev->atm_dev->link_rate;
2375 if (pcr_goal < 0) /* means round down, technically */
2376 pcr_goal = -pcr_goal;
2378 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2380 switch (vcc->qos.aal) {
2381 case ATM_AAL5:
2382 tsr0_aal = TSR0_AAL5;
2383 tsr4 = TSR4_AAL5;
2384 break;
2385 case ATM_AAL0:
2386 tsr0_aal = TSR0_AAL0_SDU;
2387 tsr4 = TSR4_AAL0_SDU;
2388 break;
2389 default:
2390 err = -EINVAL;
2391 goto open_failed;
2394 spin_lock_irqsave(&he_dev->global_lock, flags);
2395 tsr0 = he_readl_tsr0(he_dev, cid);
2396 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2398 if (TSR0_CONN_STATE(tsr0) != 0) {
2399 hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2400 err = -EBUSY;
2401 goto open_failed;
2404 switch (vcc->qos.txtp.traffic_class) {
2405 case ATM_UBR:
2406 /* 2.3.3.1 open connection ubr */
2408 tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2409 TSR0_USE_WMIN | TSR0_UPDATE_GER;
2410 break;
2412 case ATM_CBR:
2413 /* 2.3.3.2 open connection cbr */
2415 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2416 if ((he_dev->total_bw + pcr_goal)
2417 > (he_dev->atm_dev->link_rate * 9 / 10))
2419 err = -EBUSY;
2420 goto open_failed;
2423 spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */
2425 /* find an unused cs_stper register */
2426 for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2427 if (he_dev->cs_stper[reg].inuse == 0 ||
2428 he_dev->cs_stper[reg].pcr == pcr_goal)
2429 break;
2431 if (reg == HE_NUM_CS_STPER) {
2432 err = -EBUSY;
2433 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2434 goto open_failed;
2437 he_dev->total_bw += pcr_goal;
2439 he_vcc->rc_index = reg;
2440 ++he_dev->cs_stper[reg].inuse;
2441 he_dev->cs_stper[reg].pcr = pcr_goal;
2443 clock = he_is622(he_dev) ? 66667000 : 50000000;
2444 period = clock / pcr_goal;
2446 HPRINTK("rc_index = %d period = %d\n",
2447 reg, period);
2449 he_writel_mbox(he_dev, rate_to_atmf(period/2),
2450 CS_STPER0 + reg);
2451 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2453 tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2454 TSR0_RC_INDEX(reg);
2456 break;
2457 default:
2458 err = -EINVAL;
2459 goto open_failed;
2462 spin_lock_irqsave(&he_dev->global_lock, flags);
2464 he_writel_tsr0(he_dev, tsr0, cid);
2465 he_writel_tsr4(he_dev, tsr4 | 1, cid);
2466 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2467 TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2468 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2469 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2471 he_writel_tsr3(he_dev, 0x0, cid);
2472 he_writel_tsr5(he_dev, 0x0, cid);
2473 he_writel_tsr6(he_dev, 0x0, cid);
2474 he_writel_tsr7(he_dev, 0x0, cid);
2475 he_writel_tsr8(he_dev, 0x0, cid);
2476 he_writel_tsr10(he_dev, 0x0, cid);
2477 he_writel_tsr11(he_dev, 0x0, cid);
2478 he_writel_tsr12(he_dev, 0x0, cid);
2479 he_writel_tsr13(he_dev, 0x0, cid);
2480 he_writel_tsr14(he_dev, 0x0, cid);
2481 (void) he_readl_tsr0(he_dev, cid); /* flush posted writes */
2482 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2485 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2486 unsigned aal;
2488 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2489 &HE_VCC(vcc)->rx_waitq);
2491 switch (vcc->qos.aal) {
2492 case ATM_AAL5:
2493 aal = RSR0_AAL5;
2494 break;
2495 case ATM_AAL0:
2496 aal = RSR0_RAWCELL;
2497 break;
2498 default:
2499 err = -EINVAL;
2500 goto open_failed;
2503 spin_lock_irqsave(&he_dev->global_lock, flags);
2505 rsr0 = he_readl_rsr0(he_dev, cid);
2506 if (rsr0 & RSR0_OPEN_CONN) {
2507 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2509 hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2510 err = -EBUSY;
2511 goto open_failed;
2514 #ifdef USE_RBPS
2515 rsr1 = RSR1_GROUP(0);
2516 rsr4 = RSR4_GROUP(0);
2517 #else /* !USE_RBPS */
2518 rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
2519 rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
2520 #endif /* USE_RBPS */
2521 rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2522 (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2524 #ifdef USE_CHECKSUM_HW
2525 if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2526 rsr0 |= RSR0_TCP_CKSUM;
2527 #endif
2529 he_writel_rsr4(he_dev, rsr4, cid);
2530 he_writel_rsr1(he_dev, rsr1, cid);
2531 /* 5.1.11 last parameter initialized should be
2532 the open/closed indication in rsr0 */
2533 he_writel_rsr0(he_dev,
2534 rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2535 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2537 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2540 open_failed:
2542 if (err) {
2543 kfree(he_vcc);
2544 clear_bit(ATM_VF_ADDR, &vcc->flags);
2546 else
2547 set_bit(ATM_VF_READY, &vcc->flags);
2549 return err;
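/*
 * Worked example for the CBR path above: on an OC-3 board he_is622() is
 * false, so the cell clock is 50000000.  For an illustrative pcr_goal of
 * 100000 cells/s, period = 50000000 / 100000 = 500 clocks per cell, and
 * CS_STPER0 + reg is programmed with rate_to_atmf(500 / 2).  The numbers
 * are examples only, not values taken from real traffic.
 */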
2552 static void
2553 he_close(struct atm_vcc *vcc)
2555 unsigned long flags;
2556 DECLARE_WAITQUEUE(wait, current);
2557 struct he_dev *he_dev = HE_DEV(vcc->dev);
2558 struct he_tpd *tpd;
2559 unsigned cid;
2560 struct he_vcc *he_vcc = HE_VCC(vcc);
2561 #define MAX_RETRY 30
2562 int retry = 0, sleep = 1, tx_inuse;
2564 HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2566 clear_bit(ATM_VF_READY, &vcc->flags);
2567 cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2569 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2570 int timeout;
2572 HPRINTK("close rx cid 0x%x\n", cid);
2574 /* 2.7.2.2 close receive operation */
2576 /* wait for previous close (if any) to finish */
2578 spin_lock_irqsave(&he_dev->global_lock, flags);
2579 while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2580 HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2581 udelay(250);
2584 set_current_state(TASK_UNINTERRUPTIBLE);
2585 add_wait_queue(&he_vcc->rx_waitq, &wait);
2587 he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2588 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2589 he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2590 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2592 timeout = schedule_timeout(30*HZ);
2594 remove_wait_queue(&he_vcc->rx_waitq, &wait);
2595 set_current_state(TASK_RUNNING);
2597 if (timeout == 0)
2598 hprintk("close rx timeout cid 0x%x\n", cid);
2600 HPRINTK("close rx cid 0x%x complete\n", cid);
2604 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2605 volatile unsigned tsr4, tsr0;
2606 int timeout;
2608 HPRINTK("close tx cid 0x%x\n", cid);
2610 /* 2.1.2
2612 * ... the host must first stop queueing packets to the TPDRQ
2613 * on the connection to be closed, then wait for all outstanding
2614 * packets to be transmitted and their buffers returned to the
2615 * TBRQ. When the last packet on the connection arrives in the
2616 * TBRQ, the host issues the close command to the adapter.
2617 */
2619 while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 0) &&
2620 (retry < MAX_RETRY)) {
2621 msleep(sleep);
2622 if (sleep < 250)
2623 sleep = sleep * 2;
2625 ++retry;
2628 if (tx_inuse)
2629 hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2631 /* 2.3.1.1 generic close operations with flush */
2633 spin_lock_irqsave(&he_dev->global_lock, flags);
2634 he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2635 /* also clears TSR4_SESSION_ENDED */
2637 switch (vcc->qos.txtp.traffic_class) {
2638 case ATM_UBR:
2639 he_writel_tsr1(he_dev,
2640 TSR1_MCR(rate_to_atmf(200000))
2641 | TSR1_PCR(0), cid);
2642 break;
2643 case ATM_CBR:
2644 he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2645 break;
2647 (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */
2649 tpd = __alloc_tpd(he_dev);
2650 if (tpd == NULL) {
2651 hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2652 goto close_tx_incomplete;
2654 tpd->status |= TPD_EOS | TPD_INT;
2655 tpd->skb = NULL;
2656 tpd->vcc = vcc;
2657 wmb();
2659 set_current_state(TASK_UNINTERRUPTIBLE);
2660 add_wait_queue(&he_vcc->tx_waitq, &wait);
2661 __enqueue_tpd(he_dev, tpd, cid);
2662 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2664 timeout = schedule_timeout(30*HZ);
2666 remove_wait_queue(&he_vcc->tx_waitq, &wait);
2667 set_current_state(TASK_RUNNING);
2669 spin_lock_irqsave(&he_dev->global_lock, flags);
2671 if (timeout == 0) {
2672 hprintk("close tx timeout cid 0x%x\n", cid);
2673 goto close_tx_incomplete;
2676 while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2677 HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2678 udelay(250);
2681 while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2682 HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2683 udelay(250);
2686 close_tx_incomplete:
2688 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2689 int reg = he_vcc->rc_index;
2691 HPRINTK("cs_stper reg = %d\n", reg);
2693 if (he_dev->cs_stper[reg].inuse == 0)
2694 hprintk("cs_stper[%d].inuse = 0!\n", reg);
2695 else
2696 --he_dev->cs_stper[reg].inuse;
2698 he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2700 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2702 HPRINTK("close tx cid 0x%x complete\n", cid);
2705 kfree(he_vcc);
2707 clear_bit(ATM_VF_ADDR, &vcc->flags);
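/*
 * The drain loop at the top of the tx close path above backs off
 * exponentially: sleeps of 1, 2, 4, ... ms, doubling until the value
 * passes 250 ms (settling at 256 ms), for at most MAX_RETRY (30)
 * iterations.  Worked bound: 1+2+...+128 = 255 ms for the first eight
 * tries, then 22 * 256 ms ~ 5.6 s, so the driver waits roughly six
 * seconds in the worst case before logging the leftover tx_inuse count.
 */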
2710 static int
2711 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2713 unsigned long flags;
2714 struct he_dev *he_dev = HE_DEV(vcc->dev);
2715 unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2716 struct he_tpd *tpd;
2717 #ifdef USE_SCATTERGATHER
2718 int i, slot = 0;
2719 #endif
2721 #define HE_TPD_BUFSIZE 0xffff
2723 HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2725 if ((skb->len > HE_TPD_BUFSIZE) ||
2726 ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2727 hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
2728 if (vcc->pop)
2729 vcc->pop(vcc, skb);
2730 else
2731 dev_kfree_skb_any(skb);
2732 atomic_inc(&vcc->stats->tx_err);
2733 return -EINVAL;
2736 #ifndef USE_SCATTERGATHER
2737 if (skb_shinfo(skb)->nr_frags) {
2738 hprintk("no scatter/gather support\n");
2739 if (vcc->pop)
2740 vcc->pop(vcc, skb);
2741 else
2742 dev_kfree_skb_any(skb);
2743 atomic_inc(&vcc->stats->tx_err);
2744 return -EINVAL;
2746 #endif
2747 spin_lock_irqsave(&he_dev->global_lock, flags);
2749 tpd = __alloc_tpd(he_dev);
2750 if (tpd == NULL) {
2751 if (vcc->pop)
2752 vcc->pop(vcc, skb);
2753 else
2754 dev_kfree_skb_any(skb);
2755 atomic_inc(&vcc->stats->tx_err);
2756 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2757 return -ENOMEM;
2760 if (vcc->qos.aal == ATM_AAL5)
2761 tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2762 else {
2763 char *pti_clp = (void *) (skb->data + 3);
2764 int clp, pti;
2766 pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2767 clp = (*pti_clp & ATM_HDR_CLP);
2768 tpd->status |= TPD_CELLTYPE(pti);
2769 if (clp)
2770 tpd->status |= TPD_CLP;
2772 skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2775 #ifdef USE_SCATTERGATHER
2776 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2777 skb->len - skb->data_len, PCI_DMA_TODEVICE);
2778 tpd->iovec[slot].len = skb->len - skb->data_len;
2779 ++slot;
2781 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2782 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2784 if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */
2785 tpd->vcc = vcc;
2786 tpd->skb = NULL; /* not the last fragment
2787 so don't ->push() yet */
2788 wmb();
2790 __enqueue_tpd(he_dev, tpd, cid);
2791 tpd = __alloc_tpd(he_dev);
2792 if (tpd == NULL) {
2793 if (vcc->pop)
2794 vcc->pop(vcc, skb);
2795 else
2796 dev_kfree_skb_any(skb);
2797 atomic_inc(&vcc->stats->tx_err);
2798 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2799 return -ENOMEM;
2801 tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2802 slot = 0;
2805 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2806 (void *) page_address(frag->page) + frag->page_offset,
2807 frag->size, PCI_DMA_TODEVICE);
2808 tpd->iovec[slot].len = frag->size;
2809 ++slot;
2813 tpd->iovec[slot - 1].len |= TPD_LST;
2814 #else
2815 tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2816 tpd->length0 = skb->len | TPD_LST;
2817 #endif
2818 tpd->status |= TPD_INT;
2820 tpd->vcc = vcc;
2821 tpd->skb = skb;
2822 wmb();
2823 ATM_SKB(skb)->vcc = vcc;
2825 __enqueue_tpd(he_dev, tpd, cid);
2826 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2828 atomic_inc(&vcc->stats->tx);
2830 return 0;
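/*
 * AAL0 detail for he_send(): an ATM_AAL0_SDU frame is the 48-byte payload
 * preceded by the 4-byte cell header, and byte 3 of that header carries
 * PTI in bits 3..1 and CLP in bit 0.  For example, a header byte of 0x03
 * yields pti = 1 and clp = 1.  Those bits are folded into the TPD before
 * skb_pull() strips the header, since the adapter rebuilds the header
 * from the connection's vpi/vci.
 */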
2833 static int
2834 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2836 unsigned long flags;
2837 struct he_dev *he_dev = HE_DEV(atm_dev);
2838 struct he_ioctl_reg reg;
2839 int err = 0;
2841 switch (cmd) {
2842 case HE_GET_REG:
2843 if (!capable(CAP_NET_ADMIN))
2844 return -EPERM;
2846 if (copy_from_user(&reg, arg,
2847 sizeof(struct he_ioctl_reg)))
2848 return -EFAULT;
2850 spin_lock_irqsave(&he_dev->global_lock, flags);
2851 switch (reg.type) {
2852 case HE_REGTYPE_PCI:
2853 reg.val = he_readl(he_dev, reg.addr);
2854 break;
2855 case HE_REGTYPE_RCM:
2856 reg.val =
2857 he_readl_rcm(he_dev, reg.addr);
2858 break;
2859 case HE_REGTYPE_TCM:
2860 reg.val =
2861 he_readl_tcm(he_dev, reg.addr);
2862 break;
2863 case HE_REGTYPE_MBOX:
2864 reg.val =
2865 he_readl_mbox(he_dev, reg.addr);
2866 break;
2867 default:
2868 err = -EINVAL;
2869 break;
2871 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2872 if (err == 0)
2873 if (copy_to_user(arg, &reg,
2874 sizeof(struct he_ioctl_reg)))
2875 return -EFAULT;
2876 break;
2877 default:
2878 #ifdef CONFIG_ATM_HE_USE_SUNI
2879 if (atm_dev->phy && atm_dev->phy->ioctl)
2880 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2881 #else /* CONFIG_ATM_HE_USE_SUNI */
2882 err = -EINVAL;
2883 #endif /* CONFIG_ATM_HE_USE_SUNI */
2884 break;
2887 return err;
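/*
 * Userspace reaches HE_GET_REG through the generic ATM ioctl path, which
 * passes a struct atmif_sioc whose ->arg points at the he_ioctl_reg.  A
 * rough sketch of a caller (socket setup, includes and error handling
 * omitted; the variables s and itf are assumptions of the example):
 */
#ifdef notdef
struct he_ioctl_reg reg = {
	.addr = 0x0,			/* register offset to read */
	.type = HE_REGTYPE_PCI,
};
struct atmif_sioc sioc = {
	.number = itf,			/* ATM interface number */
	.arg    = &reg,
	.length = sizeof(reg),
};

if (ioctl(s, HE_GET_REG, &sioc) == 0)
	printf("reg 0x%x = 0x%x\n", reg.addr, reg.val);
#endif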
2890 static void
2891 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2893 unsigned long flags;
2894 struct he_dev *he_dev = HE_DEV(atm_dev);
2896 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2898 spin_lock_irqsave(&he_dev->global_lock, flags);
2899 he_writel(he_dev, val, FRAMER + (addr*4));
2900 (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */
2901 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2905 static unsigned char
2906 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2908 unsigned long flags;
2909 struct he_dev *he_dev = HE_DEV(atm_dev);
2910 unsigned reg;
2912 spin_lock_irqsave(&he_dev->global_lock, flags);
2913 reg = he_readl(he_dev, FRAMER + (addr*4));
2914 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2916 HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2917 return reg;
2920 static int
2921 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2923 unsigned long flags;
2924 struct he_dev *he_dev = HE_DEV(dev);
2925 int left, i;
2926 #ifdef notdef
2927 struct he_rbrq *rbrq_tail;
2928 struct he_tpdrq *tpdrq_head;
2929 int rbpl_head, rbpl_tail, inuse;
2930 #endif
2931 static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2934 left = *pos;
2935 if (!left--)
2936 return sprintf(page, "%s\n", version);
2938 if (!left--)
2939 return sprintf(page, "%s%s\n\n",
2940 he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2942 if (!left--)
2943 return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");
2945 spin_lock_irqsave(&he_dev->global_lock, flags);
2946 mcc += he_readl(he_dev, MCC);
2947 oec += he_readl(he_dev, OEC);
2948 dcc += he_readl(he_dev, DCC);
2949 cec += he_readl(he_dev, CEC);
2950 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2952 if (!left--)
2953 return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
2954 mcc, oec, dcc, cec);
2956 if (!left--)
2957 return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
2958 CONFIG_IRQ_SIZE, he_dev->irq_peak);
2960 if (!left--)
2961 return sprintf(page, "tpdrq_size = %d inuse = ?\n",
2962 CONFIG_TPDRQ_SIZE);
2964 if (!left--)
2965 return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
2966 CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2968 if (!left--)
2969 return sprintf(page, "tbrq_size = %d peak = %d\n",
2970 CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2973 #ifdef notdef
2974 rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2975 rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2977 inuse = rbpl_head - rbpl_tail;
2978 if (inuse < 0)
2979 inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2980 inuse /= sizeof(struct he_rbp);
2982 if (!left--)
2983 return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
2984 CONFIG_RBPL_SIZE, inuse);
2985 #endif
2987 if (!left--)
2988 return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");
2990 for (i = 0; i < HE_NUM_CS_STPER; ++i)
2991 if (!left--)
2992 return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
2993 he_dev->cs_stper[i].pcr,
2994 he_dev->cs_stper[i].inuse);
2996 if (!left--)
2997 return sprintf(page, "total bw (cbr): %d (limit %d)\n",
2998 he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
3000 return 0;
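/*
 * he_proc_read() follows the old atm proc_read convention: *pos names a
 * single output line, each "if (!left--)" emits exactly one line per call,
 * and returning 0 signals end of file.  Stripped to its skeleton
 * (illustrative only):
 */
#ifdef notdef
static int proc_read_skeleton(struct atm_dev *dev, loff_t *pos, char *page)
{
	int left = *pos;

	if (!left--)
		return sprintf(page, "first line\n");
	if (!left--)
		return sprintf(page, "second line\n");
	return 0;	/* no more lines */
}
#endif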
3003 /* eeprom routines -- see 4.7 */
3005 u8
3006 read_prom_byte(struct he_dev *he_dev, int addr)
3008 u32 val = 0, tmp_read = 0;
3009 int i, j = 0;
3010 u8 byte_read = 0;
3012 val = readl(he_dev->membase + HOST_CNTL);
3013 val &= 0xFFFFE0FF;
3015 /* Turn on write enable */
3016 val |= 0x800;
3017 he_writel(he_dev, val, HOST_CNTL);
3019 /* Send READ instruction */
3020 for (i = 0; i < ARRAY_SIZE(readtab); i++) {
3021 he_writel(he_dev, val | readtab[i], HOST_CNTL);
3022 udelay(EEPROM_DELAY);
3025 /* Next, we need to send the byte address to read from */
3026 for (i = 7; i >= 0; i--) {
3027 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3028 udelay(EEPROM_DELAY);
3029 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3030 udelay(EEPROM_DELAY);
3033 j = 0;
3035 val &= 0xFFFFF7FF; /* Turn off write enable */
3036 he_writel(he_dev, val, HOST_CNTL);
3038 /* Now, we can read data from the EEPROM by clocking it in */
3039 for (i = 7; i >= 0; i--) {
3040 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3041 udelay(EEPROM_DELAY);
3042 tmp_read = he_readl(he_dev, HOST_CNTL);
3043 byte_read |= (unsigned char)
3044 ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
3045 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3046 udelay(EEPROM_DELAY);
3049 he_writel(he_dev, val | ID_CS, HOST_CNTL);
3050 udelay(EEPROM_DELAY);
3052 return byte_read;
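/*
 * read_prom_byte() bit-bangs one byte out of the serial EEPROM; probe code
 * calls it in a loop to pull multi-byte fields.  Sketch of reading a
 * 6-byte field at a hypothetical offset MAC_OFFSET (both the offset name
 * and its use are illustrative):
 */
#ifdef notdef
{
	u8 addr[6];
	int i;

	for (i = 0; i < 6; i++)
		addr[i] = read_prom_byte(he_dev, MAC_OFFSET + i);
}
#endif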
3055 MODULE_LICENSE("GPL");
3056 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
3057 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
3058 module_param(disable64, bool, 0);
3059 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
3060 module_param(nvpibits, short, 0);
3061 MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
3062 module_param(nvcibits, short, 0);
3063 MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
3064 module_param(rx_skb_reserve, short, 0);
3065 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
3066 module_param(irq_coalesce, bool, 0);
3067 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
3068 module_param(sdh, bool, 0);
3069 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
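/*
 * Example module load overriding the defaults above (values illustrative;
 * vpi and vci bits together must fit the adapter's cid space):
 *
 *	modprobe he nvpibits=2 nvcibits=10 sdh=1
 */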
3071 static struct pci_device_id he_pci_tbl[] = {
3072 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
3073 0, 0, 0 },
3074 { 0, }
3077 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
3079 static struct pci_driver he_driver = {
3080 .name = "he",
3081 .probe = he_init_one,
3082 .remove = __devexit_p(he_remove_one),
3083 .id_table = he_pci_tbl,
3086 static int __init he_init(void)
3088 return pci_register_driver(&he_driver);
3091 static void __exit he_cleanup(void)
3093 pci_unregister_driver(&he_driver);
3096 module_init(he_init);
3097 module_exit(he_cleanup);