1 /******************************************************************************
3 * nicstar.c
5 * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards.
7 * IMPORTANT: The included file nicstarmac.c was NOT WRITTEN BY ME.
8 * It was taken from the frle-0.22 device driver.
9 * As the file doesn't have a copyright notice, in the file
10 * nicstarmac.copyright I put the copyright notice from the
11 * frle-0.22 device driver.
12 * Some code is based on the nicstar driver by M. Welsh.
14 * Author: Rui Prior (rprior@inescn.pt)
15 * PowerPC support by Jay Talbott (jay_talbott@mcg.mot.com) April 1999
18 * (C) INESC 1999
21 ******************************************************************************/
24 /* Header files ***************************************************************/
26 #include <linux/module.h>
27 #include <linux/config.h>
28 #include <linux/kernel.h>
29 #include <linux/skbuff.h>
30 #include <linux/atmdev.h>
31 #include <linux/atm.h>
32 #include <linux/pci.h>
33 #include <linux/types.h>
34 #include <linux/string.h>
35 #include <linux/delay.h>
36 #include <linux/init.h>
37 #include <linux/sched.h>
38 #include <linux/timer.h>
39 #include <linux/interrupt.h>
40 #include <asm/io.h>
41 #include <asm/uaccess.h>
42 #include "nicstar.h"
43 #include "nicstarmac.h"
44 #ifdef CONFIG_ATM_NICSTAR_USE_SUNI
45 #include "suni.h"
46 #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */
49 /* Additional code ************************************************************/
51 #include "nicstarmac.c"
54 /* Configurable parameters ****************************************************/
56 #undef PHY_LOOPBACK
57 #undef TX_DEBUG
58 #undef RX_DEBUG
59 #undef GENERAL_DEBUG
60 #undef EXTRA_DEBUG
62 #undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know
63 you're going to use only raw ATM */
66 /* Do not touch these *********************************************************/
68 #ifdef TX_DEBUG
69 #define TXPRINTK(args...) printk(args)
70 #else
71 #define TXPRINTK(args...)
72 #endif /* TX_DEBUG */
74 #ifdef RX_DEBUG
75 #define RXPRINTK(args...) printk(args)
76 #else
77 #define RXPRINTK(args...)
78 #endif /* RX_DEBUG */
80 #ifdef GENERAL_DEBUG
81 #define PRINTK(args...) printk(args)
82 #else
83 #define PRINTK(args...)
84 #endif /* GENERAL_DEBUG */
86 #ifdef EXTRA_DEBUG
87 #define XPRINTK(args...) printk(args)
88 #else
89 #define XPRINTK(args...)
90 #endif /* EXTRA_DEBUG */
93 /* Macros *********************************************************************/
95 #define MAX(a,b) ((a) > (b) ? (a) : (b))
96 #define MIN(a,b) ((a) < (b) ? (a) : (b))
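/* Caveat (general property of function-like macros of this form, noted here
   as a reminder): MAX()/MIN() evaluate their arguments twice, so they should
   not be used with expressions that have side effects, e.g. MAX(i++, j)
   may increment i once or twice. */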
98 #define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ)
100 #define NS_DELAY mdelay(1)
102 #define ALIGN_ADDRESS(addr, alignment) \
103 ((((u32) (addr)) + (((u32) (alignment)) - 1)) & ~(((u32) (alignment)) - 1))
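/* ALIGN_ADDRESS() rounds an address up to the next multiple of 'alignment'
   (a power of two). Worked example: ALIGN_ADDRESS(0x12345, 0x1000) =
   (0x12345 + 0xFFF) & ~0xFFF = 0x13000; an already aligned address is
   returned unchanged. */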
105 #undef CEIL
107 #ifndef ATM_SKB
108 #define ATM_SKB(s) (&(s)->atm)
109 #endif
112 /* Version definition *********************************************************/
114 #include <linux/version.h>
115 char kernel_version[] = UTS_RELEASE;
118 /* Function declarations ******************************************************/
120 static u32 ns_read_sram(ns_dev *card, u32 sram_address);
121 static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count);
122 static int ns_init_card(int i, struct pci_dev *pcidev);
123 static void ns_init_card_error(ns_dev *card, int error);
124 static scq_info *get_scq(int size, u32 scd);
125 static void free_scq(scq_info *scq, struct atm_vcc *vcc);
126 static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
127 u32 handle2, u32 addr2);
128 static void ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
129 static int ns_open(struct atm_vcc *vcc, short vpi, int vci);
130 static void ns_close(struct atm_vcc *vcc);
131 static void fill_tst(ns_dev *card, int n, vc_map *vc);
132 static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb);
133 static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd,
134 struct sk_buff *skb);
135 static void process_tsq(ns_dev *card);
136 static void drain_scq(ns_dev *card, scq_info *scq, int pos);
137 static void process_rsq(ns_dev *card);
138 static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe);
139 #ifdef NS_USE_DESTRUCTORS
140 static void ns_sb_destructor(struct sk_buff *sb);
141 static void ns_lb_destructor(struct sk_buff *lb);
142 static void ns_hb_destructor(struct sk_buff *hb);
143 #endif /* NS_USE_DESTRUCTORS */
144 static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb);
145 static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count);
146 static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb);
147 static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb);
148 static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb);
149 static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
150 static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void *arg);
151 static void which_list(ns_dev *card, struct sk_buff *skb);
152 static void ns_poll(unsigned long arg);
153 static int ns_parse_mac(char *mac, unsigned char *esi);
154 static short ns_h2i(char c);
155 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
156 unsigned long addr);
157 static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
161 /* Global variables ***********************************************************/
163 static struct ns_dev *cards[NS_MAX_CARDS];
164 static unsigned num_cards = 0;
165 static struct atmdev_ops atm_ops =
167 NULL, /* dev_close */
168 ns_open, /* open */
169 ns_close, /* close */
170 ns_ioctl, /* ioctl */
171 NULL, /* getsockopt */
172 NULL, /* setsockopt */
173 ns_send, /* send */
174 NULL, /* sg_send */
175 NULL, /* send_oam */
176 ns_phy_put, /* phy_put */
177 ns_phy_get, /* phy_get */
178 NULL, /* feedback */
179 NULL, /* change_qos */
180 NULL, /* free_rx_skb */
181 ns_proc_read /* proc_read */
183 static struct timer_list ns_timer;
184 static char *mac[NS_MAX_CARDS] = { NULL
185 #if NS_MAX_CARDS > 1
186 , NULL
187 #endif /* NS_MAX_CARDS > 1 */
188 #if NS_MAX_CARDS > 2
189 , NULL
190 #endif /* NS_MAX_CARDS > 2 */
191 #if NS_MAX_CARDS > 3
192 , NULL
193 #endif /* NS_MAX_CARDS > 3 */
194 #if NS_MAX_CARDS > 4
195 , NULL
196 #endif /* NS_MAX_CARDS > 4 */
199 #ifdef MODULE
200 MODULE_PARM(mac, "1-" __MODULE_STRING(NS_MAX_CARDS) "s");
201 #endif /* MODULE */
204 /* Functions*******************************************************************/
206 #ifdef MODULE
208 int init_module(void)
210 int i;
211 unsigned error = 0; /* Initialized to remove compile warning */
212 struct pci_dev *pcidev;
214 XPRINTK("nicstar: init_module() called.\n");
215 if(!pci_present())
217 printk("nicstar: no PCI subsystem found.\n");
218 return -EIO;
221 for(i = 0; i < NS_MAX_CARDS; i++)
222 cards[i] = NULL;
224 pcidev = NULL;
225 for(i = 0; i < NS_MAX_CARDS; i++)
227 if ((pcidev = pci_find_device(PCI_VENDOR_ID_IDT,
228 PCI_DEVICE_ID_IDT_IDT77201,
229 pcidev)) == NULL)
230 break;
232 error = ns_init_card(i, pcidev);
233 if (error)
234 cards[i--] = NULL; /* Try to find another card but don't increment index */
237 if (i == 0)
239 if (!error)
241 printk("nicstar: no cards found.\n");
242 return -ENXIO;
244 else
245 return -EIO;
247 TXPRINTK("nicstar: TX debug enabled.\n");
248 RXPRINTK("nicstar: RX debug enabled.\n");
249 PRINTK("nicstar: General debug enabled.\n");
250 #ifdef PHY_LOOPBACK
251 printk("nicstar: using PHY loopback.\n");
252 #endif /* PHY_LOOPBACK */
253 XPRINTK("nicstar: init_module() returned.\n");
255 init_timer(&ns_timer);
256 ns_timer.expires = jiffies + NS_POLL_PERIOD;
257 ns_timer.data = 0UL;
258 ns_timer.function = ns_poll;
259 add_timer(&ns_timer);
260 return 0;
265 void cleanup_module(void)
267 int i, j;
268 unsigned short pci_command;
269 ns_dev *card;
270 struct sk_buff *hb;
271 struct sk_buff *iovb;
272 struct sk_buff *lb;
273 struct sk_buff *sb;
275 XPRINTK("nicstar: cleanup_module() called.\n");
277 if (MOD_IN_USE)
278 printk("nicstar: module in use, remove delayed.\n");
280 del_timer(&ns_timer);
282 for (i = 0; i < NS_MAX_CARDS; i++)
284 if (cards[i] == NULL)
285 continue;
287 card = cards[i];
289 /* Stop everything */
290 writel(0x00000000, card->membase + CFG);
292 /* De-register device */
293 atm_dev_deregister(card->atmdev);
295 /* Disable memory mapping and busmastering */
296 if (pci_read_config_word(card->pcidev, PCI_COMMAND, &pci_command) != 0)
298 printk("nicstar%d: can't read PCI_COMMAND.\n", i);
300 pci_command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
301 if (pci_write_config_word(card->pcidev, PCI_COMMAND, pci_command) != 0)
303 printk("nicstar%d: can't write PCI_COMMAND.\n", i);
306 /* Free up resources */
307 j = 0;
308 PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count);
309 while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
311 kfree_skb(hb);
312 j++;
314 PRINTK("nicstar%d: %d huge buffers freed.\n", i, j);
315 j = 0;
316 PRINTK("nicstar%d: freeing %d iovec buffers.\n", i, card->iovpool.count);
317 while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
319 kfree_skb(iovb);
320 j++;
322 PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j);
323 while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
324 kfree_skb(lb);
325 while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
326 kfree_skb(sb);
327 free_scq(card->scq0, NULL);
328 for (j = 0; j < NS_FRSCD_NUM; j++)
330 if (card->scd2vc[j] != NULL)
331 free_scq(card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
333 kfree(card->rsq.org);
334 kfree(card->tsq.org);
335 free_irq(card->pcidev->irq, card);
336 iounmap((void *) card->membase);
337 kfree(card);
340 XPRINTK("nicstar: cleanup_module() returned.\n");
344 #else
346 int __init nicstar_detect(void)
348 int i;
349 unsigned error = 0; /* Initialized to remove compile warning */
350 struct pci_dev *pcidev;
352 if(!pci_present())
354 printk("nicstar: no PCI subsystem found.\n");
355 return -EIO;
358 for(i = 0; i < NS_MAX_CARDS; i++)
359 cards[i] = NULL;
361 pcidev = NULL;
362 for(i = 0; i < NS_MAX_CARDS; i++)
364 if ((pcidev = pci_find_device(PCI_VENDOR_ID_IDT,
365 PCI_DEVICE_ID_IDT_IDT77201,
366 pcidev)) == NULL)
367 break;
369 error = ns_init_card(i, pcidev);
370 if (error)
371 cards[i--] = NULL; /* Try to find another card but don't increment index */
374 if (i == 0 && error)
375 return -EIO;
377 TXPRINTK("nicstar: TX debug enabled.\n");
378 RXPRINTK("nicstar: RX debug enabled.\n");
379 PRINTK("nicstar: General debug enabled.\n");
380 #ifdef PHY_LOOPBACK
381 printk("nicstar: using PHY loopback.\n");
382 #endif /* PHY_LOOPBACK */
383 XPRINTK("nicstar: nicstar_detect() returned.\n");
385 init_timer(&ns_timer);
386 ns_timer.expires = jiffies + NS_POLL_PERIOD;
387 ns_timer.data = 0UL;
388 ns_timer.function = ns_poll;
389 add_timer(&ns_timer);
390 return i;
394 #endif /* MODULE */
397 static u32 ns_read_sram(ns_dev *card, u32 sram_address)
399 unsigned long flags;
400 u32 data;
401 sram_address <<= 2;
402 sram_address &= 0x0007FFFC; /* address must be dword aligned */
403 sram_address |= 0x50000000; /* SRAM read command */
404 save_flags(flags); cli();
405 while (CMD_BUSY(card));
406 writel(sram_address, card->membase + CMD);
407 while (CMD_BUSY(card));
408 data = readl(card->membase + DR0);
409 restore_flags(flags);
410 return data;
415 static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count)
417 unsigned long flags;
418 int i, c;
419 count--; /* count range now is 0..3 instead of 1..4 */
420 c = count;
421 c <<= 2; /* to use increments of 4 */
422 save_flags(flags); cli();
423 while (CMD_BUSY(card));
424 for (i = 0; i <= c; i += 4)
425 writel(*(value++), card->membase + i);
426 /* Note: DR# registers are the first 4 dwords in nicstar's memspace,
427 so card->membase + DR0 == card->membase */
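/* The command word written below appears to be laid out as
   0x40000000 (SRAM write) | word address << 2 | (burst length - 1),
   mirroring the 0x50000000 read command used in ns_read_sram() above.
   E.g. a 4-word burst starting at SRAM word 0x1000 issues 0x40004003. */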
428 sram_address <<= 2;
429 sram_address &= 0x0007FFFC;
430 sram_address |= (0x40000000 | count);
431 writel(sram_address, card->membase + CMD);
432 restore_flags(flags);
436 static int ns_init_card(int i, struct pci_dev *pcidev)
438 int j;
439 struct ns_dev *card;
440 unsigned short pci_command;
441 unsigned char pci_latency;
442 unsigned error;
443 u32 data;
444 u32 u32d[4];
445 u32 ns_cfg_rctsize;
446 int bcount;
448 error = 0;
450 if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL)
452 printk("nicstar%d: can't allocate memory for device structure.\n", i);
453 error = 2;
454 ns_init_card_error(card, error);
455 return error;
457 cards[i] = card;
459 card->index = i;
460 card->pcidev = pcidev;
461 card->membase = (u32) pcidev->resource[1].start;
462 #ifdef __powerpc__
463 /* Compensate for different memory map between host CPU and PCI bus.
464 Shouldn't we use a macro for this? */
465 card->membase += KERNELBASE;
466 #endif /* __powerpc__ */
467 card->membase = (u32) ioremap(card->membase, NS_IOREMAP_SIZE);
468 if (card->membase == (u32) (NULL))
470 printk("nicstar%d: can't ioremap() membase.\n",i);
471 error = 3;
472 ns_init_card_error(card, error);
473 return error;
475 PRINTK("nicstar%d: membase at 0x%x.\n", i, card->membase);
477 if (pci_read_config_word(pcidev, PCI_COMMAND, &pci_command) != 0)
479 printk("nicstar%d: can't read PCI_COMMAND.\n", i);
480 error = 4;
481 ns_init_card_error(card, error);
482 return error;
484 pci_command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
485 if (pci_write_config_word(pcidev, PCI_COMMAND, pci_command) != 0)
487 printk("nicstar%d: can't write PCI_COMMAND.\n", i);
488 error = 5;
489 ns_init_card_error(card, error);
490 return error;
493 if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0)
495 printk("nicstar%d: can't read PCI latency timer.\n", i);
496 error = 6;
497 ns_init_card_error(card, error);
498 return error;
500 if (pci_latency < NS_PCI_LATENCY)
502 PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i, NS_PCI_LATENCY);
503 for (j = 1; j < 4; j++)
505 if (pci_write_config_byte(pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) == 0)
506 break; /* latency timer written successfully */
508 if (j == 4)
510 printk("nicstar%d: can't set PCI latency timer to %d.\n", i, NS_PCI_LATENCY);
511 error = 7;
512 ns_init_card_error(card, error);
513 return error;
517 /* Clear timer overflow */
518 data = readl(card->membase + STAT);
519 if (data & NS_STAT_TMROF)
520 writel(NS_STAT_TMROF, card->membase + STAT);
522 /* Software reset */
523 writel(NS_CFG_SWRST, card->membase + CFG);
524 NS_DELAY;
525 writel(0x00000000, card->membase + CFG);
527 /* PHY reset */
528 writel(0x00000008, card->membase + GP);
529 NS_DELAY;
530 writel(0x00000001, card->membase + GP);
531 NS_DELAY;
532 while (CMD_BUSY(card));
533 writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */
534 NS_DELAY;
536 /* Detect PHY type */
537 while (CMD_BUSY(card));
538 writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD);
539 while (CMD_BUSY(card));
540 data = readl(card->membase + DR0);
541 switch(data) {
542 case 0x00000009:
543 printk("nicstar%d: PHY seems to be 25 Mbps.\n", i);
544 card->max_pcr = IDT_25_PCR;
545 while(CMD_BUSY(card));
546 writel(0x00000008, card->membase + DR0);
547 writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD);
548 /* Clear any pending interrupt */
549 writel(NS_STAT_SFBQF, card->membase + STAT);
550 #ifdef PHY_LOOPBACK
551 while(CMD_BUSY(card));
552 writel(0x00000022, card->membase + DR0);
553 writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD);
554 #endif /* PHY_LOOPBACK */
555 break;
556 case 0x00000030:
557 case 0x00000031:
558 printk("nicstar%d: PHY seems to be 155 Mbps.\n", i);
559 card->max_pcr = ATM_OC3_PCR;
560 #ifdef PHY_LOOPBACK
561 while(CMD_BUSY(card));
562 writel(0x00000002, card->membase + DR0);
563 writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD);
564 #endif /* PHY_LOOPBACK */
565 break;
566 default:
567 printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data);
568 error = 8;
569 ns_init_card_error(card, error);
570 return error;
572 writel(0x00000000, card->membase + GP);
574 /* Determine SRAM size */
575 data = 0x76543210;
576 ns_write_sram(card, 0x1C003, &data, 1);
577 data = 0x89ABCDEF;
578 ns_write_sram(card, 0x14003, &data, 1);
579 if (ns_read_sram(card, 0x14003) == 0x89ABCDEF &&
580 ns_read_sram(card, 0x1C003) == 0x76543210)
581 card->sram_size = 128;
582 else
583 card->sram_size = 32;
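/* Rationale, as far as one can tell from the addresses used: on a 32K-word
   part the address bits above 32K wrap, so word addresses 0x1C003 and 0x14003
   alias the same cell and the second write clobbers the first pattern; only a
   128K-word part keeps both patterns and passes the readback check above. */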
584 PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size);
586 card->rct_size = NS_MAX_RCTSIZE;
588 #if (NS_MAX_RCTSIZE == 4096)
589 if (card->sram_size == 128)
590 printk("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n", i);
591 #elif (NS_MAX_RCTSIZE == 16384)
592 if (card->sram_size == 32)
594 printk("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n", i);
595 card->rct_size = 4096;
597 #else
598 #error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c
599 #endif
601 card->vpibits = NS_VPIBITS;
602 if (card->rct_size == 4096)
603 card->vcibits = 12 - NS_VPIBITS;
604 else /* card->rct_size == 16384 */
605 card->vcibits = 14 - NS_VPIBITS;
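/* Example: if NS_VPIBITS is 2, a 4096-entry RCT gives vcibits = 10
   (2^(2+10) = 4096 connections) and a 16384-entry RCT gives vcibits = 12
   (2^(2+12) = 16384). */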
607 /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */
608 if (mac[i] == NULL)
609 nicstar_init_eprom(card->membase);
611 if (request_irq(pcidev->irq, &ns_irq_handler, SA_INTERRUPT | SA_SHIRQ, "nicstar", card) != 0)
613 printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
614 error = 9;
615 ns_init_card_error(card, error);
616 return error;
619 /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
620 writel(0x00000000, card->membase + VPM);
622 /* Initialize TSQ */
623 card->tsq.org = kmalloc(NS_TSQSIZE + NS_TSQ_ALIGNMENT, GFP_KERNEL);
624 if (card->tsq.org == NULL)
626 printk("nicstar%d: can't allocate TSQ.\n", i);
627 error = 10;
628 ns_init_card_error(card, error);
629 return error;
631 card->tsq.base = (ns_tsi *) ALIGN_ADDRESS(card->tsq.org, NS_TSQ_ALIGNMENT);
632 card->tsq.next = card->tsq.base;
633 card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1);
634 for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++)
635 ns_tsi_init(card->tsq.base + j);
636 writel(0x00000000, card->membase + TSQH);
637 writel((u32) virt_to_bus(card->tsq.base), card->membase + TSQB);
638 PRINTK("nicstar%d: TSQ base at 0x%x 0x%x 0x%x.\n", i, (u32) card->tsq.base,
639 (u32) virt_to_bus(card->tsq.base), readl(card->membase + TSQB));
641 /* Initialize RSQ */
642 card->rsq.org = kmalloc(NS_RSQSIZE + NS_RSQ_ALIGNMENT, GFP_KERNEL);
643 if (card->rsq.org == NULL)
645 printk("nicstar%d: can't allocate RSQ.\n", i);
646 error = 11;
647 ns_init_card_error(card, error);
648 return error;
650 card->rsq.base = (ns_rsqe *) ALIGN_ADDRESS(card->rsq.org, NS_RSQ_ALIGNMENT);
651 card->rsq.next = card->rsq.base;
652 card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1);
653 for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++)
654 ns_rsqe_init(card->rsq.base + j);
655 writel(0x00000000, card->membase + RSQH);
656 writel((u32) virt_to_bus(card->rsq.base), card->membase + RSQB);
657 PRINTK("nicstar%d: RSQ base at 0x%x.\n", i, (u32) card->rsq.base);
659 /* Initialize SCQ0, the only VBR SCQ used */
660 card->scq1 = (scq_info *) NULL;
661 card->scq2 = (scq_info *) NULL;
662 card->scq0 = get_scq(VBR_SCQSIZE, NS_VRSCD0);
663 if (card->scq0 == (scq_info *) NULL)
665 printk("nicstar%d: can't get SCQ0.\n", i);
666 error = 12;
667 ns_init_card_error(card, error);
668 return error;
670 u32d[0] = (u32) virt_to_bus(card->scq0->base);
671 u32d[1] = (u32) 0x00000000;
672 u32d[2] = (u32) 0xffffffff;
673 u32d[3] = (u32) 0x00000000;
674 ns_write_sram(card, NS_VRSCD0, u32d, 4);
675 ns_write_sram(card, NS_VRSCD1, u32d, 4); /* These last two won't be used */
676 ns_write_sram(card, NS_VRSCD2, u32d, 4); /* but are initialized, just in case... */
677 card->scq0->scd = NS_VRSCD0;
678 PRINTK("nicstar%d: VBR-SCQ0 base at 0x%x.\n", i, (u32) card->scq0->base);
680 /* Initialize TSTs */
681 card->tst_addr = NS_TST0;
682 card->tst_free_entries = NS_TST_NUM_ENTRIES;
683 data = NS_TST_OPCODE_VARIABLE;
684 for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
685 ns_write_sram(card, NS_TST0 + j, &data, 1);
686 data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0);
687 ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1);
688 for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
689 ns_write_sram(card, NS_TST1 + j, &data, 1);
690 data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1);
691 ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1);
692 for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
693 card->tste2vc[j] = NULL;
694 writel(NS_TST0 << 2, card->membase + TSTB);
697 /* Initialize RCT. AAL type is set on opening the VC. */
698 #ifdef RCQ_SUPPORT
699 u32d[0] = NS_RCTE_RAWCELLINTEN;
700 #else
701 u32d[0] = 0x00000000;
702 #endif /* RCQ_SUPPORT */
703 u32d[1] = 0x00000000;
704 u32d[2] = 0x00000000;
705 u32d[3] = 0xFFFFFFFF;
706 for (j = 0; j < card->rct_size; j++)
707 ns_write_sram(card, j * 4, u32d, 4);
709 memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map));
711 for (j = 0; j < NS_FRSCD_NUM; j++)
712 card->scd2vc[j] = NULL;
714 /* Initialize buffer levels */
715 card->sbnr.min = MIN_SB;
716 card->sbnr.init = NUM_SB;
717 card->sbnr.max = MAX_SB;
718 card->lbnr.min = MIN_LB;
719 card->lbnr.init = NUM_LB;
720 card->lbnr.max = MAX_LB;
721 card->iovnr.min = MIN_IOVB;
722 card->iovnr.init = NUM_IOVB;
723 card->iovnr.max = MAX_IOVB;
724 card->hbnr.min = MIN_HB;
725 card->hbnr.init = NUM_HB;
726 card->hbnr.max = MAX_HB;
728 card->sm_handle = 0x00000000;
729 card->sm_addr = 0x00000000;
730 card->lg_handle = 0x00000000;
731 card->lg_addr = 0x00000000;
733 card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */
735 /* Pre-allocate some huge buffers */
736 skb_queue_head_init(&card->hbpool.queue);
737 card->hbpool.count = 0;
738 for (j = 0; j < NUM_HB; j++)
740 struct sk_buff *hb;
741 hb = alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
742 if (hb == NULL)
744 printk("nicstar%d: can't allocate %dth of %d huge buffers.\n",
745 i, j, NUM_HB);
746 error = 13;
747 ns_init_card_error(card, error);
748 return error;
750 skb_queue_tail(&card->hbpool.queue, hb);
751 card->hbpool.count++;
755 /* Allocate large buffers */
756 skb_queue_head_init(&card->lbpool.queue);
757 card->lbpool.count = 0; /* Not used */
758 for (j = 0; j < NUM_LB; j++)
760 struct sk_buff *lb;
761 lb = alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
762 if (lb == NULL)
764 printk("nicstar%d: can't allocate %dth of %d large buffers.\n",
765 i, j, NUM_LB);
766 error = 14;
767 ns_init_card_error(card, error);
768 return error;
770 skb_queue_tail(&card->lbpool.queue, lb);
771 skb_reserve(lb, NS_SMBUFSIZE);
772 push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
773 /* Due to the implementation of push_rxbufs() this is 1, not 0 */
774 if (j == 1)
776 card->rcbuf = lb;
777 card->rawch = (u32) virt_to_bus(lb->data);
780 /* Test for strange behaviour which leads to crashes */
781 if ((bcount = ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min)
783 printk("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n",
784 i, j, bcount);
785 error = 14;
786 ns_init_card_error(card, error);
787 return error;
791 /* Allocate small buffers */
792 skb_queue_head_init(&card->sbpool.queue);
793 card->sbpool.count = 0; /* Not used */
794 for (j = 0; j < NUM_SB; j++)
796 struct sk_buff *sb;
797 sb = alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
798 if (sb == NULL)
800 printk("nicstar%d: can't allocate %dth of %d small buffers.\n",
801 i, j, NUM_SB);
802 error = 15;
803 ns_init_card_error(card, error);
804 return error;
806 skb_queue_tail(&card->sbpool.queue, sb);
807 skb_reserve(sb, NS_AAL0_HEADER);
808 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
810 /* Test for strange behaviour which leads to crashes */
811 if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min)
813 printk("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n",
814 i, j, bcount);
815 error = 15;
816 ns_init_card_error(card, error);
817 return error;
821 /* Allocate iovec buffers */
822 skb_queue_head_init(&card->iovpool.queue);
823 card->iovpool.count = 0;
824 for (j = 0; j < NUM_IOVB; j++)
826 struct sk_buff *iovb;
827 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
828 if (iovb == NULL)
830 printk("nicstar%d: can't allocate %dth of %d iovec buffers.\n",
831 i, j, NUM_IOVB);
832 error = 16;
833 ns_init_card_error(card, error);
834 return error;
836 skb_queue_tail(&card->iovpool.queue, iovb);
837 card->iovpool.count++;
841 card->in_handler = 0;
842 card->in_poll = 0;
843 card->intcnt = 0;
845 /* Configure NICStAR */
846 if (card->rct_size == 4096)
847 ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES;
848 else /* (card->rct_size == 16384) */
849 ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES;
851 card->efbie = 1;
853 /* Register device */
854 card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, 0UL);
855 if (card->atmdev == NULL)
857 printk("nicstar%d: can't register device.\n", i);
858 error = 17;
859 ns_init_card_error(card, error);
860 return error;
863 if (ns_parse_mac(mac[i], card->atmdev->esi))
864 nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
865 card->atmdev->esi, 6);
867 printk("nicstar%d: MAC address %02X:%02X:%02X:%02X:%02X:%02X\n", i,
868 card->atmdev->esi[0], card->atmdev->esi[1], card->atmdev->esi[2],
869 card->atmdev->esi[3], card->atmdev->esi[4], card->atmdev->esi[5]);
871 card->atmdev->dev_data = card;
872 card->atmdev->ci_range.vpi_bits = card->vpibits;
873 card->atmdev->ci_range.vci_bits = card->vcibits;
874 card->atmdev->link_rate = card->max_pcr;
876 card->atmdev->phy = NULL;
877 #ifdef CONFIG_ATM_NICSTAR_USE_SUNI
878 if (card->max_pcr == ATM_OC3_PCR) {
879 suni_init(card->atmdev);
880 #ifdef MODULE
881 MOD_INC_USE_COUNT;
882 /* Can't remove the nicstar driver or the suni driver would oops */
883 #endif /* MODULE */
885 #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */
886 if (card->atmdev->phy && card->atmdev->phy->start)
887 card->atmdev->phy->start(card->atmdev);
889 writel(NS_CFG_RXPATH |
890 NS_CFG_SMBUFSIZE |
891 NS_CFG_LGBUFSIZE |
892 NS_CFG_EFBIE |
893 NS_CFG_RSQSIZE |
894 NS_CFG_VPIBITS |
895 ns_cfg_rctsize |
896 NS_CFG_RXINT_NODELAY |
897 NS_CFG_RAWIE | /* Only enabled if RCQ_SUPPORT */
898 NS_CFG_RSQAFIE |
899 NS_CFG_TXEN |
900 NS_CFG_TXIE |
901 NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */
902 NS_CFG_PHYIE,
903 card->membase + CFG);
905 num_cards++;
907 return error;
912 static void ns_init_card_error(ns_dev *card, int error)
914 if (error >= 17)
916 writel(0x00000000, card->membase + CFG);
918 if (error >= 16)
920 struct sk_buff *iovb;
921 while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
922 kfree_skb(iovb);
924 if (error >= 15)
926 struct sk_buff *sb;
927 while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
928 kfree_skb(sb);
929 free_scq(card->scq0, NULL);
931 if (error >= 14)
933 struct sk_buff *lb;
934 while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
935 kfree_skb(lb);
937 if (error >= 13)
939 struct sk_buff *hb;
940 while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
941 kfree_skb(hb);
943 if (error >= 12)
945 kfree(card->rsq.org);
947 if (error >= 11)
949 kfree(card->tsq.org);
951 if (error >= 10)
953 free_irq(card->pcidev->irq, card);
955 if (error >= 4)
957 iounmap((void *) card->membase);
959 if (error >= 3)
961 kfree(card);
967 static scq_info *get_scq(int size, u32 scd)
969 scq_info *scq;
970 int i;
972 if (size != VBR_SCQSIZE && size != CBR_SCQSIZE)
973 return (scq_info *) NULL;
975 scq = (scq_info *) kmalloc(sizeof(scq_info), GFP_KERNEL);
976 if (scq == (scq_info *) NULL)
977 return (scq_info *) NULL;
978 scq->org = kmalloc(2 * size, GFP_KERNEL);
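/* Twice the SCQ size is allocated so that ALIGN_ADDRESS() below can always
   place scq->base on a boundary aligned to the queue size inside the buffer,
   which is the alignment this driver assumes the SAR requires for its
   segmentation channel queues. */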
979 if (scq->org == NULL)
981 kfree(scq);
982 return (scq_info *) NULL;
984 scq->skb = (struct sk_buff **) kmalloc(sizeof(struct sk_buff *) *
985 (size / NS_SCQE_SIZE), GFP_KERNEL);
986 if (scq->skb == (struct sk_buff **) NULL)
988 kfree(scq->org);
989 kfree(scq);
990 return (scq_info *) NULL;
992 scq->num_entries = size / NS_SCQE_SIZE;
993 scq->base = (ns_scqe *) ALIGN_ADDRESS(scq->org, size);
994 scq->next = scq->base;
995 scq->last = scq->base + (scq->num_entries - 1);
996 scq->tail = scq->last;
997 scq->scd = scd;
998 scq->num_entries = size / NS_SCQE_SIZE;
999 scq->tbd_count = 0;
1000 init_waitqueue_head(&scq->scqfull_waitq);
1001 scq->full = 0;
1003 for (i = 0; i < scq->num_entries; i++)
1004 scq->skb[i] = NULL;
1006 return scq;
1011 /* For variable rate SCQ vcc must be NULL */
1012 static void free_scq(scq_info *scq, struct atm_vcc *vcc)
1014 int i;
1016 if (scq->num_entries == VBR_SCQ_NUM_ENTRIES)
1017 for (i = 0; i < scq->num_entries; i++)
1019 if (scq->skb[i] != NULL)
1021 vcc = ATM_SKB(scq->skb[i])->vcc;
1022 if (vcc->pop != NULL)
1023 vcc->pop(vcc, scq->skb[i]);
1024 else
1025 dev_kfree_skb(scq->skb[i]);
1028 else /* vcc must be != NULL */
1030 if (vcc == NULL)
1032 printk("nicstar: free_scq() called with vcc == NULL for fixed rate scq.\n");
1033 for (i = 0; i < scq->num_entries; i++)
1034 dev_kfree_skb(scq->skb[i]);
1036 else
1037 for (i = 0; i < scq->num_entries; i++)
1039 if (scq->skb[i] != NULL)
1041 if (vcc->pop != NULL)
1042 vcc->pop(vcc, scq->skb[i]);
1043 else
1044 dev_kfree_skb(scq->skb[i]);
1048 kfree(scq->skb);
1049 kfree(scq->org);
1050 kfree(scq);
1055 /* The handles passed must be pointers to the sk_buff containing the small
1056 or large buffer(s) cast to u32. */
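/* Free buffers are handed to the SAR two at a time: a call that supplies only
   one buffer of a given type parks it in card->sm_addr/sm_handle (or
   lg_addr/lg_handle) and skips the hardware write; the next single-buffer
   call of the same type supplies the pair and both are pushed onto the free
   buffer queue together (see the "this is 1, not 0" note in ns_init_card()). */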
1057 static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
1058 u32 handle2, u32 addr2)
1060 u32 stat;
1061 unsigned long flags;
1064 #ifdef GENERAL_DEBUG
1065 if (!addr1)
1066 printk("nicstar%d: push_rxbufs called with addr1 = 0.\n", card->index);
1067 #endif /* GENERAL_DEBUG */
1069 stat = readl(card->membase + STAT);
1070 card->sbfqc = ns_stat_sfbqc_get(stat);
1071 card->lbfqc = ns_stat_lfbqc_get(stat);
1072 if (type == BUF_SM)
1074 if (!addr2)
1076 if (card->sm_addr)
1078 addr2 = card->sm_addr;
1079 handle2 = card->sm_handle;
1080 card->sm_addr = 0x00000000;
1081 card->sm_handle = 0x00000000;
1083 else /* (!sm_addr) */
1085 card->sm_addr = addr1;
1086 card->sm_handle = handle1;
1090 else /* type == BUF_LG */
1092 if (!addr2)
1094 if (card->lg_addr)
1096 addr2 = card->lg_addr;
1097 handle2 = card->lg_handle;
1098 card->lg_addr = 0x00000000;
1099 card->lg_handle = 0x00000000;
1101 else /* (!lg_addr) */
1103 card->lg_addr = addr1;
1104 card->lg_handle = handle1;
1109 if (addr2)
1111 if (type == BUF_SM)
1113 if (card->sbfqc >= card->sbnr.max)
1115 skb_unlink((struct sk_buff *) handle1);
1116 kfree_skb((struct sk_buff *) handle1);
1117 skb_unlink((struct sk_buff *) handle2);
1118 kfree_skb((struct sk_buff *) handle2);
1119 return;
1121 else
1122 card->sbfqc += 2;
1124 else /* (type == BUF_LG) */
1126 if (card->lbfqc >= card->lbnr.max)
1128 skb_unlink((struct sk_buff *) handle1);
1129 kfree_skb((struct sk_buff *) handle1);
1130 skb_unlink((struct sk_buff *) handle2);
1131 kfree_skb((struct sk_buff *) handle2);
1132 return;
1134 else
1135 card->lbfqc += 2;
1138 save_flags(flags); cli();
1140 while (CMD_BUSY(card));
1141 writel(addr2, card->membase + DR3);
1142 writel(handle2, card->membase + DR2);
1143 writel(addr1, card->membase + DR1);
1144 writel(handle1, card->membase + DR0);
1145 writel(NS_CMD_WRITE_FREEBUFQ | (u32) type, card->membase + CMD);
1147 restore_flags(flags);
1149 XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index,
1150 (type == BUF_SM ? "small" : "large"), addr1, addr2);
1153 if (!card->efbie && card->sbfqc >= card->sbnr.min &&
1154 card->lbfqc >= card->lbnr.min)
1156 card->efbie = 1;
1157 writel((readl(card->membase + CFG) | NS_CFG_EFBIE), card->membase + CFG);
1160 return;
1165 static void ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
1167 u32 stat_r;
1168 ns_dev *card;
1169 struct atm_dev *dev;
1171 card = (ns_dev *) dev_id;
1172 dev = card->atmdev;
1173 card->intcnt++;
1175 PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index);
1177 if (card->in_handler)
1179 printk("nicstar%d: Re-entering ns_irq_handler()???\n", card->index);
1180 return;
1182 card->in_handler = 1;
1183 if (card->in_poll)
1185 card->in_handler = 0;
1186 printk("nicstar%d: Called irq handler while in ns_poll()!?\n",
1187 card->index);
1188 return;
1191 stat_r = readl(card->membase + STAT);
1193 /* Transmit Status Indicator has been written to T. S. Queue */
1194 if (stat_r & NS_STAT_TSIF)
1196 TXPRINTK("nicstar%d: TSI interrupt\n", card->index);
1197 process_tsq(card);
1198 writel(NS_STAT_TSIF, card->membase + STAT);
1201 /* Incomplete CS-PDU has been transmitted */
1202 if (stat_r & NS_STAT_TXICP)
1204 writel(NS_STAT_TXICP, card->membase + STAT);
1205 TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n",
1206 card->index);
1209 /* Transmit Status Queue 7/8 full */
1210 if (stat_r & NS_STAT_TSQF)
1212 writel(NS_STAT_TSQF, card->membase + STAT);
1213 PRINTK("nicstar%d: TSQ full.\n", card->index);
1214 process_tsq(card);
1217 /* Timer overflow */
1218 if (stat_r & NS_STAT_TMROF)
1220 writel(NS_STAT_TMROF, card->membase + STAT);
1221 PRINTK("nicstar%d: Timer overflow.\n", card->index);
1224 /* PHY device interrupt signal active */
1225 if (stat_r & NS_STAT_PHYI)
1227 writel(NS_STAT_PHYI, card->membase + STAT);
1228 PRINTK("nicstar%d: PHY interrupt.\n", card->index);
1229 if (dev->phy && dev->phy->interrupt) {
1230 dev->phy->interrupt(dev);
1234 /* Small Buffer Queue is full */
1235 if (stat_r & NS_STAT_SFBQF)
1237 writel(NS_STAT_SFBQF, card->membase + STAT);
1238 printk("nicstar%d: Small free buffer queue is full.\n", card->index);
1241 /* Large Buffer Queue is full */
1242 if (stat_r & NS_STAT_LFBQF)
1244 writel(NS_STAT_LFBQF, card->membase + STAT);
1245 printk("nicstar%d: Large free buffer queue is full.\n", card->index);
1248 /* Receive Status Queue is full */
1249 if (stat_r & NS_STAT_RSQF)
1251 writel(NS_STAT_RSQF, card->membase + STAT);
1252 printk("nicstar%d: RSQ full.\n", card->index);
1253 process_rsq(card);
1256 /* Complete CS-PDU received */
1257 if (stat_r & NS_STAT_EOPDU)
1259 RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index);
1260 process_rsq(card);
1261 writel(NS_STAT_EOPDU, card->membase + STAT);
1264 /* Raw cell received */
1265 if (stat_r & NS_STAT_RAWCF)
1267 writel(NS_STAT_RAWCF, card->membase + STAT);
1268 #ifndef RCQ_SUPPORT
1269 printk("nicstar%d: Raw cell received and no support yet...\n",
1270 card->index);
1271 #endif /* RCQ_SUPPORT */
1272 /* NOTE: the following procedure may keep a raw cell pending until the
1273 next interrupt. As this preliminary support is only meant to
1274 avoid buffer leakage, this is not an issue. */
1275 while (readl(card->membase + RAWCT) != card->rawch)
1277 ns_rcqe *rawcell;
1279 rawcell = (ns_rcqe *) bus_to_virt(card->rawch);
1280 if (ns_rcqe_islast(rawcell))
1282 struct sk_buff *oldbuf;
1284 oldbuf = card->rcbuf;
1285 card->rcbuf = (struct sk_buff *) ns_rcqe_nextbufhandle(rawcell);
1286 card->rawch = (u32) virt_to_bus(card->rcbuf->data);
1287 recycle_rx_buf(card, oldbuf);
1289 else
1290 card->rawch += NS_RCQE_SIZE;
1294 /* Small buffer queue is empty */
1295 if (stat_r & NS_STAT_SFBQE)
1297 int i;
1298 struct sk_buff *sb;
1300 writel(NS_STAT_SFBQE, card->membase + STAT);
1301 printk("nicstar%d: Small free buffer queue empty.\n",
1302 card->index);
1303 for (i = 0; i < card->sbnr.min; i++)
1305 sb = alloc_skb(NS_SMSKBSIZE, GFP_ATOMIC);
1306 if (sb == NULL)
1308 writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG);
1309 card->efbie = 0;
1310 break;
1312 skb_queue_tail(&card->sbpool.queue, sb);
1313 skb_reserve(sb, NS_AAL0_HEADER);
1314 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
1316 card->sbfqc = i;
1317 process_rsq(card);
1320 /* Large buffer queue empty */
1321 if (stat_r & NS_STAT_LFBQE)
1323 int i;
1324 struct sk_buff *lb;
1326 writel(NS_STAT_LFBQE, card->membase + STAT);
1327 printk("nicstar%d: Large free buffer queue empty.\n",
1328 card->index);
1329 for (i = 0; i < card->lbnr.min; i++)
1331 lb = alloc_skb(NS_LGSKBSIZE, GFP_ATOMIC);
1332 if (lb == NULL)
1334 writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG);
1335 card->efbie = 0;
1336 break;
1338 skb_queue_tail(&card->lbpool.queue, lb);
1339 skb_reserve(lb, NS_SMBUFSIZE);
1340 push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
1342 card->lbfqc = i;
1343 process_rsq(card);
1346 /* Receive Status Queue is 7/8 full */
1347 if (stat_r & NS_STAT_RSQAF)
1349 writel(NS_STAT_RSQAF, card->membase + STAT);
1350 RXPRINTK("nicstar%d: RSQ almost full.\n", card->index);
1351 process_rsq(card);
1354 card->in_handler = 0;
1355 PRINTK("nicstar%d: end of interrupt service\n", card->index);
1360 static int ns_open(struct atm_vcc *vcc, short vpi, int vci)
1362 ns_dev *card;
1363 vc_map *vc;
1364 int error;
1365 unsigned long tmpl, modl;
1366 int tcr, tcra; /* target cell rate, and absolute value */
1367 int n = 0; /* Number of entries in the TST. Initialized to remove
1368 the compiler warning. */
1369 u32 u32d[4];
1370 int frscdi = 0; /* Index of the SCD. Initialized to remove the compiler
1371 warning. How I wish compilers were clever enough to
1372 tell which variables can truly be used
1373 uninitialized... */
1374 int inuse; /* tx or rx vc already in use by another vcc */
1376 card = (ns_dev *) vcc->dev->dev_data;
1377 PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int) vpi, vci);
1378 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
1380 PRINTK("nicstar%d: unsupported AAL.\n", card->index);
1381 return -EINVAL;
1384 if ((error = atm_find_ci(vcc, &vpi, &vci)))
1386 PRINTK("nicstar%d: error in atm_find_ci().\n", card->index);
1387 return error;
1389 vc = &(card->vcmap[vpi << card->vcibits | vci]);
1390 vcc->vpi = vpi;
1391 vcc->vci = vci;
1392 vcc->dev_data = vc;
1394 inuse = 0;
1395 if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx)
1396 inuse = 1;
1397 if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx)
1398 inuse += 2;
1399 if (inuse)
1401 printk("nicstar%d: %s vci already in use.\n", card->index,
1402 inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx");
1403 return -EINVAL;
1406 vcc->flags |= ATM_VF_ADDR;
1408 /* NOTE: You are not allowed to modify an open connection's QOS. To change
1409 that, remove the ATM_VF_PARTIAL flag checking. There may be other changes
1410 needed to do that. */
1411 if (!(vcc->flags & ATM_VF_PARTIAL))
1413 scq_info *scq;
1415 vcc->flags |= ATM_VF_PARTIAL;
1416 if (vcc->qos.txtp.traffic_class == ATM_CBR)
1418 /* Check requested cell rate and availability of SCD */
1419 if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0 &&
1420 vcc->qos.txtp.min_pcr == 0)
1422 PRINTK("nicstar%d: trying to open a CBR vc with cell rate = 0 \n",
1423 card->index);
1424 vcc->flags &= ~(ATM_VF_ADDR | ATM_VF_PARTIAL);
1425 return -EINVAL;
1428 tcr = atm_pcr_goal(&(vcc->qos.txtp));
1429 tcra = tcr >= 0 ? tcr : -tcr;
1431 PRINTK("nicstar%d: target cell rate = %d.\n", card->index,
1432 vcc->qos.txtp.max_pcr);
1434 tmpl = (unsigned long)tcra * (unsigned long)NS_TST_NUM_ENTRIES;
1435 modl = tmpl % card->max_pcr;
1437 n = (int)(tmpl / card->max_pcr);
1438 if (tcr > 0)
1440 if (modl > 0) n++;
1442 else if (tcr == 0)
1444 if ((n = (card->tst_free_entries - NS_TST_RESERVED)) <= 0)
1446 PRINTK("nicstar%d: no CBR bandwidth free.\n", card->index);
1447 vcc->flags &= ~(ATM_VF_ADDR | ATM_VF_PARTIAL);
1448 return -EINVAL;
1452 if (n == 0)
1454 printk("nicstar%d: selected bandwidth < granularity.\n", card->index);
1455 vcc->flags &= ~(ATM_VF_ADDR | ATM_VF_PARTIAL);
1456 return -EINVAL;
1459 if (n > (card->tst_free_entries - NS_TST_RESERVED))
1461 PRINTK("nicstar%d: not enough free CBR bandwidth.\n", card->index);
1462 vcc->flags &= ~(ATM_VF_ADDR | ATM_VF_PARTIAL);
1463 return -EINVAL;
1465 else
1466 card->tst_free_entries -= n;
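/* In short: n = ceil(tcra * NS_TST_NUM_ENTRIES / card->max_pcr), i.e. the
   requested cell rate expressed as a number of TST slots. Worked example
   (hypothetical figures): asking for 10% of the link rate on a 4096-entry
   TST reserves ceil(409.6) = 410 slots. tcr == 0 is treated as "all
   remaining CBR bandwidth" (tst_free_entries minus the reserved entries). */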
1468 XPRINTK("nicstar%d: writing %d tst entries.\n", card->index, n);
1469 for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++)
1471 if (card->scd2vc[frscdi] == NULL)
1473 card->scd2vc[frscdi] = vc;
1474 break;
1477 if (frscdi == NS_FRSCD_NUM)
1479 PRINTK("nicstar%d: no SCD available for CBR channel.\n", card->index);
1480 card->tst_free_entries += n;
1481 vcc->flags &= ~(ATM_VF_ADDR | ATM_VF_PARTIAL);
1482 return -EBUSY;
1485 vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE;
1487 scq = get_scq(CBR_SCQSIZE, vc->cbr_scd);
1488 if (scq == (scq_info *) NULL)
1490 PRINTK("nicstar%d: can't get fixed rate SCQ.\n", card->index);
1491 card->scd2vc[frscdi] = NULL;
1492 card->tst_free_entries += n;
1493 vcc->flags &= ~(ATM_VF_ADDR | ATM_VF_PARTIAL);
1494 return -ENOMEM;
1496 vc->scq = scq;
1497 u32d[0] = (u32) virt_to_bus(scq->base);
1498 u32d[1] = (u32) 0x00000000;
1499 u32d[2] = (u32) 0xffffffff;
1500 u32d[3] = (u32) 0x00000000;
1501 ns_write_sram(card, vc->cbr_scd, u32d, 4);
1503 fill_tst(card, n, vc);
1505 else /* not CBR */
1507 vc->cbr_scd = 0x00000000;
1508 vc->scq = card->scq0;
1511 if (vcc->qos.txtp.traffic_class != ATM_NONE)
1513 vc->tx = 1;
1514 vc->tx_vcc = vcc;
1515 vc->tbd_count = 0;
1517 if (vcc->qos.rxtp.traffic_class != ATM_NONE)
1519 u32 status;
1521 vc->rx = 1;
1522 vc->rx_vcc = vcc;
1523 vc->rx_iov = NULL;
1525 /* Open the connection in hardware */
1526 if (vcc->qos.aal == ATM_AAL5)
1527 status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN;
1528 else /* vcc->qos.aal == ATM_AAL0 */
1529 status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN;
1530 #ifdef RCQ_SUPPORT
1531 status |= NS_RCTE_RAWCELLINTEN;
1532 #endif /* RCQ_SUPPORT */
1533 ns_write_sram(card, NS_RCT + (vpi << card->vcibits | vci) *
1534 NS_RCT_ENTRY_SIZE, &status, 1);
1539 vcc->flags |= ATM_VF_READY;
1540 MOD_INC_USE_COUNT;
1541 return 0;
1546 static void ns_close(struct atm_vcc *vcc)
1548 vc_map *vc;
1549 ns_dev *card;
1550 u32 data;
1551 int i;
1553 vc = vcc->dev_data;
1554 card = vcc->dev->dev_data;
1555 PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index,
1556 (int) vcc->vpi, vcc->vci);
1558 vcc->flags &= ~(ATM_VF_READY);
1560 if (vcc->qos.rxtp.traffic_class != ATM_NONE)
1562 u32 addr;
1563 unsigned long flags;
1565 addr = NS_RCT + (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE;
1566 save_flags(flags); cli();
1567 while(CMD_BUSY(card));
1568 writel(NS_CMD_CLOSE_CONNECTION | addr << 2, card->membase + CMD);
1569 restore_flags(flags);
1571 vc->rx = 0;
1572 if (vc->rx_iov != NULL)
1574 struct sk_buff *iovb;
1575 u32 stat;
1577 stat = readl(card->membase + STAT);
1578 card->sbfqc = ns_stat_sfbqc_get(stat);
1579 card->lbfqc = ns_stat_lfbqc_get(stat);
1581 PRINTK("nicstar%d: closing a VC with pending rx buffers.\n",
1582 card->index);
1583 iovb = vc->rx_iov;
1584 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
1585 ATM_SKB(iovb)->iovcnt);
1586 ATM_SKB(iovb)->iovcnt = 0;
1587 ATM_SKB(iovb)->vcc = NULL;
1588 save_flags(flags); cli();
1589 recycle_iov_buf(card, iovb);
1590 restore_flags(flags);
1591 vc->rx_iov = NULL;
1595 if (vcc->qos.txtp.traffic_class != ATM_NONE)
1597 vc->tx = 0;
1600 if (vcc->qos.txtp.traffic_class == ATM_CBR)
1602 unsigned long flags;
1603 ns_scqe *scqep;
1604 scq_info *scq;
1606 scq = vc->scq;
1608 for (;;)
1610 save_flags(flags); cli();
1611 scqep = scq->next;
1612 if (scqep == scq->base)
1613 scqep = scq->last;
1614 else
1615 scqep--;
1616 if (scqep == scq->tail)
1618 restore_flags(flags);
1619 break;
1621 /* If the last entry is not a TSR, place one in the SCQ in order to
1622 be able to completely drain it and then close. */
1623 if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next)
1625 ns_scqe tsr;
1626 u32 scdi, scqi;
1627 u32 data;
1628 int index;
1630 tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
1631 scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
1632 scqi = scq->next - scq->base;
1633 tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
1634 tsr.word_3 = 0x00000000;
1635 tsr.word_4 = 0x00000000;
1636 *scq->next = tsr;
1637 index = (int) scqi;
1638 scq->skb[index] = NULL;
1639 if (scq->next == scq->last)
1640 scq->next = scq->base;
1641 else
1642 scq->next++;
1643 data = (u32) virt_to_bus(scq->next);
1644 ns_write_sram(card, scq->scd, &data, 1);
1646 schedule();
1647 restore_flags(flags);
1650 /* Free all TST entries */
1651 data = NS_TST_OPCODE_VARIABLE;
1652 for (i = 0; i < NS_TST_NUM_ENTRIES; i++)
1654 if (card->tste2vc[i] == vc)
1656 ns_write_sram(card, card->tst_addr + i, &data, 1);
1657 card->tste2vc[i] = NULL;
1658 card->tst_free_entries++;
1662 card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL;
1663 free_scq(vc->scq, vcc);
1666 vcc->dev_data = NULL;
1667 vcc->flags &= ~(ATM_VF_PARTIAL | ATM_VF_ADDR);
1668 MOD_DEC_USE_COUNT;
1670 #ifdef RX_DEBUG
1672 u32 stat, cfg;
1673 stat = readl(card->membase + STAT);
1674 cfg = readl(card->membase + CFG);
1675 printk("STAT = 0x%08X CFG = 0x%08X \n", stat, cfg);
1676 printk("TSQ: base = 0x%08X next = 0x%08X last = 0x%08X TSQT = 0x%08X \n",
1677 (u32) card->tsq.base, (u32) card->tsq.next,(u32) card->tsq.last,
1678 readl(card->membase + TSQT));
1679 printk("RSQ: base = 0x%08X next = 0x%08X last = 0x%08X RSQT = 0x%08X \n",
1680 (u32) card->rsq.base, (u32) card->rsq.next,(u32) card->rsq.last,
1681 readl(card->membase + RSQT));
1682 printk("Empty free buffer queue interrupt %s \n",
1683 card->efbie ? "enabled" : "disabled");
1684 printk("SBCNT = %d count = %d LBCNT = %d count = %d \n",
1685 ns_stat_sfbqc_get(stat), card->sbpool.count,
1686 ns_stat_lfbqc_get(stat), card->lbpool.count);
1687 printk("hbpool.count = %d iovpool.count = %d \n",
1688 card->hbpool.count, card->iovpool.count);
1690 #endif /* RX_DEBUG */
1695 static void fill_tst(ns_dev *card, int n, vc_map *vc)
1697 u32 new_tst;
1698 unsigned long cl;
1699 int e, r;
1700 u32 data;
1702 /* It would be very complicated to keep the two TSTs synchronized while
1703 assuring that writes are only made to the inactive TST. So, for now I
1704 will use only one TST. If problems occur, I will change this again */
1706 new_tst = card->tst_addr;
1708 /* Fill procedure */
1710 for (e = 0; e < NS_TST_NUM_ENTRIES; e++)
1712 if (card->tste2vc[e] == NULL)
1713 break;
1715 if (e == NS_TST_NUM_ENTRIES) {
1716 printk("nicstar%d: No free TST entries found. \n", card->index);
1717 return;
1720 r = n;
1721 cl = NS_TST_NUM_ENTRIES;
1722 data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd);
1724 while (r > 0)
1726 if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL)
1728 card->tste2vc[e] = vc;
1729 ns_write_sram(card, new_tst + e, &data, 1);
1730 cl -= NS_TST_NUM_ENTRIES;
1731 r--;
1734 if (++e == NS_TST_NUM_ENTRIES) {
1735 e = 0;
1737 cl += n;
1740 /* End of fill procedure */
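/* The loop above spreads the n FIXED entries as evenly as possible over the
   table, Bresenham style: each slot visited adds n to the accumulator 'cl',
   and a free slot is claimed (with NS_TST_NUM_ENTRIES subtracted) whenever
   the accumulator reaches NS_TST_NUM_ENTRIES, so claimed slots end up
   roughly NS_TST_NUM_ENTRIES/n positions apart. */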
1742 data = ns_tste_make(NS_TST_OPCODE_END, new_tst);
1743 ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1);
1744 ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1);
1745 card->tst_addr = new_tst;
1750 static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
1752 ns_dev *card;
1753 vc_map *vc;
1754 scq_info *scq;
1755 unsigned long buflen;
1756 ns_scqe scqe;
1757 u32 flags; /* TBD flags, not CPU flags */
1759 card = vcc->dev->dev_data;
1760 TXPRINTK("nicstar%d: ns_send() called.\n", card->index);
1761 if ((vc = (vc_map *) vcc->dev_data) == NULL)
1763 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
1764 vcc->stats->tx_err++;
1765 dev_kfree_skb(skb);
1766 return -EINVAL;
1769 if (!vc->tx)
1771 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
1772 vcc->stats->tx_err++;
1773 dev_kfree_skb(skb);
1774 return -EINVAL;
1777 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
1779 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
1780 vcc->stats->tx_err++;
1781 dev_kfree_skb(skb);
1782 return -EINVAL;
1785 if (ATM_SKB(skb)->iovcnt != 0)
1787 printk("nicstar%d: No scatter-gather yet.\n", card->index);
1788 vcc->stats->tx_err++;
1789 dev_kfree_skb(skb);
1790 return -EINVAL;
1793 ATM_SKB(skb)->vcc = vcc;
1795 if (vcc->qos.aal == ATM_AAL5)
1797 buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */
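/* i.e. payload plus the 8-byte AAL5 trailer, rounded up to a whole number of
   48-byte cell payloads. Example: a 100-byte SDU gives (100 + 55)/48 * 48 =
   144 bytes, three cells. */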
1798 flags = NS_TBD_AAL5;
1799 scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data));
1800 scqe.word_3 = cpu_to_le32((u32) skb->len);
1801 scqe.word_4 = cpu_to_le32(((u32) vcc->vpi) << NS_TBD_VPI_SHIFT |
1802 ((u32) vcc->vci) << NS_TBD_VCI_SHIFT);
1803 flags |= NS_TBD_EOPDU;
1805 else /* (vcc->qos.aal == ATM_AAL0) */
1807 buflen = ATM_CELL_PAYLOAD; /* i.e., 48 bytes */
1808 flags = NS_TBD_AAL0;
1809 scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data) + NS_AAL0_HEADER);
1810 scqe.word_3 = cpu_to_le32(0x00000000);
1811 if (*skb->data & 0x02) /* Payload type 1 - end of pdu */
1812 flags |= NS_TBD_EOPDU;
1813 scqe.word_4 = cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK);
1814 /* Force the VPI/VCI to be the same as in VCC struct */
1815 scqe.word_4 |= cpu_to_le32((((u32) vcc->vpi) << NS_TBD_VPI_SHIFT |
1816 ((u32) vcc->vci) << NS_TBD_VCI_SHIFT) &
1817 NS_TBD_VC_MASK);
1820 if (vcc->qos.txtp.traffic_class == ATM_CBR)
1822 scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen);
1823 scq = ((vc_map *) vcc->dev_data)->scq;
1825 else
1827 scqe.word_1 = ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen);
1828 scq = card->scq0;
1831 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
1833 vcc->stats->tx_err++;
1834 dev_kfree_skb(skb);
1835 return -EIO;
1837 vcc->stats->tx++;
1839 return 0;
1844 static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd,
1845 struct sk_buff *skb)
1847 unsigned long flags;
1848 ns_scqe tsr;
1849 u32 scdi, scqi;
1850 int scq_is_vbr;
1851 u32 data;
1852 int index;
1854 if (scq->tail == scq->next)
1856 if (in_interrupt()) {
1857 printk("nicstar%d: Error pushing TBD.\n", card->index);
1858 return 1;
1861 save_flags(flags); cli();
1862 scq->full = 1;
1863 interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT);
1864 restore_flags(flags);
1866 if (scq->full) {
1867 printk("nicstar%d: Timeout pushing TBD.\n", card->index);
1868 return 1;
1871 *scq->next = *tbd;
1872 index = (int) (scq->next - scq->base);
1873 scq->skb[index] = skb;
1874 XPRINTK("nicstar%d: sending skb at 0x%x (pos %d).\n",
1875 card->index, (u32) skb, index);
1876 XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n",
1877 card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2),
1878 le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4),
1879 (u32) scq->next);
1880 if (scq->next == scq->last)
1881 scq->next = scq->base;
1882 else
1883 scq->next++;
1885 vc->tbd_count++;
1886 if (scq->num_entries == VBR_SCQ_NUM_ENTRIES)
1888 scq->tbd_count++;
1889 scq_is_vbr = 1;
1891 else
1892 scq_is_vbr = 0;
1894 if (vc->tbd_count >= MAX_TBD_PER_VC || scq->tbd_count >= MAX_TBD_PER_SCQ)
1896 if (scq->tail == scq->next)
1898 if (in_interrupt()) {
1899 data = (u32) virt_to_bus(scq->next);
1900 ns_write_sram(card, scq->scd, &data, 1);
1901 printk("nicstar%d: Error pushing TSR.\n", card->index);
1902 return 0;
1905 save_flags(flags); cli();
1906 scq->full = 1;
1907 interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT);
1908 restore_flags(flags);
1911 if (!scq->full)
1913 tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
1914 if (scq_is_vbr)
1915 scdi = NS_TSR_SCDISVBR;
1916 else
1917 scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
1918 scqi = scq->next - scq->base;
1919 tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
1920 tsr.word_3 = 0x00000000;
1921 tsr.word_4 = 0x00000000;
1923 *scq->next = tsr;
1924 index = (int) scqi;
1925 scq->skb[index] = NULL;
1926 XPRINTK("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n",
1927 card->index, le32_to_cpu(tsr.word_1), le32_to_cpu(tsr.word_2),
1928 le32_to_cpu(tsr.word_3), le32_to_cpu(tsr.word_4),
1929 (u32) scq->next);
1930 if (scq->next == scq->last)
1931 scq->next = scq->base;
1932 else
1933 scq->next++;
1934 vc->tbd_count = 0;
1935 scq->tbd_count = 0;
1937 else
1938 PRINTK("nicstar%d: Timeout pushing TSR.\n", card->index);
1941 data = (u32) virt_to_bus(scq->next);
1942 ns_write_sram(card, scq->scd, &data, 1);
1944 return 0;
1949 static void process_tsq(ns_dev *card)
1951 u32 scdi;
1952 scq_info *scq;
1953 ns_tsi *previous, *one_ahead, *two_ahead;
1954 int serviced_entries; /* flag indicating at least one entry was serviced */
1956 serviced_entries = 0;
1958 if (card->tsq.next == card->tsq.last)
1959 one_ahead = card->tsq.base;
1960 else
1961 one_ahead = card->tsq.next + 1;
1963 if (one_ahead == card->tsq.last)
1964 two_ahead = card->tsq.base;
1965 else
1966 two_ahead = one_ahead + 1;
1968 while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) ||
1969 !ns_tsi_isempty(two_ahead))
1970 /* At most two empty, as stated in the 77201 errata */
1972 serviced_entries = 1;
1974 /* Skip the one or two possible empty entries */
1975 while (ns_tsi_isempty(card->tsq.next)) {
1976 if (card->tsq.next == card->tsq.last)
1977 card->tsq.next = card->tsq.base;
1978 else
1979 card->tsq.next++;
1982 if (!ns_tsi_tmrof(card->tsq.next))
1984 scdi = ns_tsi_getscdindex(card->tsq.next);
1985 if (scdi == NS_TSI_SCDISVBR)
1986 scq = card->scq0;
1987 else
1989 if (card->scd2vc[scdi] == NULL)
1991 printk("nicstar%d: could not find VC from SCD index.\n",
1992 card->index);
1993 ns_tsi_init(card->tsq.next);
1994 return;
1996 scq = card->scd2vc[scdi]->scq;
1998 drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next));
1999 scq->full = 0;
2000 wake_up_interruptible(&(scq->scqfull_waitq));
2003 ns_tsi_init(card->tsq.next);
2004 previous = card->tsq.next;
2005 if (card->tsq.next == card->tsq.last)
2006 card->tsq.next = card->tsq.base;
2007 else
2008 card->tsq.next++;
2010 if (card->tsq.next == card->tsq.last)
2011 one_ahead = card->tsq.base;
2012 else
2013 one_ahead = card->tsq.next + 1;
2015 if (one_ahead == card->tsq.last)
2016 two_ahead = card->tsq.base;
2017 else
2018 two_ahead = one_ahead + 1;
2021 if (serviced_entries) {
2022 writel((((u32) previous) - ((u32) card->tsq.base)),
2023 card->membase + TSQH);
2029 static void drain_scq(ns_dev *card, scq_info *scq, int pos)
2031 struct atm_vcc *vcc;
2032 struct sk_buff *skb;
2033 int i;
2035 XPRINTK("nicstar%d: drain_scq() called, scq at 0x%x, pos %d.\n",
2036 card->index, (u32) scq, pos);
2037 if (pos >= scq->num_entries)
2039 printk("nicstar%d: Bad index on drain_scq().\n", card->index);
2040 return;
2043 i = (int) (scq->tail - scq->base);
2044 if (++i == scq->num_entries)
2045 i = 0;
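/* Walk the ring from the entry just past the old tail up to (but not
   including) 'pos', the position reported in the transmit status indication:
   everything in that range has been consumed by the SAR, so the associated
   skbs can be released. */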
2046 while (i != pos)
2048 skb = scq->skb[i];
2049 XPRINTK("nicstar%d: freeing skb at 0x%x (index %d).\n",
2050 card->index, (u32) skb, i);
2051 if (skb != NULL)
2053 vcc = ATM_SKB(skb)->vcc;
2054 if (vcc->pop != NULL)
2055 vcc->pop(vcc, skb);
2056 else
2057 dev_kfree_skb(skb);
2058 scq->skb[i] = NULL;
2060 if (++i == scq->num_entries)
2061 i = 0;
2063 scq->tail = scq->base + pos;
2068 static void process_rsq(ns_dev *card)
2070 ns_rsqe *previous;
2072 if (!ns_rsqe_valid(card->rsq.next))
2073 return;
2074 while (ns_rsqe_valid(card->rsq.next))
2076 dequeue_rx(card, card->rsq.next);
2077 ns_rsqe_init(card->rsq.next);
2078 previous = card->rsq.next;
2079 if (card->rsq.next == card->rsq.last)
2080 card->rsq.next = card->rsq.base;
2081 else
2082 card->rsq.next++;
2084 writel((((u32) previous) - ((u32) card->rsq.base)),
2085 card->membase + RSQH);
2090 static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2092 u32 vpi, vci;
2093 vc_map *vc;
2094 struct sk_buff *iovb;
2095 struct iovec *iov;
2096 struct atm_vcc *vcc;
2097 struct sk_buff *skb;
2098 unsigned short aal5_len;
2099 int len;
2100 u32 stat;
2102 stat = readl(card->membase + STAT);
2103 card->sbfqc = ns_stat_sfbqc_get(stat);
2104 card->lbfqc = ns_stat_lfbqc_get(stat);
2106 skb = (struct sk_buff *) le32_to_cpu(rsqe->buffer_handle);
2107 vpi = ns_rsqe_vpi(rsqe);
2108 vci = ns_rsqe_vci(rsqe);
2109 if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits)
2111 printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n",
2112 card->index, vpi, vci);
2113 recycle_rx_buf(card, skb);
2114 return;
2117 vc = &(card->vcmap[vpi << card->vcibits | vci]);
2118 if (!vc->rx)
2120 RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n",
2121 card->index, vpi, vci);
2122 recycle_rx_buf(card, skb);
2123 return;
2126 vcc = vc->rx_vcc;
2128 if (vcc->qos.aal == ATM_AAL0)
2130 struct sk_buff *sb;
2131 unsigned char *cell;
2132 int i;
2134 cell = skb->data;
2135 for (i = ns_rsqe_cellcount(rsqe); i; i--)
2137 if ((sb = alloc_skb(NS_SMSKBSIZE, GFP_ATOMIC)) == NULL)
2139 printk("nicstar%d: Can't allocate buffers for aal0.\n",
2140 card->index);
2141 vcc->stats->rx_drop += i;
2142 break;
2144 if (!atm_charge(vcc, sb->truesize))
2146 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
2147 card->index);
2148 vcc->stats->rx_drop += i - 1; /* already increased by 1 */
2149 kfree_skb(sb);
2150 break;
2152 /* Rebuild the header */
2153 *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 |
2154 (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000);
2155 if (i == 1 && ns_rsqe_eopdu(rsqe))
2156 *((u32 *) sb->data) |= 0x00000002;
2157 skb_put(sb, NS_AAL0_HEADER);
2158 memcpy(sb->tail, cell, ATM_CELL_PAYLOAD);
2159 skb_put(sb, ATM_CELL_PAYLOAD);
2160 ATM_SKB(sb)->vcc = vcc;
2161 sb->stamp = xtime;
2162 vcc->push(vcc, sb);
2163 vcc->stats->rx++;
2164 cell += ATM_CELL_PAYLOAD;
2167 recycle_rx_buf(card, skb);
2168 return;
2171 /* If execution reaches this point, the AAL in use can only be AAL5 */
2173 if ((iovb = vc->rx_iov) == NULL)
2175 iovb = skb_dequeue(&(card->iovpool.queue));
2176 if (iovb == NULL) /* No buffers in the queue */
2178 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC);
2179 if (iovb == NULL)
2181 printk("nicstar%d: Out of iovec buffers.\n", card->index);
2182 vcc->stats->rx_drop++;
2183 recycle_rx_buf(card, skb);
2184 return;
2187 else
2188 if (--card->iovpool.count < card->iovnr.min)
2190 struct sk_buff *new_iovb;
2191 if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL)
2193 skb_queue_tail(&card->iovpool.queue, new_iovb);
2194 card->iovpool.count++;
2197 vc->rx_iov = iovb;
2198 ATM_SKB(iovb)->iovcnt = 0;
2199 iovb->len = 0;
2200 iovb->tail = iovb->data = iovb->head;
2201 ATM_SKB(iovb)->vcc = vcc;
2202 /* IMPORTANT: a pointer to the sk_buff containing the small or large
2203 buffer is stored as iovec base, NOT a pointer to the
2204 small or large buffer itself. */
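/* (The matching retrieval later on is therefore
      sb = (struct sk_buff *) iov->iov_base;
   and not a plain data pointer.) */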
2206 else if (ATM_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
2208 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
2209 vcc->stats->rx_err++;
2210 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
2211 ATM_SKB(iovb)->iovcnt = 0;
2212 iovb->len = 0;
2213 iovb->tail = iovb->data = iovb->head;
2214 ATM_SKB(iovb)->vcc = vcc;
2216 iov = &((struct iovec *) iovb->data)[ATM_SKB(iovb)->iovcnt++];
2217 iov->iov_base = (void *) skb;
2218 iov->iov_len = ns_rsqe_cellcount(rsqe) * 48;
2219 iovb->len += iov->iov_len;
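/* iov_len counts whole 48-byte cell payloads, so for the final buffer of a
   PDU it also covers the AAL5 padding and trailer; the real SDU length only
   becomes known once the trailer is parsed below. */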
2221 if (ATM_SKB(iovb)->iovcnt == 1)
2223 if (skb->list != &card->sbpool.queue)
2225 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
2226 card->index);
2227 which_list(card, skb);
2228 vcc->stats->rx_err++;
2229 recycle_rx_buf(card, skb);
2230 vc->rx_iov = NULL;
2231 recycle_iov_buf(card, iovb);
2232 return;
2235 else /* ATM_SKB(iovb)->iovcnt >= 2 */
2237 if (skb->list != &card->lbpool.queue)
2239 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
2240 card->index);
2241 which_list(card, skb);
2242 vcc->stats->rx_err++;
2243 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
2244 ATM_SKB(iovb)->iovcnt);
2245 vc->rx_iov = NULL;
2246 recycle_iov_buf(card, iovb);
2247 return;
2251 if (ns_rsqe_eopdu(rsqe))
2253 /* This works correctly regardless of the endianness of the host */
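/* The last 6 bytes of the final cell hold the 2-byte AAL5 length field
   (most significant byte first) followed by the 4-byte CRC; composing the
   length from individual bytes is what makes this endian-independent.
   A length field of zero is taken to mean 0x10000 bytes. */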
2254 unsigned char *L1L2 = (unsigned char *)((u32)skb->data +
2255 iov->iov_len - 6);
2256 aal5_len = L1L2[0] << 8 | L1L2[1];
2257 len = (aal5_len == 0x0000) ? 0x10000 : aal5_len;
2258 if (ns_rsqe_crcerr(rsqe) ||
2259 len + 8 > iovb->len || len + (47 + 8) < iovb->len)
2261 printk("nicstar%d: AAL5 CRC error", card->index);
2262 if (len + 8 > iovb->len || len + (47 + 8) < iovb->len)
2263 printk(" - PDU size mismatch.\n");
2264 else
2265 printk(".\n");
2266 vcc->stats->rx_err++;
2267 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
2268 ATM_SKB(iovb)->iovcnt);
2269 vc->rx_iov = NULL;
2270 recycle_iov_buf(card, iovb);
2271 return;
2274 /* By this point we (hopefully) have a complete SDU without errors. */
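/* Delivery now depends on how many buffers the SDU occupies: a lone small
   buffer is pushed as-is; a small + large pair is delivered from the small
   buffer for short SDUs or folded into the large one otherwise; anything
   bigger is copied into a huge buffer below. */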
2276 if (ATM_SKB(iovb)->iovcnt == 1) /* Just a small buffer */
2278 /* skb points to a small buffer */
2279 if (!atm_charge(vcc, skb->truesize))
2281 push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data),
2282 0, 0);
2284 else
2286 skb_put(skb, len);
2287 dequeue_sm_buf(card, skb);
2288 #ifdef NS_USE_DESTRUCTORS
2289 skb->destructor = ns_sb_destructor;
2290 #endif /* NS_USE_DESTRUCTORS */
2291 ATM_SKB(skb)->vcc = vcc;
2292 skb->stamp = xtime;
2293 vcc->push(vcc, skb);
2294 vcc->stats->rx++;
2297 else if (ATM_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
2299 struct sk_buff *sb;
2301 sb = (struct sk_buff *) (iov - 1)->iov_base;
2302 /* skb points to a large buffer */
2304 if (len <= NS_SMBUFSIZE)
2306 if (!atm_charge(vcc, sb->truesize))
2308 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
2309 0, 0);
2311 else
2313 skb_put(sb, len);
2314 dequeue_sm_buf(card, sb);
2315 #ifdef NS_USE_DESTRUCTORS
2316 sb->destructor = ns_sb_destructor;
2317 #endif /* NS_USE_DESTRUCTORS */
2318 ATM_SKB(sb)->vcc = vcc;
2319 sb->stamp = xtime;
2320 vcc->push(vcc, sb);
2321 vcc->stats->rx++;
2324 push_rxbufs(card, BUF_LG, (u32) skb,
2325 (u32) virt_to_bus(skb->data), 0, 0);
2328 else /* len > NS_SMBUFSIZE, the usual case */
2330 if (!atm_charge(vcc, skb->truesize))
2332 push_rxbufs(card, BUF_LG, (u32) skb,
2333 (u32) virt_to_bus(skb->data), 0, 0);
2335 else
2337 dequeue_lg_buf(card, skb);
2338 #ifdef NS_USE_DESTRUCTORS
2339 skb->destructor = ns_lb_destructor;
2340 #endif /* NS_USE_DESTRUCTORS */
2341 skb_push(skb, NS_SMBUFSIZE);
2342 memcpy(skb->data, sb->data, NS_SMBUFSIZE);
2343 skb_put(skb, len - NS_SMBUFSIZE);
2344 ATM_SKB(skb)->vcc = vcc;
2345 skb->stamp = xtime;
2346 vcc->push(vcc, skb);
2347 vcc->stats->rx++;
2350 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
2351 0, 0);
2356 else /* Must push a huge buffer */
2358 struct sk_buff *hb, *sb, *lb;
2359 int remaining, tocopy;
2360 int j;
2362 hb = skb_dequeue(&(card->hbpool.queue));
2363 if (hb == NULL) /* No buffers in the queue */
2366 hb = alloc_skb(NS_HBUFSIZE, GFP_ATOMIC);
2367 if (hb == NULL)
2369 printk("nicstar%d: Out of huge buffers.\n", card->index);
2370 vcc->stats->rx_drop++;
2371 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
2372 ATM_SKB(iovb)->iovcnt);
2373 vc->rx_iov = NULL;
2374 recycle_iov_buf(card, iovb);
2375 return;
2377 else if (card->hbpool.count < card->hbnr.min)
2379 struct sk_buff *new_hb;
2380 if ((new_hb = alloc_skb(NS_HBUFSIZE, GFP_ATOMIC)) != NULL)
2382 skb_queue_tail(&card->hbpool.queue, new_hb);
2383 card->hbpool.count++;
2387 else
2388 if (--card->hbpool.count < card->hbnr.min)
2390 struct sk_buff *new_hb;
2391 if ((new_hb = alloc_skb(NS_HBUFSIZE, GFP_ATOMIC)) != NULL)
2393 skb_queue_tail(&card->hbpool.queue, new_hb);
2394 card->hbpool.count++;
2396 if (card->hbpool.count < card->hbnr.min)
2398 if ((new_hb = alloc_skb(NS_HBUFSIZE, GFP_ATOMIC)) != NULL)
2400 skb_queue_tail(&card->hbpool.queue, new_hb);
2401 card->hbpool.count++;
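/* With a huge buffer in hand, charge the VCC for it and then copy the small
   buffer plus every large buffer of the PDU into it, returning each source
   buffer to the card's free queues as it is drained. */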
2406 iov = (struct iovec *) iovb->data;
2408 if (!atm_charge(vcc, hb->truesize))
2410 recycle_iovec_rx_bufs(card, iov, ATM_SKB(iovb)->iovcnt);
2411 if (card->hbpool.count < card->hbnr.max)
2413 skb_queue_tail(&card->hbpool.queue, hb);
2414 card->hbpool.count++;
2416 else
2417 kfree_skb(hb);
2419 else
2421 /* Copy the small buffer to the huge buffer */
2422 sb = (struct sk_buff *) iov->iov_base;
2423 memcpy(hb->data, sb->data, iov->iov_len);
2424 skb_put(hb, iov->iov_len);
2425 remaining = len - iov->iov_len;
2426 iov++;
2427 /* Free the small buffer */
2428 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
2429 0, 0);
2431 /* Copy all large buffers to the huge buffer and free them */
2432 for (j = 1; j < ATM_SKB(iovb)->iovcnt; j++)
2434 lb = (struct sk_buff *) iov->iov_base;
2435 tocopy = MIN(remaining, iov->iov_len);
2436 memcpy(hb->tail, lb->data, tocopy);
2437 skb_put(hb, tocopy);
2438 iov++;
2439 remaining -= tocopy;
2440 push_rxbufs(card, BUF_LG, (u32) lb,
2441 (u32) virt_to_bus(lb->data), 0, 0);
2443 #ifdef EXTRA_DEBUG
2444 if (remaining != 0 || hb->len != len)
2445 printk("nicstar%d: Huge buffer len mismatch.\n", card->index);
2446 #endif /* EXTRA_DEBUG */
2447 ATM_SKB(hb)->vcc = vcc;
2448 #ifdef NS_USE_DESTRUCTORS
2449 hb->destructor = ns_hb_destructor;
2450 #endif /* NS_USE_DESTRUCTORS */
2451 hb->stamp = xtime;
2452 vcc->push(vcc, hb);
2453 vcc->stats->rx++;
2457 vc->rx_iov = NULL;
2458 recycle_iov_buf(card, iovb);
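/* When NS_USE_DESTRUCTORS is defined, the destructors below run as the
   delivered skbs are finally freed and refill the free buffer queues up to
   their minimum levels; dequeue_sm_buf()/dequeue_lg_buf() further down do
   the rest of the replenishing either way. */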
2465 #ifdef NS_USE_DESTRUCTORS
2467 static void ns_sb_destructor(struct sk_buff *sb)
2469 ns_dev *card;
2470 u32 stat;
2472 card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data;
2473 stat = readl(card->membase + STAT);
2474 card->sbfqc = ns_stat_sfbqc_get(stat);
2475 card->lbfqc = ns_stat_lfbqc_get(stat);
2479 sb = alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
2480 if (sb == NULL)
2481 break;
2482 skb_queue_tail(&card->sbpool.queue, sb);
2483 skb_reserve(sb, NS_AAL0_HEADER);
2484 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
2485 } while (card->sbfqc < card->sbnr.min);
2490 static void ns_lb_destructor(struct sk_buff *lb)
2492 ns_dev *card;
2493 u32 stat;
2495 card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data;
2496 stat = readl(card->membase + STAT);
2497 card->sbfqc = ns_stat_sfbqc_get(stat);
2498 card->lbfqc = ns_stat_lfbqc_get(stat);
2502 lb = alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
2503 if (lb == NULL)
2504 break;
2505 skb_queue_tail(&card->lbpool.queue, lb);
2506 skb_reserve(lb, NS_SMBUFSIZE);
2507 push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
2508 } while (card->lbfqc < card->lbnr.min);
2513 static void ns_hb_destructor(struct sk_buff *hb)
2515 ns_dev *card;
2517 card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data;
2519 while (card->hbpool.count < card->hbnr.init)
2521 hb = alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
2522 if (hb == NULL)
2523 break;
2524 skb_queue_tail(&card->hbpool.queue, hb);
2525 card->hbpool.count++;
2529 #endif /* NS_USE_DESTRUCTORS */
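/* recycle_rx_buf() hands an unused receive buffer straight back to the
   card, picking the small or large free buffer queue according to which
   pool list the skb is still linked on, and complains (and frees the skb)
   if it is on neither. */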
2533 static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb)
2535 if (skb->list == &card->sbpool.queue)
2536 push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0);
2537 else if (skb->list == &card->lbpool.queue)
2538 push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0);
2539 else
2541 printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
2542 kfree_skb(skb);
2548 static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count)
2550 struct sk_buff *skb;
2552 for (; count > 0; count--)
2554 skb = (struct sk_buff *) (iov++)->iov_base;
2555 if (skb->list == &card->sbpool.queue)
2556 push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data),
2557 0, 0);
2558 else if (skb->list == &card->lbpool.queue)
2559 push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data),
2560 0, 0);
2561 else
2563 printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
2564 kfree_skb(skb);
2571 static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb)
2573 if (card->iovpool.count < card->iovnr.max)
2575 skb_queue_tail(&card->iovpool.queue, iovb);
2576 card->iovpool.count++;
2578 else
2579 kfree_skb(iovb);
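/* dequeue_sm_buf()/dequeue_lg_buf() unlink a buffer that is about to be
   handed up the stack and, when the card's free buffer count has dropped
   below the configured threshold, allocate a replacement and feed it to
   the card. */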
2584 static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
2586 skb_unlink(sb);
2587 #ifdef NS_USE_DESTRUCTORS
2588 if (card->sbfqc < card->sbnr.min)
2589 #else
2590 if (card->sbfqc < card->sbnr.init)
2592 struct sk_buff *new_sb;
2593 if ((new_sb = alloc_skb(NS_SMSKBSIZE, GFP_ATOMIC)) != NULL)
2595 skb_queue_tail(&card->sbpool.queue, new_sb);
2596 skb_reserve(new_sb, NS_AAL0_HEADER);
2597 push_rxbufs(card, BUF_SM, (u32) new_sb,
2598 (u32) virt_to_bus(new_sb->data), 0, 0);
2601 if (card->sbfqc < card->sbnr.init)
2602 #endif /* NS_USE_DESTRUCTORS */
2604 struct sk_buff *new_sb;
2605 if ((new_sb = alloc_skb(NS_SMSKBSIZE, GFP_ATOMIC)) != NULL)
2607 skb_queue_tail(&card->sbpool.queue, new_sb);
2608 skb_reserve(new_sb, NS_AAL0_HEADER);
2609 push_rxbufs(card, BUF_SM, (u32) new_sb,
2610 (u32) virt_to_bus(new_sb->data), 0, 0);
2617 static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
2619 skb_unlink(lb);
2620 #ifdef NS_USE_DESTRUCTORS
2621 if (card->lbfqc < card->lbnr.min)
2622 #else
2623 if (card->lbfqc < card->lbnr.init)
2625 struct sk_buff *new_lb;
2626 if ((new_lb = alloc_skb(NS_LGSKBSIZE, GFP_ATOMIC)) != NULL)
2628 skb_queue_tail(&card->lbpool.queue, new_lb);
2629 skb_reserve(new_lb, NS_SMBUFSIZE);
2630 push_rxbufs(card, BUF_LG, (u32) new_lb,
2631 (u32) virt_to_bus(new_lb->data), 0, 0);
2634 if (card->lbfqc < card->lbnr.init)
2635 #endif /* NS_USE_DESTRUCTORS */
2637 struct sk_buff *new_lb;
2638 if ((new_lb = alloc_skb(NS_LGSKBSIZE, GFP_ATOMIC)) != NULL)
2640 skb_queue_tail(&card->lbpool.queue, new_lb);
2641 skb_reserve(new_lb, NS_SMBUFSIZE);
2642 push_rxbufs(card, BUF_LG, (u32) new_lb,
2643 (u32) virt_to_bus(new_lb->data), 0, 0);
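/* ns_proc_read() backs the per-device /proc entry: one line per buffer pool
   with its current count and min/init/max levels, the interrupt counter
   (which is cleared once reported), and on 25.6 Mbps cards a dump of four
   PHY registers read through the utility command interface. */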
2650 static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2652 u32 stat;
2653 ns_dev *card;
2654 int left;
2656 left = (int) *pos;
2657 card = (ns_dev *) dev->dev_data;
2658 stat = readl(card->membase + STAT);
2659 if (!left--)
2660 return sprintf(page, "Pool count min init max \n");
2661 if (!left--)
2662 return sprintf(page, "Small %5d %5d %5d %5d \n",
2663 ns_stat_sfbqc_get(stat), card->sbnr.min, card->sbnr.init,
2664 card->sbnr.max);
2665 if (!left--)
2666 return sprintf(page, "Large %5d %5d %5d %5d \n",
2667 ns_stat_lfbqc_get(stat), card->lbnr.min, card->lbnr.init,
2668 card->lbnr.max);
2669 if (!left--)
2670 return sprintf(page, "Huge %5d %5d %5d %5d \n", card->hbpool.count,
2671 card->hbnr.min, card->hbnr.init, card->hbnr.max);
2672 if (!left--)
2673 return sprintf(page, "Iovec %5d %5d %5d %5d \n", card->iovpool.count,
2674 card->iovnr.min, card->iovnr.init, card->iovnr.max);
2675 if (!left--)
2677 int retval;
2678 retval = sprintf(page, "Interrupt counter: %u \n", card->intcnt);
2679 card->intcnt = 0;
2680 return retval;
2682 /* Dump 25.6 Mbps PHY registers */
2683 if (card->max_pcr == IDT_25_PCR && !left--)
2685 u32 phy_regs[4];
2686 u32 i;
2688 for (i = 0; i < 4; i++)
2690 while (CMD_BUSY(card));
2691 writel(NS_CMD_READ_UTILITY | 0x00000200 | i, card->membase + CMD);
2692 while (CMD_BUSY(card));
2693 phy_regs[i] = readl(card->membase + DR0) & 0x000000FF;
2696 return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n",
2697 phy_regs[0], phy_regs[1], phy_regs[2], phy_regs[3]);
2699 #if 0
2700 /* Dump TST */
2701 if (left-- < NS_TST_NUM_ENTRIES)
2703 if (card->tste2vc[left + 1] == NULL)
2704 return sprintf(page, "%5d - VBR/UBR \n", left + 1);
2705 else
2706 return sprintf(page, "%5d - %d %d \n", left + 1,
2707 card->tste2vc[left + 1]->tx_vcc->vpi,
2708 card->tste2vc[left + 1]->tx_vcc->vci);
2710 #endif /* 0 */
2711 return 0;
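/* ns_ioctl() implements three driver-private calls: NS_GETPSTAT copies the
   current level of one buffer pool into a pool_levels struct, NS_SETBUFLEV
   (superuser only) rewrites the min/init/max levels of a pool, and
   NS_ADJBUFLEV (superuser only) tops pools up to - and, for the
   software-managed pools, trims them down to - their init levels.  A
   minimal user-space sketch of NS_GETPSTAT could look like the following;
   how the file descriptor is obtained is left out, only the struct usage
   mirrors the code below:

      pool_levels pl;
      pl.buftype = NS_BUFTYPE_SMALL;
      if (ioctl(fd, NS_GETPSTAT, &pl) == sizeof(pl))
         printf("small buffers: %d (min %d)\n", pl.count, pl.level.min);

   Any other command is passed through to the PHY driver's ioctl handler,
   if one is registered. */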
2716 static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void *arg)
2718 ns_dev *card;
2719 pool_levels pl;
2720 int btype;
2721 unsigned long flags;
2723 card = dev->dev_data;
2724 switch (cmd)
2726 case NS_GETPSTAT:
2727 if (get_user(pl.buftype, &((pool_levels *) arg)->buftype))
2728 return -EFAULT;
2729 switch (pl.buftype)
2731 case NS_BUFTYPE_SMALL:
2732 pl.count = ns_stat_sfbqc_get(readl(card->membase + STAT));
2733 pl.level.min = card->sbnr.min;
2734 pl.level.init = card->sbnr.init;
2735 pl.level.max = card->sbnr.max;
2736 break;
2738 case NS_BUFTYPE_LARGE:
2739 pl.count = ns_stat_lfbqc_get(readl(card->membase + STAT));
2740 pl.level.min = card->lbnr.min;
2741 pl.level.init = card->lbnr.init;
2742 pl.level.max = card->lbnr.max;
2743 break;
2745 case NS_BUFTYPE_HUGE:
2746 pl.count = card->hbpool.count;
2747 pl.level.min = card->hbnr.min;
2748 pl.level.init = card->hbnr.init;
2749 pl.level.max = card->hbnr.max;
2750 break;
2752 case NS_BUFTYPE_IOVEC:
2753 pl.count = card->iovpool.count;
2754 pl.level.min = card->iovnr.min;
2755 pl.level.init = card->iovnr.init;
2756 pl.level.max = card->iovnr.max;
2757 break;
2759 default:
2760 return -EINVAL;
2763 if (!copy_to_user((pool_levels *) arg, &pl, sizeof(pl)))
2764 return (sizeof(pl));
2765 else
2766 return -EFAULT;
2768 case NS_SETBUFLEV:
2769 if (!suser())
2770 return -EPERM;
2771 if (copy_from_user(&pl, (pool_levels *) arg, sizeof(pl)))
2772 return -EFAULT;
2773 if (pl.level.min >= pl.level.init || pl.level.init >= pl.level.max)
2774 return -EINVAL;
2775 if (pl.level.min == 0)
2776 return -EINVAL;
2777 switch (pl.buftype)
2779 case NS_BUFTYPE_SMALL:
2780 if (pl.level.max > TOP_SB)
2781 return -EINVAL;
2782 card->sbnr.min = pl.level.min;
2783 card->sbnr.init = pl.level.init;
2784 card->sbnr.max = pl.level.max;
2785 break;
2787 case NS_BUFTYPE_LARGE:
2788 if (pl.level.max > TOP_LB)
2789 return -EINVAL;
2790 card->lbnr.min = pl.level.min;
2791 card->lbnr.init = pl.level.init;
2792 card->lbnr.max = pl.level.max;
2793 break;
2795 case NS_BUFTYPE_HUGE:
2796 if (pl.level.max > TOP_HB)
2797 return -EINVAL;
2798 card->hbnr.min = pl.level.min;
2799 card->hbnr.init = pl.level.init;
2800 card->hbnr.max = pl.level.max;
2801 break;
2803 case NS_BUFTYPE_IOVEC:
2804 if (pl.level.max > TOP_IOVB)
2805 return -EINVAL;
2806 card->iovnr.min = pl.level.min;
2807 card->iovnr.init = pl.level.init;
2808 card->iovnr.max = pl.level.max;
2809 break;
2811 default:
2812 return -EINVAL;
2815 return 0;
2817 case NS_ADJBUFLEV:
2818 if (!suser())
2819 return -EPERM;
2820 btype = (int) arg; /* an int is the same size as a pointer on the 32-bit platforms this driver targets */
2821 switch (btype)
2823 case NS_BUFTYPE_SMALL:
2824 while (card->sbfqc < card->sbnr.init)
2826 struct sk_buff *sb;
2828 sb = alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
2829 if (sb == NULL)
2830 return -ENOMEM;
2831 skb_queue_tail(&card->sbpool.queue, sb);
2832 skb_reserve(sb, NS_AAL0_HEADER);
2833 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
2835 break;
2837 case NS_BUFTYPE_LARGE:
2838 while (card->lbfqc < card->lbnr.init)
2840 struct sk_buff *lb;
2842 lb = alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
2843 if (lb == NULL)
2844 return -ENOMEM;
2845 skb_queue_tail(&card->lbpool.queue, lb);
2846 skb_reserve(lb, NS_SMBUFSIZE);
2847 push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
2849 break;
2851 case NS_BUFTYPE_HUGE:
2852 while (card->hbpool.count > card->hbnr.init)
2854 struct sk_buff *hb;
2856 save_flags(flags); cli();
2857 hb = skb_dequeue(&card->hbpool.queue);
2858 card->hbpool.count--;
2859 restore_flags(flags);
2860 if (hb == NULL)
2861 printk("nicstar%d: huge buffer count inconsistent.\n",
2862 card->index);
2863 else
2864 kfree_skb(hb);
2867 while (card->hbpool.count < card->hbnr.init)
2869 struct sk_buff *hb;
2871 hb = alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
2872 if (hb == NULL)
2873 return -ENOMEM;
2874 save_flags(flags); cli();
2875 skb_queue_tail(&card->hbpool.queue, hb);
2876 card->hbpool.count++;
2877 restore_flags(flags);
2879 break;
2881 case NS_BUFTYPE_IOVEC:
2882 while (card->iovpool.count > card->iovnr.init)
2884 struct sk_buff *iovb;
2886 save_flags(flags); cli();
2887 iovb = skb_dequeue(&card->iovpool.queue);
2888 card->iovpool.count--;
2889 restore_flags(flags);
2890 if (iovb == NULL)
2891 printk("nicstar%d: iovec buffer count inconsistent.\n",
2892 card->index);
2893 else
2894 kfree_skb(iovb);
2897 while (card->iovpool.count < card->iovnr.init)
2899 struct sk_buff *iovb;
2901 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
2902 if (iovb == NULL)
2903 return -ENOMEM;
2904 save_flags(flags); cli();
2905 skb_queue_tail(&card->iovpool.queue, iovb);
2906 card->iovpool.count++;
2907 restore_flags(flags);
2909 break;
2911 default:
2912 return -EINVAL;
2915 return 0;
2917 default:
2918 if (dev->phy && dev->phy->ioctl) {
2919 return dev->phy->ioctl(dev, cmd, arg);
2921 else {
2922 printk("nicstar%d: %s == NULL \n", card->index,
2923 dev->phy ? "dev->phy->ioctl" : "dev->phy");
2924 return -EINVAL;
2931 static void which_list(ns_dev *card, struct sk_buff *skb)
2933 printk("It's a %s buffer.\n", skb->list == &card->sbpool.queue ?
2934 "small" : skb->list == &card->lbpool.queue ? "large" :
2935 skb->list == &card->hbpool.queue ? "huge" :
2936 skb->list == &card->iovpool.queue ? "iovec" : "unknown");
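/* ns_poll() is the timer-driven safety net: every NS_POLL_PERIOD jiffies it
   services the transmit and receive status queues of every card, writes the
   TSIF/EOPDU bits it saw back to STAT (apparently to acknowledge them), and
   refuses to run concurrently with itself or with the interrupt handler. */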
2941 static void ns_poll(unsigned long arg)
2943 int i;
2944 ns_dev *card;
2945 unsigned long flags;
2946 u32 stat_r, stat_w;
2948 PRINTK("nicstar: Entering ns_poll().\n");
2949 for (i = 0; i < num_cards; i++)
2951 card = cards[i];
2952 save_flags(flags); cli();
2953 if (card->in_poll)
2955 printk("nicstar: Re-entering ns_poll()???\n");
2956 restore_flags(flags);
2957 continue;
2959 card->in_poll = 1;
2960 if (card->in_handler)
2962 card->in_poll = 0;
2963 printk("nicstar%d: ns_poll called while in interrupt handler!?\n",
2964 card->index);
2965 restore_flags(flags);
2966 continue;
2969 stat_w = 0;
2970 stat_r = readl(card->membase + STAT);
2971 if (stat_r & NS_STAT_TSIF)
2972 stat_w |= NS_STAT_TSIF;
2973 if (stat_r & NS_STAT_EOPDU)
2974 stat_w |= NS_STAT_EOPDU;
2976 process_tsq(card);
2977 process_rsq(card);
2979 writel(stat_w, card->membase + STAT);
2980 card->in_poll = 0;
2981 restore_flags(flags);
2983 mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD);
2984 PRINTK("nicstar: Leaving ns_poll().\n");
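/* ns_parse_mac() converts an ESI given as a "xx:xx:xx:xx:xx:xx" hex string
   into six raw bytes and returns -1 on any malformed input.  For example
   (illustrative call, the destination array is assumed):

      unsigned char esi[6];
      ns_parse_mac("00:20:EA:00:12:34", esi);

   leaves esi holding 00 20 EA 00 12 34. */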
2989 static int ns_parse_mac(char *mac, unsigned char *esi)
2991 int i, j;
2992 short byte1, byte0;
2994 if (mac == NULL || esi == NULL)
2995 return -1;
2996 j = 0;
2997 for (i = 0; i < 6; i++)
2999 if ((byte1 = ns_h2i(mac[j++])) < 0)
3000 return -1;
3001 if ((byte0 = ns_h2i(mac[j++])) < 0)
3002 return -1;
3003 esi[i] = (unsigned char) (byte1 * 16 + byte0);
3004 if (i < 5)
3006 if (mac[j++] != ':')
3007 return -1;
3010 return 0;
3015 static short ns_h2i(char c)
3017 if (c >= '0' && c <= '9')
3018 return (short) (c - '0');
3019 if (c >= 'A' && c <= 'F')
3020 return (short) (c - 'A' + 10);
3021 if (c >= 'a' && c <= 'f')
3022 return (short) (c - 'a' + 10);
3023 return -1;
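/* ns_phy_put()/ns_phy_get() access PHY registers through the card's utility
   port: the data byte is staged in (or read back from) DR0, and the
   transfer is started by writing a READ/WRITE_UTILITY command plus the
   register address to CMD, busy-waiting on the command register in
   between. */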
3028 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
3029 unsigned long addr)
3031 ns_dev *card;
3032 unsigned long flags;
3034 card = dev->dev_data;
3035 save_flags(flags); cli();
3036 while(CMD_BUSY(card));
3037 writel((unsigned long) value, card->membase + DR0);
3038 writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF),
3039 card->membase + CMD);
3040 restore_flags(flags);
3045 static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr)
3047 ns_dev *card;
3048 unsigned long flags;
3049 unsigned long data;
3051 card = dev->dev_data;
3052 save_flags(flags); cli();
3053 while(CMD_BUSY(card));
3054 writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF),
3055 card->membase + CMD);
3056 while(CMD_BUSY(card));
3057 data = readl(card->membase + DR0) & 0x000000FF;
3058 restore_flags(flags);
3059 return (unsigned char) data;