1 /******************************************************************************
5 * Device driver supporting CBR for NICStAR based cards.
7 * IMPORTANT: The included file nicstarmac.c was NOT WRITTEN BY ME.
8 * It was taken from the frle-0.22 device driver.
9 * As the file doesn't have a copyright notice, in the file
10 * nicstarmac.copyright I put the copyright notice from the
11 * frle-0.22 device driver.
12 * Some code is based on the nicstar driver by M. Welsh.
18 ******************************************************************************/
21 /* Header files ***************************************************************/
23 #include <linux/module.h>
24 #include <linux/config.h>
25 #include <linux/kernel.h>
26 #include <linux/skbuff.h>
27 #include <linux/atmdev.h>
28 #include <linux/atm.h>
29 #include <linux/pci.h>
30 #include <linux/types.h>
31 #include <linux/string.h>
32 #include <linux/delay.h>
33 #include <linux/init.h>
34 #include <linux/sched.h>
35 #include <linux/timer.h>
37 #include <asm/uaccess.h>
39 #include "nicstarmac.h"
42 /* Additional code ************************************************************/
44 #include "nicstarmac.c"
47 /* Configurable parameters ****************************************************/
55 #undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know
56 you're going to use only raw ATM */
59 /* Do not touch these *********************************************************/
62 #define TXPRINTK(args...) printk(args)
64 #define TXPRINTK(args...)
68 #define RXPRINTK(args...) printk(args)
70 #define RXPRINTK(args...)
74 #define PRINTK(args...) printk(args)
76 #define PRINTK(args...)
77 #endif /* GENERAL_DEBUG */
80 #define XPRINTK(args...) printk(args)
82 #define XPRINTK(args...)
83 #endif /* EXTRA_DEBUG */
86 /* Macros *********************************************************************/
88 #define MAX(a,b) ((a) > (b) ? (a) : (b))
89 #define MIN(a,b) ((a) < (b) ? (a) : (b))
91 #define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ)
93 #define NS_DELAY mdelay(1)
95 #define ALIGN_ADDRESS(addr, alignment) \
96 ((((u32) (addr)) + (((u32) (alignment)) - 1)) & ~(((u32) (alignment)) - 1))
101 /* Version definition *********************************************************/
103 #include <linux/version.h>
104 char kernel_version[] = UTS_RELEASE;
107 /* Function declarations ******************************************************/
109 static u32 ns_read_sram(ns_dev *card, u32 sram_address);
110 static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count);
111 static int ns_init_card(int i, struct pci_dev *pcidev);
112 static void ns_init_card_error(ns_dev *card, int error);
113 static scq_info *get_scq(int size, u32 scd);
114 static void free_scq(scq_info *scq, struct atm_vcc *vcc);
115 static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
116 u32 handle2, u32 addr2);
117 static void ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
118 static int ns_open(struct atm_vcc *vcc, short vpi, int vci);
119 static void ns_close(struct atm_vcc *vcc);
120 static void fill_tst(ns_dev *card, int n, vc_map *vc);
121 static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb);
122 static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd,
123 struct sk_buff *skb);
124 static void process_tsq(ns_dev *card);
125 static void drain_scq(ns_dev *card, scq_info *scq, int pos);
126 static void process_rsq(ns_dev *card);
127 static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe);
128 #ifdef NS_USE_DESTRUCTORS
129 static void ns_sb_destructor(struct sk_buff *sb);
130 static void ns_lb_destructor(struct sk_buff *lb);
131 static void ns_hb_destructor(struct sk_buff *hb);
132 #endif /* NS_USE_DESTRUCTORS */
133 static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb);
134 static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count);
135 static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb);
136 static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb);
137 static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb);
138 static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
139 static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void *arg);
140 static void which_list(ns_dev *card, struct sk_buff *skb);
141 static void ns_poll(unsigned long arg);
144 /* Global variables ***********************************************************/
/* Table of detected cards, indexed in probe order; unused slots are NULL. */
146 static struct ns_dev *cards[NS_MAX_CARDS];
147 static unsigned num_cards = 0;
/* Operations vector handed to atm_dev_register(); maps the ATM layer's
   callbacks onto the local ns_* handlers.  Unimplemented hooks are NULL.
   NOTE(review): some initializer lines are not visible in this fragment. */
148 static struct atmdev_ops atm_ops =
150 NULL, /* dev_close */
152 ns_close, /* close */
153 ns_ioctl, /* ioctl */
154 NULL, /* getsockopt */
155 NULL, /* setsockopt */
162 NULL, /* change_qos */
163 NULL, /* free_rx_skb */
164 ns_proc_read /* proc_read */
/* Periodic polling timer; armed in init_module() with ns_poll as handler. */
166 static struct timer_list ns_timer;
169 /* Functions*******************************************************************/
/*
 * Module entry point: walks the PCI bus looking for IDT77201 NICStAR
 * devices, initializes each found card with ns_init_card(), then arms
 * the ns_poll timer.  Prints a notice and (presumably -- surrounding
 * lines not visible here) fails when no card is found.
 */
173 int init_module(void)
176 unsigned error = 0; /* Initialized to remove compile warning */
177 struct pci_dev *pcidev;
179 XPRINTK("nicstar: init_module() called.\n");
182 printk("nicstar: no PCI subsystem found.\n");
/* First pass: clear the cards[] table before probing. */
186 for(i = 0; i < NS_MAX_CARDS; i++)
/* Second pass: probe up to NS_MAX_CARDS devices. */
190 for(i = 0; i < NS_MAX_CARDS; i++)
192 if ((pcidev = pci_find_device(PCI_VENDOR_ID_IDT,
193 PCI_DEVICE_ID_IDT_IDT77201,
197 error = ns_init_card(i, pcidev);
/* A failed card must not consume a slot in cards[]. */
199 i--; /* Try to find another card but don't increment index */
206 printk("nicstar: no cards found.\n");
212 TXPRINTK("nicstar: TX debug enabled.\n");
213 RXPRINTK("nicstar: RX debug enabled.\n");
214 PRINTK("nicstar: General debug enabled.\n");
216 printk("nicstar: using PHY loopback.\n");
217 #endif /* PHY_LOOPBACK */
218 XPRINTK("nicstar: init_module() returned.\n");
/* Pre-2.4 timers: the list links are initialized by hand. */
220 ns_timer.next = NULL;
221 ns_timer.prev = NULL;
222 ns_timer.expires = jiffies + NS_POLL_PERIOD;
224 ns_timer.function = ns_poll;
225 add_timer(&ns_timer);
/*
 * Module exit: for every registered card, stop the SAR (CFG = 0),
 * deregister the ATM device, disable PCI memory access/busmastering,
 * then release every resource acquired by ns_init_card() -- all four
 * buffer pools, SCQ0 and any fixed-rate SCQs, the RSQ/TSQ, the IRQ
 * and the register mapping.  The ns_poll timer is deleted first.
 */
231 void cleanup_module(void)
234 unsigned short pci_command;
237 struct sk_buff *iovb;
241 XPRINTK("nicstar: cleanup_module() called.\n");
244 printk("nicstar: module in use, remove delayed.\n");
246 del_timer(&ns_timer);
248 for (i = 0; i < NS_MAX_CARDS; i++)
250 if (cards[i] == NULL)
255 /* Stop everything */
256 writel(0x00000000, card->membase + CFG);
258 /* De-register device */
259 atm_dev_deregister(card->atmdev);
261 /* Disable memory mapping and busmastering */
262 if (pci_read_config_word(card->pcidev, PCI_COMMAND, &pci_command) != 0)
264 printk("nicstar%d: can't read PCI_COMMAND.\n", i);
266 pci_command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
267 if (pci_write_config_word(card->pcidev, PCI_COMMAND, pci_command) != 0)
269 printk("nicstar%d: can't write PCI_COMMAND.\n", i);
272 /* Free up resources */
274 PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count);
275 while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
280 PRINTK("nicstar%d: %d huge buffers freed.\n", i, j);
282 PRINTK("nicstar%d: freeing %d iovec buffers.\n", i, card->iovpool.count);
283 while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
288 PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j);
289 while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
291 while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
/* SCQ0 is the shared VBR queue, so free_scq() takes vcc == NULL here. */
293 free_scq(card->scq0, NULL);
294 for (j = 0; j < NS_FRSCD_NUM; j++)
296 if (card->scd2vc[j] != NULL)
297 free_scq(card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
/* kfree the original (unaligned) allocations, not the aligned bases. */
299 kfree(card->rsq.org);
300 kfree(card->tsq.org);
301 free_irq(card->pcidev->irq, card);
302 iounmap((void *) card->membase);
306 XPRINTK("nicstar: cleanup_module() returned.\n");
/*
 * Built-in (non-module) probe entry point.  Mirrors init_module():
 * scans the PCI bus for IDT77201 devices and initializes each one via
 * ns_init_card(), keeping failed cards out of the cards[] table.
 * Fix: the exit trace previously said "init_module() returned" -- a
 * copy-paste from init_module() -- and now names this function.
 */
312 __initfunc(int nicstar_detect(void))
315 unsigned error = 0; /* Initialized to remove compile warning */
316 struct pci_dev *pcidev;
320 printk("nicstar: no PCI subsystem found.\n");
/* First pass: clear the cards[] table before probing. */
324 for(i = 0; i < NS_MAX_CARDS; i++)
/* Second pass: probe up to NS_MAX_CARDS devices. */
328 for(i = 0; i < NS_MAX_CARDS; i++)
330 if ((pcidev = pci_find_device(PCI_VENDOR_ID_IDT,
331 PCI_DEVICE_ID_IDT_IDT77201,
335 error = ns_init_card(i, pcidev);
337 i--; /* Try to find another card but don't increment index */
343 TXPRINTK("nicstar: TX debug enabled.\n");
344 RXPRINTK("nicstar: RX debug enabled.\n");
345 PRINTK("nicstar: General debug enabled.\n");
347 printk("nicstar: using PHY loopback.\n");
348 #endif /* PHY_LOOPBACK */
349 XPRINTK("nicstar: nicstar_detect() returned.\n");
/*
 * Read one dword from the card's SRAM.  Issues a read command through
 * the CMD register and fetches the result from DR0.  Interrupts are
 * disabled (save_flags/cli) around the sequence because the CMD/DR
 * registers are shared with other command issuers (e.g. push_rxbufs).
 */
358 static u32 ns_read_sram(ns_dev *card, u32 sram_address)
363 sram_address &= 0x0007FFFC; /* address must be dword aligned */
364 sram_address |= 0x50000000; /* SRAM read command */
365 save_flags(flags); cli();
/* Busy-wait until the SAR accepts a new command. */
366 while (CMD_BUSY(card));
367 writel(sram_address, card->membase + CMD);
368 while (CMD_BUSY(card));
369 data = readl(card->membase + DR0);
370 restore_flags(flags);
/*
 * Write `count` (1..4) consecutive dwords to SRAM starting at
 * sram_address.  The values are first staged into the DR0..DR3 data
 * registers, then a single write command is issued.  Interrupts are
 * disabled around the whole sequence for the same reason as in
 * ns_read_sram().
 */
376 static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count)
380 count--; /* count range now is 0..3 instead of 1..4 */
382 c <<= 2; /* to use increments of 4 */
383 save_flags(flags); cli();
384 while (CMD_BUSY(card));
/* Stage the payload into DR0..DR(count). */
385 for (i = 0; i <= c; i += 4)
386 writel(*(value++), card->membase + i);
387 /* Note: DR# registers are the first 4 dwords in nicstar's memspace,
388 so card->membase + DR0 == card->membase */
390 sram_address &= 0x0007FFFC;
/* 0x4 = SRAM write command; low bits encode the dword count - 1. */
391 sram_address |= (0x40000000 | count);
392 writel(sram_address, card->membase + CMD);
393 restore_flags(flags);
/*
 * Probe-time initialization of card `i` found at `pcidev`.
 * Visible steps: allocate the ns_dev structure, ioremap BAR1, enable
 * PCI memory access and busmastering, bump the latency timer, reset
 * the SAR, detect the PHY type (25 vs 155 Mbps), size the SRAM,
 * allocate and program the TSQ, RSQ and SCQ0, initialize the TSTs and
 * RCT, pre-allocate the four buffer pools, write the CFG register and
 * register the ATM device.  On any failure it delegates unwinding to
 * ns_init_card_error().
 */
397 static int ns_init_card(int i, struct pci_dev *pcidev)
401 unsigned short pci_command;
402 unsigned char pci_latency;
411 if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL)
413 printk("nicstar%d: can't allocate memory for device structure.\n", i);
415 ns_init_card_error(card, error);
421 card->pcidev = pcidev;
/* BAR1 holds the memory-mapped register window (pre-2.4 PCI API). */
422 card->membase = (u32) (pcidev->base_address[1] & PCI_BASE_ADDRESS_MEM_MASK);
423 card->membase = (u32) ioremap(card->membase, NS_IOREMAP_SIZE);
424 if (card->membase == (u32) (NULL))
426 printk("nicstar%d: can't ioremap() membase.\n",i);
428 ns_init_card_error(card, error);
431 PRINTK("nicstar%d: membase at 0x%x.\n", i, card->membase);
433 if (pci_read_config_word(pcidev, PCI_COMMAND, &pci_command) != 0)
435 printk("nicstar%d: can't read PCI_COMMAND.\n", i);
437 ns_init_card_error(card, error);
440 pci_command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
441 if (pci_write_config_word(pcidev, PCI_COMMAND, pci_command) != 0)
443 printk("nicstar%d: can't write PCI_COMMAND.\n", i);
445 ns_init_card_error(card, error);
449 if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0)
451 printk("nicstar%d: can't read PCI latency timer.\n", i);
453 ns_init_card_error(card, error);
456 if (pci_latency < NS_PCI_LATENCY)
458 PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i, NS_PCI_LATENCY);
459 for (j = 1; j < 4; j++)
/* NOTE(review): trailing ';' makes this `if` a no-op, so a write
   failure is not acted on at this point.  Verify against the unseen
   retry/break logic of the surrounding loop before changing. */
461 if (pci_write_config_byte(pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0);
466 printk("nicstar%d: can't set PCI latency timer to %d.\n", i, NS_PCI_LATENCY);
468 ns_init_card_error(card, error);
473 /* Clear timer overflow */
474 data = readl(card->membase + STAT);
475 if (data & NS_STAT_TMROF)
476 writel(NS_STAT_TMROF, card->membase + STAT);
/* Software-reset the SAR, then take it out of reset. */
479 writel(NS_CFG_SWRST, card->membase + CFG);
481 writel(0x00000000, card->membase + CFG);
484 writel(0x00000008, card->membase + GP);
486 writel(0x00000001, card->membase + GP);
488 while (CMD_BUSY(card));
489 writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */
492 /* Detect PHY type */
493 while (CMD_BUSY(card));
494 writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD);
495 while (CMD_BUSY(card));
496 data = readl(card->membase + DR0);
/* 0x09 identifies the 25.6 Mbps PHY; 0x30 the 155 Mbps one (below). */
497 if (data == 0x00000009)
499 printk("nicstar%d: PHY seems to be 25 Mbps.\n", i);
500 card->max_pcr = IDT_25_PCR;
501 while(CMD_BUSY(card));
502 writel(0x00000008, card->membase + DR0);
503 writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD);
504 /* Clear an eventual pending interrupt */
505 writel(NS_STAT_SFBQF, card->membase + STAT);
507 while(CMD_BUSY(card));
508 writel(0x00000022, card->membase + DR0);
509 writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD);
510 #endif /* PHY_LOOPBACK */
512 else if (data == 0x00000030)
514 printk("nicstar%d: PHY seems to be 155 Mbps.\n", i);
515 card->max_pcr = ATM_OC3_PCR;
517 while(CMD_BUSY(card));
518 writel(0x00000002, card->membase + DR0);
519 writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD);
520 #endif /* PHY_LOOPBACK */
524 printk("nicstar%d: can't determine PHY type.\n", i);
526 ns_init_card_error(card, error);
529 writel(0x00000000, card->membase + GP);
531 /* Determine SRAM size */
/* Write distinct patterns at 32K- and 128K-region addresses; if both
   read back intact there is no aliasing, i.e. the full 128K exists. */
533 ns_write_sram(card, 0x1C003, &data, 1);
535 ns_write_sram(card, 0x14003, &data, 1);
536 if (ns_read_sram(card, 0x14003) == 0x89ABCDEF &&
537 ns_read_sram(card, 0x1C003) == 0x76543210)
538 card->sram_size = 128;
540 card->sram_size = 32;
541 PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size);
543 card->rct_size = NS_MAX_RCTSIZE;
545 #if (NS_MAX_RCTSIZE == 4096)
546 if (card->sram_size == 128)
547 printk("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n", i);
548 #elif (NS_MAX_RCTSIZE == 16384)
549 if (card->sram_size == 32)
551 printk("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n", i);
552 card->rct_size = 4096;
555 #error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c
/* Split the 12- or 14-bit connection index between VPI and VCI bits. */
558 card->vpibits = NS_VPIBITS;
559 if (card->rct_size == 4096)
560 card->vcibits = 12 - NS_VPIBITS;
561 else /* card->rct_size == 16384 */
562 card->vcibits = 14 - NS_VPIBITS;
564 #ifdef ESI_FROM_EPROM
565 /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */
566 nicstar_init_eprom(card->membase);
567 #endif /* ESI_FROM_EPROM */
569 if (request_irq(pcidev->irq, &ns_irq_handler, SA_INTERRUPT, "nicstar", card) != 0)
571 printk("nicstar%d: can't allocate IRQ.\n", i);
573 ns_init_card_error(card, error);
577 /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
578 writel(0x00000000, card->membase + VPM);
/* Transmit status queue: over-allocate so the base can be aligned. */
581 card->tsq.org = kmalloc(NS_TSQSIZE + NS_TSQ_ALIGNMENT, GFP_KERNEL);
582 if (card->tsq.org == NULL)
584 printk("nicstar%d: can't allocate TSQ.\n", i);
586 ns_init_card_error(card, error);
589 card->tsq.base = (ns_tsi *) ALIGN_ADDRESS(card->tsq.org, NS_TSQ_ALIGNMENT);
590 card->tsq.next = card->tsq.base;
591 card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1);
592 for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++)
593 ns_tsi_init(card->tsq.base + j);
594 writel(0x00000000, card->membase + TSQH);
595 writel((u32) virt_to_bus(card->tsq.base), card->membase + TSQB);
596 PRINTK("nicstar%d: TSQ base at 0x%x 0x%x 0x%x.\n", i, (u32) card->tsq.base,
597 (u32) virt_to_bus(card->tsq.base), readl(card->membase + TSQB));
/* Receive status queue: same over-allocate-and-align scheme. */
600 card->rsq.org = kmalloc(NS_RSQSIZE + NS_RSQ_ALIGNMENT, GFP_KERNEL);
601 if (card->rsq.org == NULL)
603 printk("nicstar%d: can't allocate RSQ.\n", i);
605 ns_init_card_error(card, error);
608 card->rsq.base = (ns_rsqe *) ALIGN_ADDRESS(card->rsq.org, NS_RSQ_ALIGNMENT);
609 card->rsq.next = card->rsq.base;
610 card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1);
611 for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++)
612 ns_rsqe_init(card->rsq.base + j);
613 writel(0x00000000, card->membase + RSQH);
614 writel((u32) virt_to_bus(card->rsq.base), card->membase + RSQB);
615 PRINTK("nicstar%d: RSQ base at 0x%x.\n", i, (u32) card->rsq.base);
617 /* Initialize SCQ0, the only VBR SCQ used */
618 card->scq1 = (scq_info *) NULL;
619 card->scq2 = (scq_info *) NULL;
620 card->scq0 = get_scq(VBR_SCQSIZE, NS_VRSCD0);
621 if (card->scq0 == (scq_info *) NULL)
623 printk("nicstar%d: can't get SCQ0.\n", i);
625 ns_init_card_error(card, error);
/* Program the VBR segmentation channel descriptor with SCQ0's base. */
628 u32d[0] = (u32) virt_to_bus(card->scq0->base);
629 u32d[1] = (u32) 0x00000000;
630 u32d[2] = (u32) 0xffffffff;
631 u32d[3] = (u32) 0x00000000;
632 ns_write_sram(card, NS_VRSCD0, u32d, 4);
633 ns_write_sram(card, NS_VRSCD1, u32d, 4); /* These last two won't be used */
634 ns_write_sram(card, NS_VRSCD2, u32d, 4); /* but are initialized, just in case... */
635 card->scq0->scd = NS_VRSCD0;
636 PRINTK("nicstar%d: VBR-SCQ0 base at 0x%x.\n", i, (u32) card->scq0->base);
638 /* Initialize TSTs */
639 card->tst_addr = NS_TST0;
640 card->tst_free_entries = NS_TST_NUM_ENTRIES;
641 data = NS_TST_OPCODE_VARIABLE;
642 for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
643 ns_write_sram(card, NS_TST0 + j, &data, 1);
/* Terminate each TST with an END entry pointing back to its start. */
644 data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0);
645 ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1);
646 for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
647 ns_write_sram(card, NS_TST1 + j, &data, 1);
648 data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1);
649 ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1);
650 for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
651 card->tste2vc[j] = NULL;
652 writel(NS_TST0 << 2, card->membase + TSTB);
655 /* Initialize RCT. AAL type is set on opening the VC. */
657 u32d[0] = NS_RCTE_RAWCELLINTEN;
659 u32d[0] = 0x00000000;
661 u32d[1] = 0x00000000;
662 u32d[2] = 0x00000000;
663 u32d[3] = 0xFFFFFFFF;
664 for (j = 0; j < card->rct_size; j++)
665 ns_write_sram(card, j * 4, u32d, 4);
667 memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map));
669 for (j = 0; j < NS_FRSCD_NUM; j++)
670 card->scd2vc[j] = NULL;
672 /* Initialize buffer levels */
673 card->sbnr.min = MIN_SB;
674 card->sbnr.init = NUM_SB;
675 card->sbnr.max = MAX_SB;
676 card->lbnr.min = MIN_LB;
677 card->lbnr.init = NUM_LB;
678 card->lbnr.max = MAX_LB;
679 card->iovnr.min = MIN_IOVB;
680 card->iovnr.init = NUM_IOVB;
681 card->iovnr.max = MAX_IOVB;
682 card->hbnr.min = MIN_HB;
683 card->hbnr.init = NUM_HB;
684 card->hbnr.max = MAX_HB;
/* Clear the one-buffer staging slots used by push_rxbufs() pairing. */
686 card->sm_handle = 0x00000000;
687 card->sm_addr = 0x00000000;
688 card->lg_handle = 0x00000000;
689 card->lg_addr = 0x00000000;
691 card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */
693 /* Allocate small buffers */
694 skb_queue_head_init(&card->sbpool.queue);
695 card->sbpool.count = 0; /* Not used */
696 for (j = 0; j < NUM_SB; j++)
699 sb = alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
702 printk("nicstar%d: can't allocate %dth of %d small buffers.\n",
705 ns_init_card_error(card, error);
708 skb_queue_tail(&card->sbpool.queue, sb);
709 skb_reserve(sb, NS_AAL0_HEADER);
710 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
712 /* Test for strange behaviour which leads to crashes */
713 if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min)
715 printk("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n",
718 ns_init_card_error(card, error);
723 /* Allocate large buffers */
724 skb_queue_head_init(&card->lbpool.queue);
725 card->lbpool.count = 0; /* Not used */
726 for (j = 0; j < NUM_LB; j++)
729 lb = alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
732 printk("nicstar%d: can't allocate %dth of %d large buffers.\n",
735 ns_init_card_error(card, error);
738 skb_queue_tail(&card->lbpool.queue, lb);
739 skb_reserve(lb, NS_SMBUFSIZE);
740 push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
741 /* Due to the implementation of push_rxbufs() this is 1, not 0 */
/* Remember one large buffer's bus address as the raw-cell ring start. */
745 card->rawch = (u32) virt_to_bus(lb->data);
748 /* Test for strange behaviour which leads to crashes */
749 if ((bcount = ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min)
751 printk("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n",
754 ns_init_card_error(card, error);
759 /* Allocate iovec buffers */
760 skb_queue_head_init(&card->iovpool.queue);
761 card->iovpool.count = 0;
762 for (j = 0; j < NUM_IOVB; j++)
764 struct sk_buff *iovb;
765 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
768 printk("nicstar%d: can't allocate %dth of %d iovec buffers.\n",
771 ns_init_card_error(card, error);
774 skb_queue_tail(&card->iovpool.queue, iovb);
775 card->iovpool.count++;
779 /* Pre-allocate some huge buffers */
780 skb_queue_head_init(&card->hbpool.queue);
781 card->hbpool.count = 0;
782 for (j = 0; j < NUM_HB; j++)
785 hb = alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
788 printk("nicstar%d: can't allocate %dth of %d huge buffers.\n",
791 ns_init_card_error(card, error);
794 skb_queue_tail(&card->hbpool.queue, hb);
795 card->hbpool.count++;
798 card->in_handler = 0;
802 /* Configure NICStAR */
803 if (card->rct_size == 4096)
804 ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES;
805 else /* (card->rct_size == 16384) */
806 ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES;
/* Writing CFG enables the RX path and the selected interrupt sources. */
809 writel(NS_CFG_RXPATH |
816 NS_CFG_RXINT_NODELAY |
817 NS_CFG_RAWIE | /* Only enabled if RCQ_SUPPORT */
821 NS_CFG_TSQFIE_OPT, /* Only enabled if ENABLE_TSQFIE */
822 card->membase + CFG);
824 /* Register device */
825 card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, 0UL);
826 if (card->atmdev == NULL)
828 printk("nicstar%d: can't register device.\n", i);
830 ns_init_card_error(card, error);
834 #ifdef ESI_FROM_EPROM
835 nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
836 card->atmdev->esi, 6);
837 printk("nicstar%d: MAC address %02X:%02X:%02X:%02X:%02X:%02X\n", i,
838 card->atmdev->esi[0], card->atmdev->esi[1], card->atmdev->esi[2],
839 card->atmdev->esi[3], card->atmdev->esi[4], card->atmdev->esi[5]);
/* Without the EPROM, fall back to the compile-time ESI constants. */
841 card->atmdev->esi[0] = NS_ESI0;
842 card->atmdev->esi[1] = NS_ESI1;
843 card->atmdev->esi[2] = NS_ESI2;
844 card->atmdev->esi[3] = NS_ESI3;
845 card->atmdev->esi[4] = NS_ESI4;
846 card->atmdev->esi[5] = NS_ESI5;
847 #endif /* ESI_FROM_EPROM */
849 card->atmdev->dev_data = card;
850 card->atmdev->ci_range.vpi_bits = card->vpibits;
851 card->atmdev->ci_range.vci_bits = card->vcibits;
/*
 * Error unwind for ns_init_card().  Stops the card and releases the
 * resources acquired so far: the four buffer pools, SCQ0, the RSQ/TSQ
 * allocations, the IRQ and the register mapping.  Presumably the
 * `error` argument selects how far to unwind via unseen guard lines --
 * confirm against the full source.
 */
860 static void ns_init_card_error(ns_dev *card, int error)
864 writel(0x00000000, card->membase + CFG);
869 while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
874 struct sk_buff *iovb;
875 while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
881 while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
887 while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
889 free_scq(card->scq0, NULL);
893 kfree(card->rsq.org);
897 kfree(card->tsq.org);
901 free_irq(card->pcidev->irq, card);
905 iounmap((void *) card->membase);
/*
 * Allocate and initialize a segmentation channel queue (SCQ) bound to
 * the given SCD SRAM address.  Only the two legal sizes (VBR_SCQSIZE,
 * CBR_SCQSIZE) are accepted.  The entry buffer is over-allocated at
 * 2*size so the base can be aligned to `size`, and a parallel skb
 * pointer array tracks the buffer owning each entry.
 * Returns NULL on bad size or allocation failure.
 */
915 static scq_info *get_scq(int size, u32 scd)
920 if (size != VBR_SCQSIZE && size != CBR_SCQSIZE)
921 return (scq_info *) NULL;
923 scq = (scq_info *) kmalloc(sizeof(scq_info), GFP_KERNEL);
924 if (scq == (scq_info *) NULL)
925 return (scq_info *) NULL;
926 scq->org = kmalloc(2 * size, GFP_KERNEL);
927 if (scq->org == NULL)
/* NOTE(review): no kfree(scq) is visible before this return -- verify
   the unseen intervening lines do the cleanup, else this leaks. */
930 return (scq_info *) NULL;
932 scq->skb = (struct sk_buff **) kmalloc(sizeof(struct sk_buff *) *
933 (size / NS_SCQE_SIZE), GFP_KERNEL);
934 if (scq->skb == (struct sk_buff **) NULL)
938 return (scq_info *) NULL;
940 scq->num_entries = size / NS_SCQE_SIZE;
941 scq->base = (ns_scqe *) ALIGN_ADDRESS(scq->org, size);
942 scq->next = scq->base;
943 scq->last = scq->base + (scq->num_entries - 1);
944 scq->tail = scq->last;
/* NOTE(review): num_entries is assigned the same value twice (see
   above) -- redundant but harmless. */
946 scq->num_entries = size / NS_SCQE_SIZE;
948 scq->scqfull_waitq = NULL;
/* Clear the per-entry skb ownership table. */
951 for (i = 0; i < scq->num_entries; i++)
959 /* For variable rate SCQ vcc must be NULL */
/*
 * Release an SCQ and any sk_buffs still pending in it.  For the shared
 * VBR queue each pending skb carries its own vcc, so the owner's pop()
 * is called per skb; for a fixed-rate (CBR) queue all entries belong
 * to the single vcc passed in.  dev_kfree_skb() is the fallback when
 * the vcc has no pop() callback.
 */
960 static void free_scq(scq_info *scq, struct atm_vcc *vcc)
964 if (scq->num_entries == VBR_SCQ_NUM_ENTRIES)
965 for (i = 0; i < scq->num_entries; i++)
967 if (scq->skb[i] != NULL)
/* VBR case: recover the owning vcc from the skb itself. */
969 vcc = scq->skb[i]->atm.vcc;
970 if (vcc->pop != NULL)
971 vcc->pop(vcc, scq->skb[i]);
973 dev_kfree_skb(scq->skb[i]);
976 else /* vcc must be != NULL */
/* Defensive path: fixed-rate queue but no vcc supplied. */
980 printk("nicstar: free_scq() called with vcc == NULL for fixed rate scq.");
981 for (i = 0; i < scq->num_entries; i++)
982 dev_kfree_skb(scq->skb[i]);
/* Normal fixed-rate path: pop/free every entry against the one vcc. */
985 for (i = 0; i < scq->num_entries; i++)
987 if (scq->skb[i] != NULL)
989 if (vcc->pop != NULL)
990 vcc->pop(vcc, scq->skb[i]);
992 dev_kfree_skb(scq->skb[i]);
1003 /* The handles passed must be pointers to the sk_buff containing the small
1004 or large buffer(s) cast to u32. */
/*
 * Hand one or two free receive buffers of the given type (BUF_SM /
 * BUF_LG) to the card's free buffer queue.  The hardware takes buffers
 * in pairs, so a single buffer is parked in card->sm_addr/sm_handle
 * (or lg_*) until a second arrives.  If the corresponding free buffer
 * queue is already at its max level the pair is dropped (unlinked and
 * freed) instead of being pushed.  Finally, the empty-free-buffer
 * interrupt (EFBIE) is re-enabled once both pools are back above their
 * minimum levels.
 */
1005 static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
1006 u32 handle2, u32 addr2)
1009 unsigned long flags;
1012 #ifdef GENERAL_DEBUG
1014 printk("nicstar%d: push_rxbufs called with addr1 = 0.\n", card->index);
1015 #endif /* GENERAL_DEBUG */
/* Snapshot current small/large free buffer queue counts from STAT. */
1017 stat = readl(card->membase + STAT);
1018 card->sbfqc = ns_stat_sfbqc_get(stat);
1019 card->lbfqc = ns_stat_lfbqc_get(stat);
/* Small buffer: pair with the previously staged one, if any... */
1026 addr2 = card->sm_addr;
1027 handle2 = card->sm_handle;
1028 card->sm_addr = 0x00000000;
1029 card->sm_handle = 0x00000000;
1031 else /* (!sm_addr) */
/* ...otherwise stage this one and wait for its partner. */
1033 card->sm_addr = addr1;
1034 card->sm_handle = handle1;
1038 else /* type == BUF_LG */
1044 addr2 = card->lg_addr;
1045 handle2 = card->lg_handle;
1046 card->lg_addr = 0x00000000;
1047 card->lg_handle = 0x00000000;
1049 else /* (!lg_addr) */
1051 card->lg_addr = addr1;
1052 card->lg_handle = handle1;
/* Pool already full: drop the pair instead of pushing it. */
1061 if (card->sbfqc >= card->sbnr.max)
1063 skb_unlink((struct sk_buff *) handle1);
1064 kfree_skb((struct sk_buff *) handle1);
1065 skb_unlink((struct sk_buff *) handle2);
1066 kfree_skb((struct sk_buff *) handle2);
1072 else /* (type == BUF_LG) */
1074 if (card->lbfqc >= card->lbnr.max)
1076 skb_unlink((struct sk_buff *) handle1);
1077 kfree_skb((struct sk_buff *) handle1);
1078 skb_unlink((struct sk_buff *) handle2);
1079 kfree_skb((struct sk_buff *) handle2);
/* Issue the write-free-buffer-queue command with both pairs staged
   in DR0..DR3; interrupts off because CMD/DR are shared. */
1086 save_flags(flags); cli();
1088 while (CMD_BUSY(card));
1089 writel(handle1, card->membase + DR0);
1090 writel(addr1, card->membase + DR1);
1091 writel(handle2, card->membase + DR2);
1092 writel(addr2, card->membase + DR3);
1093 writel(NS_CMD_WRITE_FREEBUFQ | (u32) type, card->membase + CMD);
1095 restore_flags(flags);
1097 XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index,
1098 (type == BUF_SM ? "small" : "large"), addr1, addr2);
/* Both pools healthy again: re-enable the empty-FBQ interrupt. */
1101 if (!card->efbie && card->sbfqc >= card->sbnr.min &&
1102 card->lbfqc >= card->lbnr.min)
1105 writel((readl(card->membase + CFG) | NS_CFG_EFBIE), card->membase + CFG);
/*
 * Top-level interrupt service routine.  Reads the STAT register once
 * and then handles each asserted condition in turn, acknowledging each
 * by writing its bit back to STAT.  Reentry is detected via the
 * card->in_handler flag (pre-SMP-lock era).
 */
1113 static void ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
1118 card = (ns_dev *) dev_id;
1121 PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index);
/* Software reentry guard: this handler must not nest. */
1123 if (card->in_handler)
1125 printk("nicstar%d: Re-entering ns_irq_handler()???\n", card->index);
1128 card->in_handler = 1;
1131 card->in_handler = 0;
1132 printk("nicstar%d: Called irq handler while in ns_poll()!?\n",
1137 stat_r = readl(card->membase + STAT);
1139 /* Transmit Status Indicator has been written to T. S. Queue */
1140 if (stat_r & NS_STAT_TSIF)
1142 TXPRINTK("nicstar%d: TSI interrupt\n", card->index);
1144 writel(NS_STAT_TSIF, card->membase + STAT);
1147 /* Incomplete CS-PDU has been transmitted */
1148 if (stat_r & NS_STAT_TXICP)
1150 writel(NS_STAT_TXICP, card->membase + STAT);
1151 TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n",
1155 /* Transmit Status Queue 7/8 full */
1156 if (stat_r & NS_STAT_TSQF)
1158 writel(NS_STAT_TSQF, card->membase + STAT);
1159 PRINTK("nicstar%d: TSQ full.\n", card->index);
1163 /* Timer overflow */
1164 if (stat_r & NS_STAT_TMROF)
1166 writel(NS_STAT_TMROF, card->membase + STAT);
1167 PRINTK("nicstar%d: Timer overflow.\n", card->index);
1170 /* PHY device interrupt signal active */
1171 if (stat_r & NS_STAT_PHYI)
1173 writel(NS_STAT_PHYI, card->membase + STAT);
1174 printk("nicstar%d: PHY interrupt.\n", card->index);
1177 /* Small Buffer Queue is full */
1178 if (stat_r & NS_STAT_SFBQF)
1180 writel(NS_STAT_SFBQF, card->membase + STAT);
1181 printk("nicstar%d: Small free buffer queue is full.\n", card->index);
1184 /* Large Buffer Queue is full */
1185 if (stat_r & NS_STAT_LFBQF)
1187 writel(NS_STAT_LFBQF, card->membase + STAT);
1188 printk("nicstar%d: Large free buffer queue is full.\n", card->index);
1191 /* Receive Status Queue is full */
1192 if (stat_r & NS_STAT_RSQF)
1194 writel(NS_STAT_RSQF, card->membase + STAT);
1195 printk("nicstar%d: RSQ full.\n", card->index);
1199 /* Complete CS-PDU received */
1200 if (stat_r & NS_STAT_EOPDU)
1202 RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index);
1204 writel(NS_STAT_EOPDU, card->membase + STAT);
1207 /* Raw cell received */
1208 if (stat_r & NS_STAT_RAWCF)
1210 writel(NS_STAT_RAWCF, card->membase + STAT);
1212 printk("nicstar%d: Raw cell received and no support yet...\n",
1214 #endif /* RCQ_SUPPORT */
1215 /* NOTE: the following procedure may keep a raw cell pending until the
1216 next interrupt. As this preliminary support is only meant to
1217 avoid buffer leakage, this is not an issue. */
/* Walk the raw-cell ring from our cursor (card->rawch) to the
   card's tail pointer (RAWCT), recycling exhausted buffers. */
1218 while (readl(card->membase + RAWCT) != card->rawch)
1222 rawcell = (ns_rcqe *) bus_to_virt(card->rawch);
1223 if (ns_rcqe_islast(rawcell))
1225 struct sk_buff *oldbuf;
/* Last entry in this buffer: chain to the next buffer and
   recycle the old one back into the free pool. */
1227 oldbuf = card->rcbuf;
1228 card->rcbuf = (struct sk_buff *) ns_rcqe_nextbufhandle(rawcell);
1229 card->rawch = (u32) virt_to_bus(card->rcbuf->data);
1230 recycle_rx_buf(card, oldbuf);
1233 card->rawch += NS_RCQE_SIZE;
1237 /* Small buffer queue is empty */
1238 if (stat_r & NS_STAT_SFBQE)
1243 writel(NS_STAT_SFBQE, card->membase + STAT);
1244 printk("nicstar%d: Small free buffer queue empty.\n",
/* Refill up to the pool's minimum level with GFP_ATOMIC skbs. */
1246 for (i = 0; i < card->sbnr.min; i++)
1248 sb = alloc_skb(NS_SMSKBSIZE, GFP_ATOMIC);
/* Allocation failed: mask EFBIE rather than loop on failures. */
1251 writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG);
1255 skb_queue_tail(&card->sbpool.queue, sb);
1256 skb_reserve(sb, NS_AAL0_HEADER);
1257 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
1263 /* Large buffer queue empty */
1264 if (stat_r & NS_STAT_LFBQE)
1269 writel(NS_STAT_LFBQE, card->membase + STAT);
1270 printk("nicstar%d: Large free buffer queue empty.\n",
1272 for (i = 0; i < card->lbnr.min; i++)
1274 lb = alloc_skb(NS_LGSKBSIZE, GFP_ATOMIC);
1277 writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG);
1281 skb_queue_tail(&card->lbpool.queue, lb);
1282 skb_reserve(lb, NS_SMBUFSIZE);
1283 push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
1289 /* Receive Status Queue is 7/8 full */
1290 if (stat_r & NS_STAT_RSQAF)
1292 writel(NS_STAT_RSQAF, card->membase + STAT);
1293 RXPRINTK("nicstar%d: RSQ almost full.\n", card->index);
1297 card->in_handler = 0;
1298 PRINTK("nicstar%d: end of interrupt service\n", card->index);
1303 static int ns_open(struct atm_vcc *vcc, short vpi, int vci)
1309 int tcr, tcra; /* target cell rate, and absolute value */
1310 int n = 0; /* Number of entries in the TST. Initialized to remove
1311 the compiler warning. */
/* Tail of ns_open(): validates the AAL, resolves the (vpi,vci) pair,
   performs CBR admission control (TST bandwidth plus a free SCD), and
   finally opens the connection in the receive connection table (RCT).
   NOTE(review): the cell-rate computation below uses C "double"
   arithmetic in kernel context -- confirm floating point is actually
   safe/intended here. */
1313 int frscdi = 0; /* Index of the SCD. Initialized to remove the compiler
1314 warning. How I wish compilers were clever enough to
1315 tell which variables can truly be used
1317 int inuse; /* tx or rx vc already in use by another vcc */
1319 card = (ns_dev *) vcc->dev->dev_data;
1320 PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int) vpi, vci);
/* Only AAL5 and raw AAL0 are supported by this driver. */
1321 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
1323 PRINTK("nicstar%d: unsupported AAL.\n", card->index);
1327 if ((error = atm_find_ci(vcc, &vpi, &vci)))
1329 PRINTK("nicstar%d: error in atm_find_ci().\n", card->index);
/* vcmap is indexed by the full vpi/vci pair: vpi in the high bits. */
1332 vc = &(card->vcmap[vpi << card->vcibits | vci]);
1338 if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx)
1340 if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx)
1344 printk("nicstar%d: %s vci already in use.\n", card->index,
1345 inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx");
1349 vcc->flags |= ATM_VF_ADDR;
1351 /* NOTE: You are not allowed to modify an open connection's QOS. To change
1352 that, remove the ATM_VF_PARTIAL flag checking. There may be other changes
1353 needed to do that. */
1354 if (!(vcc->flags & ATM_VF_PARTIAL))
1358 vcc->flags |= ATM_VF_PARTIAL;
/* CBR admission: a zero cell rate is rejected outright. */
1359 if (vcc->qos.txtp.traffic_class == ATM_CBR)
1361 /* Check requested cell rate and availability of SCD */
1362 if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0 &&
1363 vcc->qos.txtp.min_pcr == 0)
1365 PRINTK("nicstar%d: trying to open a CBR vc with cell rate = 0 \n",
1367 vcc->flags &= ~(ATM_VF_ADDR | ATM_VF_PARTIAL);
/* atm_pcr_goal() returns a signed goal; tcra is its magnitude. */
1371 tcr = atm_pcr_goal(&(vcc->qos.txtp));
1372 tcra = tcr >= 0 ? tcr : -tcr;
1374 PRINTK("nicstar%d: target cell rate = %d.\n", card->index,
1375 vcc->qos.txtp.max_pcr);
/* n = number of TST entries needed: tcra scaled by
   NS_TST_NUM_ENTRIES / max_pcr, rounded toward the pcr goal sign. */
1377 tmpd = ((double) tcra) * ((double) NS_TST_NUM_ENTRIES) /
1378 ((double) card->max_pcr);
1383 if (tmpd > (double) n) n++;
1387 if (tmpd < (double) n) n--;
1391 if ((n = (card->tst_free_entries - NS_TST_RESERVED)) <= 0)
1393 PRINTK("nicstar%d: no CBR bandwidth free.\n", card->index);
1394 vcc->flags &= ~(ATM_VF_ADDR | ATM_VF_PARTIAL);
1401 printk("nicstar%d: selected bandwidth < granularity.\n", card->index);
1402 vcc->flags &= ~(ATM_VF_ADDR | ATM_VF_PARTIAL);
/* Do not exceed the requested max_pcr by more than PCR_TOLERANCE. */
1406 if (vcc->qos.txtp.max_pcr > 0)
1408 tmpd = (double) n * (double) card->max_pcr /
1409 (double) NS_TST_NUM_ENTRIES;
1410 if (tmpd > PCR_TOLERANCE * (double) vcc->qos.txtp.max_pcr)
1412 PRINTK("nicstar%d: target cell rate exceeded requested max_pcr.\n",
1417 if (n > (card->tst_free_entries - NS_TST_RESERVED))
1419 PRINTK("nicstar%d: not enough free CBR bandwidth.\n", card->index);
1420 vcc->flags &= ~(ATM_VF_ADDR | ATM_VF_PARTIAL);
1424 card->tst_free_entries -= n;
1426 XPRINTK("nicstar%d: writing %d tst entries.\n", card->index, n);
/* Find a free SCD slot for this fixed-rate channel. */
1427 for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++)
1429 if (card->scd2vc[frscdi] == NULL)
1431 card->scd2vc[frscdi] = vc;
1435 if (frscdi == NS_FRSCD_NUM)
1437 PRINTK("nicstar%d: no SCD available for CBR channel.\n", card->index);
1438 card->tst_free_entries += n;
1439 vcc->flags &= ~(ATM_VF_ADDR | ATM_VF_PARTIAL);
1443 vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE;
1445 scq = get_scq(CBR_SCQSIZE, vc->cbr_scd);
1446 if (scq == (scq_info *) NULL)
1448 PRINTK("nicstar%d: can't get fixed rate SCQ.\n", card->index);
/* Roll back the SCD slot and the reserved TST bandwidth. */
1449 card->scd2vc[frscdi] = NULL;
1450 card->tst_free_entries += n;
1451 vcc->flags &= ~(ATM_VF_ADDR | ATM_VF_PARTIAL);
/* Initialize the SCD in SRAM: SCQ base plus three fixed words. */
1455 u32d[0] = (u32) virt_to_bus(scq->base);
1456 u32d[1] = (u32) 0x00000000;
1457 u32d[2] = (u32) 0xffffffff;
1458 u32d[3] = (u32) 0x00000000;
1459 ns_write_sram(card, vc->cbr_scd, u32d, 4);
1461 fill_tst(card, n, vc);
/* Non-CBR tx shares the common scq0. */
1465 vc->cbr_scd = 0x00000000;
1466 vc->scq = card->scq0;
1469 if (vcc->qos.txtp.traffic_class != ATM_NONE)
1475 if (vcc->qos.rxtp.traffic_class != ATM_NONE)
1483 /* Open the connection in hardware */
1484 if (vcc->qos.aal == ATM_AAL5)
1485 status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN;
1486 else /* vcc->qos.aal == ATM_AAL0 */
1487 status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN;
1489 status |= NS_RCTE_RAWCELLINTEN;
1490 #endif /* RCQ_SUPPORT */
1491 ns_write_sram(card, NS_RCT + (vpi << card->vcibits | vci) *
1492 NS_RCT_ENTRY_SIZE, &status, 1);
1497 vcc->flags |= ATM_VF_READY;
/* Close an ATM connection: tell the hardware to close the RCT entry,
   recycle any partially reassembled AAL5 iovec, drain/park the CBR SCQ
   (appending a TSR if needed), free all TST entries and the SCD, and
   clear the VCC flags.  An RX_DEBUG dump of queue state follows. */
static void ns_close(struct atm_vcc *vcc)
1511 card = vcc->dev->dev_data;
1512 PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index,
1513 (int) vcc->vpi, vcc->vci);
1515 vcc->flags &= ~(ATM_VF_READY);
1517 if (vcc->qos.rxtp.traffic_class != ATM_NONE)
1520 unsigned long flags;
/* Issue the CLOSE_CONNECTION command for this RCT entry with
   interrupts disabled; the command register must be idle first. */
1522 addr = NS_RCT + (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE;
1523 save_flags(flags); cli();
1524 while(CMD_BUSY(card));
1525 writel(NS_CMD_CLOSE_CONNECTION | addr << 2, card->membase + CMD);
1526 restore_flags(flags);
/* If an AAL5 PDU was mid-reassembly, give its buffers back. */
1529 if (vc->rx_iov != NULL)
1531 struct sk_buff *iovb;
1534 stat = readl(card->membase + STAT);
1535 card->sbfqc = ns_stat_sfbqc_get(stat);
1536 card->lbfqc = ns_stat_lfbqc_get(stat);
1538 PRINTK("nicstar%d: closing a VC with pending rx buffers.\n",
1541 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
1543 iovb->atm.iovcnt = 0;
1544 iovb->atm.vcc = NULL;
1545 save_flags(flags); cli();
1546 recycle_iov_buf(card, iovb);
1547 restore_flags(flags);
1552 if (vcc->qos.txtp.traffic_class != ATM_NONE)
1557 if (vcc->qos.txtp.traffic_class == ATM_CBR)
1559 unsigned long flags;
1567 save_flags(flags); cli();
1569 if (scqep == scq->base)
1573 if (scqep == scq->tail)
1575 restore_flags(flags);
1578 /* If the last entry is not a TSR, place one in the SCQ in order to
1579 be able to completely drain it and then close. */
1580 if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next)
1587 tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
1588 scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
1589 scqi = scq->next - scq->base;
1590 tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
1591 tsr.word_3 = 0x00000000;
1592 tsr.word_4 = 0x00000000;
1595 scq->skb[index] = NULL;
/* Circular SCQ: wrap next back to base at the end. */
1596 if (scq->next == scq->last)
1597 scq->next = scq->base;
/* Kick the SCD with the new queue-tail bus address. */
1600 data = (u32) virt_to_bus(scq->next);
1601 ns_write_sram(card, scq->scd, &data, 1);
1604 restore_flags(flags);
1607 /* Free all TST entries */
1608 data = NS_TST_OPCODE_VARIABLE;
1609 for (i = 0; i < NS_TST_NUM_ENTRIES; i++)
1611 if (card->tste2vc[i] == vc)
1613 ns_write_sram(card, card->tst_addr + i, &data, 1);
1614 card->tste2vc[i] = NULL;
1615 card->tst_free_entries++;
/* Release the fixed-rate SCD slot and its SCQ. */
1619 card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL;
1620 free_scq(vc->scq, vcc);
1623 vcc->dev_data = NULL;
1624 vcc->flags &= ~(ATM_VF_PARTIAL | ATM_VF_ADDR);
/* RX_DEBUG: dump controller and driver queue state. */
1629 stat = readl(card->membase + STAT);
1630 cfg = readl(card->membase + CFG);
1631 printk("STAT = 0x%08X CFG = 0x%08X \n", stat, cfg);
1632 printk("TSQ: base = 0x%08X next = 0x%08X last = 0x%08X TSQT = 0x%08X \n",
1633 (u32) card->tsq.base, (u32) card->tsq.next,(u32) card->tsq.last,
1634 readl(card->membase + TSQT));
1635 printk("RSQ: base = 0x%08X next = 0x%08X last = 0x%08X RSQT = 0x%08X \n",
1636 (u32) card->rsq.base, (u32) card->rsq.next,(u32) card->rsq.last,
1637 readl(card->membase + RSQT));
1638 printk("Empty free buffer queue interrupt %s \n",
1639 card->efbie ? "enabled" : "disabled");
1640 printk("SBCNT = %d count = %d LBCNT = %d count = %d \n",
1641 ns_stat_sfbqc_get(stat), card->sbpool.count,
1642 ns_stat_lfbqc_get(stat), card->lbpool.count);
1643 printk("hbpool.count = %d iovpool.count = %d \n",
1644 card->hbpool.count, card->iovpool.count);
1646 #endif /* RX_DEBUG */
/* Spread n fixed-rate TST entries for vc as evenly as possible over the
   transmit schedule table.  Only one TST is used (see comment below).
   NOTE(review): uses "double" arithmetic (q, c) for the spacing
   computation -- confirm kernel FP use is intended. */
static void fill_tst(ns_dev *card, int n, vc_map *vc)
1658 /* It would be very complicated to keep the two TSTs synchronized while
1659 assuring that writes are only made to the inactive TST. So, for now I
1660 will use only one TST. If problems occur, I will change this again */
1662 new_tst = card->tst_addr;
1664 /* Fill procedure */
/* Start filling from the first free entry. */
1666 for (e = 0; e < NS_TST_NUM_ENTRIES; e++)
1668 if (card->tste2vc[e] == NULL)
1671 if (e == NS_TST_NUM_ENTRIES)
1672 printk("nicstar%d: No free TST entries found. \n", card->index);
/* q = desired entry density; an accumulator decides when to emit. */
1676 q = (double) n / (double) NS_TST_NUM_ENTRIES;
1678 data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd);
1680 while (e < NS_TST_NUM_ENTRIES)
1682 if (c >= 1.0 && card->tste2vc[e] == NULL)
1684 card->tste2vc[e] = vc;
1685 ns_write_sram(card, new_tst + e, &data, 1);
1695 printk("nicstar%d: Not enough free TST entries. CBR lower than requested.\n",
1698 /* End of fill procedure */
/* Terminate the TST with an END opcode looping back to its start. */
1700 data = ns_tste_make(NS_TST_OPCODE_END, new_tst);
1701 ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1);
1702 ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1);
1703 card->tst_addr = new_tst;
/* Transmit entry point.  Builds one transmit buffer descriptor (TBD)
   for the skb -- AAL5 (whole PDU in one buffer) or AAL0 (single cell)
   -- and pushes it onto the VC's SCQ via push_scqe().  Returns nonzero
   and bumps tx_err on any validation or push failure. */
static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
1713 unsigned long buflen;
1715 u32 flags; /* TBD flags, not CPU flags */
1717 card = vcc->dev->dev_data;
1718 TXPRINTK("nicstar%d: ns_send() called.\n", card->index);
1719 if ((vc = (vc_map *) vcc->dev_data) == NULL)
1721 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
1722 vcc->stats->tx_err++;
1729 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
1730 vcc->stats->tx_err++;
1735 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
1737 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
1738 vcc->stats->tx_err++;
1743 if (skb->atm.iovcnt != 0)
1745 printk("nicstar%d: No scatter-gather yet.\n", card->index);
1746 vcc->stats->tx_err++;
1753 if (vcc->qos.aal == ATM_AAL5)
/* Round up to a whole number of cells; the extra 8 presumably covers
   the AAL5 trailer -- TODO confirm against the TBD spec. */
1755 buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */
1756 flags = NS_TBD_AAL5;
1757 scqe.word_2 = (u32) virt_to_bus(skb->data);
1758 scqe.word_3 = (u32) skb->len;
1759 scqe.word_4 = ((u32) vcc->vpi) << NS_TBD_VPI_SHIFT |
1760 ((u32) vcc->vci) << NS_TBD_VCI_SHIFT;
1761 flags |= NS_TBD_EOPDU;
1763 else /* (vcc->qos.aal == ATM_AAL0) */
1765 buflen = ATM_CELL_PAYLOAD; /* i.e., 48 bytes */
1766 flags = NS_TBD_AAL0;
/* Skip the 4-byte cell header stored at the front of the skb. */
1767 scqe.word_2 = (u32) virt_to_bus(skb->data) + NS_AAL0_HEADER;
1768 scqe.word_3 = 0x00000000;
1769 if (*skb->data & 0x02) /* Payload type 1 - end of pdu */
1770 flags |= NS_TBD_EOPDU;
1771 scqe.word_4 = *((u32 *) skb->data) & ~NS_TBD_VC_MASK;
1772 /* Force the VPI/VCI to be the same as in VCC struct */
1773 scqe.word_4 |= (((u32) vcc->vpi) << NS_TBD_VPI_SHIFT |
1774 ((u32) vcc->vci) << NS_TBD_VCI_SHIFT) & NS_TBD_VC_MASK;
/* CBR uses the VC's private SCQ and the no-VBR word-1 form. */
1777 if (vcc->qos.txtp.traffic_class == ATM_CBR)
1779 scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen);
1780 scq = ((vc_map *) vcc->dev_data)->scq;
1784 scqe.word_1 = ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen);
1788 if (push_scqe(card, vc, scq, &scqe, skb) != 0) /* Timeout pushing the TBD */
1790 printk("nicstar%d: Timeout pushing TBD.\n", card->index);
1791 vcc->stats->tx_err++;
/* Write one TBD into the SCQ (sleeping with a timeout if the queue is
   full), record the skb for later freeing by drain_scq(), optionally
   append a TSR to bound outstanding TBDs, then kick the SCD with the
   new queue-tail address.
   NOTE(review): the full-queue test followed by
   interruptible_sleep_on() is the classic lost-wakeup race of old
   kernels -- a wakeup between the check and the sleep is missed.
   Confirm this is acceptable/known for this driver vintage. */
static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd,
1803 struct sk_buff *skb)
1805 unsigned long flags;
1812 if (scq->tail == scq->next)
1814 save_flags(flags); cli();
1816 current->timeout = jiffies + SCQFULL_TIMEOUT;
1817 interruptible_sleep_on(&scq->scqfull_waitq);
1818 restore_flags(flags);
/* Remember which skb sits at this SCQ slot so it can be freed. */
1824 index = (int) (scq->next - scq->base);
1825 scq->skb[index] = skb;
1826 XPRINTK("nicstar%d: sending skb at 0x%x (pos %d).\n",
1827 card->index, (u32) skb, index);
1828 XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n",
1829 card->index, tbd->word_1, tbd->word_2, tbd->word_3, tbd->word_4,
1831 if (scq->next == scq->last)
1832 scq->next = scq->base;
1837 if (scq->num_entries == VBR_SCQ_NUM_ENTRIES)
/* Every MAX_TBD_PER_VC / MAX_TBD_PER_SCQ descriptors, insert a TSR so
   the card raises a status interrupt and the queue can be drained. */
1845 if (vc->tbd_count >= MAX_TBD_PER_VC || scq->tbd_count >= MAX_TBD_PER_SCQ)
1847 if (scq->tail == scq->next)
1849 save_flags(flags); cli();
1851 current->timeout = jiffies + SCQFULL_TIMEOUT;
1852 interruptible_sleep_on(&scq->scqfull_waitq);
1853 restore_flags(flags);
1858 tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
1860 scdi = NS_TSR_SCDISVBR;
1862 scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
1863 scqi = scq->next - scq->base;
1864 tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
1865 tsr.word_3 = 0x00000000;
1866 tsr.word_4 = 0x00000000;
/* TSR slots carry no skb. */
1870 scq->skb[index] = NULL;
1871 XPRINTK("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n",
1872 card->index, tsr.word_1, tsr.word_2, tsr.word_3, tsr.word_4,
1874 if (scq->next == scq->last)
1875 scq->next = scq->base;
1882 PRINTK("nicstar%d: Could not write TSI.\n", card->index);
/* Tell the SCD where the queue tail now is. */
1885 data = (u32) virt_to_bus(scq->next);
1886 ns_write_sram(card, scq->scd, &data, 1);
/* Drain the transmit status queue (TSQ): for each valid TSI that is not
   a timer rollover, map its SCD index back to the owning VC, drain that
   VC's SCQ up to the reported position, and wake any sender blocked on
   a full SCQ.  Finally report the consumed position via TSQH. */
static void process_tsq(ns_dev *card)
1899 if (ns_tsi_isempty(card->tsq.next))
1901 while (!ns_tsi_isempty(card->tsq.next))
1903 if (!ns_tsi_tmrof(card->tsq.next))
1905 scdi = ns_tsi_getscdindex(card->tsq.next);
1906 if (scdi == NS_TSI_SCDISVBR)
1910 if (card->scd2vc[scdi] == NULL)
1912 printk("nicstar%d: could not find VC from SCD index.\n",
/* Re-initialize the consumed TSI so it reads as empty next time. */
1914 ns_tsi_init(card->tsq.next);
1917 scq = card->scd2vc[scdi]->scq;
1919 drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next));
1921 wake_up_interruptible(&(scq->scqfull_waitq));
1924 ns_tsi_init(card->tsq.next);
1925 previous = card->tsq.next;
1926 if (card->tsq.next == card->tsq.last)
1927 card->tsq.next = card->tsq.base;
/* Hand the last-processed offset back to the controller. */
1931 writel((((u32) previous) - ((u32) card->tsq.base)),
1932 card->membase + TSQH);
/* Free all transmitted skbs in the SCQ from just past scq->tail up to
   (and including) index pos, honoring the vcc->pop callback when set,
   then advance scq->tail to pos. */
static void drain_scq(ns_dev *card, scq_info *scq, int pos)
1939 struct atm_vcc *vcc;
1940 struct sk_buff *skb;
1943 XPRINTK("nicstar%d: drain_scq() called, scq at 0x%x, pos %d.\n",
1944 card->index, (u32) scq, pos);
1945 if (pos >= scq->num_entries)
1947 printk("nicstar%d: Bad index on drain_scq().\n", card->index);
/* Walk circularly from the slot after tail toward pos. */
1951 i = (int) (scq->tail - scq->base);
1952 if (++i == scq->num_entries)
1957 XPRINTK("nicstar%d: freeing skb at 0x%x (index %d).\n",
1958 card->index, (u32) skb, i);
1962 if (vcc->pop != NULL)
1968 if (++i == scq->num_entries)
1971 scq->tail = scq->base + pos;
/* Drain the receive status queue (RSQ): hand each valid entry to
   dequeue_rx(), re-initialize it, then report the consumed position
   back to the controller via RSQH. */
static void process_rsq(ns_dev *card)
1980 if (!ns_rsqe_valid(card->rsq.next))
1982 while (ns_rsqe_valid(card->rsq.next))
1984 dequeue_rx(card, card->rsq.next);
1985 ns_rsqe_init(card->rsq.next);
1986 previous = card->rsq.next;
1987 if (card->rsq.next == card->rsq.last)
1988 card->rsq.next = card->rsq.base;
1992 writel((((u32) previous) - ((u32) card->rsq.base)),
1993 card->membase + RSQH);
/* Process one receive status entry.  AAL0 cells are copied one-by-one
   into fresh small skbs and pushed up immediately.  AAL5 buffers are
   collected per-VC into an iovec skb (vc->rx_iov) until the end-of-PDU
   entry arrives; the completed PDU is then delivered from a small
   buffer, small+large pair, or copied into a huge buffer, and all
   hardware buffers are recycled back to their free queues. */
static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2002 struct sk_buff *iovb;
2004 struct atm_vcc *vcc;
2005 struct sk_buff *skb;
2006 unsigned short aal5_len;
2010 stat = readl(card->membase + STAT);
2011 card->sbfqc = ns_stat_sfbqc_get(stat);
2012 card->lbfqc = ns_stat_lfbqc_get(stat);
/* buffer_handle carries the skb pointer we gave the hardware. */
2014 skb = (struct sk_buff *) rsqe->buffer_handle;
2015 vpi = ns_rsqe_vpi(rsqe);
2016 vci = ns_rsqe_vci(rsqe);
2017 if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits)
2019 printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n",
2020 card->index, vpi, vci);
2021 recycle_rx_buf(card, skb);
2025 vc = &(card->vcmap[vpi << card->vcibits | vci]);
2028 RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n",
2029 card->index, vpi, vci);
2030 recycle_rx_buf(card, skb);
2036 if (vcc->qos.aal == ATM_AAL0)
2039 unsigned char *cell;
/* One RSQE may cover several raw cells; deliver each separately. */
2043 for (i = ns_rsqe_cellcount(rsqe); i; i--)
2045 if ((sb = alloc_skb(NS_SMSKBSIZE, GFP_ATOMIC)) == NULL)
2047 printk("nicstar%d: Can't allocate buffers for aal0.\n",
2049 vcc->stats->rx_drop += i;
2052 if (!atm_charge(vcc, sb->truesize))
2054 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
2056 vcc->stats->rx_drop += i - 1; /* already increased by 1 */
2060 /* Rebuild the header */
2061 *((u32 *) sb->data) = rsqe->word_1 << 4 |
2062 (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000);
2063 if (i == 1 && ns_rsqe_eopdu(rsqe))
2064 *((u32 *) sb->data) |= 0x00000002;
2065 skb_put(sb, NS_AAL0_HEADER);
2066 memcpy(sb->tail, cell, ATM_CELL_PAYLOAD);
2067 skb_put(sb, ATM_CELL_PAYLOAD);
2072 cell += ATM_CELL_PAYLOAD;
2075 recycle_rx_buf(card, skb);
2079 /* To reach this point, the AAL layer can only be AAL5 */
/* Get (or lazily allocate) the per-VC iovec collector skb. */
2081 if ((iovb = vc->rx_iov) == NULL)
2083 iovb = skb_dequeue(&(card->iovpool.queue));
2084 if (iovb == NULL) /* No buffers in the queue */
2086 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC);
2089 printk("nicstar%d: Out of iovec buffers.\n", card->index);
2090 vcc->stats->rx_drop++;
2091 recycle_rx_buf(card, skb);
/* Keep the iovec pool topped up above its low-water mark. */
2096 if (--card->iovpool.count < card->iovnr.min)
2098 struct sk_buff *new_iovb;
2099 if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL)
2101 skb_queue_tail(&card->iovpool.queue, new_iovb);
2102 card->iovpool.count++;
2106 iovb->atm.iovcnt = 0;
2108 iovb->tail = iovb->data = iovb->head;
2109 iovb->atm.vcc = vcc;
2110 /* IMPORTANT: a pointer to the sk_buff containing the small or large
2111 buffer is stored as iovec base, NOT a pointer to the
2112 small or large buffer itself. */
2114 else if (iovb->atm.iovcnt >= NS_MAX_IOVECS)
2116 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
2117 vcc->stats->rx_err++;
2118 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
2119 iovb->atm.iovcnt = 0;
2121 iovb->tail = iovb->data = iovb->head;
2122 iovb->atm.vcc = vcc;
/* Append this hardware buffer as the next iovec segment. */
2124 iov = &((struct iovec *) iovb->data)[iovb->atm.iovcnt++];
2125 iov->iov_base = (void *) skb;
2126 iov->iov_len = ns_rsqe_cellcount(rsqe) * 48;
2127 iovb->len += iov->iov_len;
/* Sanity: first segment must be a small buffer, later ones large. */
2129 if (iovb->atm.iovcnt == 1)
2131 if (skb->list != &card->sbpool.queue)
2133 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
2135 which_list(card, skb);
2136 vcc->stats->rx_err++;
2137 recycle_rx_buf(card, skb);
2139 recycle_iov_buf(card, iovb);
2143 else /* iovb->atm.iovcnt >= 2 */
2145 if (skb->list != &card->lbpool.queue)
2147 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
2149 which_list(card, skb);
2150 vcc->stats->rx_err++;
2151 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
2154 recycle_iov_buf(card, iovb);
2159 if (ns_rsqe_eopdu(rsqe))
/* AAL5 trailer: length field sits 6 bytes before the end. */
2161 aal5_len = *((unsigned short *) ((u32) skb->data + iov->iov_len - 6));
2162 /* Swap byte order. Is it just me or the nicstar manual sais this should
2163 already be in little endian format? */
2164 aal5_len = ((aal5_len & 0x00ff) << 8 | (aal5_len & 0xff00) >> 8);
2165 len = (aal5_len == 0x0000) ? 0x10000 : aal5_len;
/* Reject CRC errors and PDU lengths inconsistent with received bytes. */
2166 if (ns_rsqe_crcerr(rsqe) ||
2167 len + 8 > iovb->len || len + (47 + 8) < iovb->len)
2169 if (ns_rsqe_crcerr(rsqe))
2170 printk("nicstar%d: AAL5 CRC error.\n", card->index);
2172 printk("nicstar%d: AAL5 PDU size mismatch.\n", card->index);
2173 vcc->stats->rx_err++;
2174 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, iovb->atm.iovcnt);
2176 recycle_iov_buf(card, iovb);
2180 /* By this point we (hopefully) have a complete SDU without errors. */
2182 if (iovb->atm.iovcnt == 1) /* Just a small buffer */
2184 /* skb points to a small buffer */
2185 if (!atm_charge(vcc, skb->truesize))
2187 push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data),
2193 dequeue_sm_buf(card, skb);
2194 #ifdef NS_USE_DESTRUCTORS
2195 skb->destructor = ns_sb_destructor;
2196 #endif /* NS_USE_DESTRUCTORS */
2199 vcc->push(vcc, skb);
2203 else if (iovb->atm.iovcnt == 2) /* One small plus one large buffer */
2207 sb = (struct sk_buff *) (iov - 1)->iov_base;
2208 /* skb points to a large buffer */
/* Short PDU: it fits in the small buffer; return the large one. */
2210 if (len <= NS_SMBUFSIZE)
2212 if (!atm_charge(vcc, sb->truesize))
2214 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
2220 dequeue_sm_buf(card, sb);
2221 #ifdef NS_USE_DESTRUCTORS
2222 sb->destructor = ns_sb_destructor;
2223 #endif /* NS_USE_DESTRUCTORS */
2230 push_rxbufs(card, BUF_LG, (u32) skb,
2231 (u32) virt_to_bus(skb->data), 0, 0);
2234 else /* len > NS_SMBUFSIZE, the usual case */
2236 if (!atm_charge(vcc, skb->truesize))
2238 push_rxbufs(card, BUF_LG, (u32) skb,
2239 (u32) virt_to_bus(skb->data), 0, 0);
2243 dequeue_lg_buf(card, skb);
2244 #ifdef NS_USE_DESTRUCTORS
2245 skb->destructor = ns_lb_destructor;
2246 #endif /* NS_USE_DESTRUCTORS */
/* Prepend the small buffer's bytes in front of the large buffer. */
2247 skb_push(skb, NS_SMBUFSIZE);
2248 memcpy(skb->data, sb->data, NS_SMBUFSIZE);
2249 skb_put(skb, len - NS_SMBUFSIZE);
2252 vcc->push(vcc, skb);
2256 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
2262 else /* Must push a huge buffer */
2264 struct sk_buff *hb, *sb, *lb;
2265 int remaining, tocopy;
2268 hb = skb_dequeue(&(card->hbpool.queue));
2269 if (hb == NULL) /* No buffers in the queue */
2272 hb = alloc_skb(NS_HBUFSIZE, GFP_ATOMIC);
2275 printk("nicstar%d: Out of huge buffers.\n", card->index);
2276 vcc->stats->rx_drop++;
2277 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
2280 recycle_iov_buf(card, iovb);
2283 else if (card->hbpool.count < card->hbnr.min)
2285 struct sk_buff *new_hb;
2286 if ((new_hb = alloc_skb(NS_HBUFSIZE, GFP_ATOMIC)) != NULL)
2288 skb_queue_tail(&card->hbpool.queue, new_hb);
2289 card->hbpool.count++;
2294 if (--card->hbpool.count < card->hbnr.min)
2296 struct sk_buff *new_hb;
2297 if ((new_hb = alloc_skb(NS_HBUFSIZE, GFP_ATOMIC)) != NULL)
2299 skb_queue_tail(&card->hbpool.queue, new_hb);
2300 card->hbpool.count++;
2302 if (card->hbpool.count < card->hbnr.min)
2304 if ((new_hb = alloc_skb(NS_HBUFSIZE, GFP_ATOMIC)) != NULL)
2306 skb_queue_tail(&card->hbpool.queue, new_hb);
2307 card->hbpool.count++;
2312 iov = (struct iovec *) iovb->data;
2314 if (!atm_charge(vcc, hb->truesize))
2316 recycle_iovec_rx_bufs(card, iov, iovb->atm.iovcnt);
2317 if (card->hbpool.count < card->hbnr.max)
2319 skb_queue_tail(&card->hbpool.queue, hb);
2320 card->hbpool.count++;
2327 /* Copy the small buffer to the huge buffer */
2328 sb = (struct sk_buff *) iov->iov_base;
2329 memcpy(hb->data, sb->data, iov->iov_len);
2330 skb_put(hb, iov->iov_len);
2331 remaining = len - iov->iov_len;
2333 /* Free the small buffer */
2334 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
2337 /* Copy all large buffers to the huge buffer and free them */
2338 for (j = 1; j < iovb->atm.iovcnt; j++)
2340 lb = (struct sk_buff *) iov->iov_base;
2341 tocopy = MIN(remaining, iov->iov_len);
2342 memcpy(hb->tail, lb->data, tocopy);
2343 skb_put(hb, tocopy);
2345 remaining -= tocopy;
2346 push_rxbufs(card, BUF_LG, (u32) lb,
2347 (u32) virt_to_bus(lb->data), 0, 0);
2350 if (remaining != 0 || hb->len != len)
2351 printk("nicstar%d: Huge buffer len mismatch.\n", card->index);
2352 #endif /* EXTRA_DEBUG */
2354 #ifdef NS_USE_DESTRUCTORS
2355 hb->destructor = ns_hb_destructor;
2356 #endif /* NS_USE_DESTRUCTORS */
2364 recycle_iov_buf(card, iovb);
2371 #ifdef NS_USE_DESTRUCTORS
/* skb destructor for small rx buffers (NS_USE_DESTRUCTORS only):
   when the small free-buffer queue drops below its low-water mark,
   allocate and enqueue replacement small buffers. */
static void ns_sb_destructor(struct sk_buff *sb)
2378 card = (ns_dev *) sb->atm.vcc->dev->dev_data;
2379 stat = readl(card->membase + STAT);
2380 card->sbfqc = ns_stat_sfbqc_get(stat);
2381 card->lbfqc = ns_stat_lfbqc_get(stat);
2385 sb = alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
2388 skb_queue_tail(&card->sbpool.queue, sb);
/* Reserve room for the rebuilt AAL0 cell header. */
2389 skb_reserve(sb, NS_AAL0_HEADER);
2390 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
2391 } while (card->sbfqc < card->sbnr.min);
/* skb destructor for large rx buffers (NS_USE_DESTRUCTORS only):
   refills the large free-buffer queue up to its low-water mark. */
static void ns_lb_destructor(struct sk_buff *lb)
2401 card = (ns_dev *) lb->atm.vcc->dev->dev_data;
2402 stat = readl(card->membase + STAT);
2403 card->sbfqc = ns_stat_sfbqc_get(stat);
2404 card->lbfqc = ns_stat_lfbqc_get(stat);
2408 lb = alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
2411 skb_queue_tail(&card->lbpool.queue, lb);
/* Reserve NS_SMBUFSIZE headroom so the small-buffer prefix can be
   prepended with skb_push() on delivery. */
2412 skb_reserve(lb, NS_SMBUFSIZE);
2413 push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
2414 } while (card->lbfqc < card->lbnr.min);
/* skb destructor for huge rx buffers (NS_USE_DESTRUCTORS only):
   tops the huge-buffer pool back up to its init level. */
static void ns_hb_destructor(struct sk_buff *hb)
2423 card = (ns_dev *) hb->atm.vcc->dev->dev_data;
2425 while (card->hbpool.count < card->hbnr.init)
2427 hb = alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
2430 skb_queue_tail(&card->hbpool.queue, hb);
2431 card->hbpool.count++;
2435 #endif /* NS_USE_DESTRUCTORS */
/* Return a single small or large rx buffer to the hardware free-buffer
   queue, dispatching on which pool list the skb belongs to. */
static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb)
2441 if (skb->list == &card->sbpool.queue)
2442 push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0);
2443 else if (skb->list == &card->lbpool.queue)
2444 push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0);
2447 printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
/* Return the first 'count' rx buffers referenced by an iovec array to
   their hardware free-buffer queues.  Each iov_base holds the skb
   pointer itself (see the note in dequeue_rx()). */
static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count)
2456 struct sk_buff *skb;
2458 for (; count > 0; count--)
2460 skb = (struct sk_buff *) (iov++)->iov_base;
2461 if (skb->list == &card->sbpool.queue)
2462 push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data),
2464 else if (skb->list == &card->lbpool.queue)
2465 push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data),
2469 printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
/* Give an iovec collector skb back to the iovec pool, unless the pool
   is already at its maximum level. */
static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb)
2479 if (card->iovpool.count < card->iovnr.max)
2481 skb_queue_tail(&card->iovpool.queue, iovb);
2482 card->iovpool.count++;
/* Account for a small buffer taken from the hardware free queue and,
   if the queue has fallen below its init (or min, with destructors)
   level, allocate and enqueue a replacement small buffer. */
static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
2493 #ifdef NS_USE_DESTRUCTORS
2494 if (card->sbfqc < card->sbnr.min)
2496 if (card->sbfqc < card->sbnr.init)
2498 struct sk_buff *new_sb;
2499 if ((new_sb = alloc_skb(NS_SMSKBSIZE, GFP_ATOMIC)) != NULL)
2501 skb_queue_tail(&card->sbpool.queue, new_sb);
2502 skb_reserve(new_sb, NS_AAL0_HEADER);
2503 push_rxbufs(card, BUF_SM, (u32) new_sb,
2504 (u32) virt_to_bus(new_sb->data), 0, 0);
2507 if (card->sbfqc < card->sbnr.init)
2508 #endif /* NS_USE_DESTRUCTORS */
2510 struct sk_buff *new_sb;
2511 if ((new_sb = alloc_skb(NS_SMSKBSIZE, GFP_ATOMIC)) != NULL)
2513 skb_queue_tail(&card->sbpool.queue, new_sb);
2514 skb_reserve(new_sb, NS_AAL0_HEADER);
2515 push_rxbufs(card, BUF_SM, (u32) new_sb,
2516 (u32) virt_to_bus(new_sb->data), 0, 0);
/* Account for a large buffer taken from the hardware free queue and,
   if the queue has fallen below its init (or min, with destructors)
   level, allocate and enqueue a replacement large buffer. */
static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
2526 #ifdef NS_USE_DESTRUCTORS
2527 if (card->lbfqc < card->lbnr.min)
2529 if (card->lbfqc < card->lbnr.init)
2531 struct sk_buff *new_lb;
2532 if ((new_lb = alloc_skb(NS_LGSKBSIZE, GFP_ATOMIC)) != NULL)
2534 skb_queue_tail(&card->lbpool.queue, new_lb);
2535 skb_reserve(new_lb, NS_SMBUFSIZE);
2536 push_rxbufs(card, BUF_LG, (u32) new_lb,
2537 (u32) virt_to_bus(new_lb->data), 0, 0);
2540 if (card->lbfqc < card->lbnr.init)
2541 #endif /* NS_USE_DESTRUCTORS */
2543 struct sk_buff *new_lb;
2544 if ((new_lb = alloc_skb(NS_LGSKBSIZE, GFP_ATOMIC)) != NULL)
2546 skb_queue_tail(&card->lbpool.queue, new_lb);
2547 skb_reserve(new_lb, NS_SMBUFSIZE);
2548 push_rxbufs(card, BUF_LG, (u32) new_lb,
2549 (u32) virt_to_bus(new_lb->data), 0, 0);
/* /proc read callback: emits one line per call (selected by *pos via
   the 'left' countdown) -- buffer pool levels, interrupt count,
   25.6 Mbps PHY registers (read through the utility port), and the TST
   entry map.  Returns the number of bytes written into 'page'. */
static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2563 card = (ns_dev *) dev->dev_data;
2564 stat = readl(card->membase + STAT);
2566 return sprintf(page, "Pool count min init max \n");
2568 return sprintf(page, "Small %5d %5d %5d %5d \n",
2569 ns_stat_sfbqc_get(stat), card->sbnr.min, card->sbnr.init,
2572 return sprintf(page, "Large %5d %5d %5d %5d \n",
2573 ns_stat_lfbqc_get(stat), card->lbnr.min, card->lbnr.init,
2576 return sprintf(page, "Huge %5d %5d %5d %5d \n", card->hbpool.count,
2577 card->hbnr.min, card->hbnr.init, card->hbnr.max);
2579 return sprintf(page, "Iovec %5d %5d %5d %5d \n", card->iovpool.count,
2580 card->iovnr.min, card->iovnr.init, card->iovnr.max);
2584 retval = sprintf(page, "Interrupt counter: %u \n", card->intcnt);
2588 /* Dump 25.6 Mbps PHY registers */
2589 if (card->max_pcr == IDT_25_PCR && !left--)
2594 for (i = 0; i < 4; i++)
/* Utility-port read: busy-wait, issue READ_UTILITY, busy-wait,
   then fetch the low byte from DR0. */
2596 while (CMD_BUSY(card));
2597 writel(NS_CMD_READ_UTILITY | 0x00000200 | i, card->membase + CMD);
2598 while (CMD_BUSY(card));
2599 phy_regs[i] = readl(card->membase + DR0) & 0x000000FF;
2602 return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n",
2603 phy_regs[0], phy_regs[1], phy_regs[2], phy_regs[3]);
2607 if (left-- < NS_TST_NUM_ENTRIES)
2609 if (card->tste2vc[left + 1] == NULL)
2610 return sprintf(page, "%5d - VBR/UBR \n", left + 1);
2612 return sprintf(page, "%5d - %d %d \n", left + 1,
2613 card->tste2vc[left + 1]->tx_vcc->vpi,
2614 card->tste2vc[left + 1]->tx_vcc->vci);
/* Device ioctl handler: query buffer pool levels (copy a pool_levels
   struct to user space), set new pool levels (validated min < init <
   max and against the TOP_* ceilings), adjust pool populations to
   their init levels, and otherwise fall through to the PHY's ioctl. */
static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void *arg)
2627 unsigned long flags;
2629 card = dev->dev_data;
2633 if (get_user(pl.buftype, &((pool_levels *) arg)->buftype))
2637 case NS_BUFTYPE_SMALL:
2638 pl.count = ns_stat_sfbqc_get(readl(card->membase + STAT));
2639 pl.level.min = card->sbnr.min;
2640 pl.level.init = card->sbnr.init;
2641 pl.level.max = card->sbnr.max;
2644 case NS_BUFTYPE_LARGE:
2645 pl.count = ns_stat_lfbqc_get(readl(card->membase + STAT));
2646 pl.level.min = card->lbnr.min;
2647 pl.level.init = card->lbnr.init;
2648 pl.level.max = card->lbnr.max;
2651 case NS_BUFTYPE_HUGE:
2652 pl.count = card->hbpool.count;
2653 pl.level.min = card->hbnr.min;
2654 pl.level.init = card->hbnr.init;
2655 pl.level.max = card->hbnr.max;
2658 case NS_BUFTYPE_IOVEC:
2659 pl.count = card->iovpool.count;
2660 pl.level.min = card->iovnr.min;
2661 pl.level.init = card->iovnr.init;
2662 pl.level.max = card->iovnr.max;
2669 if (!copy_to_user((pool_levels *) arg, &pl, sizeof(pl)))
2670 return (sizeof(pl));
2677 if (copy_from_user(&pl, (pool_levels *) arg, sizeof(pl)))
/* New levels must be strictly ordered and min nonzero. */
2679 if (pl.level.min >= pl.level.init || pl.level.init >= pl.level.max)
2681 if (pl.level.min == 0)
2685 case NS_BUFTYPE_SMALL:
2686 if (pl.level.max > TOP_SB)
2688 card->sbnr.min = pl.level.min;
2689 card->sbnr.init = pl.level.init;
2690 card->sbnr.max = pl.level.max;
2693 case NS_BUFTYPE_LARGE:
2694 if (pl.level.max > TOP_LB)
2696 card->lbnr.min = pl.level.min;
2697 card->lbnr.init = pl.level.init;
2698 card->lbnr.max = pl.level.max;
2701 case NS_BUFTYPE_HUGE:
2702 if (pl.level.max > TOP_HB)
2704 card->hbnr.min = pl.level.min;
2705 card->hbnr.init = pl.level.init;
2706 card->hbnr.max = pl.level.max;
2709 case NS_BUFTYPE_IOVEC:
2710 if (pl.level.max > TOP_IOVB)
2712 card->iovnr.min = pl.level.min;
2713 card->iovnr.init = pl.level.init;
2714 card->iovnr.max = pl.level.max;
2726 btype = (int) arg; /* an int is the same size as a pointer */
2729 case NS_BUFTYPE_SMALL:
/* Grow the small-buffer free queue up to its init level. */
2730 while (card->sbfqc < card->sbnr.init)
2734 sb = alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
2737 skb_queue_tail(&card->sbpool.queue, sb);
2738 skb_reserve(sb, NS_AAL0_HEADER);
2739 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
2743 case NS_BUFTYPE_LARGE:
2744 while (card->lbfqc < card->lbnr.init)
2748 lb = alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
2751 skb_queue_tail(&card->lbpool.queue, lb);
2752 skb_reserve(lb, NS_SMBUFSIZE);
2753 push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
2757 case NS_BUFTYPE_HUGE:
/* Shrink then grow the huge-buffer pool toward its init level,
   with interrupts disabled around each queue operation. */
2758 while (card->hbpool.count > card->hbnr.init)
2762 save_flags(flags); cli();
2763 hb = skb_dequeue(&card->hbpool.queue);
2764 card->hbpool.count--;
2765 restore_flags(flags);
2767 printk("nicstar%d: huge buffer count inconsistent.\n",
2773 while (card->hbpool.count < card->hbnr.init)
2777 hb = alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
2780 save_flags(flags); cli();
2781 skb_queue_tail(&card->hbpool.queue, hb);
2782 card->hbpool.count++;
2783 restore_flags(flags);
2787 case NS_BUFTYPE_IOVEC:
2788 while (card->iovpool.count > card->iovnr.init)
2790 struct sk_buff *iovb;
2792 save_flags(flags); cli();
2793 iovb = skb_dequeue(&card->iovpool.queue);
2794 card->iovpool.count--;
2795 restore_flags(flags);
2797 printk("nicstar%d: iovec buffer count inconsistent.\n",
2803 while (card->iovpool.count < card->iovnr.init)
2805 struct sk_buff *iovb;
2807 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
2810 save_flags(flags); cli();
2811 skb_queue_tail(&card->iovpool.queue, iovb);
2812 card->iovpool.count++;
2813 restore_flags(flags);
/* Unknown command: delegate to the PHY driver if it has an ioctl. */
2824 if (dev->phy->ioctl == NULL) return -EINVAL;
2825 return dev->phy->ioctl(dev, cmd, arg);
/* Debug helper: print which buffer pool (small/large/huge/iovec) an
   skb currently belongs to, judged by its list pointer. */
static void which_list(ns_dev *card, struct sk_buff *skb)
2833 printk("It's a %s buffer.\n", skb->list == &card->sbpool.queue ?
2834 "small" : skb->list == &card->lbpool.queue ? "large" :
2835 skb->list == &card->hbpool.queue ? "huge" :
2836 skb->list == &card->iovpool.queue ? "iovec" : "unknown");
/* Periodic timer callback: for every card, check STAT for pending TSI
   or end-of-PDU work, acknowledge it, and re-arm the timer. */
static void ns_poll(unsigned long arg)
2845 unsigned long flags;
2848 PRINTK("nicstar: Entering ns_poll().\n");
2849 for (i = 0; i < num_cards; i++)
2852 save_flags(flags); cli();
2855 printk("nicstar: Re-entering ns_poll()???\n");
2859 if (card->in_handler)
2862 printk("nicstar%d: ns_poll called while in interrupt handler!?\n",
2868 stat_r = readl(card->membase + STAT);
2869 if (stat_r & NS_STAT_TSIF)
2870 stat_w |= NS_STAT_TSIF;
2871 if (stat_r & NS_STAT_EOPDU)
2872 stat_w |= NS_STAT_EOPDU;
/* NOTE(review): writel() takes (value, address) in Linux, but this
   call passes the address first and the value second -- the arguments
   appear swapped.  Likely bug: should be
   writel(stat_w, card->membase + STAT).  Confirm before changing. */
2877 writel(card->membase + STAT, stat_w);
2879 restore_flags(flags);
2881 mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD);
2882 PRINTK("nicstar: Leaving ns_poll().\n");