Changes invalid argument to operation not supported (et_linux.c)
[tomato.git] / release / src-rt / et / sys / et_linux.c
blob9057ff75759029a8077762727d7cee30280fb17d
1 /*
2 * Linux device driver for
3 * Broadcom BCM47XX 10/100/1000 Mbps Ethernet Controller
5 * Copyright (C) 2010, Broadcom Corporation
6 * All Rights Reserved.
7 *
8 * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
9 * the contents of this file may not be disclosed to third parties, copied
10 * or duplicated in any form, in whole or in part, without the prior
11 * written permission of Broadcom Corporation.
13 * $Id: et_linux.c,v 1.131.8.9 2010-12-20 04:28:38 Exp $
16 #define __UNDEF_NO_VERSION__
18 #include <typedefs.h>
20 #include <linux/module.h>
21 #include <linuxver.h>
22 #include <bcmdefs.h>
23 #include <osl.h>
25 #include <linux/types.h>
26 #include <linux/errno.h>
27 #include <linux/pci.h>
28 #include <linux/init.h>
29 #include <linux/kernel.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/delay.h>
34 #include <linux/string.h>
35 #include <linux/sockios.h>
36 #ifdef SIOCETHTOOL
37 #include <linux/ethtool.h>
38 #endif /* SIOCETHTOOL */
39 #include <linux/ip.h>
40 #include <linux/if_vlan.h>
42 #include <asm/system.h>
43 #include <asm/io.h>
44 #include <asm/irq.h>
45 #include <asm/pgtable.h>
46 #include <asm/uaccess.h>
48 #include <epivers.h>
49 #include <bcmendian.h>
50 #include <bcmdefs.h>
51 #include <proto/ethernet.h>
52 #include <proto/vlan.h>
53 #include <bcmdevs.h>
54 #include <bcmenetmib.h>
55 #include <bcmgmacmib.h>
56 #include <bcmenetrxh.h>
57 #include <bcmenetphy.h>
58 #include <etioctl.h>
59 #include <bcmutils.h>
60 #include <pcicfg.h>
61 #include <et_dbg.h>
62 #include <hndsoc.h>
63 #include <bcmgmacrxh.h>
64 #include <etc.h>
65 #ifdef HNDCTF
66 #include <ctf/hndctf.h>
67 #endif /* HNDCTF */
70 #ifdef ET_ALL_PASSIVE
71 #define ET_ALL_PASSIVE_ENAB(et) (!(et)->all_dispatch_mode)
72 #else /* ET_ALL_PASSIVE */
73 #define ET_ALL_PASSIVE_ENAB(et) 0
74 #endif /* ET_ALL_PASSIVE */
76 MODULE_LICENSE("Proprietary");
78 #ifdef PLC
79 typedef struct et_plc {
80 bool hw; /* plc hardware present */
81 int32 txvid; /* vlan used to send to plc */
82 int32 rxvid1; /* frames rx'd on this will be sent to br */
83 int32 rxvid2; /* frames rx'd on this will be sent to wds */
84 int32 rxvid3; /* frames rx'd on this will be sent to plc */
85 struct net_device *txdev; /* plc device (vid 3) for tx */
86 struct net_device *rxdev1; /* plc device (vid 4) for rx */
87 struct net_device *rxdev2; /* plc device (vid 5) for tx & rx */
88 struct net_device *rxdev3; /* plc device (vid 6) for tx & rx */
89 } et_plc_t;
90 #endif /* PLC */
92 /* In 2.6.20 kernels work functions get passed a pointer to the
93 * struct work, so things will continue to work as long as the work
94 * structure is the first component of the task structure.
96 typedef struct et_task {
97 struct work_struct work;
98 void *context;
99 } et_task_t;
101 typedef struct et_info {
102 etc_info_t *etc; /* pointer to common os-independent data */
103 struct net_device *dev; /* backpoint to device */
104 struct pci_dev *pdev; /* backpoint to pci_dev */
105 void *osh; /* pointer to os handle */
106 #ifdef HNDCTF
107 ctf_t *cih; /* ctf instance handle */
108 #endif /* HNDCTF */
109 struct semaphore sem; /* use semaphore to allow sleep */
110 spinlock_t lock; /* per-device perimeter lock */
111 spinlock_t txq_lock; /* lock for txq protection */
112 struct sk_buff_head txq[NUMTXQ]; /* send queue */
113 void *regsva; /* opaque chip registers virtual address */
114 struct timer_list timer; /* one second watchdog timer */
115 struct net_device_stats stats; /* stat counter reporting structure */
116 int events; /* bit channel between isr and dpc */
117 struct et_info *next; /* pointer to next et_info_t in chain */
118 #ifndef BCM_NAPI
119 struct tasklet_struct tasklet; /* dpc tasklet */
120 #endif /* BCM_NAPI */
121 #ifdef ET_ALL_PASSIVE
122 et_task_t dpc_task; /* work queue for rx dpc */
123 et_task_t txq_task; /* work queue for tx frames */
124 bool all_dispatch_mode; /* dispatch mode: tasklets or passive */
125 #endif /* ET_ALL_PASSIVE */
126 bool resched; /* dpc was rescheduled */
127 #ifdef PLC
128 et_plc_t plc; /* plc interface info */
129 #endif /* PLC */
130 } et_info_t;
132 static int et_found = 0;
133 static et_info_t *et_list = NULL;
135 /* defines */
136 #define DATAHIWAT 50 /* data msg txq hiwat mark */
138 #define ET_INFO(dev) (et_info_t *)((dev)->priv)
140 #define ET_LOCK(et) \
141 do { \
142 if (ET_ALL_PASSIVE_ENAB(et)) \
143 down(&(et)->sem); \
144 else \
145 spin_lock_bh(&(et)->lock); \
146 } while (0)
148 #define ET_UNLOCK(et) \
149 do { \
150 if (ET_ALL_PASSIVE_ENAB(et)) \
151 up(&(et)->sem); \
152 else \
153 spin_unlock_bh(&(et)->lock); \
154 } while (0)
156 #define ET_TXQ_LOCK(et) spin_lock_bh(&(et)->txq_lock)
157 #define ET_TXQ_UNLOCK(et) spin_unlock_bh(&(et)->txq_lock)
159 #define INT_LOCK(flags) local_irq_save(flags)
160 #define INT_UNLOCK(flags) local_irq_restore(flags)
162 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 4, 5)
163 #error Linux version must be newer than 2.4.5
164 #endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 4, 5) */
166 /* prototypes called by etc.c */
167 void et_init(et_info_t *et, uint options);
168 void et_reset(et_info_t *et);
169 void et_link_up(et_info_t *et);
170 void et_link_down(et_info_t *et);
171 void et_up(et_info_t *et);
172 void et_down(et_info_t *et, int reset);
173 void et_dump(et_info_t *et, struct bcmstrbuf *b);
175 /* local prototypes */
176 static void et_free(et_info_t *et);
177 static int et_open(struct net_device *dev);
178 static int et_close(struct net_device *dev);
179 static int et_start(struct sk_buff *skb, struct net_device *dev);
180 static void et_sendnext(et_info_t *et);
181 static struct net_device_stats *et_get_stats(struct net_device *dev);
182 static int et_set_mac_address(struct net_device *dev, void *addr);
183 static void et_set_multicast_list(struct net_device *dev);
184 static void _et_watchdog(struct net_device *data);
185 static void et_watchdog(ulong data);
186 #ifdef ET_ALL_PASSIVE
187 static void et_watchdog_task(et_task_t *task);
188 #endif /* ET_ALL_PASSIVE */
189 static int et_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
190 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
191 static irqreturn_t et_isr(int irq, void *dev_id);
192 #else
193 static irqreturn_t et_isr(int irq, void *dev_id, struct pt_regs *ptregs);
194 #endif
195 #ifdef BCM_NAPI
196 static int et_poll(struct net_device *dev, int *budget);
197 #else /* BCM_NAPI */
198 static void et_dpc(ulong data);
199 #endif /* BCM_NAPI */
200 #ifdef ET_ALL_PASSIVE
201 static void et_dpc_work(struct et_task *task);
202 static void et_txq_work(struct et_task *task);
203 static int et_schedule_task(et_info_t *et, void (*fn)(struct et_task *task), void *context);
204 #endif /* ET_ALL_PASSIVE */
205 static void et_sendup(et_info_t *et, struct sk_buff *skb);
206 #ifdef BCMDBG
207 static void et_dumpet(et_info_t *et, struct bcmstrbuf *b);
208 #endif /* BCMDBG */
210 /* recognized PCI IDs */
211 static struct pci_device_id et_id_table[] __devinitdata = {
212 { vendor: PCI_ANY_ID,
213 device: PCI_ANY_ID,
214 subvendor: PCI_ANY_ID,
215 subdevice: PCI_ANY_ID,
216 class: PCI_CLASS_NETWORK_OTHER << 8,
217 class_mask: 0xffff00,
218 driver_data: 0,
220 { vendor: PCI_ANY_ID,
221 device: PCI_ANY_ID,
222 subvendor: PCI_ANY_ID,
223 subdevice: PCI_ANY_ID,
224 class: PCI_CLASS_NETWORK_ETHERNET << 8,
225 class_mask: 0xffff00,
226 driver_data: 0,
228 { 0, }
230 MODULE_DEVICE_TABLE(pci, et_id_table);
232 #if defined(BCMDBG)
233 static uint32 msglevel = 0xdeadbeef;
234 module_param(msglevel, uint, 0644);
235 #endif /* defined(BCMDBG) */
237 #ifdef ET_ALL_PASSIVE
238 /* passive mode: 1: enable, 0: disable */
239 static int passivemode = 0;
240 module_param(passivemode, int, 0);
241 #endif /* ET_ALL_PASSIVE */
243 #ifdef HNDCTF
244 static void
245 et_ctf_detach(ctf_t *ci, void *arg)
247 et_info_t *et = (et_info_t *)arg;
249 et->cih = NULL;
251 #ifdef CTFPOOL
252 /* free the buffers in fast pool */
253 osl_ctfpool_cleanup(et->osh);
254 #endif /* CTFPOOL */
256 return;
258 #endif /* HNDCTF */
260 #ifdef PLC
261 #define PLC_VIDTODEV(et, vid) \
262 (((vid) == (et)->plc.rxvid2) ? (et)->plc.rxdev2 : \
263 ((vid) == (et)->plc.rxvid1) ? (et)->plc.rxdev1 : \
264 ((vid) == (et)->plc.rxvid3) ? (et)->plc.rxdev3 : \
265 ((vid) == (et)->plc.txvid) ? (et)->plc.txdev : NULL)
266 static int
267 et_plc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
269 struct net_device *real_dev, *vl_dev;
270 et_info_t *et;
271 uint16 vid;
273 /* we are only interested in plc vifs */
274 vl_dev = (struct net_device *)ptr;
275 if ((vl_dev->priv_flags & IFF_802_1Q_VLAN) == 0)
276 return NOTIFY_DONE;
278 /* get the pointer to real device */
279 real_dev = VLAN_DEV_INFO(vl_dev)->real_dev;
280 vid = VLAN_DEV_INFO(vl_dev)->vlan_id;
282 et = ET_INFO(real_dev);
283 if (et == NULL) {
284 ET_ERROR(("%s: not an ethernet vlan\n", __FUNCTION__));
285 return NOTIFY_DONE;
288 ET_TRACE(("et%d: %s: event %ld for %s\n", et->etc->unit, __FUNCTION__,
289 event, vl_dev->name));
291 if (event == NETDEV_REGISTER) {
292 /* save the netdev pointers of plc vifs when corresponding
293 * interface register event is received.
295 if (vid == et->plc.txvid)
296 et->plc.txdev = vl_dev;
297 else if (vid == et->plc.rxvid1)
298 et->plc.rxdev1 = vl_dev;
299 else if (vid == et->plc.rxvid2)
300 et->plc.rxdev2 = vl_dev;
301 else if (vid == et->plc.rxvid3)
302 et->plc.rxdev3 = vl_dev;
303 else
305 } else if (event == NETDEV_UNREGISTER) {
306 /* clear the netdev pointers of plc vifs when corresponding
307 * interface unregister event is received.
309 if (vid == et->plc.txvid)
310 et->plc.txdev = NULL;
311 else if (vid == et->plc.rxvid1)
312 et->plc.rxdev1 = NULL;
313 else if (vid == et->plc.rxvid2)
314 et->plc.rxdev2 = NULL;
315 else if (vid == et->plc.rxvid3)
316 et->plc.rxdev3 = NULL;
317 else
321 return NOTIFY_DONE;
324 static struct notifier_block et_plc_netdev_notifier = {
325 .notifier_call = et_plc_netdev_event
328 static inline int32
329 et_plc_recv(et_info_t *et, struct sk_buff *skb)
331 struct net_device *plc_dev;
332 struct ethervlan_header *evh;
334 evh = (struct ethervlan_header *)PKTDATA(et->osh, skb);
336 /* all incoming frames from plc are vlan tagged */
337 if (evh->vlan_type != HTON16(ETHER_TYPE_8021Q))
338 return -1;
340 ASSERT((NTOH16(evh->vlan_tag) & VLAN_VID_MASK) != 3);
342 plc_dev = PLC_VIDTODEV(et, NTOH16(evh->vlan_tag) & VLAN_VID_MASK);
343 if (plc_dev == NULL)
344 return -1;
346 /* call the master hook function to route frames to appropriate
347 * transmit interface.
349 if (plc_dev->master_hook != NULL) {
350 PKTSETPRIO(skb, (NTOH16(evh->vlan_tag) & VLAN_PRI_MASK) >> VLAN_PRI_SHIFT);
351 if (plc_dev->master_hook(skb, plc_dev, plc_dev->master_hook_arg) == 0) {
352 struct net_device_stats *stats;
353 stats = vlan_dev_get_stats(plc_dev);
354 stats->rx_packets++;
355 stats->rx_bytes += skb->len;
356 return 0;
358 skb->dev = plc_dev->master;
361 return -1;
363 #endif /* PLC */
365 static int __devinit
366 et_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
368 struct net_device *dev;
369 et_info_t *et;
370 osl_t *osh;
371 char name[128];
372 int i, unit = et_found, err;
373 #ifdef PLC
374 int8 *var;
375 #endif /* PLC */
377 ET_TRACE(("et%d: et_probe: bus %d slot %d func %d irq %d\n", unit,
378 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), pdev->irq));
380 if (!etc_chipmatch(pdev->vendor, pdev->device))
381 return -ENODEV;
383 osh = osl_attach(pdev, PCI_BUS, FALSE);
384 ASSERT(osh);
386 pci_set_master(pdev);
387 if ((err = pci_enable_device(pdev)) != 0) {
388 ET_ERROR(("et%d: et_probe: pci_enable_device() failed\n", unit));
389 osl_detach(osh);
390 return err;
393 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
394 if ((dev = alloc_etherdev(0)) == NULL) {
395 ET_ERROR(("et%d: et_probe: alloc_etherdev() failed\n", unit));
396 osl_detach(osh);
397 return -ENOMEM;
399 #else
400 if (!(dev = (struct net_device *) MALLOC(osh, sizeof(struct net_device)))) {
401 ET_ERROR(("et%d: et_probe: out of memory, malloced %d bytes\n", unit,
402 MALLOCED(osh)));
403 osl_detach(osh);
404 return -ENOMEM;
406 bzero(dev, sizeof(struct net_device));
408 if (!init_etherdev(dev, 0)) {
409 ET_ERROR(("et%d: et_probe: init_etherdev() failed\n", unit));
410 MFREE(osh, dev, sizeof(struct net_device));
411 osl_detach(osh);
412 return -ENOMEM;
414 #endif /* >= 2.6.0 */
416 /* allocate private info */
417 if ((et = (et_info_t *)MALLOC(osh, sizeof(et_info_t))) == NULL) {
418 ET_ERROR(("et%d: et_probe: out of memory, malloced %d bytes\n", unit,
419 MALLOCED(osh)));
420 MFREE(osh, dev, sizeof(et_info_t));
421 osl_detach(osh);
422 return -ENOMEM;
424 bzero(et, sizeof(et_info_t));
425 dev->priv = (void *)et;
426 et->dev = dev;
427 et->pdev = pdev;
428 et->osh = osh;
429 pci_set_drvdata(pdev, et);
431 /* map chip registers (47xx: and sprom) */
432 dev->base_addr = pci_resource_start(pdev, 0);
433 if ((et->regsva = ioremap_nocache(dev->base_addr, PCI_BAR0_WINSZ)) == NULL) {
434 ET_ERROR(("et%d: ioremap() failed\n", unit));
435 goto fail;
438 init_MUTEX(&et->sem);
439 spin_lock_init(&et->lock);
440 spin_lock_init(&et->txq_lock);
442 for (i = 0; i < NUMTXQ; i++)
443 skb_queue_head_init(&et->txq[i]);
445 /* common load-time initialization */
446 et->etc = etc_attach((void *)et, pdev->vendor, pdev->device, unit, osh, et->regsva);
447 if (et->etc == NULL) {
448 ET_ERROR(("et%d: etc_attach() failed\n", unit));
449 goto fail;
452 #ifdef HNDCTF
453 et->cih = ctf_attach(osh, dev->name, &et_msg_level, et_ctf_detach, et);
455 if ((ctf_dev_register(et->cih, dev, FALSE) != BCME_OK) ||
456 (ctf_enable(et->cih, dev, TRUE) != BCME_OK)) {
457 ET_ERROR(("et%d: ctf_dev_register() failed\n", unit));
458 goto fail;
460 #endif /* HNDCTF */
462 #ifdef CTFPOOL
463 /* create ctf packet pool with specified number of buffers */
464 if (CTF_ENAB(et->cih) && (num_physpages >= 8192) &&
465 (osl_ctfpool_init(osh, CTFPOOLSZ, RXBUFSZ+BCMEXTRAHDROOM) < 0)) {
466 ET_ERROR(("et%d: chipattach: ctfpool alloc/init failed\n", unit));
467 goto fail;
469 #endif /* CTFPOOL */
471 bcopy(&et->etc->cur_etheraddr, dev->dev_addr, ETHER_ADDR_LEN);
473 /* init 1 second watchdog timer */
474 init_timer(&et->timer);
475 et->timer.data = (ulong)dev;
476 et->timer.function = et_watchdog;
478 #ifndef BCM_NAPI
479 /* setup the bottom half handler */
480 tasklet_init(&et->tasklet, et_dpc, (ulong)et);
481 #endif /* BCM_NAPI */
483 #ifdef ET_ALL_PASSIVE
484 if (ET_ALL_PASSIVE_ENAB(et)) {
485 MY_INIT_WORK(&et->dpc_task.work, (work_func_t)et_dpc_work);
486 et->dpc_task.context = et;
487 MY_INIT_WORK(&et->txq_task.work, (work_func_t)et_txq_work);
488 et->txq_task.context = et;
490 et->all_dispatch_mode = (passivemode == 0) ? TRUE : FALSE;
491 #endif /* ET_ALL_PASSIVE */
493 /* register our interrupt handler */
494 if (request_irq(pdev->irq, et_isr, IRQF_SHARED, dev->name, et)) {
495 ET_ERROR(("et%d: request_irq() failed\n", unit));
496 goto fail;
498 dev->irq = pdev->irq;
500 /* add us to the global linked list */
501 et->next = et_list;
502 et_list = et;
504 /* lastly, enable our entry points */
505 dev->open = et_open;
506 dev->stop = et_close;
507 dev->hard_start_xmit = et_start;
508 dev->get_stats = et_get_stats;
509 dev->set_mac_address = et_set_mac_address;
510 dev->set_multicast_list = et_set_multicast_list;
511 dev->do_ioctl = et_ioctl;
512 #ifdef BCM_NAPI
513 dev->poll = et_poll;
514 dev->weight = (ET_GMAC(et->etc) ? 64 : 32);
515 #endif /* BCM_NAPI */
517 if (register_netdev(dev)) {
518 ET_ERROR(("et%d: register_netdev() failed\n", unit));
519 goto fail;
522 et_found++;
524 /* print hello string */
525 (*et->etc->chops->longname)(et->etc->ch, name, sizeof(name));
526 printf("%s: %s %s\n", dev->name, name, EPI_VERSION_STR);
528 #ifdef PLC
529 /* read plc_vifs to initialize the vids to use for receiving
530 * and forwarding the frames.
532 var = getvar(NULL, "plc_vifs");
534 if (var == NULL) {
535 ET_ERROR(("et%d: %s: PLC vifs not configured\n", unit, __FUNCTION__));
536 return (0);
539 et->plc.hw = TRUE;
541 /* initialize the vids to use for plc rx and tx */
542 sscanf(var, "vlan%d vlan%d vlan%d vlan%d",
543 &et->plc.txvid, &et->plc.rxvid1, &et->plc.rxvid2, &et->plc.rxvid3);
545 ET_ERROR(("et%d: %s: PLC vifs %s\n", unit, __FUNCTION__, var));
547 /* register a callback to be called on plc dev create event */
548 register_netdevice_notifier(&et_plc_netdev_notifier);
549 #endif /* PLC */
551 return (0);
553 fail:
554 et_free(et);
555 return (-ENODEV);
558 static int
559 et_suspend(struct pci_dev *pdev, DRV_SUSPEND_STATE_TYPE state)
561 et_info_t *et;
563 if ((et = (et_info_t *) pci_get_drvdata(pdev))) {
564 netif_device_detach(et->dev);
565 ET_LOCK(et);
566 et_down(et, 1);
567 ET_UNLOCK(et);
570 return 0;
573 static int
574 et_resume(struct pci_dev *pdev)
576 et_info_t *et;
578 if ((et = (et_info_t *) pci_get_drvdata(pdev))) {
579 ET_LOCK(et);
580 et_up(et);
581 ET_UNLOCK(et);
582 netif_device_attach(et->dev);
585 return 0;
588 /* Compatibility routines */
589 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6)
/* pre-2.4.6 kernels expect void-returning suspend/resume; wrap ours */
static void
_et_suspend(struct pci_dev *pdev)
{
	(void)et_suspend(pdev, 0);
}

static void
_et_resume(struct pci_dev *pdev)
{
	(void)et_resume(pdev);
}
601 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6) */
603 static void __devexit
604 et_remove(struct pci_dev *pdev)
606 et_info_t *et;
608 if (!etc_chipmatch(pdev->vendor, pdev->device))
609 return;
611 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12)
612 et_suspend(pdev, 0);
613 #else
614 et_suspend(pdev, PMSG_SUSPEND);
615 #endif
617 if ((et = (et_info_t *) pci_get_drvdata(pdev))) {
618 et_free(et);
619 pci_set_drvdata(pdev, NULL);
623 static struct pci_driver et_pci_driver = {
624 name: "et",
625 probe: et_probe,
626 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6)
627 suspend: _et_suspend,
628 resume: _et_resume,
629 #else
630 suspend: et_suspend,
631 resume: et_resume,
632 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6) */
633 remove: __devexit_p(et_remove),
634 id_table: et_id_table,
637 static int __init
638 et_module_init(void)
640 #if defined(BCMDBG)
641 if (msglevel != 0xdeadbeef)
642 et_msg_level = msglevel;
643 else {
644 char *var = getvar(NULL, "et_msglevel");
645 if (var)
646 et_msg_level = bcm_strtoul(var, NULL, 0);
649 printf("%s: msglevel set to 0x%x\n", __FUNCTION__, et_msg_level);
650 #endif /* defined(BCMDBG) */
652 #ifdef ET_ALL_PASSIVE
654 char *var = getvar(NULL, "et_dispatch_mode");
655 if (var)
656 passivemode = bcm_strtoul(var, NULL, 0);
657 printf("%s: passivemode set to 0x%x\n", __FUNCTION__, passivemode);
659 #endif /* ET_ALL_PASSIVE */
661 return pci_module_init(&et_pci_driver);
664 static void __exit
665 et_module_exit(void)
667 pci_unregister_driver(&et_pci_driver);
670 module_init(et_module_init);
671 module_exit(et_module_exit);
673 static void
674 et_free(et_info_t *et)
676 et_info_t **prev;
677 osl_t *osh;
679 if (et == NULL)
680 return;
682 ET_TRACE(("et: et_free\n"));
684 if (et->dev && et->dev->irq)
685 free_irq(et->dev->irq, et);
687 #ifdef HNDCTF
688 if (et->cih)
689 ctf_dev_unregister(et->cih, et->dev);
690 #endif /* HNDCTF */
692 if (et->dev) {
693 unregister_netdev(et->dev);
694 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
695 free_netdev(et->dev);
696 #else
697 MFREE(et->osh, et->dev, sizeof(struct net_device));
698 #endif
699 et->dev = NULL;
702 #ifdef CTFPOOL
703 /* free the buffers in fast pool */
704 osl_ctfpool_cleanup(et->osh);
705 #endif /* CTFPOOL */
707 #ifdef HNDCTF
708 /* free ctf resources */
709 if (et->cih)
710 ctf_detach(et->cih);
711 #endif /* HNDCTF */
713 /* free common resources */
714 if (et->etc) {
715 etc_detach(et->etc);
716 et->etc = NULL;
720 * unregister_netdev() calls get_stats() which may read chip registers
721 * so we cannot unmap the chip registers until after calling unregister_netdev() .
723 if (et->regsva) {
724 iounmap((void *)et->regsva);
725 et->regsva = NULL;
728 /* remove us from the global linked list */
729 for (prev = &et_list; *prev; prev = &(*prev)->next)
730 if (*prev == et) {
731 *prev = et->next;
732 break;
735 osh = et->osh;
736 MFREE(et->osh, et, sizeof(et_info_t));
738 if (MALLOCED(osh))
739 printf("Memory leak of bytes %d\n", MALLOCED(osh));
740 ASSERT(MALLOCED(osh) == 0);
742 osl_detach(osh);
745 static int
746 et_open(struct net_device *dev)
748 et_info_t *et;
750 et = ET_INFO(dev);
752 ET_TRACE(("et%d: et_open\n", et->etc->unit));
754 et->etc->promisc = (dev->flags & IFF_PROMISC)? TRUE: FALSE;
755 et->etc->allmulti = (dev->flags & IFF_ALLMULTI)? TRUE: et->etc->promisc;
757 ET_LOCK(et);
758 et_up(et);
759 ET_UNLOCK(et);
761 OLD_MOD_INC_USE_COUNT;
763 return (0);
766 static int
767 et_close(struct net_device *dev)
769 et_info_t *et;
771 et = ET_INFO(dev);
773 ET_TRACE(("et%d: et_close\n", et->etc->unit));
775 et->etc->promisc = FALSE;
776 et->etc->allmulti = FALSE;
778 ET_LOCK(et);
779 et_down(et, 1);
780 ET_UNLOCK(et);
782 OLD_MOD_DEC_USE_COUNT;
784 return (0);
787 #ifdef ET_ALL_PASSIVE
788 /* Schedule a completion handler to run at safe time */
789 static int BCMFASTPATH
790 et_schedule_task(et_info_t *et, void (*fn)(struct et_task *task), void *context)
792 et_task_t *task;
794 ET_TRACE(("et%d: et_schedule_task\n", et->etc->unit));
796 if (!(task = MALLOC(et->osh, sizeof(et_task_t)))) {
797 ET_ERROR(("et%d: et_schedule_task: out of memory, malloced %d bytes\n",
798 et->etc->unit, MALLOCED(et->osh)));
799 return -ENOMEM;
802 MY_INIT_WORK(&task->work, (work_func_t)fn);
803 task->context = context;
805 if (!schedule_work(&task->work)) {
806 ET_ERROR(("et%d: schedule_work() failed\n", et->etc->unit));
807 MFREE(et->osh, task, sizeof(et_task_t));
808 return -ENOMEM;
811 return 0;
814 static void BCMFASTPATH
815 et_txq_work(struct et_task *task)
817 et_info_t *et = (et_info_t *)task->context;
819 ET_LOCK(et);
820 et_sendnext(et);
821 ET_UNLOCK(et);
823 return;
825 #endif /* ET_ALL_PASSIVE */
828 * Yeah, queueing the packets on a tx queue instead of throwing them
829 * directly into the descriptor ring in the case of dma is kinda lame,
830 * but this results in a unified transmit path for both dma and pio
831 * and localizes/simplifies the netif_*_queue semantics, too.
833 static int BCMFASTPATH
834 et_start(struct sk_buff *skb, struct net_device *dev)
836 et_info_t *et;
837 uint32 q = 0;
839 et = ET_INFO(dev);
841 if (ET_GMAC(et->etc) && (et->etc->qos))
842 q = etc_up2tc(PKTPRIO(skb));
844 ET_TRACE(("et%d: et_start: len %d\n", et->etc->unit, skb->len));
845 ET_LOG("et%d: et_start: len %d", et->etc->unit, skb->len);
848 /* put it on the tx queue and call sendnext */
849 ET_TXQ_LOCK(et);
850 __skb_queue_tail(&et->txq[q], skb);
851 et->etc->txq_state |= (1 << q);
852 ET_TXQ_UNLOCK(et);
854 if (!ET_ALL_PASSIVE_ENAB(et)) {
855 ET_LOCK(et);
856 et_sendnext(et);
857 ET_UNLOCK(et);
859 #ifdef ET_ALL_PASSIVE
860 else
861 schedule_work(&et->txq_task.work);
862 #endif /* ET_ALL_PASSIVE */
864 ET_LOG("et%d: et_start ret\n", et->etc->unit, 0);
866 return (0);
869 static void BCMFASTPATH
870 et_sendnext(et_info_t *et)
872 etc_info_t *etc;
873 struct sk_buff *skb;
874 void *p;
875 uint32 priq = TX_Q0;
877 etc = et->etc;
879 ET_TRACE(("et%d: et_sendnext\n", etc->unit));
880 ET_LOG("et%d: et_sendnext", etc->unit, 0);
882 /* dequeue packets from highest priority queue and send */
883 while (1) {
884 ET_TXQ_LOCK(et);
886 if (etc->txq_state == 0)
887 break;
889 priq = etc_priq(etc->txq_state);
891 ET_TRACE(("et%d: txq_state %x priq %d txavail %d\n",
892 etc->unit, etc->txq_state, priq,
893 *(uint *)etc->txavail[priq]));
895 if (skb_peek(&et->txq[priq]) == NULL) {
896 etc->txq_state &= ~(1 << priq);
897 ET_TXQ_UNLOCK(et);
898 continue;
901 #ifdef DMA
902 /* current highest priority dma queue is full */
903 if (*(uint *)(etc->txavail[priq]) == 0)
904 #else /* DMA */
905 if (etc->pioactive != NULL)
906 #endif /* DMA */
907 break;
909 skb = __skb_dequeue(&et->txq[priq]);
911 ET_TXQ_UNLOCK(et);
912 ET_PRHDR("tx", (struct ether_header *)skb->data, skb->len, etc->unit);
913 ET_PRPKT("txpkt", skb->data, skb->len, etc->unit);
915 /* Convert the packet. */
916 if ((p = PKTFRMNATIVE(et->osh, skb)) == NULL) {
917 PKTFREE(etc->osh, skb, TRUE);
918 return;
921 (*etc->chops->tx)(etc->ch, p);
923 etc->txframe++;
924 etc->txbyte += skb->len;
927 /* no flow control when qos is enabled */
928 if (!et->etc->qos) {
929 /* stop the queue whenever txq fills */
930 if ((skb_queue_len(&et->txq[TX_Q0]) > DATAHIWAT) && !netif_queue_stopped(et->dev))
931 netif_stop_queue(et->dev);
932 else if (netif_queue_stopped(et->dev) &&
933 (skb_queue_len(&et->txq[TX_Q0]) < (DATAHIWAT/2)))
934 netif_wake_queue(et->dev);
935 } else {
936 /* drop the frame if corresponding prec txq len exceeds hiwat
937 * when qos is enabled.
939 if ((priq != TC_NONE) && (skb_queue_len(&et->txq[priq]) > DATAHIWAT)) {
940 skb = __skb_dequeue_tail(&et->txq[priq]);
941 PKTFREE(et->osh, skb, TRUE);
942 etc->txnobuf++;
946 ET_TXQ_UNLOCK(et);
949 void
950 et_init(et_info_t *et, uint options)
952 ET_TRACE(("et%d: et_init\n", et->etc->unit));
953 ET_LOG("et%d: et_init", et->etc->unit, 0);
955 et_reset(et);
957 etc_init(et->etc, options);
961 void
962 et_reset(et_info_t *et)
964 ET_TRACE(("et%d: et_reset\n", et->etc->unit));
966 etc_reset(et->etc);
968 /* zap any pending dpc interrupt bits */
969 et->events = 0;
971 /* dpc will not be rescheduled */
972 et->resched = 0;
975 void
976 et_up(et_info_t *et)
978 etc_info_t *etc;
980 etc = et->etc;
982 if (etc->up)
983 return;
985 ET_TRACE(("et%d: et_up\n", etc->unit));
987 etc_up(etc);
989 /* schedule one second watchdog timer */
990 et->timer.expires = jiffies + HZ;
991 add_timer(&et->timer);
993 netif_start_queue(et->dev);
996 void
997 et_down(et_info_t *et, int reset)
999 etc_info_t *etc;
1000 struct sk_buff *skb;
1001 int32 i;
1003 etc = et->etc;
1005 ET_TRACE(("et%d: et_down\n", etc->unit));
1007 netif_down(et->dev);
1008 netif_stop_queue(et->dev);
1010 /* stop watchdog timer */
1011 del_timer(&et->timer);
1013 etc_down(etc, reset);
1015 /* flush the txq(s) */
1016 for (i = 0; i < NUMTXQ; i++)
1017 while ((skb = skb_dequeue(&et->txq[i])))
1018 PKTFREE(etc->osh, skb, TRUE);
1020 #ifndef BCM_NAPI
1021 /* kill dpc */
1022 ET_UNLOCK(et);
1023 tasklet_kill(&et->tasklet);
1024 ET_LOCK(et);
1025 #endif /* BCM_NAPI */
1029 * These are interrupt on/off entry points. Disable interrupts
1030 * during interrupt state transition.
1032 void
1033 et_intrson(et_info_t *et)
1035 unsigned long flags;
1036 INT_LOCK(flags);
1037 (*et->etc->chops->intrson)(et->etc->ch);
1038 INT_UNLOCK(flags);
1041 static void
1042 _et_watchdog(struct net_device *dev)
1044 et_info_t *et;
1046 et = ET_INFO(dev);
1048 ET_LOCK(et);
1050 etc_watchdog(et->etc);
1052 /* reschedule one second watchdog timer */
1053 et->timer.expires = jiffies + HZ;
1054 add_timer(&et->timer);
1056 #ifdef CTFPOOL
1057 /* allocate and add a new skb to the pkt pool */
1058 if (CTF_ENAB(et->cih))
1059 osl_ctfpool_replenish(et->osh, CTFPOOL_REFILL_THRESH);
1060 #endif /* CTFPOOL */
1061 ET_UNLOCK(et);
1064 #ifdef ET_ALL_PASSIVE
1065 static void
1066 et_watchdog_task(et_task_t *task)
1068 et_info_t *et = ET_INFO((struct net_device *)task->context);
1070 _et_watchdog((struct net_device *)task->context);
1071 MFREE(et->osh, task, sizeof(et_task_t));
1073 #endif /* ET_ALL_PASSIVE */
1075 static void
1076 et_watchdog(ulong data)
1078 struct net_device *dev = (struct net_device *)data;
1079 #ifdef ET_ALL_PASSIVE
1080 et_info_t *et = ET_INFO(dev);
1081 #endif /* ET_ALL_PASSIVE */
1083 if (!ET_ALL_PASSIVE_ENAB(et))
1084 _et_watchdog(dev);
1085 #ifdef ET_ALL_PASSIVE
1086 else
1087 et_schedule_task(et, et_watchdog_task, dev);
1088 #endif /* ET_ALL_PASSIVE */
1092 #ifdef SIOCETHTOOL
1093 static int
1094 et_ethtool(et_info_t *et, struct ethtool_cmd *ecmd)
1096 int ret = 0;
1097 int speed;
1098 struct ethtool_drvinfo *info;
1100 ET_LOCK(et);
1102 switch (ecmd->cmd) {
1103 case ETHTOOL_GSET:
1104 ecmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
1105 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1106 SUPPORTED_Autoneg);
1107 ecmd->advertising = ADVERTISED_TP;
1108 ecmd->advertising |= (et->etc->advertise & ADV_10HALF) ?
1109 ADVERTISED_10baseT_Half : 0;
1110 ecmd->advertising |= (et->etc->advertise & ADV_10FULL) ?
1111 ADVERTISED_10baseT_Full : 0;
1112 ecmd->advertising |= (et->etc->advertise & ADV_100HALF) ?
1113 ADVERTISED_100baseT_Half : 0;
1114 ecmd->advertising |= (et->etc->advertise & ADV_100FULL) ?
1115 ADVERTISED_100baseT_Full : 0;
1116 ecmd->advertising |= (et->etc->advertise2 & ADV_1000FULL) ?
1117 ADVERTISED_1000baseT_Full : 0;
1118 ecmd->advertising |= (et->etc->advertise2 & ADV_1000HALF) ?
1119 ADVERTISED_1000baseT_Half : 0;
1120 ecmd->advertising |= (et->etc->forcespeed == ET_AUTO) ?
1121 ADVERTISED_Autoneg : 0;
1122 if (et->etc->linkstate) {
1123 ecmd->speed = (et->etc->speed == 1000) ? SPEED_1000 :
1124 ((et->etc->speed == 100) ? SPEED_100 : SPEED_10);
1125 ecmd->duplex = (et->etc->duplex == 1) ? DUPLEX_FULL : DUPLEX_HALF;
1126 } else {
1127 ecmd->speed = 0;
1128 ecmd->duplex = 0;
1130 ecmd->port = PORT_TP;
1131 ecmd->phy_address = 0;
1132 ecmd->transceiver = XCVR_INTERNAL;
1133 ecmd->autoneg = (et->etc->forcespeed == ET_AUTO) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1134 ecmd->maxtxpkt = 0;
1135 ecmd->maxrxpkt = 0;
1136 break;
1137 case ETHTOOL_SSET:
1138 if (!capable(CAP_NET_ADMIN)) {
1139 ret = -EPERM;
1140 break;
1142 else if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
1143 speed = ET_10HALF;
1144 else if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
1145 speed = ET_10FULL;
1146 else if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
1147 speed = ET_100HALF;
1148 else if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
1149 speed = ET_100FULL;
1150 else if (ecmd->speed == SPEED_1000 && ecmd->duplex == DUPLEX_FULL)
1151 speed = ET_1000FULL;
1152 else if (ecmd->autoneg == AUTONEG_ENABLE)
1153 speed = ET_AUTO;
1154 else {
1155 ret = -EINVAL;
1156 break;
1158 ret = etc_ioctl(et->etc, ETCSPEED, &speed);
1159 break;
1160 case ETHTOOL_GDRVINFO:
1161 info = (struct ethtool_drvinfo *)ecmd;
1162 bzero(info, sizeof(struct ethtool_drvinfo));
1163 info->cmd = ETHTOOL_GDRVINFO;
1164 sprintf(info->driver, "et%d", et->etc->unit);
1165 strcpy(info->version, EPI_VERSION_STR);
1166 break;
1167 default:
1168 ret = -EOPNOTSUPP;
1169 break;
1172 ET_UNLOCK(et);
1174 return (ret);
1176 #endif /* SIOCETHTOOL */
1178 static int
1179 et_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1181 et_info_t *et;
1182 int error;
1183 char *buf;
1184 int size, ethtoolcmd;
1185 bool get = 0, set;
1186 et_var_t *var = NULL;
1187 void *buffer = NULL;
1189 et = ET_INFO(dev);
1191 ET_TRACE(("et%d: et_ioctl: cmd 0x%x\n", et->etc->unit, cmd));
1193 switch (cmd) {
1194 #ifdef SIOCETHTOOL
1195 case SIOCETHTOOL:
1196 if (copy_from_user(&ethtoolcmd, ifr->ifr_data, sizeof(uint32)))
1197 return (-EFAULT);
1199 if (ethtoolcmd == ETHTOOL_GDRVINFO)
1200 size = sizeof(struct ethtool_drvinfo);
1201 else
1202 size = sizeof(struct ethtool_cmd);
1203 get = TRUE; set = TRUE;
1204 break;
1205 #endif /* SIOCETHTOOL */
1206 case SIOCGETCDUMP:
1207 size = 4096;
1208 get = TRUE; set = FALSE;
1209 break;
1210 case SIOCGETCPHYRD:
1211 case SIOCGETCPHYRD2:
1212 case SIOCGETCROBORD:
1213 size = sizeof(int) * 2;
1214 get = TRUE; set = TRUE;
1215 break;
1216 case SIOCSETCPHYWR:
1217 case SIOCSETCPHYWR2:
1218 case SIOCSETCROBOWR:
1219 size = sizeof(int) * 2;
1220 get = FALSE; set = TRUE;
1221 break;
1222 case SIOCSETGETVAR:
1223 size = sizeof(et_var_t);
1224 set = TRUE;
1225 break;
1226 default:
1227 size = sizeof(int);
1228 get = FALSE; set = TRUE;
1229 break;
1232 if ((buf = MALLOC(et->osh, size)) == NULL) {
1233 ET_ERROR(("et: et_ioctl: out of memory, malloced %d bytes\n", MALLOCED(et->osh)));
1234 return (-ENOMEM);
1237 if (set && copy_from_user(buf, ifr->ifr_data, size)) {
1238 MFREE(et->osh, buf, size);
1239 return (-EFAULT);
1242 if (cmd == SIOCSETGETVAR) {
1243 var = (et_var_t *)buf;
1244 if (var->buf) {
1245 if (!var->set)
1246 get = TRUE;
1248 if (!(buffer = (void *) MALLOC(et->osh, var->len))) {
1249 ET_ERROR(("et: et_ioctl: out of memory, malloced %d bytes\n",
1250 MALLOCED(et->osh)));
1251 MFREE(et->osh, buf, size);
1252 return (-ENOMEM);
1255 if (copy_from_user(buffer, var->buf, var->len)) {
1256 MFREE(et->osh, buffer, var->len);
1257 MFREE(et->osh, buf, size);
1258 return (-EFAULT);
1263 switch (cmd) {
1264 #ifdef SIOCETHTOOL
1265 case SIOCETHTOOL:
1266 error = et_ethtool(et, (struct ethtool_cmd *)buf);
1267 break;
1268 #endif /* SIOCETHTOOL */
1269 case SIOCSETGETVAR:
1270 ET_LOCK(et);
1271 error = etc_iovar(et->etc, var->cmd, var->set, buffer);
1272 ET_UNLOCK(et);
1273 if (!error && get)
1274 error = copy_to_user(var->buf, buffer, var->len);
1276 if (buffer)
1277 MFREE(et->osh, buffer, var->len);
1278 break;
1279 default:
1280 ET_LOCK(et);
1281 error = etc_ioctl(et->etc, cmd - SIOCSETCUP, buf) ? -EINVAL : 0;
1282 ET_UNLOCK(et);
1283 break;
1286 if (!error && get)
1287 error = copy_to_user(ifr->ifr_data, buf, size);
1289 MFREE(et->osh, buf, size);
1291 return (error);
1294 static struct net_device_stats *
1295 et_get_stats(struct net_device *dev)
1297 et_info_t *et;
1298 etc_info_t *etc;
1299 struct net_device_stats *stats;
1301 et = ET_INFO(dev);
1303 ET_TRACE(("et%d: et_get_stats\n", et->etc->unit));
1305 ET_LOCK(et);
1307 etc = et->etc;
1308 stats = &et->stats;
1309 bzero(stats, sizeof(struct net_device_stats));
1311 /* refresh stats */
1312 if (et->etc->up)
1313 (*etc->chops->statsupd)(etc->ch);
1315 /* SWAG */
1316 stats->rx_packets = etc->rxframe;
1317 stats->tx_packets = etc->txframe;
1318 stats->rx_bytes = etc->rxbyte;
1319 stats->tx_bytes = etc->txbyte;
1320 stats->rx_errors = etc->rxerror;
1321 stats->tx_errors = etc->txerror;
1323 if (ET_GMAC(etc)) {
1324 gmacmib_t *mib;
1326 mib = etc->mib;
1327 stats->collisions = mib->tx_total_cols;
1328 stats->rx_length_errors = (mib->rx_oversize_pkts + mib->rx_undersize);
1329 stats->rx_crc_errors = mib->rx_crc_errs;
1330 stats->rx_frame_errors = mib->rx_align_errs;
1331 stats->rx_missed_errors = mib->rx_missed_pkts;
1332 } else {
1333 bcmenetmib_t *mib;
1335 mib = etc->mib;
1336 stats->collisions = mib->tx_total_cols;
1337 stats->rx_length_errors = (mib->rx_oversize_pkts + mib->rx_undersize);
1338 stats->rx_crc_errors = mib->rx_crc_errs;
1339 stats->rx_frame_errors = mib->rx_align_errs;
1340 stats->rx_missed_errors = mib->rx_missed_pkts;
1344 stats->rx_fifo_errors = etc->rxoflo;
1345 stats->rx_over_errors = etc->rxoflo;
1346 stats->tx_fifo_errors = etc->txuflo;
1348 ET_UNLOCK(et);
1350 return (stats);
1353 static int
1354 et_set_mac_address(struct net_device *dev, void *addr)
1356 et_info_t *et;
1357 struct sockaddr *sa = (struct sockaddr *) addr;
1359 et = ET_INFO(dev);
1360 ET_TRACE(("et%d: et_set_mac_address\n", et->etc->unit));
1362 if (et->etc->up)
1363 return -EBUSY;
1365 bcopy(sa->sa_data, dev->dev_addr, ETHER_ADDR_LEN);
1366 bcopy(dev->dev_addr, &et->etc->cur_etheraddr, ETHER_ADDR_LEN);
1368 return 0;
1371 static void
1372 et_set_multicast_list(struct net_device *dev)
1374 et_info_t *et;
1375 etc_info_t *etc;
1376 struct dev_mc_list *mclist;
1377 int i;
1379 et = ET_INFO(dev);
1380 etc = et->etc;
1382 ET_TRACE(("et%d: et_set_multicast_list\n", etc->unit));
1384 ET_LOCK(et);
1386 if (etc->up) {
1387 etc->promisc = (dev->flags & IFF_PROMISC)? TRUE: FALSE;
1388 etc->allmulti = (dev->flags & IFF_ALLMULTI)? TRUE: etc->promisc;
1390 /* copy the list of multicasts into our private table */
1391 for (i = 0, mclist = dev->mc_list; mclist && (i < dev->mc_count);
1392 i++, mclist = mclist->next) {
1393 if (i >= MAXMULTILIST) {
1394 etc->allmulti = TRUE;
1395 i = 0;
1396 break;
1398 etc->multicast[i] = *((struct ether_addr *)mclist->dmi_addr);
1400 etc->nmulticast = i;
1402 et_init(et, ET_INIT_DEF_OPTIONS);
1405 ET_UNLOCK(et);
1408 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
1409 static irqreturn_t BCMFASTPATH
1410 et_isr(int irq, void *dev_id)
1411 #else
1412 static irqreturn_t BCMFASTPATH
1413 et_isr(int irq, void *dev_id, struct pt_regs *ptregs)
1414 #endif
1416 et_info_t *et;
1417 struct chops *chops;
1418 void *ch;
1419 uint events = 0;
1421 et = (et_info_t *)dev_id;
1422 chops = et->etc->chops;
1423 ch = et->etc->ch;
1425 /* guard against shared interrupts */
1426 if (!et->etc->up)
1427 goto done;
1429 /* get interrupt condition bits */
1430 events = (*chops->getintrevents)(ch, TRUE);
1433 /* not for us */
1434 if (!(events & INTR_NEW))
1435 goto done;
1437 ET_TRACE(("et%d: et_isr: events 0x%x\n", et->etc->unit, events));
1438 ET_LOG("et%d: et_isr: events 0x%x", et->etc->unit, events);
1440 /* disable interrupts */
1441 (*chops->intrsoff)(ch);
1443 /* save intstatus bits */
1444 ASSERT(et->events == 0);
1445 et->events = events;
1447 ASSERT(et->resched == FALSE);
1448 #ifdef BCM_NAPI
1449 /* allow the device to be added to the cpu polling list if we are up */
1450 if (netif_rx_schedule_prep(et->dev)) {
1451 /* tell the network core that we have packets to send up */
1452 __netif_rx_schedule(et->dev);
1453 } else {
1454 ET_ERROR(("et%d: et_isr: intr while in poll!\n",
1455 et->etc->unit));
1456 (*chops->intrson)(ch);
1458 #else /* BCM_NAPI */
1459 /* schedule dpc */
1460 #ifdef ET_ALL_PASSIVE
1461 if (ET_ALL_PASSIVE_ENAB(et)) {
1462 schedule_work(&et->dpc_task.work);
1463 } else
1464 #endif /* ET_ALL_PASSIVE */
1465 tasklet_schedule(&et->tasklet);
1466 #endif /* BCM_NAPI */
1468 done:
1469 ET_LOG("et%d: et_isr ret", et->etc->unit, 0);
1471 return IRQ_RETVAL(events & INTR_NEW);
1474 static inline int
1475 et_rxevent(osl_t *osh, et_info_t *et, struct chops *chops, void *ch, int quota)
1477 uint processed = 0;
1478 void *p = NULL, *h = NULL, *t = NULL;
1479 struct sk_buff *skb;
1481 /* read the buffers first */
1482 while ((p = (*chops->rx)(ch))) {
1483 if (t == NULL)
1484 h = t = p;
1485 else {
1486 PKTSETLINK(t, p);
1487 t = p;
1490 /* we reached quota already */
1491 if (++processed >= quota) {
1492 /* reschedule et_dpc()/et_poll() */
1493 et->resched = TRUE;
1494 break;
1498 /* prefetch the headers */
1499 if (h != NULL)
1500 ETPREFHDRS(PKTDATA(osh, h), PREFSZ);
1502 /* post more rx bufs */
1503 (*chops->rxfill)(ch);
1505 while ((p = h) != NULL) {
1506 h = PKTLINK(h);
1507 PKTSETLINK(p, NULL);
1508 /* prefetch the headers */
1509 if (h != NULL)
1510 ETPREFHDRS(PKTDATA(osh, h), PREFSZ);
1511 skb = PKTTONATIVE(osh, p);
1512 et_sendup(et, skb);
1515 return (processed);
1518 #ifdef BCM_NAPI
1519 static int BCMFASTPATH
1520 et_poll(struct net_device *dev, int *budget)
1522 int quota = min(dev->quota, *budget);
1523 et_info_t *et = ET_INFO(dev);
1524 #else /* BCM_NAPI */
1525 static void BCMFASTPATH
1526 et_dpc(ulong data)
1528 int quota = RXBND;
1529 et_info_t *et = (et_info_t *)data;
1530 #endif /* BCM_NAPI */
1531 struct chops *chops;
1532 void *ch;
1533 osl_t *osh;
1534 uint nrx = 0;
1536 chops = et->etc->chops;
1537 ch = et->etc->ch;
1538 osh = et->etc->osh;
1540 ET_TRACE(("et%d: et_dpc: events 0x%x\n", et->etc->unit, et->events));
1541 ET_LOG("et%d: et_dpc: events 0x%x", et->etc->unit, et->events);
1543 #ifndef BCM_NAPI
1544 ET_LOCK(et);
1545 #endif /* BCM_NAPI */
1547 if (!et->etc->up)
1548 goto done;
1550 /* get interrupt condition bits again when dpc was rescheduled */
1551 if (et->resched) {
1552 et->events = (*chops->getintrevents)(ch, FALSE);
1553 et->resched = FALSE;
1556 if (et->events & INTR_RX)
1557 nrx = et_rxevent(osh, et, chops, ch, quota);
1559 if (et->events & INTR_TX) {
1560 (*chops->txreclaim)(ch, FALSE);
1561 (*chops->rxfill)(ch);
1564 /* handle error conditions, if reset required leave interrupts off! */
1565 if (et->events & INTR_ERROR) {
1566 if ((*chops->errors)(ch))
1567 et_init(et, ET_INIT_INTROFF);
1568 else
1569 if (nrx < quota)
1570 nrx += et_rxevent(osh, et, chops, ch, quota);
1573 /* run the tx queue */
1574 if (et->etc->txq_state != 0)
1575 et_sendnext(et);
1577 /* clear this before re-enabling interrupts */
1578 et->events = 0;
1580 /* something may bring the driver down */
1581 if (!et->etc->up) {
1582 et->resched = FALSE;
1583 goto done;
1586 #ifndef BCM_NAPI
1587 #ifdef ET_ALL_PASSIVE
1588 if (et->resched) {
1589 if (!ET_ALL_PASSIVE_ENAB(et))
1590 tasklet_schedule(&et->tasklet);
1591 else
1592 schedule_work(&et->dpc_task.work);
1594 else
1595 (*chops->intrson)(ch);
1596 #else /* ET_ALL_PASSIVE */
1597 /* there may be frames left, reschedule et_dpc() */
1598 if (et->resched)
1599 tasklet_schedule(&et->tasklet);
1600 /* re-enable interrupts */
1601 else
1602 (*chops->intrson)(ch);
1603 #endif /* ET_ALL_PASSIVE */
1604 #endif /* BCM_NAPI */
1606 done:
1607 ET_LOG("et%d: et_dpc ret", et->etc->unit, 0);
1609 #ifdef BCM_NAPI
1610 /* update number of frames processed */
1611 *budget -= nrx;
1612 dev->quota -= nrx;
1614 ET_TRACE(("et%d: et_poll: quota %d budget %d\n",
1615 et->etc->unit, dev->quota, *budget));
1617 /* we got packets but no quota */
1618 if (et->resched)
1619 /* indicate that we are not done, don't enable
1620 * interrupts yet. linux network core will call
1621 * us again.
1623 return (1);
1625 netif_rx_complete(dev);
1627 /* enable interrupts now */
1628 (*chops->intrson)(ch);
1630 /* indicate that we are done */
1631 return (0);
1632 #else /* BCM_NAPI */
1633 ET_UNLOCK(et);
1634 return;
1635 #endif /* BCM_NAPI */
#ifdef ET_ALL_PASSIVE
/* workqueue trampoline: run et_dpc() in passive (process) context */
static void BCMFASTPATH
et_dpc_work(struct et_task *task)
{
	et_dpc((unsigned long)(et_info_t *)task->context);
}
#endif /* ET_ALL_PASSIVE */
1648 static void
1649 et_error(et_info_t *et, struct sk_buff *skb, void *rxh)
1651 uchar eabuf[32];
1652 struct ether_header *eh;
1654 eh = (struct ether_header *)skb->data;
1655 bcm_ether_ntoa((struct ether_addr *)eh->ether_shost, eabuf);
1657 if (RXH_OVERSIZE(et->etc, rxh)) {
1658 ET_ERROR(("et%d: rx: over size packet from %s\n", et->etc->unit, eabuf));
1660 if (RXH_CRC(et->etc, rxh)) {
1661 ET_ERROR(("et%d: rx: crc error from %s\n", et->etc->unit, eabuf));
1663 if (RXH_OVF(et->etc, rxh)) {
1664 ET_ERROR(("et%d: rx: fifo overflow\n", et->etc->unit));
1666 if (RXH_NO(et->etc, rxh)) {
1667 ET_ERROR(("et%d: rx: crc error (odd nibbles) from %s\n",
1668 et->etc->unit, eabuf));
1670 if (RXH_RXER(et->etc, rxh)) {
1671 ET_ERROR(("et%d: rx: symbol error from %s\n", et->etc->unit, eabuf));
1675 #ifdef CONFIG_IP_NF_DNSMQ
1676 typedef int (*dnsmqHitHook)(struct sk_buff *skb);
1677 extern dnsmqHitHook dnsmq_hit_hook;
1678 #endif
1680 static inline int32
1681 et_ctf_forward(et_info_t *et, struct sk_buff *skb)
1683 #ifdef CONFIG_IP_NF_DNSMQ
1684 if(dnsmq_hit_hook&&dnsmq_hit_hook(skb))
1685 return (BCME_ERROR);
1686 #endif
1688 #ifdef HNDCTF
1689 /* use slow path if ctf is disabled */
1690 if (!CTF_ENAB(et->cih))
1691 return (BCME_ERROR);
1693 /* try cut thru first */
1694 if (ctf_forward(et->cih, skb, skb->dev) != BCME_ERROR)
1695 return (BCME_OK);
1697 /* clear skipct flag before sending up */
1698 PKTCLRSKIPCT(et->osh, skb);
1699 #endif /* HNDCTF */
1701 #ifdef CTFPOOL
1702 /* allocate and add a new skb to the pkt pool */
1703 if (PKTISFAST(et->osh, skb))
1704 osl_ctfpool_add(et->osh);
1706 /* clear fast buf flag before sending up */
1707 PKTCLRFAST(et->osh, skb);
1709 /* re-init the hijacked field */
1710 CTFPOOLPTR(et->osh, skb) = NULL;
1711 #endif /* CTFPOOL */
1713 return (BCME_ERROR);
1717 void BCMFASTPATH
1718 et_sendup(et_info_t *et, struct sk_buff *skb)
1720 etc_info_t *etc;
1721 void *rxh;
1722 uint16 flags;
1724 etc = et->etc;
1726 /* packet buffer starts with rxhdr */
1727 rxh = skb->data;
1729 /* strip off rxhdr */
1730 __skb_pull(skb, HWRXOFF);
1732 ET_TRACE(("et%d: et_sendup: %d bytes\n", et->etc->unit, skb->len));
1733 ET_LOG("et%d: et_sendup: len %d", et->etc->unit, skb->len);
1735 etc->rxframe++;
1736 etc->rxbyte += skb->len;
1738 /* eh should now be aligned 2-mod-4 */
1739 ASSERT(((ulong)skb->data & 3) == 2);
1741 /* strip off crc32 */
1742 __skb_trim(skb, skb->len - ETHER_CRC_LEN);
1744 ET_PRHDR("rx", (struct ether_header *)skb->data, skb->len, etc->unit);
1745 ET_PRPKT("rxpkt", skb->data, skb->len, etc->unit);
1747 /* get the error flags */
1748 flags = RXH_FLAGS(etc, rxh);
1750 /* check for reported frame errors */
1751 if (flags)
1752 goto err;
1754 skb->dev = et->dev;
1756 #ifdef PLC
1757 if (et->plc.hw && (et_plc_recv(et, skb) == 0))
1758 return;
1759 #endif /* PLC */
1761 #ifdef HNDCTF
1762 /* try cut thru' before sending up */
1763 if (et_ctf_forward(et, skb) != BCME_ERROR)
1764 return;
1765 #endif /* HNDCTF */
1767 /* extract priority from payload and store it out-of-band
1768 * in skb->priority
1770 if (et->etc->qos)
1771 pktsetprio(skb, TRUE);
1773 skb->protocol = eth_type_trans(skb, skb->dev);
1775 /* send it up */
1776 #ifdef BCM_NAPI
1777 netif_receive_skb(skb);
1778 #else /* BCM_NAPI */
1779 netif_rx(skb);
1780 #endif /* BCM_NAPI */
1782 ET_LOG("et%d: et_sendup ret", et->etc->unit, 0);
1784 return;
1786 err:
1787 et_error(et, skb, rxh);
1788 PKTFRMNATIVE(etc->osh, skb);
1789 PKTFREE(etc->osh, skb, FALSE);
1791 return;
1794 void
1795 et_dump(et_info_t *et, struct bcmstrbuf *b)
1797 bcm_bprintf(b, "et%d: %s %s version %s\n", et->etc->unit,
1798 __DATE__, __TIME__, EPI_VERSION_STR);
1800 #ifdef HNDCTF
1801 #if defined(BCMDBG) || defined(BCMDBG_DUMP)
1802 ctf_dump(et->cih, b);
1803 #endif /* BCMDBG || BCMDBG_DUMP */
1804 #endif /* HNDCTF */
1806 #ifdef BCMDBG
1807 et_dumpet(et, b);
1808 etc_dump(et->etc, b);
1809 #endif /* BCMDBG */
#ifdef BCMDBG
/* dump the linux-side per-device state into <b> */
static void
et_dumpet(et_info_t *et, struct bcmstrbuf *b)
{
	bcm_bprintf(b, "et %p dev %p name %s tbusy %d txq[0].qlen %d malloced %d\n",
	            et, et->dev, et->dev->name, (uint)netif_queue_stopped(et->dev),
	            et->txq[0].qlen, MALLOCED(et->osh));
}
#endif /* BCMDBG */
1822 void
1823 et_link_up(et_info_t *et)
1825 ET_ERROR(("et%d: link up (%d%s)\n",
1826 et->etc->unit, et->etc->speed, (et->etc->duplex? "FD" : "HD")));
1829 void
1830 et_link_down(et_info_t *et)
1832 ET_ERROR(("et%d: link down\n", et->etc->unit));
1836 * 47XX-specific shared mdc/mdio contortion:
1837 * Find the et associated with the same chip as <et>
1838 * and coreunit matching <coreunit>.
1840 void *
1841 et_phyfind(et_info_t *et, uint coreunit)
1843 et_info_t *tmp;
1844 uint bus, slot;
1846 bus = et->pdev->bus->number;
1847 slot = PCI_SLOT(et->pdev->devfn);
1849 /* walk the list et's */
1850 for (tmp = et_list; tmp; tmp = tmp->next) {
1851 if (et->etc == NULL)
1852 continue;
1853 if (tmp->pdev == NULL)
1854 continue;
1855 if (tmp->pdev->bus->number != bus)
1856 continue;
1857 if (tmp->etc->nicmode)
1858 if (PCI_SLOT(tmp->pdev->devfn) != slot)
1859 continue;
1860 if (tmp->etc->coreunit != coreunit)
1861 continue;
1862 break;
1864 return (tmp);
1867 /* shared phy read entry point */
1868 uint16
1869 et_phyrd(et_info_t *et, uint phyaddr, uint reg)
1871 uint16 val;
1873 ET_LOCK(et);
1874 val = et->etc->chops->phyrd(et->etc->ch, phyaddr, reg);
1875 ET_UNLOCK(et);
1877 return (val);
1880 /* shared phy write entry point */
1881 void
1882 et_phywr(et_info_t *et, uint phyaddr, uint reg, uint16 val)
1884 ET_LOCK(et);
1885 et->etc->chops->phywr(et->etc->ch, phyaddr, reg, val);
1886 ET_UNLOCK(et);