2 * Linux device driver for
3 * Broadcom BCM47XX 10/100/1000 Mbps Ethernet Controller
5 * Copyright (C) 2010, Broadcom Corporation
8 * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
9 * the contents of this file may not be disclosed to third parties, copied
10 * or duplicated in any form, in whole or in part, without the prior
11 * written permission of Broadcom Corporation.
13 * $Id: et_linux.c,v 1.131.8.9 2010-12-20 04:28:38 Exp $
16 #define __UNDEF_NO_VERSION__
20 #include <linux/module.h>
25 #include <linux/types.h>
26 #include <linux/errno.h>
27 #include <linux/pci.h>
28 #include <linux/init.h>
29 #include <linux/kernel.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/delay.h>
34 #include <linux/string.h>
35 #include <linux/sockios.h>
37 #include <linux/ethtool.h>
38 #endif /* SIOCETHTOOL */
40 #include <linux/if_vlan.h>
42 #include <asm/system.h>
45 #include <asm/pgtable.h>
46 #include <asm/uaccess.h>
49 #include <bcmendian.h>
51 #include <proto/ethernet.h>
52 #include <proto/vlan.h>
54 #include <bcmenetmib.h>
55 #include <bcmgmacmib.h>
56 #include <bcmenetrxh.h>
57 #include <bcmenetphy.h>
63 #include <bcmgmacrxh.h>
66 #include <ctf/hndctf.h>
71 #define ET_ALL_PASSIVE_ENAB(et) (!(et)->all_dispatch_mode)
72 #else /* ET_ALL_PASSIVE */
73 #define ET_ALL_PASSIVE_ENAB(et) 0
74 #endif /* ET_ALL_PASSIVE */
76 MODULE_LICENSE("Proprietary");
79 typedef struct et_plc
{
80 bool hw
; /* plc hardware present */
81 int32 txvid
; /* vlan used to send to plc */
82 int32 rxvid1
; /* frames rx'd on this will be sent to br */
83 int32 rxvid2
; /* frames rx'd on this will be sent to wds */
84 int32 rxvid3
; /* frames rx'd on this will be sent to plc */
85 struct net_device
*txdev
; /* plc device (vid 3) for tx */
86 struct net_device
*rxdev1
; /* plc device (vid 4) for rx */
87 struct net_device
*rxdev2
; /* plc device (vid 5) for tx & rx */
88 struct net_device
*rxdev3
; /* plc device (vid 6) for tx & rx */
92 /* In 2.6.20 kernels work functions get passed a pointer to the
93 * struct work, so things will continue to work as long as the work
94 * structure is the first component of the task structure.
96 typedef struct et_task
{
97 struct work_struct work
;
101 typedef struct et_info
{
102 etc_info_t
*etc
; /* pointer to common os-independent data */
103 struct net_device
*dev
; /* backpoint to device */
104 struct pci_dev
*pdev
; /* backpoint to pci_dev */
105 void *osh
; /* pointer to os handle */
107 ctf_t
*cih
; /* ctf instance handle */
109 struct semaphore sem
; /* use semaphore to allow sleep */
110 spinlock_t lock
; /* per-device perimeter lock */
111 spinlock_t txq_lock
; /* lock for txq protection */
112 struct sk_buff_head txq
[NUMTXQ
]; /* send queue */
113 void *regsva
; /* opaque chip registers virtual address */
114 struct timer_list timer
; /* one second watchdog timer */
115 struct net_device_stats stats
; /* stat counter reporting structure */
116 int events
; /* bit channel between isr and dpc */
117 struct et_info
*next
; /* pointer to next et_info_t in chain */
119 struct tasklet_struct tasklet
; /* dpc tasklet */
120 #endif /* BCM_NAPI */
121 #ifdef ET_ALL_PASSIVE
122 et_task_t dpc_task
; /* work queue for rx dpc */
123 et_task_t txq_task
; /* work queue for tx frames */
124 bool all_dispatch_mode
; /* dispatch mode: tasklets or passive */
125 #endif /* ET_ALL_PASSIVE */
126 bool resched
; /* dpc was rescheduled */
128 et_plc_t plc
; /* plc interface info */
132 static int et_found
= 0;
133 static et_info_t
*et_list
= NULL
;
136 #define DATAHIWAT 50 /* data msg txq hiwat mark */
/* Retrieve the driver-private et_info_t stored in the net_device's priv field */
#define ET_INFO(dev) (et_info_t *)((dev)->priv)
140 #define ET_LOCK(et) \
142 if (ET_ALL_PASSIVE_ENAB(et)) \
145 spin_lock_bh(&(et)->lock); \
148 #define ET_UNLOCK(et) \
150 if (ET_ALL_PASSIVE_ENAB(et)) \
153 spin_unlock_bh(&(et)->lock); \
/* Serialize access to the software tx queues (et->txq); bottom-half safe */
#define ET_TXQ_LOCK(et) spin_lock_bh(&(et)->txq_lock)
#define ET_TXQ_UNLOCK(et) spin_unlock_bh(&(et)->txq_lock)
/* Mask/restore local interrupts around interrupt on/off state transitions */
#define INT_LOCK(flags) local_irq_save(flags)
#define INT_UNLOCK(flags) local_irq_restore(flags)
162 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 4, 5)
163 #error Linux version must be newer than 2.4.5
164 #endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 4, 5) */
166 /* prototypes called by etc.c */
167 void et_init(et_info_t
*et
, uint options
);
168 void et_reset(et_info_t
*et
);
169 void et_link_up(et_info_t
*et
);
170 void et_link_down(et_info_t
*et
);
171 void et_up(et_info_t
*et
);
172 void et_down(et_info_t
*et
, int reset
);
173 void et_dump(et_info_t
*et
, struct bcmstrbuf
*b
);
175 /* local prototypes */
176 static void et_free(et_info_t
*et
);
177 static int et_open(struct net_device
*dev
);
178 static int et_close(struct net_device
*dev
);
179 static int et_start(struct sk_buff
*skb
, struct net_device
*dev
);
180 static void et_sendnext(et_info_t
*et
);
181 static struct net_device_stats
*et_get_stats(struct net_device
*dev
);
182 static int et_set_mac_address(struct net_device
*dev
, void *addr
);
183 static void et_set_multicast_list(struct net_device
*dev
);
184 static void _et_watchdog(struct net_device
*data
);
185 static void et_watchdog(ulong data
);
186 #ifdef ET_ALL_PASSIVE
187 static void et_watchdog_task(et_task_t
*task
);
188 #endif /* ET_ALL_PASSIVE */
189 static int et_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
);
190 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
191 static irqreturn_t
et_isr(int irq
, void *dev_id
);
193 static irqreturn_t
et_isr(int irq
, void *dev_id
, struct pt_regs
*ptregs
);
196 static int et_poll(struct net_device
*dev
, int *budget
);
198 static void et_dpc(ulong data
);
199 #endif /* BCM_NAPI */
200 #ifdef ET_ALL_PASSIVE
201 static void et_dpc_work(struct et_task
*task
);
202 static void et_txq_work(struct et_task
*task
);
203 static int et_schedule_task(et_info_t
*et
, void (*fn
)(struct et_task
*task
), void *context
);
204 #endif /* ET_ALL_PASSIVE */
205 static void et_sendup(et_info_t
*et
, struct sk_buff
*skb
);
207 static void et_dumpet(et_info_t
*et
, struct bcmstrbuf
*b
);
210 /* recognized PCI IDs */
211 static struct pci_device_id et_id_table
[] __devinitdata
= {
212 { vendor
: PCI_ANY_ID
,
214 subvendor
: PCI_ANY_ID
,
215 subdevice
: PCI_ANY_ID
,
216 class: PCI_CLASS_NETWORK_OTHER
<< 8,
217 class_mask
: 0xffff00,
220 { vendor
: PCI_ANY_ID
,
222 subvendor
: PCI_ANY_ID
,
223 subdevice
: PCI_ANY_ID
,
224 class: PCI_CLASS_NETWORK_ETHERNET
<< 8,
225 class_mask
: 0xffff00,
230 MODULE_DEVICE_TABLE(pci
, et_id_table
);
233 static uint32 msglevel
= 0xdeadbeef;
234 module_param(msglevel
, uint
, 0644);
235 #endif /* defined(BCMDBG) */
237 #ifdef ET_ALL_PASSIVE
238 /* passive mode: 1: enable, 0: disable */
239 static int passivemode
= 0;
240 module_param(passivemode
, int, 0);
241 #endif /* ET_ALL_PASSIVE */
245 et_ctf_detach(ctf_t
*ci
, void *arg
)
247 et_info_t
*et
= (et_info_t
*)arg
;
252 /* free the buffers in fast pool */
253 osl_ctfpool_cleanup(et
->osh
);
/* Map a VLAN id to the corresponding PLC virtual net_device (one of the
 * three rx vifs or the tx vif); evaluates to NULL when the vid matches
 * none of the configured PLC vids. Note rxvid2 is checked first.
 */
#define PLC_VIDTODEV(et, vid) \
	(((vid) == (et)->plc.rxvid2) ? (et)->plc.rxdev2 : \
	((vid) == (et)->plc.rxvid1) ? (et)->plc.rxdev1 : \
	((vid) == (et)->plc.rxvid3) ? (et)->plc.rxdev3 : \
	((vid) == (et)->plc.txvid) ? (et)->plc.txdev : NULL)
267 et_plc_netdev_event(struct notifier_block
*this, unsigned long event
, void *ptr
)
269 struct net_device
*real_dev
, *vl_dev
;
273 /* we are only interested in plc vifs */
274 vl_dev
= (struct net_device
*)ptr
;
275 if ((vl_dev
->priv_flags
& IFF_802_1Q_VLAN
) == 0)
278 /* get the pointer to real device */
279 real_dev
= VLAN_DEV_INFO(vl_dev
)->real_dev
;
280 vid
= VLAN_DEV_INFO(vl_dev
)->vlan_id
;
282 et
= ET_INFO(real_dev
);
284 ET_ERROR(("%s: not an ethernet vlan\n", __FUNCTION__
));
288 ET_TRACE(("et%d: %s: event %ld for %s\n", et
->etc
->unit
, __FUNCTION__
,
289 event
, vl_dev
->name
));
291 if (event
== NETDEV_REGISTER
) {
292 /* save the netdev pointers of plc vifs when corresponding
293 * interface register event is received.
295 if (vid
== et
->plc
.txvid
)
296 et
->plc
.txdev
= vl_dev
;
297 else if (vid
== et
->plc
.rxvid1
)
298 et
->plc
.rxdev1
= vl_dev
;
299 else if (vid
== et
->plc
.rxvid2
)
300 et
->plc
.rxdev2
= vl_dev
;
301 else if (vid
== et
->plc
.rxvid3
)
302 et
->plc
.rxdev3
= vl_dev
;
305 } else if (event
== NETDEV_UNREGISTER
) {
306 /* clear the netdev pointers of plc vifs when corresponding
307 * interface unregister event is received.
309 if (vid
== et
->plc
.txvid
)
310 et
->plc
.txdev
= NULL
;
311 else if (vid
== et
->plc
.rxvid1
)
312 et
->plc
.rxdev1
= NULL
;
313 else if (vid
== et
->plc
.rxvid2
)
314 et
->plc
.rxdev2
= NULL
;
315 else if (vid
== et
->plc
.rxvid3
)
316 et
->plc
.rxdev3
= NULL
;
324 static struct notifier_block et_plc_netdev_notifier
= {
325 .notifier_call
= et_plc_netdev_event
329 et_plc_recv(et_info_t
*et
, struct sk_buff
*skb
)
331 struct net_device
*plc_dev
;
332 struct ethervlan_header
*evh
;
334 evh
= (struct ethervlan_header
*)PKTDATA(et
->osh
, skb
);
336 /* all incoming frames from plc are vlan tagged */
337 if (evh
->vlan_type
!= HTON16(ETHER_TYPE_8021Q
))
340 ASSERT((NTOH16(evh
->vlan_tag
) & VLAN_VID_MASK
) != 3);
342 plc_dev
= PLC_VIDTODEV(et
, NTOH16(evh
->vlan_tag
) & VLAN_VID_MASK
);
346 /* call the master hook function to route frames to appropriate
347 * transmit interface.
349 if (plc_dev
->master_hook
!= NULL
) {
350 PKTSETPRIO(skb
, (NTOH16(evh
->vlan_tag
) & VLAN_PRI_MASK
) >> VLAN_PRI_SHIFT
);
351 if (plc_dev
->master_hook(skb
, plc_dev
, plc_dev
->master_hook_arg
) == 0) {
352 struct net_device_stats
*stats
;
353 stats
= vlan_dev_get_stats(plc_dev
);
355 stats
->rx_bytes
+= skb
->len
;
358 skb
->dev
= plc_dev
->master
;
366 et_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
368 struct net_device
*dev
;
372 int i
, unit
= et_found
, err
;
377 ET_TRACE(("et%d: et_probe: bus %d slot %d func %d irq %d\n", unit
,
378 pdev
->bus
->number
, PCI_SLOT(pdev
->devfn
), PCI_FUNC(pdev
->devfn
), pdev
->irq
));
380 if (!etc_chipmatch(pdev
->vendor
, pdev
->device
))
383 osh
= osl_attach(pdev
, PCI_BUS
, FALSE
);
386 pci_set_master(pdev
);
387 if ((err
= pci_enable_device(pdev
)) != 0) {
388 ET_ERROR(("et%d: et_probe: pci_enable_device() failed\n", unit
));
393 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
394 if ((dev
= alloc_etherdev(0)) == NULL
) {
395 ET_ERROR(("et%d: et_probe: alloc_etherdev() failed\n", unit
));
400 if (!(dev
= (struct net_device
*) MALLOC(osh
, sizeof(struct net_device
)))) {
401 ET_ERROR(("et%d: et_probe: out of memory, malloced %d bytes\n", unit
,
406 bzero(dev
, sizeof(struct net_device
));
408 if (!init_etherdev(dev
, 0)) {
409 ET_ERROR(("et%d: et_probe: init_etherdev() failed\n", unit
));
410 MFREE(osh
, dev
, sizeof(struct net_device
));
414 #endif /* >= 2.6.0 */
416 /* allocate private info */
417 if ((et
= (et_info_t
*)MALLOC(osh
, sizeof(et_info_t
))) == NULL
) {
418 ET_ERROR(("et%d: et_probe: out of memory, malloced %d bytes\n", unit
,
420 MFREE(osh
, dev
, sizeof(et_info_t
));
424 bzero(et
, sizeof(et_info_t
));
425 dev
->priv
= (void *)et
;
429 pci_set_drvdata(pdev
, et
);
431 /* map chip registers (47xx: and sprom) */
432 dev
->base_addr
= pci_resource_start(pdev
, 0);
433 if ((et
->regsva
= ioremap_nocache(dev
->base_addr
, PCI_BAR0_WINSZ
)) == NULL
) {
434 ET_ERROR(("et%d: ioremap() failed\n", unit
));
438 init_MUTEX(&et
->sem
);
439 spin_lock_init(&et
->lock
);
440 spin_lock_init(&et
->txq_lock
);
442 for (i
= 0; i
< NUMTXQ
; i
++)
443 skb_queue_head_init(&et
->txq
[i
]);
445 /* common load-time initialization */
446 et
->etc
= etc_attach((void *)et
, pdev
->vendor
, pdev
->device
, unit
, osh
, et
->regsva
);
447 if (et
->etc
== NULL
) {
448 ET_ERROR(("et%d: etc_attach() failed\n", unit
));
453 et
->cih
= ctf_attach(osh
, dev
->name
, &et_msg_level
, et_ctf_detach
, et
);
455 if ((ctf_dev_register(et
->cih
, dev
, FALSE
) != BCME_OK
) ||
456 (ctf_enable(et
->cih
, dev
, TRUE
) != BCME_OK
)) {
457 ET_ERROR(("et%d: ctf_dev_register() failed\n", unit
));
463 /* create ctf packet pool with specified number of buffers */
464 if (CTF_ENAB(et
->cih
) && (num_physpages
>= 8192) &&
465 (osl_ctfpool_init(osh
, CTFPOOLSZ
, RXBUFSZ
+BCMEXTRAHDROOM
) < 0)) {
466 ET_ERROR(("et%d: chipattach: ctfpool alloc/init failed\n", unit
));
471 bcopy(&et
->etc
->cur_etheraddr
, dev
->dev_addr
, ETHER_ADDR_LEN
);
473 /* init 1 second watchdog timer */
474 init_timer(&et
->timer
);
475 et
->timer
.data
= (ulong
)dev
;
476 et
->timer
.function
= et_watchdog
;
479 /* setup the bottom half handler */
480 tasklet_init(&et
->tasklet
, et_dpc
, (ulong
)et
);
481 #endif /* BCM_NAPI */
483 #ifdef ET_ALL_PASSIVE
484 if (ET_ALL_PASSIVE_ENAB(et
)) {
485 MY_INIT_WORK(&et
->dpc_task
.work
, (work_func_t
)et_dpc_work
);
486 et
->dpc_task
.context
= et
;
487 MY_INIT_WORK(&et
->txq_task
.work
, (work_func_t
)et_txq_work
);
488 et
->txq_task
.context
= et
;
490 et
->all_dispatch_mode
= (passivemode
== 0) ? TRUE
: FALSE
;
491 #endif /* ET_ALL_PASSIVE */
493 /* register our interrupt handler */
494 if (request_irq(pdev
->irq
, et_isr
, IRQF_SHARED
, dev
->name
, et
)) {
495 ET_ERROR(("et%d: request_irq() failed\n", unit
));
498 dev
->irq
= pdev
->irq
;
500 /* add us to the global linked list */
504 /* lastly, enable our entry points */
506 dev
->stop
= et_close
;
507 dev
->hard_start_xmit
= et_start
;
508 dev
->get_stats
= et_get_stats
;
509 dev
->set_mac_address
= et_set_mac_address
;
510 dev
->set_multicast_list
= et_set_multicast_list
;
511 dev
->do_ioctl
= et_ioctl
;
514 dev
->weight
= (ET_GMAC(et
->etc
) ? 64 : 32);
515 #endif /* BCM_NAPI */
517 if (register_netdev(dev
)) {
518 ET_ERROR(("et%d: register_netdev() failed\n", unit
));
524 /* print hello string */
525 (*et
->etc
->chops
->longname
)(et
->etc
->ch
, name
, sizeof(name
));
526 printf("%s: %s %s\n", dev
->name
, name
, EPI_VERSION_STR
);
529 /* read plc_vifs to initialize the vids to use for receiving
530 * and forwarding the frames.
532 var
= getvar(NULL
, "plc_vifs");
535 ET_ERROR(("et%d: %s: PLC vifs not configured\n", unit
, __FUNCTION__
));
541 /* initialize the vids to use for plc rx and tx */
542 sscanf(var
, "vlan%d vlan%d vlan%d vlan%d",
543 &et
->plc
.txvid
, &et
->plc
.rxvid1
, &et
->plc
.rxvid2
, &et
->plc
.rxvid3
);
545 ET_ERROR(("et%d: %s: PLC vifs %s\n", unit
, __FUNCTION__
, var
));
547 /* register a callback to be called on plc dev create event */
548 register_netdevice_notifier(&et_plc_netdev_notifier
);
559 et_suspend(struct pci_dev
*pdev
, DRV_SUSPEND_STATE_TYPE state
)
563 if ((et
= (et_info_t
*) pci_get_drvdata(pdev
))) {
564 netif_device_detach(et
->dev
);
574 et_resume(struct pci_dev
*pdev
)
578 if ((et
= (et_info_t
*) pci_get_drvdata(pdev
))) {
582 netif_device_attach(et
->dev
);
588 /* Compatibility routines */
589 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6)
591 _et_suspend(struct pci_dev
*pdev
)
597 _et_resume(struct pci_dev
*pdev
)
601 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6) */
603 static void __devexit
604 et_remove(struct pci_dev
*pdev
)
608 if (!etc_chipmatch(pdev
->vendor
, pdev
->device
))
611 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12)
614 et_suspend(pdev
, PMSG_SUSPEND
);
617 if ((et
= (et_info_t
*) pci_get_drvdata(pdev
))) {
619 pci_set_drvdata(pdev
, NULL
);
623 static struct pci_driver et_pci_driver
= {
626 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6)
627 suspend
: _et_suspend
,
632 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6) */
633 remove
: __devexit_p(et_remove
),
634 id_table
: et_id_table
,
641 if (msglevel
!= 0xdeadbeef)
642 et_msg_level
= msglevel
;
644 char *var
= getvar(NULL
, "et_msglevel");
646 et_msg_level
= bcm_strtoul(var
, NULL
, 0);
649 printf("%s: msglevel set to 0x%x\n", __FUNCTION__
, et_msg_level
);
650 #endif /* defined(BCMDBG) */
652 #ifdef ET_ALL_PASSIVE
654 char *var
= getvar(NULL
, "et_dispatch_mode");
656 passivemode
= bcm_strtoul(var
, NULL
, 0);
657 printf("%s: passivemode set to 0x%x\n", __FUNCTION__
, passivemode
);
659 #endif /* ET_ALL_PASSIVE */
661 return pci_module_init(&et_pci_driver
);
667 pci_unregister_driver(&et_pci_driver
);
670 module_init(et_module_init
);
671 module_exit(et_module_exit
);
674 et_free(et_info_t
*et
)
682 ET_TRACE(("et: et_free\n"));
684 if (et
->dev
&& et
->dev
->irq
)
685 free_irq(et
->dev
->irq
, et
);
689 ctf_dev_unregister(et
->cih
, et
->dev
);
693 unregister_netdev(et
->dev
);
694 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
695 free_netdev(et
->dev
);
697 MFREE(et
->osh
, et
->dev
, sizeof(struct net_device
));
703 /* free the buffers in fast pool */
704 osl_ctfpool_cleanup(et
->osh
);
708 /* free ctf resources */
713 /* free common resources */
720 * unregister_netdev() calls get_stats() which may read chip registers
721 * so we cannot unmap the chip registers until after calling unregister_netdev() .
724 iounmap((void *)et
->regsva
);
728 /* remove us from the global linked list */
729 for (prev
= &et_list
; *prev
; prev
= &(*prev
)->next
)
736 MFREE(et
->osh
, et
, sizeof(et_info_t
));
739 printf("Memory leak of bytes %d\n", MALLOCED(osh
));
740 ASSERT(MALLOCED(osh
) == 0);
746 et_open(struct net_device
*dev
)
752 ET_TRACE(("et%d: et_open\n", et
->etc
->unit
));
754 et
->etc
->promisc
= (dev
->flags
& IFF_PROMISC
)? TRUE
: FALSE
;
755 et
->etc
->allmulti
= (dev
->flags
& IFF_ALLMULTI
)? TRUE
: et
->etc
->promisc
;
761 OLD_MOD_INC_USE_COUNT
;
767 et_close(struct net_device
*dev
)
773 ET_TRACE(("et%d: et_close\n", et
->etc
->unit
));
775 et
->etc
->promisc
= FALSE
;
776 et
->etc
->allmulti
= FALSE
;
782 OLD_MOD_DEC_USE_COUNT
;
787 #ifdef ET_ALL_PASSIVE
788 /* Schedule a completion handler to run at safe time */
789 static int BCMFASTPATH
790 et_schedule_task(et_info_t
*et
, void (*fn
)(struct et_task
*task
), void *context
)
794 ET_TRACE(("et%d: et_schedule_task\n", et
->etc
->unit
));
796 if (!(task
= MALLOC(et
->osh
, sizeof(et_task_t
)))) {
797 ET_ERROR(("et%d: et_schedule_task: out of memory, malloced %d bytes\n",
798 et
->etc
->unit
, MALLOCED(et
->osh
)));
802 MY_INIT_WORK(&task
->work
, (work_func_t
)fn
);
803 task
->context
= context
;
805 if (!schedule_work(&task
->work
)) {
806 ET_ERROR(("et%d: schedule_work() failed\n", et
->etc
->unit
));
807 MFREE(et
->osh
, task
, sizeof(et_task_t
));
814 static void BCMFASTPATH
815 et_txq_work(struct et_task
*task
)
817 et_info_t
*et
= (et_info_t
*)task
->context
;
825 #endif /* ET_ALL_PASSIVE */
828 * Yeah, queueing the packets on a tx queue instead of throwing them
829 * directly into the descriptor ring in the case of dma is kinda lame,
830 * but this results in a unified transmit path for both dma and pio
831 * and localizes/simplifies the netif_*_queue semantics, too.
833 static int BCMFASTPATH
834 et_start(struct sk_buff
*skb
, struct net_device
*dev
)
841 if (ET_GMAC(et
->etc
) && (et
->etc
->qos
))
842 q
= etc_up2tc(PKTPRIO(skb
));
844 ET_TRACE(("et%d: et_start: len %d\n", et
->etc
->unit
, skb
->len
));
845 ET_LOG("et%d: et_start: len %d", et
->etc
->unit
, skb
->len
);
848 /* put it on the tx queue and call sendnext */
850 __skb_queue_tail(&et
->txq
[q
], skb
);
851 et
->etc
->txq_state
|= (1 << q
);
854 if (!ET_ALL_PASSIVE_ENAB(et
)) {
859 #ifdef ET_ALL_PASSIVE
861 schedule_work(&et
->txq_task
.work
);
862 #endif /* ET_ALL_PASSIVE */
864 ET_LOG("et%d: et_start ret\n", et
->etc
->unit
, 0);
869 static void BCMFASTPATH
870 et_sendnext(et_info_t
*et
)
879 ET_TRACE(("et%d: et_sendnext\n", etc
->unit
));
880 ET_LOG("et%d: et_sendnext", etc
->unit
, 0);
882 /* dequeue packets from highest priority queue and send */
886 if (etc
->txq_state
== 0)
889 priq
= etc_priq(etc
->txq_state
);
891 ET_TRACE(("et%d: txq_state %x priq %d txavail %d\n",
892 etc
->unit
, etc
->txq_state
, priq
,
893 *(uint
*)etc
->txavail
[priq
]));
895 if (skb_peek(&et
->txq
[priq
]) == NULL
) {
896 etc
->txq_state
&= ~(1 << priq
);
902 /* current highest priority dma queue is full */
903 if (*(uint
*)(etc
->txavail
[priq
]) == 0)
905 if (etc
->pioactive
!= NULL
)
909 skb
= __skb_dequeue(&et
->txq
[priq
]);
912 ET_PRHDR("tx", (struct ether_header
*)skb
->data
, skb
->len
, etc
->unit
);
913 ET_PRPKT("txpkt", skb
->data
, skb
->len
, etc
->unit
);
915 /* Convert the packet. */
916 if ((p
= PKTFRMNATIVE(et
->osh
, skb
)) == NULL
) {
917 PKTFREE(etc
->osh
, skb
, TRUE
);
921 (*etc
->chops
->tx
)(etc
->ch
, p
);
924 etc
->txbyte
+= skb
->len
;
927 /* no flow control when qos is enabled */
929 /* stop the queue whenever txq fills */
930 if ((skb_queue_len(&et
->txq
[TX_Q0
]) > DATAHIWAT
) && !netif_queue_stopped(et
->dev
))
931 netif_stop_queue(et
->dev
);
932 else if (netif_queue_stopped(et
->dev
) &&
933 (skb_queue_len(&et
->txq
[TX_Q0
]) < (DATAHIWAT
/2)))
934 netif_wake_queue(et
->dev
);
936 /* drop the frame if corresponding prec txq len exceeds hiwat
937 * when qos is enabled.
939 if ((priq
!= TC_NONE
) && (skb_queue_len(&et
->txq
[priq
]) > DATAHIWAT
)) {
940 skb
= __skb_dequeue_tail(&et
->txq
[priq
]);
941 PKTFREE(et
->osh
, skb
, TRUE
);
950 et_init(et_info_t
*et
, uint options
)
952 ET_TRACE(("et%d: et_init\n", et
->etc
->unit
));
953 ET_LOG("et%d: et_init", et
->etc
->unit
, 0);
957 etc_init(et
->etc
, options
);
962 et_reset(et_info_t
*et
)
964 ET_TRACE(("et%d: et_reset\n", et
->etc
->unit
));
968 /* zap any pending dpc interrupt bits */
971 /* dpc will not be rescheduled */
985 ET_TRACE(("et%d: et_up\n", etc
->unit
));
989 /* schedule one second watchdog timer */
990 et
->timer
.expires
= jiffies
+ HZ
;
991 add_timer(&et
->timer
);
993 netif_start_queue(et
->dev
);
997 et_down(et_info_t
*et
, int reset
)
1000 struct sk_buff
*skb
;
1005 ET_TRACE(("et%d: et_down\n", etc
->unit
));
1007 netif_down(et
->dev
);
1008 netif_stop_queue(et
->dev
);
1010 /* stop watchdog timer */
1011 del_timer(&et
->timer
);
1013 etc_down(etc
, reset
);
1015 /* flush the txq(s) */
1016 for (i
= 0; i
< NUMTXQ
; i
++)
1017 while ((skb
= skb_dequeue(&et
->txq
[i
])))
1018 PKTFREE(etc
->osh
, skb
, TRUE
);
1023 tasklet_kill(&et
->tasklet
);
1025 #endif /* BCM_NAPI */
1029 * These are interrupt on/off entry points. Disable interrupts
1030 * during interrupt state transition.
1033 et_intrson(et_info_t
*et
)
1035 unsigned long flags
;
1037 (*et
->etc
->chops
->intrson
)(et
->etc
->ch
);
1042 _et_watchdog(struct net_device
*dev
)
1050 etc_watchdog(et
->etc
);
1052 /* reschedule one second watchdog timer */
1053 et
->timer
.expires
= jiffies
+ HZ
;
1054 add_timer(&et
->timer
);
1057 /* allocate and add a new skb to the pkt pool */
1058 if (CTF_ENAB(et
->cih
))
1059 osl_ctfpool_replenish(et
->osh
, CTFPOOL_REFILL_THRESH
);
1060 #endif /* CTFPOOL */
1064 #ifdef ET_ALL_PASSIVE
1066 et_watchdog_task(et_task_t
*task
)
1068 et_info_t
*et
= ET_INFO((struct net_device
*)task
->context
);
1070 _et_watchdog((struct net_device
*)task
->context
);
1071 MFREE(et
->osh
, task
, sizeof(et_task_t
));
1073 #endif /* ET_ALL_PASSIVE */
1076 et_watchdog(ulong data
)
1078 struct net_device
*dev
= (struct net_device
*)data
;
1079 #ifdef ET_ALL_PASSIVE
1080 et_info_t
*et
= ET_INFO(dev
);
1081 #endif /* ET_ALL_PASSIVE */
1083 if (!ET_ALL_PASSIVE_ENAB(et
))
1085 #ifdef ET_ALL_PASSIVE
1087 et_schedule_task(et
, et_watchdog_task
, dev
);
1088 #endif /* ET_ALL_PASSIVE */
1094 et_ethtool(et_info_t
*et
, struct ethtool_cmd
*ecmd
)
1098 struct ethtool_drvinfo
*info
;
1102 switch (ecmd
->cmd
) {
1104 ecmd
->supported
= (SUPPORTED_10baseT_Half
| SUPPORTED_10baseT_Full
|
1105 SUPPORTED_100baseT_Half
| SUPPORTED_100baseT_Full
|
1107 ecmd
->advertising
= ADVERTISED_TP
;
1108 ecmd
->advertising
|= (et
->etc
->advertise
& ADV_10HALF
) ?
1109 ADVERTISED_10baseT_Half
: 0;
1110 ecmd
->advertising
|= (et
->etc
->advertise
& ADV_10FULL
) ?
1111 ADVERTISED_10baseT_Full
: 0;
1112 ecmd
->advertising
|= (et
->etc
->advertise
& ADV_100HALF
) ?
1113 ADVERTISED_100baseT_Half
: 0;
1114 ecmd
->advertising
|= (et
->etc
->advertise
& ADV_100FULL
) ?
1115 ADVERTISED_100baseT_Full
: 0;
1116 ecmd
->advertising
|= (et
->etc
->advertise2
& ADV_1000FULL
) ?
1117 ADVERTISED_1000baseT_Full
: 0;
1118 ecmd
->advertising
|= (et
->etc
->advertise2
& ADV_1000HALF
) ?
1119 ADVERTISED_1000baseT_Half
: 0;
1120 ecmd
->advertising
|= (et
->etc
->forcespeed
== ET_AUTO
) ?
1121 ADVERTISED_Autoneg
: 0;
1122 if (et
->etc
->linkstate
) {
1123 ecmd
->speed
= (et
->etc
->speed
== 1000) ? SPEED_1000
:
1124 ((et
->etc
->speed
== 100) ? SPEED_100
: SPEED_10
);
1125 ecmd
->duplex
= (et
->etc
->duplex
== 1) ? DUPLEX_FULL
: DUPLEX_HALF
;
1130 ecmd
->port
= PORT_TP
;
1131 ecmd
->phy_address
= 0;
1132 ecmd
->transceiver
= XCVR_INTERNAL
;
1133 ecmd
->autoneg
= (et
->etc
->forcespeed
== ET_AUTO
) ? AUTONEG_ENABLE
: AUTONEG_DISABLE
;
1138 if (!capable(CAP_NET_ADMIN
)) {
1142 else if (ecmd
->speed
== SPEED_10
&& ecmd
->duplex
== DUPLEX_HALF
)
1144 else if (ecmd
->speed
== SPEED_10
&& ecmd
->duplex
== DUPLEX_FULL
)
1146 else if (ecmd
->speed
== SPEED_100
&& ecmd
->duplex
== DUPLEX_HALF
)
1148 else if (ecmd
->speed
== SPEED_100
&& ecmd
->duplex
== DUPLEX_FULL
)
1150 else if (ecmd
->speed
== SPEED_1000
&& ecmd
->duplex
== DUPLEX_FULL
)
1151 speed
= ET_1000FULL
;
1152 else if (ecmd
->autoneg
== AUTONEG_ENABLE
)
1158 ret
= etc_ioctl(et
->etc
, ETCSPEED
, &speed
);
1160 case ETHTOOL_GDRVINFO
:
1161 info
= (struct ethtool_drvinfo
*)ecmd
;
1162 bzero(info
, sizeof(struct ethtool_drvinfo
));
1163 info
->cmd
= ETHTOOL_GDRVINFO
;
1164 sprintf(info
->driver
, "et%d", et
->etc
->unit
);
1165 strcpy(info
->version
, EPI_VERSION_STR
);
1176 #endif /* SIOCETHTOOL */
1179 et_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
1184 int size
, ethtoolcmd
;
1186 et_var_t
*var
= NULL
;
1187 void *buffer
= NULL
;
1191 ET_TRACE(("et%d: et_ioctl: cmd 0x%x\n", et
->etc
->unit
, cmd
));
1196 if (copy_from_user(ðtoolcmd
, ifr
->ifr_data
, sizeof(uint32
)))
1199 if (ethtoolcmd
== ETHTOOL_GDRVINFO
)
1200 size
= sizeof(struct ethtool_drvinfo
);
1202 size
= sizeof(struct ethtool_cmd
);
1203 get
= TRUE
; set
= TRUE
;
1205 #endif /* SIOCETHTOOL */
1208 get
= TRUE
; set
= FALSE
;
1211 case SIOCGETCPHYRD2
:
1212 case SIOCGETCROBORD
:
1213 size
= sizeof(int) * 2;
1214 get
= TRUE
; set
= TRUE
;
1217 case SIOCSETCPHYWR2
:
1218 case SIOCSETCROBOWR
:
1219 size
= sizeof(int) * 2;
1220 get
= FALSE
; set
= TRUE
;
1223 size
= sizeof(et_var_t
);
1228 get
= FALSE
; set
= TRUE
;
1232 if ((buf
= MALLOC(et
->osh
, size
)) == NULL
) {
1233 ET_ERROR(("et: et_ioctl: out of memory, malloced %d bytes\n", MALLOCED(et
->osh
)));
1237 if (set
&& copy_from_user(buf
, ifr
->ifr_data
, size
)) {
1238 MFREE(et
->osh
, buf
, size
);
1242 if (cmd
== SIOCSETGETVAR
) {
1243 var
= (et_var_t
*)buf
;
1248 if (!(buffer
= (void *) MALLOC(et
->osh
, var
->len
))) {
1249 ET_ERROR(("et: et_ioctl: out of memory, malloced %d bytes\n",
1250 MALLOCED(et
->osh
)));
1251 MFREE(et
->osh
, buf
, size
);
1255 if (copy_from_user(buffer
, var
->buf
, var
->len
)) {
1256 MFREE(et
->osh
, buffer
, var
->len
);
1257 MFREE(et
->osh
, buf
, size
);
1266 error
= et_ethtool(et
, (struct ethtool_cmd
*)buf
);
1268 #endif /* SIOCETHTOOL */
1271 error
= etc_iovar(et
->etc
, var
->cmd
, var
->set
, buffer
);
1274 error
= copy_to_user(var
->buf
, buffer
, var
->len
);
1277 MFREE(et
->osh
, buffer
, var
->len
);
1281 error
= etc_ioctl(et
->etc
, cmd
- SIOCSETCUP
, buf
) ? -EINVAL
: 0;
1287 error
= copy_to_user(ifr
->ifr_data
, buf
, size
);
1289 MFREE(et
->osh
, buf
, size
);
1294 static struct net_device_stats
*
1295 et_get_stats(struct net_device
*dev
)
1299 struct net_device_stats
*stats
;
1303 ET_TRACE(("et%d: et_get_stats\n", et
->etc
->unit
));
1309 bzero(stats
, sizeof(struct net_device_stats
));
1313 (*etc
->chops
->statsupd
)(etc
->ch
);
1316 stats
->rx_packets
= etc
->rxframe
;
1317 stats
->tx_packets
= etc
->txframe
;
1318 stats
->rx_bytes
= etc
->rxbyte
;
1319 stats
->tx_bytes
= etc
->txbyte
;
1320 stats
->rx_errors
= etc
->rxerror
;
1321 stats
->tx_errors
= etc
->txerror
;
1327 stats
->collisions
= mib
->tx_total_cols
;
1328 stats
->rx_length_errors
= (mib
->rx_oversize_pkts
+ mib
->rx_undersize
);
1329 stats
->rx_crc_errors
= mib
->rx_crc_errs
;
1330 stats
->rx_frame_errors
= mib
->rx_align_errs
;
1331 stats
->rx_missed_errors
= mib
->rx_missed_pkts
;
1336 stats
->collisions
= mib
->tx_total_cols
;
1337 stats
->rx_length_errors
= (mib
->rx_oversize_pkts
+ mib
->rx_undersize
);
1338 stats
->rx_crc_errors
= mib
->rx_crc_errs
;
1339 stats
->rx_frame_errors
= mib
->rx_align_errs
;
1340 stats
->rx_missed_errors
= mib
->rx_missed_pkts
;
1344 stats
->rx_fifo_errors
= etc
->rxoflo
;
1345 stats
->rx_over_errors
= etc
->rxoflo
;
1346 stats
->tx_fifo_errors
= etc
->txuflo
;
1354 et_set_mac_address(struct net_device
*dev
, void *addr
)
1357 struct sockaddr
*sa
= (struct sockaddr
*) addr
;
1360 ET_TRACE(("et%d: et_set_mac_address\n", et
->etc
->unit
));
1365 bcopy(sa
->sa_data
, dev
->dev_addr
, ETHER_ADDR_LEN
);
1366 bcopy(dev
->dev_addr
, &et
->etc
->cur_etheraddr
, ETHER_ADDR_LEN
);
1372 et_set_multicast_list(struct net_device
*dev
)
1376 struct dev_mc_list
*mclist
;
1382 ET_TRACE(("et%d: et_set_multicast_list\n", etc
->unit
));
1387 etc
->promisc
= (dev
->flags
& IFF_PROMISC
)? TRUE
: FALSE
;
1388 etc
->allmulti
= (dev
->flags
& IFF_ALLMULTI
)? TRUE
: etc
->promisc
;
1390 /* copy the list of multicasts into our private table */
1391 for (i
= 0, mclist
= dev
->mc_list
; mclist
&& (i
< dev
->mc_count
);
1392 i
++, mclist
= mclist
->next
) {
1393 if (i
>= MAXMULTILIST
) {
1394 etc
->allmulti
= TRUE
;
1398 etc
->multicast
[i
] = *((struct ether_addr
*)mclist
->dmi_addr
);
1400 etc
->nmulticast
= i
;
1402 et_init(et
, ET_INIT_DEF_OPTIONS
);
1408 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
1409 static irqreturn_t BCMFASTPATH
1410 et_isr(int irq
, void *dev_id
)
1412 static irqreturn_t BCMFASTPATH
1413 et_isr(int irq
, void *dev_id
, struct pt_regs
*ptregs
)
1417 struct chops
*chops
;
1421 et
= (et_info_t
*)dev_id
;
1422 chops
= et
->etc
->chops
;
1425 /* guard against shared interrupts */
1429 /* get interrupt condition bits */
1430 events
= (*chops
->getintrevents
)(ch
, TRUE
);
1434 if (!(events
& INTR_NEW
))
1437 ET_TRACE(("et%d: et_isr: events 0x%x\n", et
->etc
->unit
, events
));
1438 ET_LOG("et%d: et_isr: events 0x%x", et
->etc
->unit
, events
);
1440 /* disable interrupts */
1441 (*chops
->intrsoff
)(ch
);
1443 /* save intstatus bits */
1444 ASSERT(et
->events
== 0);
1445 et
->events
= events
;
1447 ASSERT(et
->resched
== FALSE
);
1449 /* allow the device to be added to the cpu polling list if we are up */
1450 if (netif_rx_schedule_prep(et
->dev
)) {
1451 /* tell the network core that we have packets to send up */
1452 __netif_rx_schedule(et
->dev
);
1454 ET_ERROR(("et%d: et_isr: intr while in poll!\n",
1456 (*chops
->intrson
)(ch
);
1458 #else /* BCM_NAPI */
1460 #ifdef ET_ALL_PASSIVE
1461 if (ET_ALL_PASSIVE_ENAB(et
)) {
1462 schedule_work(&et
->dpc_task
.work
);
1464 #endif /* ET_ALL_PASSIVE */
1465 tasklet_schedule(&et
->tasklet
);
1466 #endif /* BCM_NAPI */
1469 ET_LOG("et%d: et_isr ret", et
->etc
->unit
, 0);
1471 return IRQ_RETVAL(events
& INTR_NEW
);
1475 et_rxevent(osl_t
*osh
, et_info_t
*et
, struct chops
*chops
, void *ch
, int quota
)
1478 void *p
= NULL
, *h
= NULL
, *t
= NULL
;
1479 struct sk_buff
*skb
;
1481 /* read the buffers first */
1482 while ((p
= (*chops
->rx
)(ch
))) {
1490 /* we reached quota already */
1491 if (++processed
>= quota
) {
1492 /* reschedule et_dpc()/et_poll() */
1498 /* prefetch the headers */
1500 ETPREFHDRS(PKTDATA(osh
, h
), PREFSZ
);
1502 /* post more rx bufs */
1503 (*chops
->rxfill
)(ch
);
1505 while ((p
= h
) != NULL
) {
1507 PKTSETLINK(p
, NULL
);
1508 /* prefetch the headers */
1510 ETPREFHDRS(PKTDATA(osh
, h
), PREFSZ
);
1511 skb
= PKTTONATIVE(osh
, p
);
/*
 * et_poll (BCM_NAPI) / et_dpc (non-NAPI) - deferred interrupt handler.
 * The two entry points share one body via #ifdef BCM_NAPI: under NAPI the
 * kernel calls et_poll(dev, budget); otherwise et_dpc runs as a tasklet
 * (or, with ET_ALL_PASSIVE, from a workqueue).  Visible flow: re-read the
 * interrupt event bits, service RX (et_rxevent), reclaim TX and refill,
 * handle error events (re-initialize with interrupts off if the chip
 * needs a reset), then either reschedule itself or re-enable interrupts,
 * and for NAPI complete the poll when under quota.
 * NOTE(review): garbled extraction -- original lines 1521, 1526-1528
 * (the et_dpc signature itself), 1532-1535, 1537-1539, 1542-1544,
 * 1546-1551, 1554-1555, 1558, 1562-1563, 1568-1572, 1575-1586, 1588,
 * 1591, 1593-1594, 1598, 1601, 1605-1613, 1616, 1618, 1621-1624, 1626,
 * 1629, 1631 and 1633-1634 are missing; locals such as nrx, osh, ch and
 * the lock/resched logic are not visible here.
 */
1519 static int BCMFASTPATH
1520 et_poll(struct net_device
*dev
, int *budget
)
/* NAPI: per-call work limit is min of device quota and remaining budget */
1522 int quota
= min(dev
->quota
, *budget
);
1523 et_info_t
*et
= ET_INFO(dev
);
1524 #else /* BCM_NAPI */
1525 static void BCMFASTPATH
1529 et_info_t
*et
= (et_info_t
*)data
;
1530 #endif /* BCM_NAPI */
1531 struct chops
*chops
;
1536 chops
= et
->etc
->chops
;
1540 ET_TRACE(("et%d: et_dpc: events 0x%x\n", et
->etc
->unit
, et
->events
));
1541 ET_LOG("et%d: et_dpc: events 0x%x", et
->etc
->unit
, et
->events
);
1545 #endif /* BCM_NAPI */
1550 /* get interrupt condition bits again when dpc was rescheduled */
1552 et
->events
= (*chops
->getintrevents
)(ch
, FALSE
);
1553 et
->resched
= FALSE
;
/* receive path: process up to 'quota' frames */
1556 if (et
->events
& INTR_RX
)
1557 nrx
= et_rxevent(osh
, et
, chops
, ch
, quota
);
/* transmit path: reclaim completed tx buffers, then replenish rx */
1559 if (et
->events
& INTR_TX
) {
1560 (*chops
->txreclaim
)(ch
, FALSE
);
1561 (*chops
->rxfill
)(ch
);
1564 /* handle error conditions, if reset required leave interrupts off! */
1565 if (et
->events
& INTR_ERROR
) {
1566 if ((*chops
->errors
)(ch
))
1567 et_init(et
, ET_INIT_INTROFF
);
/* errors path may also have frames pending; drain again */
1570 nrx
+= et_rxevent(osh
, et
, chops
, ch
, quota
);
1573 /* run the tx queue */
1574 if (et
->etc
->txq_state
!= 0)
1577 /* clear this before re-enabling interrupts */
1580 /* something may bring the driver down */
1582 et
->resched
= FALSE
;
1587 #ifdef ET_ALL_PASSIVE
/* passive mode defers to the workqueue; otherwise use the tasklet */
1589 if (!ET_ALL_PASSIVE_ENAB(et
))
1590 tasklet_schedule(&et
->tasklet
);
1592 schedule_work(&et
->dpc_task
.work
);
1595 (*chops
->intrson
)(ch
);
1596 #else /* ET_ALL_PASSIVE */
1597 /* there may be frames left, reschedule et_dpc() */
1599 tasklet_schedule(&et
->tasklet
);
1600 /* re-enable interrupts */
1602 (*chops
->intrson
)(ch
);
1603 #endif /* ET_ALL_PASSIVE */
1604 #endif /* BCM_NAPI */
1607 ET_LOG("et%d: et_dpc ret", et
->etc
->unit
, 0);
1610 /* update number of frames processed */
1614 ET_TRACE(("et%d: et_poll: quota %d budget %d\n",
1615 et
->etc
->unit
, dev
->quota
, *budget
));
1617 /* we got packets but no quota */
1619 /* indicate that we are not done, don't enable
1620 * interrupts yet. linux network core will call
1625 netif_rx_complete(dev
);
1627 /* enable interrupts now */
1628 (*chops
->intrson
)(ch
);
1630 /* indicate that we are done */
1632 #else /* BCM_NAPI */
1635 #endif /* BCM_NAPI */
1638 #ifdef ET_ALL_PASSIVE
/*
 * et_dpc_work - workqueue wrapper used in "all passive" mode: recovers
 * the et_info_t from the task context and runs the normal dpc handler.
 * NOTE(review): original lines 1641 and 1644-1645 (most likely the
 * braces / function tail) were lost in extraction -- confirm against the
 * full source before relying on this fragment.
 */
1639 static void BCMFASTPATH
1640 et_dpc_work(struct et_task
*task
)
1642 et_info_t
*et
= (et_info_t
*)task
->context
;
/* et_dpc takes its argument as an unsigned long (tasklet convention) */
1643 et_dpc((unsigned long)et
);
1646 #endif /* ET_ALL_PASSIVE */
/*
 * et_error - decode the hardware rx-header error flags for a bad frame
 * and log a per-cause diagnostic (oversize, CRC, fifo overflow, odd
 * nibbles, symbol error), quoting the source MAC address where relevant.
 * NOTE(review): garbled extraction -- original lines 1648 (return type),
 * 1650-1651, 1653 (eabuf declaration), 1656, 1659, 1662, 1665, 1669 and
 * 1672 are missing; the per-error statistics updates that presumably sit
 * inside each if-block are not visible here.
 */
1649 et_error(et_info_t
*et
, struct sk_buff
*skb
, void *rxh
)
1652 struct ether_header
*eh
;
/* format the sender's MAC once for use in all messages below */
1654 eh
= (struct ether_header
*)skb
->data
;
1655 bcm_ether_ntoa((struct ether_addr
*)eh
->ether_shost
, eabuf
);
1657 if (RXH_OVERSIZE(et
->etc
, rxh
)) {
1658 ET_ERROR(("et%d: rx: over size packet from %s\n", et
->etc
->unit
, eabuf
));
1660 if (RXH_CRC(et
->etc
, rxh
)) {
1661 ET_ERROR(("et%d: rx: crc error from %s\n", et
->etc
->unit
, eabuf
));
1663 if (RXH_OVF(et
->etc
, rxh
)) {
1664 ET_ERROR(("et%d: rx: fifo overflow\n", et
->etc
->unit
));
1666 if (RXH_NO(et
->etc
, rxh
)) {
1667 ET_ERROR(("et%d: rx: crc error (odd nibbles) from %s\n",
1668 et
->etc
->unit
, eabuf
));
1670 if (RXH_RXER(et
->etc
, rxh
)) {
1671 ET_ERROR(("et%d: rx: symbol error from %s\n", et
->etc
->unit
, eabuf
));
/* optional DNS-multiqueue hook: a module may install dnsmq_hit_hook to
 * claim packets before the cut-through forwarder sees them */
1675 #ifdef CONFIG_IP_NF_DNSMQ
1676 typedef int (*dnsmqHitHook
)(struct sk_buff
*skb
);
1677 extern dnsmqHitHook dnsmq_hit_hook
;
/*
 * et_ctf_forward - attempt to fast-path (cut-through forward) a received
 * frame via the CTF engine.  Returns BCME_ERROR when the frame must take
 * the normal slow path (hook claimed it, CTF disabled, or no CTF match);
 * on a CTF hit it clears the skip-ct / fast-buf markers before the frame
 * is consumed.
 * NOTE(review): garbled extraction -- original lines 1678-1680 (return
 * type), 1682, 1686-1688, 1692, 1695-1696, 1699-1701, 1705, 1708, 1712
 * and 1714 (closing brace) are missing; the success-path return between
 * 1694 and 1697 is not visible here.
 */
1681 et_ctf_forward(et_info_t
*et
, struct sk_buff
*skb
)
1683 #ifdef CONFIG_IP_NF_DNSMQ
/* if the dnsmq hook claims this skb, force the slow path */
1684 if(dnsmq_hit_hook
&&dnsmq_hit_hook(skb
))
1685 return (BCME_ERROR
);
1689 /* use slow path if ctf is disabled */
1690 if (!CTF_ENAB(et
->cih
))
1691 return (BCME_ERROR
);
1693 /* try cut thru first */
1694 if (ctf_forward(et
->cih
, skb
, skb
->dev
) != BCME_ERROR
)
1697 /* clear skipct flag before sending up */
1698 PKTCLRSKIPCT(et
->osh
, skb
);
1702 /* allocate and add a new skb to the pkt pool */
1703 if (PKTISFAST(et
->osh
, skb
))
1704 osl_ctfpool_add(et
->osh
);
1706 /* clear fast buf flag before sending up */
1707 PKTCLRFAST(et
->osh
, skb
);
1709 /* re-init the hijacked field */
1710 CTFPOOLPTR(et
->osh
, skb
) = NULL
;
1711 #endif /* CTFPOOL */
/* no cut-through match: caller must send the frame up the slow path */
1713 return (BCME_ERROR
);
/*
 * et_sendup - deliver one received frame to the Linux network stack.
 * Visible flow: strip the hardware rx header (HWRXOFF) and trailing CRC,
 * account rx bytes, check the rx-header error flags, optionally divert
 * to the PLC receive path, try CTF cut-through, set packet priority,
 * classify the protocol and hand the skb to netif_receive_skb/netif_rx;
 * on a reported frame error the skb is logged via et_error and freed.
 * NOTE(review): garbled extraction -- original lines 1717 (return type),
 * 1719-1725, 1727-1728, 1731, 1734-1735, 1737, 1740, 1743, 1746, 1749,
 * 1751-1756, 1758-1761, 1764-1766, 1768-1770 (tail of the comment opened
 * at 1767), 1772, 1774-1776, 1779 (non-NAPI netif_rx call), 1781,
 * 1783-1786 and 1790+ are missing; the etc/rxh/flags declarations and
 * the error-branch label are not visible here.
 */
1718 et_sendup(et_info_t
*et
, struct sk_buff
*skb
)
1726 /* packet buffer starts with rxhdr */
1729 /* strip off rxhdr */
1730 __skb_pull(skb
, HWRXOFF
);
1732 ET_TRACE(("et%d: et_sendup: %d bytes\n", et
->etc
->unit
, skb
->len
));
1733 ET_LOG("et%d: et_sendup: len %d", et
->etc
->unit
, skb
->len
);
/* rx byte accounting (packet count update not visible in fragment) */
1736 etc
->rxbyte
+= skb
->len
;
1738 /* eh should now be aligned 2-mod-4 */
1739 ASSERT(((ulong
)skb
->data
& 3) == 2);
1741 /* strip off crc32 */
1742 __skb_trim(skb
, skb
->len
- ETHER_CRC_LEN
);
1744 ET_PRHDR("rx", (struct ether_header
*)skb
->data
, skb
->len
, etc
->unit
);
1745 ET_PRPKT("rxpkt", skb
->data
, skb
->len
, etc
->unit
);
1747 /* get the error flags */
1748 flags
= RXH_FLAGS(etc
, rxh
);
1750 /* check for reported frame errors */
/* powerline (PLC) hardware gets first claim on the frame */
1757 if (et
->plc
.hw
&& (et_plc_recv(et
, skb
) == 0))
1762 /* try cut thru' before sending up */
1763 if (et_ctf_forward(et
, skb
) != BCME_ERROR
)
1767 /* extract priority from payload and store it out-of-band
1771 pktsetprio(skb
, TRUE
);
1773 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
1777 netif_receive_skb(skb
);
1778 #else /* BCM_NAPI */
1780 #endif /* BCM_NAPI */
1782 ET_LOG("et%d: et_sendup ret", et
->etc
->unit
, 0);
/* error path: log the cause, then return the skb to the driver pool */
1787 et_error(et
, skb
, rxh
);
1788 PKTFRMNATIVE(etc
->osh
, skb
);
1789 PKTFREE(etc
->osh
, skb
, FALSE
);
/*
 * et_dump - write a human-readable driver state dump into the string
 * buffer 'b': version banner, CTF state (debug builds), and the common
 * etc-layer dump.
 * NOTE(review): garbled extraction -- original lines 1794 (return type),
 * 1796, 1799-1800, 1804-1807 and 1809+ (closing brace / other dump
 * sections) are missing from this fragment.
 */
1795 et_dump(et_info_t
*et
, struct bcmstrbuf
*b
)
/* banner: unit number plus build date/time and EPI version string */
1797 bcm_bprintf(b
, "et%d: %s %s version %s\n", et
->etc
->unit
,
1798 __DATE__
, __TIME__
, EPI_VERSION_STR
);
1801 #if defined(BCMDBG) || defined(BCMDBG_DUMP)
1802 ctf_dump(et
->cih
, b
);
1803 #endif /* BCMDBG || BCMDBG_DUMP */
/* delegate the chip-independent state dump to the etc layer */
1808 etc_dump(et
->etc
, b
);
/*
 * et_dumpet - dump the Linux-side (et_info_t) state: pointers, device
 * name, queue-stopped flag, tx queue length and allocation count.
 * NOTE(review): garbled extraction -- original lines 1813 (return type),
 * 1815 and 1818+ (the remaining bcm_bprintf arguments, e.g. the
 * "malloced" value, and the closing brace) are missing; the format
 * string below therefore has more conversions than visible arguments.
 */
1814 et_dumpet(et_info_t
*et
, struct bcmstrbuf
*b
)
1816 bcm_bprintf(b
, "et %p dev %p name %s tbusy %d txq[0].qlen %d malloced %d\n",
1817 et
, et
->dev
, et
->dev
->name
, (uint
)netif_queue_stopped(et
->dev
), et
->txq
[0].qlen
,
/*
 * et_link_up - log a link-up transition with the negotiated speed and
 * duplex ("FD"/"HD").  Uses ET_ERROR so the message appears even in
 * non-debug builds (presumably intentional for link events -- confirm).
 * NOTE(review): original lines 1822 (return type), 1824 and 1827
 * (braces) are missing from this fragment.
 */
1823 et_link_up(et_info_t
*et
)
1825 ET_ERROR(("et%d: link up (%d%s)\n",
1826 et
->etc
->unit
, et
->etc
->speed
, (et
->etc
->duplex
? "FD" : "HD")));
/*
 * et_link_down - log a link-down transition for this unit.
 * NOTE(review): original lines 1829 (return type), 1831 and 1833
 * (braces) are missing from this fragment.
 */
1830 et_link_down(et_info_t
*et
)
1832 ET_ERROR(("et%d: link down\n", et
->etc
->unit
));
1836 * 47XX-specific shared mdc/mdio contortion:
1837 * Find the et associated with the same chip as <et>
1838 * and coreunit matching <coreunit>.
/*
 * et_phyfind - walk the global et_list looking for the instance on the
 * same PCI bus/slot as <et> whose coreunit matches <coreunit> (used when
 * several GMAC cores share one MDC/MDIO pair).
 * NOTE(review): garbled extraction -- original lines 1839-1840, 1842-1845
 * (locals: bus, slot, tmp), 1848, 1852, 1854, 1856, 1859 and 1861+ (the
 * 'continue' statements inside the loop and the return of the match) are
 * missing from this fragment.
 * NOTE(review): the loop tests et->etc (loop-invariant) rather than
 * tmp->etc -- looks like a possible typo; verify against the full source.
 */
1841 et_phyfind(et_info_t
*et
, uint coreunit
)
/* identify <et>'s own PCI bus and slot for matching below */
1846 bus
= et
->pdev
->bus
->number
;
1847 slot
= PCI_SLOT(et
->pdev
->devfn
);
1849 /* walk the list et's */
1850 for (tmp
= et_list
; tmp
; tmp
= tmp
->next
) {
1851 if (et
->etc
== NULL
)
1853 if (tmp
->pdev
== NULL
)
1855 if (tmp
->pdev
->bus
->number
!= bus
)
/* nicmode devices additionally require the same PCI slot */
1857 if (tmp
->etc
->nicmode
)
1858 if (PCI_SLOT(tmp
->pdev
->devfn
) != slot
)
1860 if (tmp
->etc
->coreunit
!= coreunit
)
1867 /* shared phy read entry point */
/*
 * et_phyrd - read PHY register 'reg' of 'phyaddr' through the chip ops
 * of this instance (shared MDC/MDIO access path).
 * NOTE(review): garbled extraction -- original lines 1868 (return type),
 * 1870-1873 (locals / locking, presumably ET_LOCK) and 1875-1879
 * (unlock and 'return val') are missing from this fragment.
 */
1869 et_phyrd(et_info_t
*et
, uint phyaddr
, uint reg
)
1874 val
= et
->etc
->chops
->phyrd(et
->etc
->ch
, phyaddr
, reg
);
1880 /* shared phy write entry point */
1882 et_phywr(et_info_t
*et
, uint phyaddr
, uint reg
, uint16 val
)
1885 et
->etc
->chops
->phywr(et
->etc
->ch
, phyaddr
, reg
, val
);