 * Copyright (c) 2003 Stuart Walsh <stu@ipng.org.uk>
 * and Duncan Barclay <dmlb@dmlb.org>
 * Modifications for FreeBSD-stable by Edwin Groothuis
 * <edwin at mavetju.org>
 * <http://lists.freebsd.org/mailman/listinfo/freebsd-bugs>
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS 'AS IS' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * $FreeBSD: src/sys/dev/bfe/if_bfe.c,v 1.4.4.7 2004/03/02 08:41:33 julian Exp $
 * $DragonFly: src/sys/dev/netif/bfe/if_bfe.c,v 1.34 2008/06/05 18:06:31 swildner Exp $
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/sockio.h>
39 #include <sys/malloc.h>
40 #include <sys/interrupt.h>
41 #include <sys/kernel.h>
42 #include <sys/socket.h>
43 #include <sys/queue.h>
46 #include <sys/thread2.h>
49 #include <net/ifq_var.h>
50 #include <net/if_arp.h>
51 #include <net/ethernet.h>
52 #include <net/if_dl.h>
53 #include <net/if_media.h>
57 #include <net/if_types.h>
58 #include <net/vlan/if_vlan_var.h>
60 #include <netinet/in_systm.h>
61 #include <netinet/in.h>
62 #include <netinet/ip.h>
64 #include <bus/pci/pcireg.h>
65 #include <bus/pci/pcivar.h>
66 #include <bus/pci/pcidevs.h>
68 #include <dev/netif/mii_layer/mii.h>
69 #include <dev/netif/mii_layer/miivar.h>
71 #include <dev/netif/bfe/if_bfereg.h>
73 MODULE_DEPEND(bfe
, pci
, 1, 1, 1);
74 MODULE_DEPEND(bfe
, miibus
, 1, 1, 1);
76 /* "controller miibus0" required. See GENERIC if you get errors here. */
77 #include "miibus_if.h"
79 #define BFE_DEVDESC_MAX 64 /* Maximum device description length */
81 static struct bfe_type bfe_devs
[] = {
82 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM4401
,
83 "Broadcom BCM4401 Fast Ethernet" },
84 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM4401B0
,
85 "Broadcom BCM4401-B0 Fast Ethernet" },
86 { PCI_VENDOR_BROADCOM
, PCI_PRODUCT_BROADCOM_BCM4402
,
87 "Broadcom BCM4402 Fast Ethernet" },
91 static int bfe_probe(device_t
);
92 static int bfe_attach(device_t
);
93 static int bfe_detach(device_t
);
94 static void bfe_intr(void *);
95 static void bfe_start(struct ifnet
*);
96 static int bfe_ioctl(struct ifnet
*, u_long
, caddr_t
, struct ucred
*);
97 static void bfe_init(void *);
98 static void bfe_stop(struct bfe_softc
*);
99 static void bfe_watchdog(struct ifnet
*);
100 static void bfe_shutdown(device_t
);
101 static void bfe_tick(void *);
102 static void bfe_txeof(struct bfe_softc
*);
103 static void bfe_rxeof(struct bfe_softc
*);
104 static void bfe_set_rx_mode(struct bfe_softc
*);
105 static int bfe_list_rx_init(struct bfe_softc
*);
106 static int bfe_list_newbuf(struct bfe_softc
*, int, struct mbuf
*);
107 static void bfe_rx_ring_free(struct bfe_softc
*);
109 static void bfe_pci_setup(struct bfe_softc
*, uint32_t);
110 static int bfe_ifmedia_upd(struct ifnet
*);
111 static void bfe_ifmedia_sts(struct ifnet
*, struct ifmediareq
*);
112 static int bfe_miibus_readreg(device_t
, int, int);
113 static int bfe_miibus_writereg(device_t
, int, int, int);
114 static void bfe_miibus_statchg(device_t
);
115 static int bfe_wait_bit(struct bfe_softc
*, uint32_t, uint32_t,
117 static void bfe_get_config(struct bfe_softc
*sc
);
118 static void bfe_read_eeprom(struct bfe_softc
*, uint8_t *);
119 static void bfe_stats_update(struct bfe_softc
*);
120 static void bfe_clear_stats (struct bfe_softc
*);
121 static int bfe_readphy(struct bfe_softc
*, uint32_t, uint32_t*);
122 static int bfe_writephy(struct bfe_softc
*, uint32_t, uint32_t);
123 static int bfe_resetphy(struct bfe_softc
*);
124 static int bfe_setupphy(struct bfe_softc
*);
125 static void bfe_chip_reset(struct bfe_softc
*);
126 static void bfe_chip_halt(struct bfe_softc
*);
127 static void bfe_core_reset(struct bfe_softc
*);
128 static void bfe_core_disable(struct bfe_softc
*);
129 static int bfe_dma_alloc(device_t
);
130 static void bfe_dma_free(struct bfe_softc
*);
131 static void bfe_dma_map_desc(void *, bus_dma_segment_t
*, int, int);
132 static void bfe_dma_map(void *, bus_dma_segment_t
*, int, int);
133 static void bfe_cam_write(struct bfe_softc
*, u_char
*, int);
135 static device_method_t bfe_methods
[] = {
136 /* Device interface */
137 DEVMETHOD(device_probe
, bfe_probe
),
138 DEVMETHOD(device_attach
, bfe_attach
),
139 DEVMETHOD(device_detach
, bfe_detach
),
140 DEVMETHOD(device_shutdown
, bfe_shutdown
),
143 DEVMETHOD(bus_print_child
, bus_generic_print_child
),
144 DEVMETHOD(bus_driver_added
, bus_generic_driver_added
),
147 DEVMETHOD(miibus_readreg
, bfe_miibus_readreg
),
148 DEVMETHOD(miibus_writereg
, bfe_miibus_writereg
),
149 DEVMETHOD(miibus_statchg
, bfe_miibus_statchg
),
154 static driver_t bfe_driver
= {
157 sizeof(struct bfe_softc
)
160 static devclass_t bfe_devclass
;
162 DRIVER_MODULE(bfe
, pci
, bfe_driver
, bfe_devclass
, 0, 0);
163 DRIVER_MODULE(miibus
, bfe
, miibus_driver
, miibus_devclass
, 0, 0);
166 * Probe for a Broadcom 4401 chip.
169 bfe_probe(device_t dev
)
172 uint16_t vendor
, product
;
174 vendor
= pci_get_vendor(dev
);
175 product
= pci_get_device(dev
);
177 for (t
= bfe_devs
; t
->bfe_name
!= NULL
; t
++) {
178 if (vendor
== t
->bfe_vid
&& product
== t
->bfe_did
) {
179 device_set_desc(dev
, t
->bfe_name
);
188 bfe_dma_alloc(device_t dev
)
190 struct bfe_softc
*sc
= device_get_softc(dev
);
191 int error
, i
, tx_pos
, rx_pos
;
194 * parent tag. Apparently the chip cannot handle any DMA address
197 error
= bus_dma_tag_create(NULL
, /* parent */
198 4096, 0, /* alignment, boundary */
199 0x3FFFFFFF, /* lowaddr */
200 BUS_SPACE_MAXADDR
, /* highaddr */
201 NULL
, NULL
, /* filter, filterarg */
202 MAXBSIZE
, /* maxsize */
203 BUS_SPACE_UNRESTRICTED
, /* num of segments */
204 BUS_SPACE_MAXSIZE_32BIT
, /* max segment size */
206 &sc
->bfe_parent_tag
);
208 device_printf(dev
, "could not allocate parent dma tag\n");
212 /* tag for TX ring */
213 error
= bus_dma_tag_create(sc
->bfe_parent_tag
, 4096, 0,
214 BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
,
217 BUS_SPACE_MAXSIZE_32BIT
,
220 device_printf(dev
, "could not allocate dma tag for TX list\n");
224 /* tag for RX ring */
225 error
= bus_dma_tag_create(sc
->bfe_parent_tag
, 4096, 0,
226 BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
,
229 BUS_SPACE_MAXSIZE_32BIT
,
232 device_printf(dev
, "could not allocate dma tag for RX list\n");
237 error
= bus_dma_tag_create(sc
->bfe_parent_tag
, ETHER_ALIGN
, 0,
238 BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
,
240 MCLBYTES
, 1, BUS_SPACE_MAXSIZE_32BIT
,
241 BUS_DMA_ALLOCNOW
, &sc
->bfe_tag
);
243 device_printf(dev
, "could not allocate dma tag for mbufs\n");
249 /* pre allocate dmamaps for RX list */
250 for (i
= 0; i
< BFE_RX_LIST_CNT
; i
++) {
251 error
= bus_dmamap_create(sc
->bfe_tag
, 0,
252 &sc
->bfe_rx_ring
[i
].bfe_map
);
255 device_printf(dev
, "cannot create DMA map for RX\n");
259 rx_pos
= BFE_RX_LIST_CNT
;
261 /* pre allocate dmamaps for TX list */
262 for (i
= 0; i
< BFE_TX_LIST_CNT
; i
++) {
263 error
= bus_dmamap_create(sc
->bfe_tag
, 0,
264 &sc
->bfe_tx_ring
[i
].bfe_map
);
267 device_printf(dev
, "cannot create DMA map for TX\n");
272 /* Alloc dma for rx ring */
273 error
= bus_dmamem_alloc(sc
->bfe_rx_tag
, (void *)&sc
->bfe_rx_list
,
274 BUS_DMA_WAITOK
| BUS_DMA_ZERO
,
277 device_printf(dev
, "cannot allocate DMA mem for RX\n");
281 error
= bus_dmamap_load(sc
->bfe_rx_tag
, sc
->bfe_rx_map
,
282 sc
->bfe_rx_list
, sizeof(struct bfe_desc
),
283 bfe_dma_map
, &sc
->bfe_rx_dma
, BUS_DMA_WAITOK
);
285 device_printf(dev
, "cannot load DMA map for RX\n");
289 bus_dmamap_sync(sc
->bfe_rx_tag
, sc
->bfe_rx_map
, BUS_DMASYNC_PREWRITE
);
291 /* Alloc dma for tx ring */
292 error
= bus_dmamem_alloc(sc
->bfe_tx_tag
, (void *)&sc
->bfe_tx_list
,
293 BUS_DMA_WAITOK
| BUS_DMA_ZERO
,
296 device_printf(dev
, "cannot allocate DMA mem for TX\n");
300 error
= bus_dmamap_load(sc
->bfe_tx_tag
, sc
->bfe_tx_map
,
301 sc
->bfe_tx_list
, sizeof(struct bfe_desc
),
302 bfe_dma_map
, &sc
->bfe_tx_dma
, BUS_DMA_WAITOK
);
304 device_printf(dev
, "cannot load DMA map for TX\n");
308 bus_dmamap_sync(sc
->bfe_tx_tag
, sc
->bfe_tx_map
, BUS_DMASYNC_PREWRITE
);
313 for (i
= 0; i
< rx_pos
; ++i
)
314 bus_dmamap_destroy(sc
->bfe_tag
, sc
->bfe_rx_ring
[i
].bfe_map
);
315 for (i
= 0; i
< tx_pos
; ++i
)
316 bus_dmamap_destroy(sc
->bfe_tag
, sc
->bfe_tx_ring
[i
].bfe_map
);
318 bus_dma_tag_destroy(sc
->bfe_tag
);
324 bfe_attach(device_t dev
)
327 struct bfe_softc
*sc
;
330 sc
= device_get_softc(dev
);
333 callout_init(&sc
->bfe_stat_timer
);
337 * Handle power management nonsense.
339 if (pci_get_powerstate(dev
) != PCI_POWERSTATE_D0
) {
340 uint32_t membase
, irq
;
342 /* Save important PCI config data. */
343 membase
= pci_read_config(dev
, BFE_PCI_MEMLO
, 4);
344 irq
= pci_read_config(dev
, BFE_PCI_INTLINE
, 4);
346 /* Reset the power state. */
347 device_printf(dev
, "chip is in D%d power mode"
348 " -- setting to D0\n", pci_get_powerstate(dev
));
350 pci_set_powerstate(dev
, PCI_POWERSTATE_D0
);
352 /* Restore PCI config data. */
353 pci_write_config(dev
, BFE_PCI_MEMLO
, membase
, 4);
354 pci_write_config(dev
, BFE_PCI_INTLINE
, irq
, 4);
356 #endif /* !BURN_BRIDGE */
359 * Map control/status registers.
361 pci_enable_busmaster(dev
);
364 sc
->bfe_res
= bus_alloc_resource_any(dev
, SYS_RES_MEMORY
, &rid
,
366 if (sc
->bfe_res
== NULL
) {
367 device_printf(dev
, "couldn't map memory\n");
371 sc
->bfe_btag
= rman_get_bustag(sc
->bfe_res
);
372 sc
->bfe_bhandle
= rman_get_bushandle(sc
->bfe_res
);
374 /* Allocate interrupt */
377 sc
->bfe_irq
= bus_alloc_resource_any(dev
, SYS_RES_IRQ
, &rid
,
378 RF_SHAREABLE
| RF_ACTIVE
);
379 if (sc
->bfe_irq
== NULL
) {
380 device_printf(dev
, "couldn't map interrupt\n");
385 error
= bfe_dma_alloc(dev
);
387 device_printf(dev
, "failed to allocate DMA resources\n");
391 /* Set up ifnet structure */
392 ifp
= &sc
->arpcom
.ac_if
;
394 if_initname(ifp
, device_get_name(dev
), device_get_unit(dev
));
395 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
396 ifp
->if_ioctl
= bfe_ioctl
;
397 ifp
->if_start
= bfe_start
;
398 ifp
->if_watchdog
= bfe_watchdog
;
399 ifp
->if_init
= bfe_init
;
400 ifp
->if_mtu
= ETHERMTU
;
401 ifp
->if_baudrate
= 100000000;
402 ifp
->if_capabilities
|= IFCAP_VLAN_MTU
;
403 ifp
->if_capenable
|= IFCAP_VLAN_MTU
;
404 ifp
->if_hdrlen
= sizeof(struct ether_vlan_header
);
405 ifq_set_maxlen(&ifp
->if_snd
, BFE_TX_QLEN
);
406 ifq_set_ready(&ifp
->if_snd
);
410 /* Reset the chip and turn on the PHY */
413 if (mii_phy_probe(dev
, &sc
->bfe_miibus
,
414 bfe_ifmedia_upd
, bfe_ifmedia_sts
)) {
415 device_printf(dev
, "MII without any PHY!\n");
420 ether_ifattach(ifp
, sc
->arpcom
.ac_enaddr
, NULL
);
423 * Hook interrupt last to avoid having to lock softc
425 error
= bus_setup_intr(dev
, sc
->bfe_irq
, INTR_NETSAFE
,
426 bfe_intr
, sc
, &sc
->bfe_intrhand
,
427 sc
->arpcom
.ac_if
.if_serializer
);
431 device_printf(dev
, "couldn't set up irq\n");
435 ifp
->if_cpuid
= ithread_cpuid(rman_get_start(sc
->bfe_irq
));
436 KKASSERT(ifp
->if_cpuid
>= 0 && ifp
->if_cpuid
< ncpus
);
444 bfe_detach(device_t dev
)
446 struct bfe_softc
*sc
= device_get_softc(dev
);
447 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
449 if (device_is_attached(dev
)) {
450 lwkt_serialize_enter(ifp
->if_serializer
);
453 bus_teardown_intr(dev
, sc
->bfe_irq
, sc
->bfe_intrhand
);
454 lwkt_serialize_exit(ifp
->if_serializer
);
458 if (sc
->bfe_miibus
!= NULL
)
459 device_delete_child(dev
, sc
->bfe_miibus
);
460 bus_generic_detach(dev
);
462 if (sc
->bfe_irq
!= NULL
)
463 bus_release_resource(dev
, SYS_RES_IRQ
, 0, sc
->bfe_irq
);
465 if (sc
->bfe_res
!= NULL
) {
466 bus_release_resource(dev
, SYS_RES_MEMORY
, BFE_PCI_MEMLO
,
475 * Stop all chip I/O so that the kernel's probe routines don't
476 * get confused by errant DMAs when rebooting.
479 bfe_shutdown(device_t dev
)
481 struct bfe_softc
*sc
= device_get_softc(dev
);
482 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
484 lwkt_serialize_enter(ifp
->if_serializer
);
486 lwkt_serialize_exit(ifp
->if_serializer
);
490 bfe_miibus_readreg(device_t dev
, int phy
, int reg
)
492 struct bfe_softc
*sc
;
495 sc
= device_get_softc(dev
);
496 if (phy
!= sc
->bfe_phyaddr
)
498 bfe_readphy(sc
, reg
, &ret
);
504 bfe_miibus_writereg(device_t dev
, int phy
, int reg
, int val
)
506 struct bfe_softc
*sc
;
508 sc
= device_get_softc(dev
);
509 if (phy
!= sc
->bfe_phyaddr
)
511 bfe_writephy(sc
, reg
, val
);
517 bfe_miibus_statchg(device_t dev
)
523 bfe_tx_ring_free(struct bfe_softc
*sc
)
527 for (i
= 0; i
< BFE_TX_LIST_CNT
; i
++) {
528 bus_dmamap_unload(sc
->bfe_tag
,
529 sc
->bfe_tx_ring
[i
].bfe_map
);
530 if (sc
->bfe_tx_ring
[i
].bfe_mbuf
!= NULL
) {
531 m_freem(sc
->bfe_tx_ring
[i
].bfe_mbuf
);
532 sc
->bfe_tx_ring
[i
].bfe_mbuf
= NULL
;
535 bzero(sc
->bfe_tx_list
, BFE_TX_LIST_SIZE
);
536 bus_dmamap_sync(sc
->bfe_tx_tag
, sc
->bfe_tx_map
, BUS_DMASYNC_PREWRITE
);
540 bfe_rx_ring_free(struct bfe_softc
*sc
)
544 for (i
= 0; i
< BFE_RX_LIST_CNT
; i
++) {
545 if (sc
->bfe_rx_ring
[i
].bfe_mbuf
!= NULL
) {
546 bus_dmamap_unload(sc
->bfe_tag
,
547 sc
->bfe_rx_ring
[i
].bfe_map
);
548 m_freem(sc
->bfe_rx_ring
[i
].bfe_mbuf
);
549 sc
->bfe_rx_ring
[i
].bfe_mbuf
= NULL
;
552 bzero(sc
->bfe_rx_list
, BFE_RX_LIST_SIZE
);
553 bus_dmamap_sync(sc
->bfe_rx_tag
, sc
->bfe_rx_map
, BUS_DMASYNC_PREWRITE
);
557 bfe_list_rx_init(struct bfe_softc
*sc
)
561 for (i
= 0; i
< BFE_RX_LIST_CNT
; i
++)
562 if (bfe_list_newbuf(sc
, i
, NULL
) == ENOBUFS
)
565 bus_dmamap_sync(sc
->bfe_rx_tag
, sc
->bfe_rx_map
, BUS_DMASYNC_PREWRITE
);
566 CSR_WRITE_4(sc
, BFE_DMARX_PTR
, (i
* sizeof(struct bfe_desc
)));
574 bfe_list_newbuf(struct bfe_softc
*sc
, int c
, struct mbuf
*m
)
576 struct bfe_rxheader
*rx_header
;
581 if ((c
< 0) || (c
>= BFE_RX_LIST_CNT
))
585 m
= m_getcl(MB_DONTWAIT
, MT_DATA
, M_PKTHDR
);
588 m
->m_len
= m
->m_pkthdr
.len
= MCLBYTES
;
591 m
->m_data
= m
->m_ext
.ext_buf
;
593 rx_header
= mtod(m
, struct bfe_rxheader
*);
595 rx_header
->flags
= 0;
597 /* Map the mbuf into DMA */
599 d
= &sc
->bfe_rx_list
[c
];
600 r
= &sc
->bfe_rx_ring
[c
];
602 bus_dmamap_load(sc
->bfe_tag
, r
->bfe_map
, mtod(m
, void *),
603 MCLBYTES
, bfe_dma_map_desc
, d
, BUS_DMA_NOWAIT
);
604 bus_dmamap_sync(sc
->bfe_tag
, r
->bfe_map
, BUS_DMASYNC_PREWRITE
);
606 ctrl
= ETHER_MAX_LEN
+ 32;
608 if(c
== BFE_RX_LIST_CNT
- 1)
609 ctrl
|= BFE_DESC_EOT
;
613 bus_dmamap_sync(sc
->bfe_rx_tag
, sc
->bfe_rx_map
, BUS_DMASYNC_PREWRITE
);
618 bfe_get_config(struct bfe_softc
*sc
)
622 bfe_read_eeprom(sc
, eeprom
);
624 sc
->arpcom
.ac_enaddr
[0] = eeprom
[79];
625 sc
->arpcom
.ac_enaddr
[1] = eeprom
[78];
626 sc
->arpcom
.ac_enaddr
[2] = eeprom
[81];
627 sc
->arpcom
.ac_enaddr
[3] = eeprom
[80];
628 sc
->arpcom
.ac_enaddr
[4] = eeprom
[83];
629 sc
->arpcom
.ac_enaddr
[5] = eeprom
[82];
631 sc
->bfe_phyaddr
= eeprom
[90] & 0x1f;
632 sc
->bfe_mdc_port
= (eeprom
[90] >> 14) & 0x1;
634 sc
->bfe_core_unit
= 0;
635 sc
->bfe_dma_offset
= BFE_PCI_DMA
;
639 bfe_pci_setup(struct bfe_softc
*sc
, uint32_t cores
)
641 uint32_t bar_orig
, pci_rev
, val
;
643 bar_orig
= pci_read_config(sc
->bfe_dev
, BFE_BAR0_WIN
, 4);
644 pci_write_config(sc
->bfe_dev
, BFE_BAR0_WIN
, BFE_REG_PCI
, 4);
645 pci_rev
= CSR_READ_4(sc
, BFE_SBIDHIGH
) & BFE_RC_MASK
;
647 val
= CSR_READ_4(sc
, BFE_SBINTVEC
);
649 CSR_WRITE_4(sc
, BFE_SBINTVEC
, val
);
651 val
= CSR_READ_4(sc
, BFE_SSB_PCI_TRANS_2
);
652 val
|= BFE_SSB_PCI_PREF
| BFE_SSB_PCI_BURST
;
653 CSR_WRITE_4(sc
, BFE_SSB_PCI_TRANS_2
, val
);
655 pci_write_config(sc
->bfe_dev
, BFE_BAR0_WIN
, bar_orig
, 4);
659 bfe_clear_stats(struct bfe_softc
*sc
)
663 CSR_WRITE_4(sc
, BFE_MIB_CTRL
, BFE_MIB_CLR_ON_READ
);
664 for (reg
= BFE_TX_GOOD_O
; reg
<= BFE_TX_PAUSE
; reg
+= 4)
666 for (reg
= BFE_RX_GOOD_O
; reg
<= BFE_RX_NPAUSE
; reg
+= 4)
671 bfe_resetphy(struct bfe_softc
*sc
)
675 bfe_writephy(sc
, 0, BMCR_RESET
);
677 bfe_readphy(sc
, 0, &val
);
678 if (val
& BMCR_RESET
) {
679 if_printf(&sc
->arpcom
.ac_if
,
680 "PHY Reset would not complete.\n");
687 bfe_chip_halt(struct bfe_softc
*sc
)
689 /* disable interrupts - not that it actually does..*/
690 CSR_WRITE_4(sc
, BFE_IMASK
, 0);
691 CSR_READ_4(sc
, BFE_IMASK
);
693 CSR_WRITE_4(sc
, BFE_ENET_CTRL
, BFE_ENET_DISABLE
);
694 bfe_wait_bit(sc
, BFE_ENET_CTRL
, BFE_ENET_DISABLE
, 200, 1);
696 CSR_WRITE_4(sc
, BFE_DMARX_CTRL
, 0);
697 CSR_WRITE_4(sc
, BFE_DMATX_CTRL
, 0);
702 bfe_chip_reset(struct bfe_softc
*sc
)
706 /* Set the interrupt vector for the enet core */
707 bfe_pci_setup(sc
, BFE_INTVEC_ENET0
);
710 val
= CSR_READ_4(sc
, BFE_SBTMSLOW
) & (BFE_RESET
| BFE_REJECT
| BFE_CLOCK
);
711 if (val
== BFE_CLOCK
) {
712 /* It is, so shut it down */
713 CSR_WRITE_4(sc
, BFE_RCV_LAZY
, 0);
714 CSR_WRITE_4(sc
, BFE_ENET_CTRL
, BFE_ENET_DISABLE
);
715 bfe_wait_bit(sc
, BFE_ENET_CTRL
, BFE_ENET_DISABLE
, 100, 1);
716 CSR_WRITE_4(sc
, BFE_DMATX_CTRL
, 0);
717 sc
->bfe_tx_cnt
= sc
->bfe_tx_prod
= sc
->bfe_tx_cons
= 0;
718 if (CSR_READ_4(sc
, BFE_DMARX_STAT
) & BFE_STAT_EMASK
)
719 bfe_wait_bit(sc
, BFE_DMARX_STAT
, BFE_STAT_SIDLE
, 100, 0);
720 CSR_WRITE_4(sc
, BFE_DMARX_CTRL
, 0);
721 sc
->bfe_rx_prod
= sc
->bfe_rx_cons
= 0;
728 * We want the phy registers to be accessible even when
729 * the driver is "downed" so initialize MDC preamble, frequency,
730 * and whether internal or external phy here.
733 /* 4402 has 62.5Mhz SB clock and internal phy */
734 CSR_WRITE_4(sc
, BFE_MDIO_CTRL
, 0x8d);
736 /* Internal or external PHY? */
737 val
= CSR_READ_4(sc
, BFE_DEVCTRL
);
738 if (!(val
& BFE_IPP
))
739 CSR_WRITE_4(sc
, BFE_ENET_CTRL
, BFE_ENET_EPSEL
);
740 else if (CSR_READ_4(sc
, BFE_DEVCTRL
) & BFE_EPR
) {
741 BFE_AND(sc
, BFE_DEVCTRL
, ~BFE_EPR
);
745 /* Enable CRC32 generation and set proper LED modes */
746 BFE_OR(sc
, BFE_MAC_CTRL
, BFE_CTRL_CRC32_ENAB
| BFE_CTRL_LED
);
748 /* Reset or clear powerdown control bit */
749 BFE_AND(sc
, BFE_MAC_CTRL
, ~BFE_CTRL_PDOWN
);
751 CSR_WRITE_4(sc
, BFE_RCV_LAZY
, ((1 << BFE_LAZY_FC_SHIFT
) &
755 * We don't want lazy interrupts, so just send them at the end of a
758 BFE_OR(sc
, BFE_RCV_LAZY
, 0);
760 /* Set max lengths, accounting for VLAN tags */
761 CSR_WRITE_4(sc
, BFE_RXMAXLEN
, ETHER_MAX_LEN
+32);
762 CSR_WRITE_4(sc
, BFE_TXMAXLEN
, ETHER_MAX_LEN
+32);
764 /* Set watermark XXX - magic */
765 CSR_WRITE_4(sc
, BFE_TX_WMARK
, 56);
768 * Initialise DMA channels - not forgetting dma addresses need to be
769 * added to BFE_PCI_DMA
771 CSR_WRITE_4(sc
, BFE_DMATX_CTRL
, BFE_TX_CTRL_ENABLE
);
772 CSR_WRITE_4(sc
, BFE_DMATX_ADDR
, sc
->bfe_tx_dma
+ BFE_PCI_DMA
);
774 CSR_WRITE_4(sc
, BFE_DMARX_CTRL
, (BFE_RX_OFFSET
<< BFE_RX_CTRL_ROSHIFT
) |
776 CSR_WRITE_4(sc
, BFE_DMARX_ADDR
, sc
->bfe_rx_dma
+ BFE_PCI_DMA
);
783 bfe_core_disable(struct bfe_softc
*sc
)
785 if ((CSR_READ_4(sc
, BFE_SBTMSLOW
)) & BFE_RESET
)
789 * Set reject, wait for it set, then wait for the core to stop being busy
790 * Then set reset and reject and enable the clocks
792 CSR_WRITE_4(sc
, BFE_SBTMSLOW
, (BFE_REJECT
| BFE_CLOCK
));
793 bfe_wait_bit(sc
, BFE_SBTMSLOW
, BFE_REJECT
, 1000, 0);
794 bfe_wait_bit(sc
, BFE_SBTMSHIGH
, BFE_BUSY
, 1000, 1);
795 CSR_WRITE_4(sc
, BFE_SBTMSLOW
, (BFE_FGC
| BFE_CLOCK
| BFE_REJECT
|
797 CSR_READ_4(sc
, BFE_SBTMSLOW
);
799 /* Leave reset and reject set */
800 CSR_WRITE_4(sc
, BFE_SBTMSLOW
, (BFE_REJECT
| BFE_RESET
));
805 bfe_core_reset(struct bfe_softc
*sc
)
809 /* Disable the core */
810 bfe_core_disable(sc
);
812 /* and bring it back up */
813 CSR_WRITE_4(sc
, BFE_SBTMSLOW
, (BFE_RESET
| BFE_CLOCK
| BFE_FGC
));
814 CSR_READ_4(sc
, BFE_SBTMSLOW
);
817 /* Chip bug, clear SERR, IB and TO if they are set. */
818 if (CSR_READ_4(sc
, BFE_SBTMSHIGH
) & BFE_SERR
)
819 CSR_WRITE_4(sc
, BFE_SBTMSHIGH
, 0);
820 val
= CSR_READ_4(sc
, BFE_SBIMSTATE
);
821 if (val
& (BFE_IBE
| BFE_TO
))
822 CSR_WRITE_4(sc
, BFE_SBIMSTATE
, val
& ~(BFE_IBE
| BFE_TO
));
824 /* Clear reset and allow it to move through the core */
825 CSR_WRITE_4(sc
, BFE_SBTMSLOW
, (BFE_CLOCK
| BFE_FGC
));
826 CSR_READ_4(sc
, BFE_SBTMSLOW
);
829 /* Leave the clock set */
830 CSR_WRITE_4(sc
, BFE_SBTMSLOW
, BFE_CLOCK
);
831 CSR_READ_4(sc
, BFE_SBTMSLOW
);
836 bfe_cam_write(struct bfe_softc
*sc
, u_char
*data
, int index
)
840 val
= ((uint32_t) data
[2]) << 24;
841 val
|= ((uint32_t) data
[3]) << 16;
842 val
|= ((uint32_t) data
[4]) << 8;
843 val
|= ((uint32_t) data
[5]);
844 CSR_WRITE_4(sc
, BFE_CAM_DATA_LO
, val
);
845 val
= (BFE_CAM_HI_VALID
|
846 (((uint32_t) data
[0]) << 8) |
847 (((uint32_t) data
[1])));
848 CSR_WRITE_4(sc
, BFE_CAM_DATA_HI
, val
);
849 CSR_WRITE_4(sc
, BFE_CAM_CTRL
, (BFE_CAM_WRITE
|
850 ((uint32_t)index
<< BFE_CAM_INDEX_SHIFT
)));
851 bfe_wait_bit(sc
, BFE_CAM_CTRL
, BFE_CAM_BUSY
, 10000, 1);
855 bfe_set_rx_mode(struct bfe_softc
*sc
)
857 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
858 struct ifmultiaddr
*ifma
;
862 val
= CSR_READ_4(sc
, BFE_RXCONF
);
864 if (ifp
->if_flags
& IFF_PROMISC
)
865 val
|= BFE_RXCONF_PROMISC
;
867 val
&= ~BFE_RXCONF_PROMISC
;
869 if (ifp
->if_flags
& IFF_BROADCAST
)
870 val
&= ~BFE_RXCONF_DBCAST
;
872 val
|= BFE_RXCONF_DBCAST
;
875 CSR_WRITE_4(sc
, BFE_CAM_CTRL
, 0);
876 bfe_cam_write(sc
, sc
->arpcom
.ac_enaddr
, i
++);
878 if (ifp
->if_flags
& IFF_ALLMULTI
) {
879 val
|= BFE_RXCONF_ALLMULTI
;
881 val
&= ~BFE_RXCONF_ALLMULTI
;
882 LIST_FOREACH(ifma
, &ifp
->if_multiaddrs
, ifma_link
) {
883 if (ifma
->ifma_addr
->sa_family
!= AF_LINK
)
886 LLADDR((struct sockaddr_dl
*)ifma
->ifma_addr
), i
++);
890 CSR_WRITE_4(sc
, BFE_RXCONF
, val
);
891 BFE_OR(sc
, BFE_CAM_CTRL
, BFE_CAM_ENABLE
);
895 bfe_dma_map(void *arg
, bus_dma_segment_t
*segs
, int nseg
, int error
)
900 *ptr
= segs
->ds_addr
;
904 bfe_dma_map_desc(void *arg
, bus_dma_segment_t
*segs
, int nseg
, int error
)
909 /* The chip needs all addresses to be added to BFE_PCI_DMA */
910 d
->bfe_addr
= segs
->ds_addr
+ BFE_PCI_DMA
;
914 bfe_dma_free(struct bfe_softc
*sc
)
916 if (sc
->bfe_tx_tag
!= NULL
) {
917 bus_dmamap_unload(sc
->bfe_tx_tag
, sc
->bfe_tx_map
);
918 if (sc
->bfe_tx_list
!= NULL
) {
919 bus_dmamem_free(sc
->bfe_tx_tag
, sc
->bfe_tx_list
,
921 sc
->bfe_tx_list
= NULL
;
923 bus_dma_tag_destroy(sc
->bfe_tx_tag
);
924 sc
->bfe_tx_tag
= NULL
;
927 if (sc
->bfe_rx_tag
!= NULL
) {
928 bus_dmamap_unload(sc
->bfe_rx_tag
, sc
->bfe_rx_map
);
929 if (sc
->bfe_rx_list
!= NULL
) {
930 bus_dmamem_free(sc
->bfe_rx_tag
, sc
->bfe_rx_list
,
932 sc
->bfe_rx_list
= NULL
;
934 bus_dma_tag_destroy(sc
->bfe_rx_tag
);
935 sc
->bfe_rx_tag
= NULL
;
938 if (sc
->bfe_tag
!= NULL
) {
941 for (i
= 0; i
< BFE_TX_LIST_CNT
; i
++) {
942 bus_dmamap_destroy(sc
->bfe_tag
,
943 sc
->bfe_tx_ring
[i
].bfe_map
);
945 for (i
= 0; i
< BFE_RX_LIST_CNT
; i
++) {
946 bus_dmamap_destroy(sc
->bfe_tag
,
947 sc
->bfe_rx_ring
[i
].bfe_map
);
950 bus_dma_tag_destroy(sc
->bfe_tag
);
954 if (sc
->bfe_parent_tag
!= NULL
) {
955 bus_dma_tag_destroy(sc
->bfe_parent_tag
);
956 sc
->bfe_parent_tag
= NULL
;
961 bfe_read_eeprom(struct bfe_softc
*sc
, uint8_t *data
)
964 uint16_t *ptr
= (uint16_t *)data
;
966 for (i
= 0; i
< 128; i
+= 2)
967 ptr
[i
/2] = CSR_READ_4(sc
, 4096 + i
);
971 bfe_wait_bit(struct bfe_softc
*sc
, uint32_t reg
, uint32_t bit
,
972 u_long timeout
, const int clear
)
976 for (i
= 0; i
< timeout
; i
++) {
977 uint32_t val
= CSR_READ_4(sc
, reg
);
979 if (clear
&& !(val
& bit
))
981 if (!clear
&& (val
& bit
))
986 if_printf(&sc
->arpcom
.ac_if
,
987 "BUG! Timeout waiting for bit %08x of register "
988 "%x to %s.\n", bit
, reg
,
989 (clear
? "clear" : "set"));
996 bfe_readphy(struct bfe_softc
*sc
, uint32_t reg
, uint32_t *val
)
1001 CSR_WRITE_4(sc
, BFE_EMAC_ISTAT
, BFE_EMAC_INT_MII
);
1002 CSR_WRITE_4(sc
, BFE_MDIO_DATA
, (BFE_MDIO_SB_START
|
1003 (BFE_MDIO_OP_READ
<< BFE_MDIO_OP_SHIFT
) |
1004 (sc
->bfe_phyaddr
<< BFE_MDIO_PMD_SHIFT
) |
1005 (reg
<< BFE_MDIO_RA_SHIFT
) |
1006 (BFE_MDIO_TA_VALID
<< BFE_MDIO_TA_SHIFT
)));
1007 err
= bfe_wait_bit(sc
, BFE_EMAC_ISTAT
, BFE_EMAC_INT_MII
, 100, 0);
1008 *val
= CSR_READ_4(sc
, BFE_MDIO_DATA
) & BFE_MDIO_DATA_DATA
;
1013 bfe_writephy(struct bfe_softc
*sc
, uint32_t reg
, uint32_t val
)
1017 CSR_WRITE_4(sc
, BFE_EMAC_ISTAT
, BFE_EMAC_INT_MII
);
1018 CSR_WRITE_4(sc
, BFE_MDIO_DATA
, (BFE_MDIO_SB_START
|
1019 (BFE_MDIO_OP_WRITE
<< BFE_MDIO_OP_SHIFT
) |
1020 (sc
->bfe_phyaddr
<< BFE_MDIO_PMD_SHIFT
) |
1021 (reg
<< BFE_MDIO_RA_SHIFT
) |
1022 (BFE_MDIO_TA_VALID
<< BFE_MDIO_TA_SHIFT
) |
1023 (val
& BFE_MDIO_DATA_DATA
)));
1024 status
= bfe_wait_bit(sc
, BFE_EMAC_ISTAT
, BFE_EMAC_INT_MII
, 100, 0);
1030 * XXX - I think this is handled by the PHY driver, but it can't hurt to do it
1034 bfe_setupphy(struct bfe_softc
*sc
)
1038 /* Enable activity LED */
1039 bfe_readphy(sc
, 26, &val
);
1040 bfe_writephy(sc
, 26, val
& 0x7fff);
1041 bfe_readphy(sc
, 26, &val
);
1043 /* Enable traffic meter LED mode */
1044 bfe_readphy(sc
, 27, &val
);
1045 bfe_writephy(sc
, 27, val
| (1 << 6));
1051 bfe_stats_update(struct bfe_softc
*sc
)
1056 val
= &sc
->bfe_hwstats
.tx_good_octets
;
1057 for (reg
= BFE_TX_GOOD_O
; reg
<= BFE_TX_PAUSE
; reg
+= 4)
1058 *val
++ += CSR_READ_4(sc
, reg
);
1059 val
= &sc
->bfe_hwstats
.rx_good_octets
;
1060 for (reg
= BFE_RX_GOOD_O
; reg
<= BFE_RX_NPAUSE
; reg
+= 4)
1061 *val
++ += CSR_READ_4(sc
, reg
);
1065 bfe_txeof(struct bfe_softc
*sc
)
1067 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1068 uint32_t i
, chipidx
;
1070 chipidx
= CSR_READ_4(sc
, BFE_DMATX_STAT
) & BFE_STAT_CDMASK
;
1071 chipidx
/= sizeof(struct bfe_desc
);
1073 i
= sc
->bfe_tx_cons
;
1074 /* Go through the mbufs and free those that have been transmitted */
1075 while (i
!= chipidx
) {
1076 struct bfe_data
*r
= &sc
->bfe_tx_ring
[i
];
1078 bus_dmamap_unload(sc
->bfe_tag
, r
->bfe_map
);
1079 if (r
->bfe_mbuf
!= NULL
) {
1081 m_freem(r
->bfe_mbuf
);
1085 BFE_INC(i
, BFE_TX_LIST_CNT
);
1088 if (i
!= sc
->bfe_tx_cons
) {
1089 /* we freed up some mbufs */
1090 sc
->bfe_tx_cons
= i
;
1091 ifp
->if_flags
&= ~IFF_OACTIVE
;
1093 if (sc
->bfe_tx_cnt
== 0)
1099 /* Pass a received packet up the stack */
1101 bfe_rxeof(struct bfe_softc
*sc
)
1103 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1105 struct bfe_rxheader
*rxheader
;
1107 uint32_t cons
, status
, current
, len
, flags
;
1109 cons
= sc
->bfe_rx_cons
;
1110 status
= CSR_READ_4(sc
, BFE_DMARX_STAT
);
1111 current
= (status
& BFE_STAT_CDMASK
) / sizeof(struct bfe_desc
);
1113 while (current
!= cons
) {
1114 r
= &sc
->bfe_rx_ring
[cons
];
1116 rxheader
= mtod(m
, struct bfe_rxheader
*);
1117 bus_dmamap_sync(sc
->bfe_tag
, r
->bfe_map
, BUS_DMASYNC_POSTREAD
);
1118 len
= rxheader
->len
;
1121 bus_dmamap_unload(sc
->bfe_tag
, r
->bfe_map
);
1122 flags
= rxheader
->flags
;
1124 len
-= ETHER_CRC_LEN
;
1126 /* flag an error and try again */
1127 if ((len
> ETHER_MAX_LEN
+32) || (flags
& BFE_RX_FLAG_ERRORS
)) {
1129 if (flags
& BFE_RX_FLAG_SERR
)
1130 ifp
->if_collisions
++;
1131 bfe_list_newbuf(sc
, cons
, m
);
1132 BFE_INC(cons
, BFE_RX_LIST_CNT
);
1136 /* Go past the rx header */
1137 if (bfe_list_newbuf(sc
, cons
, NULL
) != 0) {
1138 bfe_list_newbuf(sc
, cons
, m
);
1139 BFE_INC(cons
, BFE_RX_LIST_CNT
);
1144 m_adj(m
, BFE_RX_OFFSET
);
1145 m
->m_len
= m
->m_pkthdr
.len
= len
;
1148 m
->m_pkthdr
.rcvif
= ifp
;
1150 ifp
->if_input(ifp
, m
);
1151 BFE_INC(cons
, BFE_RX_LIST_CNT
);
1153 sc
->bfe_rx_cons
= cons
;
1159 struct bfe_softc
*sc
= xsc
;
1160 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1161 uint32_t istat
, imask
, flag
;
1163 istat
= CSR_READ_4(sc
, BFE_ISTAT
);
1164 imask
= CSR_READ_4(sc
, BFE_IMASK
);
1167 * Defer unsolicited interrupts - This is necessary because setting the
1168 * chips interrupt mask register to 0 doesn't actually stop the
1172 CSR_WRITE_4(sc
, BFE_ISTAT
, istat
);
1173 CSR_READ_4(sc
, BFE_ISTAT
);
1175 /* not expecting this interrupt, disregard it */
1180 if (istat
& BFE_ISTAT_ERRORS
) {
1181 flag
= CSR_READ_4(sc
, BFE_DMATX_STAT
);
1182 if (flag
& BFE_STAT_EMASK
)
1185 flag
= CSR_READ_4(sc
, BFE_DMARX_STAT
);
1186 if (flag
& BFE_RX_FLAG_ERRORS
)
1189 ifp
->if_flags
&= ~IFF_RUNNING
;
1193 /* A packet was received */
1194 if (istat
& BFE_ISTAT_RX
)
1197 /* A packet was sent */
1198 if (istat
& BFE_ISTAT_TX
)
1201 /* We have packets pending, fire them out */
1202 if ((ifp
->if_flags
& IFF_RUNNING
) && !ifq_is_empty(&ifp
->if_snd
))
1207 bfe_encap(struct bfe_softc
*sc
, struct mbuf
**m_head
, uint32_t *txidx
)
1209 struct bfe_desc
*d
= NULL
;
1210 struct bfe_data
*r
= NULL
;
1212 uint32_t frag
, cur
, cnt
= 0;
1213 int error
, chainlen
= 0;
1215 KKASSERT(BFE_TX_LIST_CNT
>= (2 + sc
->bfe_tx_cnt
));
1218 * Count the number of frags in this chain to see if
1219 * we need to m_defrag. Since the descriptor list is shared
1220 * by all packets, we'll m_defrag long chains so that they
1221 * do not use up the entire list, even if they would fit.
1223 for (m
= *m_head
; m
!= NULL
; m
= m
->m_next
)
1226 if (chainlen
> (BFE_TX_LIST_CNT
/ 4) ||
1227 BFE_TX_LIST_CNT
< (2 + chainlen
+ sc
->bfe_tx_cnt
)) {
1228 m
= m_defrag(*m_head
, MB_DONTWAIT
);
1237 * Start packing the mbufs in this chain into
1238 * the fragment pointers. Stop when we run out
1239 * of fragments or hit the end of the mbuf chain.
1241 cur
= frag
= *txidx
;
1244 for (m
= *m_head
; m
!= NULL
; m
= m
->m_next
) {
1245 if (m
->m_len
!= 0) {
1246 KKASSERT(BFE_TX_LIST_CNT
>= (2 + sc
->bfe_tx_cnt
+ cnt
));
1248 d
= &sc
->bfe_tx_list
[cur
];
1249 r
= &sc
->bfe_tx_ring
[cur
];
1250 d
->bfe_ctrl
= BFE_DESC_LEN
& m
->m_len
;
1251 /* always intterupt on completion */
1252 d
->bfe_ctrl
|= BFE_DESC_IOC
;
1254 /* Set start of frame */
1255 d
->bfe_ctrl
|= BFE_DESC_SOF
;
1257 if (cur
== BFE_TX_LIST_CNT
- 1) {
1259 * Tell the chip to wrap to the start of the
1262 d
->bfe_ctrl
|= BFE_DESC_EOT
;
1265 error
= bus_dmamap_load(sc
->bfe_tag
, r
->bfe_map
,
1266 mtod(m
, void *), m
->m_len
,
1267 bfe_dma_map_desc
, d
,
1270 /* XXX This should be a fatal error. */
1271 if_printf(&sc
->arpcom
.ac_if
,
1272 "%s bus_dmamap_load failed: %d",
1278 bus_dmamap_sync(sc
->bfe_tag
, r
->bfe_map
,
1279 BUS_DMASYNC_PREWRITE
);
1282 BFE_INC(cur
, BFE_TX_LIST_CNT
);
1287 sc
->bfe_tx_list
[frag
].bfe_ctrl
|= BFE_DESC_EOF
;
1288 sc
->bfe_tx_ring
[frag
].bfe_mbuf
= *m_head
;
1289 bus_dmamap_sync(sc
->bfe_tx_tag
, sc
->bfe_tx_map
, BUS_DMASYNC_PREWRITE
);
1292 sc
->bfe_tx_cnt
+= cnt
;
1297 * Set up to transmit a packet
1300 bfe_start(struct ifnet
*ifp
)
1302 struct bfe_softc
*sc
= ifp
->if_softc
;
1303 struct mbuf
*m_head
= NULL
;
1304 int idx
, need_trans
;
1307 * Not much point trying to send if the link is down
1308 * or we have nothing to send.
1310 if (!sc
->bfe_link
) {
1311 ifq_purge(&ifp
->if_snd
);
1315 if (ifp
->if_flags
& IFF_OACTIVE
)
1318 idx
= sc
->bfe_tx_prod
;
1321 while (sc
->bfe_tx_ring
[idx
].bfe_mbuf
== NULL
) {
1322 if (BFE_TX_LIST_CNT
< (2 + sc
->bfe_tx_cnt
)) {
1323 ifp
->if_flags
|= IFF_OACTIVE
;
1327 m_head
= ifq_dequeue(&ifp
->if_snd
, NULL
);
1332 * Pack the data into the tx ring. If we don't have
1333 * enough room, let the chip drain the ring.
1335 if (bfe_encap(sc
, &m_head
, &idx
)) {
1336 ifp
->if_flags
|= IFF_OACTIVE
;
1342 * If there's a BPF listener, bounce a copy of this frame
1345 BPF_MTAP(ifp
, m_head
);
1351 sc
->bfe_tx_prod
= idx
;
1352 /* Transmit - twice due to apparent hardware bug */
1353 CSR_WRITE_4(sc
, BFE_DMATX_PTR
, idx
* sizeof(struct bfe_desc
));
1354 CSR_WRITE_4(sc
, BFE_DMATX_PTR
, idx
* sizeof(struct bfe_desc
));
1357 * Set a timeout in case the chip goes out to lunch.
1365 struct bfe_softc
*sc
= (struct bfe_softc
*)xsc
;
1366 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1368 if (ifp
->if_flags
& IFF_RUNNING
)
1374 if (bfe_list_rx_init(sc
) == ENOBUFS
) {
1375 if_printf(ifp
, "bfe_init failed. "
1376 " Not enough memory for list buffers\n");
1381 bfe_set_rx_mode(sc
);
1383 /* Enable the chip and core */
1384 BFE_OR(sc
, BFE_ENET_CTRL
, BFE_ENET_ENABLE
);
1385 /* Enable interrupts */
1386 CSR_WRITE_4(sc
, BFE_IMASK
, BFE_IMASK_DEF
);
1388 bfe_ifmedia_upd(ifp
);
1389 ifp
->if_flags
|= IFF_RUNNING
;
1390 ifp
->if_flags
&= ~IFF_OACTIVE
;
1392 callout_reset(&sc
->bfe_stat_timer
, hz
, bfe_tick
, sc
);
1396 * Set media options.
1399 bfe_ifmedia_upd(struct ifnet
*ifp
)
1401 struct bfe_softc
*sc
= ifp
->if_softc
;
1402 struct mii_data
*mii
;
1404 mii
= device_get_softc(sc
->bfe_miibus
);
1406 if (mii
->mii_instance
) {
1407 struct mii_softc
*miisc
;
1408 for (miisc
= LIST_FIRST(&mii
->mii_phys
); miisc
!= NULL
;
1409 miisc
= LIST_NEXT(miisc
, mii_list
))
1410 mii_phy_reset(miisc
);
1420 * Report current media status.
1423 bfe_ifmedia_sts(struct ifnet
*ifp
, struct ifmediareq
*ifmr
)
1425 struct bfe_softc
*sc
= ifp
->if_softc
;
1426 struct mii_data
*mii
;
1428 mii
= device_get_softc(sc
->bfe_miibus
);
1430 ifmr
->ifm_active
= mii
->mii_media_active
;
1431 ifmr
->ifm_status
= mii
->mii_media_status
;
1435 bfe_ioctl(struct ifnet
*ifp
, u_long command
, caddr_t data
, struct ucred
*cr
)
1437 struct bfe_softc
*sc
= ifp
->if_softc
;
1438 struct ifreq
*ifr
= (struct ifreq
*) data
;
1439 struct mii_data
*mii
;
1444 if (ifp
->if_flags
& IFF_UP
)
1445 if (ifp
->if_flags
& IFF_RUNNING
)
1446 bfe_set_rx_mode(sc
);
1449 else if (ifp
->if_flags
& IFF_RUNNING
)
1454 if (ifp
->if_flags
& IFF_RUNNING
)
1455 bfe_set_rx_mode(sc
);
1459 mii
= device_get_softc(sc
->bfe_miibus
);
1460 error
= ifmedia_ioctl(ifp
, ifr
, &mii
->mii_media
,
1464 error
= ether_ioctl(ifp
, command
, data
);
1471 bfe_watchdog(struct ifnet
*ifp
)
1473 struct bfe_softc
*sc
= ifp
->if_softc
;
1475 if_printf(ifp
, "watchdog timeout -- resetting\n");
1477 ifp
->if_flags
&= ~IFF_RUNNING
;
1486 struct bfe_softc
*sc
= xsc
;
1487 struct mii_data
*mii
;
1488 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1490 mii
= device_get_softc(sc
->bfe_miibus
);
1492 lwkt_serialize_enter(ifp
->if_serializer
);
1494 bfe_stats_update(sc
);
1495 callout_reset(&sc
->bfe_stat_timer
, hz
, bfe_tick
, sc
);
1497 if (sc
->bfe_link
== 0) {
1499 if (!sc
->bfe_link
&& mii
->mii_media_status
& IFM_ACTIVE
&&
1500 IFM_SUBTYPE(mii
->mii_media_active
) != IFM_NONE
) {
1506 lwkt_serialize_exit(ifp
->if_serializer
);
1510 * Stop the adapter and free any mbufs allocated to the
1514 bfe_stop(struct bfe_softc
*sc
)
1516 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1518 callout_stop(&sc
->bfe_stat_timer
);
1521 bfe_tx_ring_free(sc
);
1522 bfe_rx_ring_free(sc
);
1524 ifp
->if_flags
&= ~(IFF_RUNNING
| IFF_OACTIVE
);