2 * Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 * Driver for Attansic Technology Corp. L2 FastEthernet adapter.
27 * This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon.
29 * $FreeBSD: src/sys/dev/ae/if_ae.c,v 1.1.2.3.2.1 2009/04/15 03:14:26 kensmith Exp $
32 #include <sys/param.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
36 #include <sys/interrupt.h>
37 #include <sys/malloc.h>
40 #include <sys/serialize.h>
41 #include <sys/socket.h>
42 #include <sys/sockio.h>
43 #include <sys/sysctl.h>
45 #include <net/ethernet.h>
48 #include <net/if_arp.h>
49 #include <net/if_dl.h>
50 #include <net/if_media.h>
51 #include <net/ifq_var.h>
52 #include <net/vlan/if_vlan_var.h>
53 #include <net/vlan/if_vlan_ether.h>
55 #include <bus/pci/pcireg.h>
56 #include <bus/pci/pcivar.h>
59 #include <dev/netif/mii_layer/miivar.h>
61 #include <dev/netif/ae/if_aereg.h>
62 #include <dev/netif/ae/if_aevar.h>
64 /* "device miibus" required. See GENERIC if you get errors here. */
65 #include "miibus_if.h"
68 * Devices supported by this driver.
70 static const struct ae_dev
{
75 { VENDORID_ATTANSIC
, DEVICEID_ATTANSIC_L2
,
76 "Attansic Technology Corp, L2 Fast Ethernet" },
77 /* Required last entry */
82 static int ae_probe(device_t
);
83 static int ae_attach(device_t
);
84 static int ae_detach(device_t
);
85 static int ae_shutdown(device_t
);
86 static int ae_suspend(device_t
);
87 static int ae_resume(device_t
);
88 static int ae_miibus_readreg(device_t
, int, int);
89 static int ae_miibus_writereg(device_t
, int, int, int);
90 static void ae_miibus_statchg(device_t
);
92 static int ae_mediachange(struct ifnet
*);
93 static void ae_mediastatus(struct ifnet
*, struct ifmediareq
*);
94 static void ae_init(void *);
95 static void ae_start(struct ifnet
*, struct ifaltq_subque
*);
96 static int ae_ioctl(struct ifnet
*, u_long
, caddr_t
, struct ucred
*);
97 static void ae_watchdog(struct ifnet
*);
98 static void ae_stop(struct ae_softc
*);
99 static void ae_tick(void *);
101 static void ae_intr(void *);
102 static void ae_tx_intr(struct ae_softc
*);
103 static void ae_rx_intr(struct ae_softc
*);
104 static int ae_rxeof(struct ae_softc
*, struct ae_rxd
*);
106 static int ae_encap(struct ae_softc
*, struct mbuf
**);
107 static void ae_sysctl_node(struct ae_softc
*);
108 static void ae_phy_reset(struct ae_softc
*);
109 static int ae_reset(struct ae_softc
*);
110 static void ae_pcie_init(struct ae_softc
*);
111 static void ae_get_eaddr(struct ae_softc
*);
112 static void ae_dma_free(struct ae_softc
*);
113 static int ae_dma_alloc(struct ae_softc
*);
114 static void ae_mac_config(struct ae_softc
*);
115 static void ae_stop_rxmac(struct ae_softc
*);
116 static void ae_stop_txmac(struct ae_softc
*);
117 static void ae_rxfilter(struct ae_softc
*);
118 static void ae_rxvlan(struct ae_softc
*);
119 static void ae_update_stats_rx(uint16_t, struct ae_stats
*);
120 static void ae_update_stats_tx(uint16_t, struct ae_stats
*);
121 static void ae_powersave_disable(struct ae_softc
*);
122 static void ae_powersave_enable(struct ae_softc
*);
124 static device_method_t ae_methods
[] = {
125 /* Device interface. */
126 DEVMETHOD(device_probe
, ae_probe
),
127 DEVMETHOD(device_attach
, ae_attach
),
128 DEVMETHOD(device_detach
, ae_detach
),
129 DEVMETHOD(device_shutdown
, ae_shutdown
),
130 DEVMETHOD(device_suspend
, ae_suspend
),
131 DEVMETHOD(device_resume
, ae_resume
),
134 DEVMETHOD(bus_print_child
, bus_generic_print_child
),
135 DEVMETHOD(bus_driver_added
, bus_generic_driver_added
),
138 DEVMETHOD(miibus_readreg
, ae_miibus_readreg
),
139 DEVMETHOD(miibus_writereg
, ae_miibus_writereg
),
140 DEVMETHOD(miibus_statchg
, ae_miibus_statchg
),
144 static driver_t ae_driver
= {
147 sizeof(struct ae_softc
)
150 static devclass_t ae_devclass
;
151 DECLARE_DUMMY_MODULE(if_ae
);
152 MODULE_DEPEND(if_ae
, miibus
, 1, 1, 1);
153 DRIVER_MODULE(if_ae
, pci
, ae_driver
, ae_devclass
, NULL
, NULL
);
154 DRIVER_MODULE(miibus
, ae
, miibus_driver
, miibus_devclass
, NULL
, NULL
);
/* Register access macros. */
#define	AE_WRITE_4(_sc, reg, val) \
	bus_space_write_4((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
#define	AE_WRITE_2(_sc, reg, val) \
	bus_space_write_2((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
#define	AE_WRITE_1(_sc, reg, val) \
	bus_space_write_1((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
#define	AE_READ_4(_sc, reg) \
	bus_space_read_4((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))
#define	AE_READ_2(_sc, reg) \
	bus_space_read_2((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))
#define	AE_READ_1(_sc, reg) \
	bus_space_read_1((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))

/* PHY access through the chip's MDIO engine (PHY address 0). */
#define	AE_PHY_READ(sc, reg) \
	ae_miibus_readreg(sc->ae_dev, 0, reg)
#define	AE_PHY_WRITE(sc, reg, val) \
	ae_miibus_writereg(sc->ae_dev, 0, reg, val)

/* An all-zero or all-one station address is not usable. */
#define	AE_CHECK_EADDR_VALID(eaddr) \
	((eaddr[0] == 0 && eaddr[1] == 0) || \
	(eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))

/* Convert between host VLAN tag layout and descriptor layout. */
#define	AE_RXD_VLAN(vtag) \
	(((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
#define	AE_TXD_VLAN(vtag) \
	(((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))
185 #define STATS_ENTRY(node, desc, field) \
186 { node, desc, offsetof(struct ae_stats, field) }
192 STATS_ENTRY("bcast", "broadcast frames", tx_bcast
),
193 STATS_ENTRY("mcast", "multicast frames", tx_mcast
),
194 STATS_ENTRY("pause", "PAUSE frames", tx_pause
),
195 STATS_ENTRY("control", "control frames", tx_ctrl
),
196 STATS_ENTRY("defers", "deferrals occuried", tx_defer
),
197 STATS_ENTRY("exc_defers", "excessive deferrals occuried", tx_excdefer
),
198 STATS_ENTRY("singlecols", "single collisions occuried", tx_singlecol
),
199 STATS_ENTRY("multicols", "multiple collisions occuried", tx_multicol
),
200 STATS_ENTRY("latecols", "late collisions occuried", tx_latecol
),
201 STATS_ENTRY("aborts", "transmit aborts due collisions", tx_abortcol
),
202 STATS_ENTRY("underruns", "Tx FIFO underruns", tx_underrun
)
204 STATS_ENTRY("bcast", "broadcast frames", rx_bcast
),
205 STATS_ENTRY("mcast", "multicast frames", rx_mcast
),
206 STATS_ENTRY("pause", "PAUSE frames", rx_pause
),
207 STATS_ENTRY("control", "control frames", rx_ctrl
),
208 STATS_ENTRY("crc_errors", "frames with CRC errors", rx_crcerr
),
209 STATS_ENTRY("code_errors", "frames with invalid opcode", rx_codeerr
),
210 STATS_ENTRY("runt", "runt frames", rx_runt
),
211 STATS_ENTRY("frag", "fragmented frames", rx_frag
),
212 STATS_ENTRY("align_errors", "frames with alignment errors", rx_align
),
213 STATS_ENTRY("truncated", "frames truncated due to Rx FIFO inderrun",
216 #define AE_STATS_RX_LEN NELEM(ae_stats_rx)
217 #define AE_STATS_TX_LEN NELEM(ae_stats_tx)
220 ae_stop(struct ae_softc
*sc
)
222 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
225 ASSERT_SERIALIZED(ifp
->if_serializer
);
227 ifp
->if_flags
&= ~IFF_RUNNING
;
228 ifq_clr_oactive(&ifp
->if_snd
);
231 sc
->ae_flags
&= ~AE_FLAG_LINK
;
232 callout_stop(&sc
->ae_tick_ch
);
235 * Clear and disable interrupts.
237 AE_WRITE_4(sc
, AE_IMR_REG
, 0);
238 AE_WRITE_4(sc
, AE_ISR_REG
, 0xffffffff);
249 AE_WRITE_1(sc
, AE_DMAREAD_REG
, ~AE_DMAREAD_EN
);
250 AE_WRITE_1(sc
, AE_DMAWRITE_REG
, ~AE_DMAWRITE_EN
);
253 * Wait for everything to enter idle state.
255 for (i
= 0; i
< AE_IDLE_TIMEOUT
; i
++) {
256 if (AE_READ_4(sc
, AE_IDLE_REG
) == 0)
260 if (i
== AE_IDLE_TIMEOUT
)
261 if_printf(ifp
, "could not enter idle state in stop.\n");
265 ae_stop_rxmac(struct ae_softc
*sc
)
271 * Stop Rx MAC engine.
273 val
= AE_READ_4(sc
, AE_MAC_REG
);
274 if ((val
& AE_MAC_RX_EN
) != 0) {
275 val
&= ~AE_MAC_RX_EN
;
276 AE_WRITE_4(sc
, AE_MAC_REG
, val
);
280 * Stop Rx DMA engine.
282 if (AE_READ_1(sc
, AE_DMAWRITE_REG
) == AE_DMAWRITE_EN
)
283 AE_WRITE_1(sc
, AE_DMAWRITE_REG
, 0);
286 * Wait for IDLE state.
288 for (i
= 0; i
< AE_IDLE_TIMEOUT
; i
--) {
289 val
= AE_READ_4(sc
, AE_IDLE_REG
);
290 if ((val
& (AE_IDLE_RXMAC
| AE_IDLE_DMAWRITE
)) == 0)
294 if (i
== AE_IDLE_TIMEOUT
) {
295 if_printf(&sc
->arpcom
.ac_if
,
296 "timed out while stopping Rx MAC.\n");
301 ae_stop_txmac(struct ae_softc
*sc
)
307 * Stop Tx MAC engine.
309 val
= AE_READ_4(sc
, AE_MAC_REG
);
310 if ((val
& AE_MAC_TX_EN
) != 0) {
311 val
&= ~AE_MAC_TX_EN
;
312 AE_WRITE_4(sc
, AE_MAC_REG
, val
);
316 * Stop Tx DMA engine.
318 if (AE_READ_1(sc
, AE_DMAREAD_REG
) == AE_DMAREAD_EN
)
319 AE_WRITE_1(sc
, AE_DMAREAD_REG
, 0);
322 * Wait for IDLE state.
324 for (i
= 0; i
< AE_IDLE_TIMEOUT
; i
--) {
325 val
= AE_READ_4(sc
, AE_IDLE_REG
);
326 if ((val
& (AE_IDLE_TXMAC
| AE_IDLE_DMAREAD
)) == 0)
330 if (i
== AE_IDLE_TIMEOUT
) {
331 if_printf(&sc
->arpcom
.ac_if
,
332 "timed out while stopping Tx MAC.\n");
337 * Callback from MII layer when media changes.
340 ae_miibus_statchg(device_t dev
)
342 struct ae_softc
*sc
= device_get_softc(dev
);
343 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
344 struct mii_data
*mii
;
347 ASSERT_SERIALIZED(ifp
->if_serializer
);
349 if ((ifp
->if_flags
& IFF_RUNNING
) == 0)
352 mii
= device_get_softc(sc
->ae_miibus
);
353 sc
->ae_flags
&= ~AE_FLAG_LINK
;
354 if ((mii
->mii_media_status
& IFM_AVALID
) != 0) {
355 switch (IFM_SUBTYPE(mii
->mii_media_active
)) {
358 sc
->ae_flags
|= AE_FLAG_LINK
;
365 /* Stop Rx/Tx MACs. */
369 /* Program MACs with resolved speed/duplex/flow-control. */
370 if ((sc
->ae_flags
& AE_FLAG_LINK
) != 0) {
374 * Restart DMA engines.
376 AE_WRITE_1(sc
, AE_DMAREAD_REG
, AE_DMAREAD_EN
);
377 AE_WRITE_1(sc
, AE_DMAWRITE_REG
, AE_DMAWRITE_EN
);
380 * Enable Rx and Tx MACs.
382 val
= AE_READ_4(sc
, AE_MAC_REG
);
383 val
|= AE_MAC_TX_EN
| AE_MAC_RX_EN
;
384 AE_WRITE_4(sc
, AE_MAC_REG
, val
);
389 ae_sysctl_node(struct ae_softc
*sc
)
391 struct sysctl_ctx_list
*ctx
;
392 struct sysctl_oid
*root
, *stats
, *stats_rx
, *stats_tx
;
393 struct ae_stats
*ae_stats
;
396 ae_stats
= &sc
->stats
;
398 ctx
= device_get_sysctl_ctx(sc
->ae_dev
);
399 root
= device_get_sysctl_tree(sc
->ae_dev
);
400 stats
= SYSCTL_ADD_NODE(ctx
, SYSCTL_CHILDREN(root
), OID_AUTO
, "stats",
401 CTLFLAG_RD
, NULL
, "ae statistics");
403 device_printf(sc
->ae_dev
, "can't add stats sysctl node\n");
408 * Receiver statistcics.
410 stats_rx
= SYSCTL_ADD_NODE(ctx
, SYSCTL_CHILDREN(stats
), OID_AUTO
, "rx",
411 CTLFLAG_RD
, NULL
, "Rx MAC statistics");
412 if (stats_rx
!= NULL
) {
413 for (i
= 0; i
< AE_STATS_RX_LEN
; i
++) {
414 SYSCTL_ADD_UINT(ctx
, SYSCTL_CHILDREN(stats_rx
),
415 OID_AUTO
, ae_stats_rx
[i
].node
, CTLFLAG_RD
,
416 (char *)ae_stats
+ ae_stats_rx
[i
].offset
, 0,
417 ae_stats_rx
[i
].desc
);
422 * Transmitter statistcics.
424 stats_tx
= SYSCTL_ADD_NODE(ctx
, SYSCTL_CHILDREN(stats
), OID_AUTO
, "tx",
425 CTLFLAG_RD
, NULL
, "Tx MAC statistics");
426 if (stats_tx
!= NULL
) {
427 for (i
= 0; i
< AE_STATS_TX_LEN
; i
++) {
428 SYSCTL_ADD_UINT(ctx
, SYSCTL_CHILDREN(stats_tx
),
429 OID_AUTO
, ae_stats_tx
[i
].node
, CTLFLAG_RD
,
430 (char *)ae_stats
+ ae_stats_tx
[i
].offset
, 0,
431 ae_stats_tx
[i
].desc
);
437 ae_miibus_readreg(device_t dev
, int phy
, int reg
)
439 struct ae_softc
*sc
= device_get_softc(dev
);
444 * Locking is done in upper layers.
446 if (phy
!= sc
->ae_phyaddr
)
448 val
= ((reg
<< AE_MDIO_REGADDR_SHIFT
) & AE_MDIO_REGADDR_MASK
) |
449 AE_MDIO_START
| AE_MDIO_READ
| AE_MDIO_SUP_PREAMBLE
|
450 ((AE_MDIO_CLK_25_4
<< AE_MDIO_CLK_SHIFT
) & AE_MDIO_CLK_MASK
);
451 AE_WRITE_4(sc
, AE_MDIO_REG
, val
);
454 * Wait for operation to complete.
456 for (i
= 0; i
< AE_MDIO_TIMEOUT
; i
++) {
458 val
= AE_READ_4(sc
, AE_MDIO_REG
);
459 if ((val
& (AE_MDIO_START
| AE_MDIO_BUSY
)) == 0)
462 if (i
== AE_MDIO_TIMEOUT
) {
463 device_printf(sc
->ae_dev
, "phy read timeout: %d.\n", reg
);
466 return ((val
<< AE_MDIO_DATA_SHIFT
) & AE_MDIO_DATA_MASK
);
470 ae_miibus_writereg(device_t dev
, int phy
, int reg
, int val
)
472 struct ae_softc
*sc
= device_get_softc(dev
);
477 * Locking is done in upper layers.
479 if (phy
!= sc
->ae_phyaddr
)
481 aereg
= ((reg
<< AE_MDIO_REGADDR_SHIFT
) & AE_MDIO_REGADDR_MASK
) |
482 AE_MDIO_START
| AE_MDIO_SUP_PREAMBLE
|
483 ((AE_MDIO_CLK_25_4
<< AE_MDIO_CLK_SHIFT
) & AE_MDIO_CLK_MASK
) |
484 ((val
<< AE_MDIO_DATA_SHIFT
) & AE_MDIO_DATA_MASK
);
485 AE_WRITE_4(sc
, AE_MDIO_REG
, aereg
);
488 * Wait for operation to complete.
490 for (i
= 0; i
< AE_MDIO_TIMEOUT
; i
++) {
492 aereg
= AE_READ_4(sc
, AE_MDIO_REG
);
493 if ((aereg
& (AE_MDIO_START
| AE_MDIO_BUSY
)) == 0)
496 if (i
== AE_MDIO_TIMEOUT
)
497 device_printf(sc
->ae_dev
, "phy write timeout: %d.\n", reg
);
502 ae_probe(device_t dev
)
504 uint16_t vendor
, devid
;
505 const struct ae_dev
*sp
;
507 vendor
= pci_get_vendor(dev
);
508 devid
= pci_get_device(dev
);
509 for (sp
= ae_devs
; sp
->ae_name
!= NULL
; sp
++) {
510 if (vendor
== sp
->ae_vendorid
&&
511 devid
== sp
->ae_deviceid
) {
512 device_set_desc(dev
, sp
->ae_name
);
520 ae_dma_alloc(struct ae_softc
*sc
)
526 * Create parent DMA tag.
528 error
= bus_dma_tag_create(NULL
, 1, 0,
529 BUS_SPACE_MAXADDR_32BIT
,
532 BUS_SPACE_MAXSIZE_32BIT
,
534 BUS_SPACE_MAXSIZE_32BIT
,
535 0, &sc
->dma_parent_tag
);
537 device_printf(sc
->ae_dev
, "could not creare parent DMA tag.\n");
542 * Create DMA stuffs for TxD.
544 sc
->txd_base
= bus_dmamem_coherent_any(sc
->dma_parent_tag
, 4,
545 AE_TXD_BUFSIZE_DEFAULT
, BUS_DMA_WAITOK
| BUS_DMA_ZERO
,
546 &sc
->dma_txd_tag
, &sc
->dma_txd_map
,
547 &sc
->dma_txd_busaddr
);
548 if (sc
->txd_base
== NULL
) {
549 device_printf(sc
->ae_dev
, "could not creare TxD DMA stuffs.\n");
554 * Create DMA stuffs for TxS.
556 sc
->txs_base
= bus_dmamem_coherent_any(sc
->dma_parent_tag
, 4,
557 AE_TXS_COUNT_DEFAULT
* 4, BUS_DMA_WAITOK
| BUS_DMA_ZERO
,
558 &sc
->dma_txs_tag
, &sc
->dma_txs_map
,
559 &sc
->dma_txs_busaddr
);
560 if (sc
->txs_base
== NULL
) {
561 device_printf(sc
->ae_dev
, "could not creare TxS DMA stuffs.\n");
566 * Create DMA stuffs for RxD.
568 sc
->rxd_base_dma
= bus_dmamem_coherent_any(sc
->dma_parent_tag
, 128,
569 AE_RXD_COUNT_DEFAULT
* 1536 + 120,
570 BUS_DMA_WAITOK
| BUS_DMA_ZERO
,
571 &sc
->dma_rxd_tag
, &sc
->dma_rxd_map
,
573 if (sc
->rxd_base_dma
== NULL
) {
574 device_printf(sc
->ae_dev
, "could not creare RxD DMA stuffs.\n");
577 sc
->dma_rxd_busaddr
= busaddr
+ 120;
578 sc
->rxd_base
= (struct ae_rxd
*)(sc
->rxd_base_dma
+ 120);
584 ae_mac_config(struct ae_softc
*sc
)
586 struct mii_data
*mii
;
589 mii
= device_get_softc(sc
->ae_miibus
);
590 val
= AE_READ_4(sc
, AE_MAC_REG
);
591 val
&= ~AE_MAC_FULL_DUPLEX
;
592 /* XXX disable AE_MAC_TX_FLOW_EN? */
593 if ((IFM_OPTIONS(mii
->mii_media_active
) & IFM_FDX
) != 0)
594 val
|= AE_MAC_FULL_DUPLEX
;
595 AE_WRITE_4(sc
, AE_MAC_REG
, val
);
599 ae_rxeof(struct ae_softc
*sc
, struct ae_rxd
*rxd
)
601 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
606 flags
= le16toh(rxd
->flags
);
608 if_printf(ifp
, "Rx interrupt occuried.\n");
610 size
= le16toh(rxd
->len
) - ETHER_CRC_LEN
;
611 if (size
< (ETHER_MIN_LEN
- ETHER_CRC_LEN
-
612 sizeof(struct ether_vlan_header
))) {
613 if_printf(ifp
, "Runt frame received.");
617 m
= m_devget(&rxd
->data
[0], size
, ETHER_ALIGN
, ifp
, NULL
);
621 if ((ifp
->if_capenable
& IFCAP_VLAN_HWTAGGING
) &&
622 (flags
& AE_RXD_HAS_VLAN
)) {
623 m
->m_pkthdr
.ether_vlantag
= AE_RXD_VLAN(le16toh(rxd
->vlan
));
624 m
->m_flags
|= M_VLANTAG
;
626 ifp
->if_input(ifp
, m
, NULL
, -1);
632 ae_rx_intr(struct ae_softc
*sc
)
634 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
640 * Syncronize DMA buffers.
642 bus_dmamap_sync(sc
->dma_rxd_tag
, sc
->dma_rxd_map
,
643 BUS_DMASYNC_POSTREAD
);
645 rxd
= (struct ae_rxd
*)(sc
->rxd_base
+ sc
->rxd_cur
);
647 flags
= le16toh(rxd
->flags
);
648 if ((flags
& AE_RXD_UPDATE
) == 0)
650 rxd
->flags
= htole16(flags
& ~AE_RXD_UPDATE
);
653 ae_update_stats_rx(flags
, &sc
->stats
);
656 * Update position index.
658 sc
->rxd_cur
= (sc
->rxd_cur
+ 1) % AE_RXD_COUNT_DEFAULT
;
659 if ((flags
& AE_RXD_SUCCESS
) == 0) {
660 IFNET_STAT_INC(ifp
, ierrors
, 1);
664 error
= ae_rxeof(sc
, rxd
);
666 IFNET_STAT_INC(ifp
, ierrors
, 1);
668 IFNET_STAT_INC(ifp
, ipackets
, 1);
671 /* Update Rx index. */
672 AE_WRITE_2(sc
, AE_MB_RXD_IDX_REG
, sc
->rxd_cur
);
676 ae_tx_intr(struct ae_softc
*sc
)
678 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
684 * Syncronize DMA buffers.
686 bus_dmamap_sync(sc
->dma_txd_tag
, sc
->dma_txd_map
, BUS_DMASYNC_POSTREAD
);
687 bus_dmamap_sync(sc
->dma_txs_tag
, sc
->dma_txs_map
, BUS_DMASYNC_POSTREAD
);
690 txs
= sc
->txs_base
+ sc
->txs_ack
;
692 flags
= le16toh(txs
->flags
);
693 if ((flags
& AE_TXS_UPDATE
) == 0)
695 txs
->flags
= htole16(flags
& ~AE_TXS_UPDATE
);
698 ae_update_stats_tx(flags
, &sc
->stats
);
701 * Update TxS position.
703 sc
->txs_ack
= (sc
->txs_ack
+ 1) % AE_TXS_COUNT_DEFAULT
;
704 sc
->ae_flags
|= AE_FLAG_TXAVAIL
;
705 txd
= (struct ae_txd
*)(sc
->txd_base
+ sc
->txd_ack
);
706 if (txs
->len
!= txd
->len
) {
707 device_printf(sc
->ae_dev
, "Size mismatch: "
709 le16toh(txs
->len
), le16toh(txd
->len
));
713 * Move txd ack and align on 4-byte boundary.
715 sc
->txd_ack
= ((sc
->txd_ack
+ le16toh(txd
->len
) + 4 + 3) & ~3) %
716 AE_TXD_BUFSIZE_DEFAULT
;
717 if ((flags
& AE_TXS_SUCCESS
) != 0)
718 IFNET_STAT_INC(ifp
, opackets
, 1);
720 IFNET_STAT_INC(ifp
, oerrors
, 1);
724 if (sc
->tx_inproc
< 0) {
726 if_printf(ifp
, "Received stray Tx interrupt(s).\n");
729 if (sc
->tx_inproc
== 0)
730 ifp
->if_timer
= 0; /* Unarm watchdog. */
731 if (sc
->ae_flags
& AE_FLAG_TXAVAIL
) {
732 ifq_clr_oactive(&ifp
->if_snd
);
733 if (!ifq_is_empty(&ifp
->if_snd
))
742 * Syncronize DMA buffers.
744 bus_dmamap_sync(sc
->dma_txd_tag
, sc
->dma_txd_map
, BUS_DMASYNC_PREWRITE
);
745 bus_dmamap_sync(sc
->dma_txs_tag
, sc
->dma_txs_map
, BUS_DMASYNC_PREWRITE
);
751 struct ae_softc
*sc
= xsc
;
752 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
755 ASSERT_SERIALIZED(ifp
->if_serializer
);
757 val
= AE_READ_4(sc
, AE_ISR_REG
);
758 if (val
== 0 || (val
& AE_IMR_DEFAULT
) == 0)
762 AE_WRITE_4(sc
, AE_ISR_REG
, AE_ISR_DISABLE
);
765 /* Read interrupt status. */
766 val
= AE_READ_4(sc
, AE_ISR_REG
);
768 /* Clear interrupts and disable them. */
769 AE_WRITE_4(sc
, AE_ISR_REG
, val
| AE_ISR_DISABLE
);
771 if (ifp
->if_flags
& IFF_RUNNING
) {
772 if (val
& (AE_ISR_DMAR_TIMEOUT
|
773 AE_ISR_DMAW_TIMEOUT
|
774 AE_ISR_PHY_LINKDOWN
)) {
777 if (val
& AE_ISR_TX_EVENT
)
779 if (val
& AE_ISR_RX_EVENT
)
783 /* Re-enable interrupts. */
784 AE_WRITE_4(sc
, AE_ISR_REG
, 0);
790 struct ae_softc
*sc
= xsc
;
791 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
792 struct mii_data
*mii
;
793 uint8_t eaddr
[ETHER_ADDR_LEN
];
797 ASSERT_SERIALIZED(ifp
->if_serializer
);
799 mii
= device_get_softc(sc
->ae_miibus
);
803 ae_powersave_disable(sc
);
806 * Clear and disable interrupts.
808 AE_WRITE_4(sc
, AE_ISR_REG
, 0xffffffff);
811 * Set the MAC address.
813 bcopy(IF_LLADDR(ifp
), eaddr
, ETHER_ADDR_LEN
);
814 val
= eaddr
[2] << 24 | eaddr
[3] << 16 | eaddr
[4] << 8 | eaddr
[5];
815 AE_WRITE_4(sc
, AE_EADDR0_REG
, val
);
816 val
= eaddr
[0] << 8 | eaddr
[1];
817 AE_WRITE_4(sc
, AE_EADDR1_REG
, val
);
820 * Set ring buffers base addresses.
822 addr
= sc
->dma_rxd_busaddr
;
823 AE_WRITE_4(sc
, AE_DESC_ADDR_HI_REG
, BUS_ADDR_HI(addr
));
824 AE_WRITE_4(sc
, AE_RXD_ADDR_LO_REG
, BUS_ADDR_LO(addr
));
825 addr
= sc
->dma_txd_busaddr
;
826 AE_WRITE_4(sc
, AE_TXD_ADDR_LO_REG
, BUS_ADDR_LO(addr
));
827 addr
= sc
->dma_txs_busaddr
;
828 AE_WRITE_4(sc
, AE_TXS_ADDR_LO_REG
, BUS_ADDR_LO(addr
));
831 * Configure ring buffers sizes.
833 AE_WRITE_2(sc
, AE_RXD_COUNT_REG
, AE_RXD_COUNT_DEFAULT
);
834 AE_WRITE_2(sc
, AE_TXD_BUFSIZE_REG
, AE_TXD_BUFSIZE_DEFAULT
/ 4);
835 AE_WRITE_2(sc
, AE_TXS_COUNT_REG
, AE_TXS_COUNT_DEFAULT
);
838 * Configure interframe gap parameters.
840 val
= ((AE_IFG_TXIPG_DEFAULT
<< AE_IFG_TXIPG_SHIFT
) &
842 ((AE_IFG_RXIPG_DEFAULT
<< AE_IFG_RXIPG_SHIFT
) &
844 ((AE_IFG_IPGR1_DEFAULT
<< AE_IFG_IPGR1_SHIFT
) &
846 ((AE_IFG_IPGR2_DEFAULT
<< AE_IFG_IPGR2_SHIFT
) &
848 AE_WRITE_4(sc
, AE_IFG_REG
, val
);
851 * Configure half-duplex operation.
853 val
= ((AE_HDPX_LCOL_DEFAULT
<< AE_HDPX_LCOL_SHIFT
) &
855 ((AE_HDPX_RETRY_DEFAULT
<< AE_HDPX_RETRY_SHIFT
) &
856 AE_HDPX_RETRY_MASK
) |
857 ((AE_HDPX_ABEBT_DEFAULT
<< AE_HDPX_ABEBT_SHIFT
) &
858 AE_HDPX_ABEBT_MASK
) |
859 ((AE_HDPX_JAMIPG_DEFAULT
<< AE_HDPX_JAMIPG_SHIFT
) &
860 AE_HDPX_JAMIPG_MASK
) | AE_HDPX_EXC_EN
;
861 AE_WRITE_4(sc
, AE_HDPX_REG
, val
);
864 * Configure interrupt moderate timer.
866 AE_WRITE_2(sc
, AE_IMT_REG
, AE_IMT_DEFAULT
);
867 val
= AE_READ_4(sc
, AE_MASTER_REG
);
868 val
|= AE_MASTER_IMT_EN
;
869 AE_WRITE_4(sc
, AE_MASTER_REG
, val
);
872 * Configure interrupt clearing timer.
874 AE_WRITE_2(sc
, AE_ICT_REG
, AE_ICT_DEFAULT
);
879 val
= ifp
->if_mtu
+ ETHER_HDR_LEN
+ sizeof(struct ether_vlan_header
) +
881 AE_WRITE_2(sc
, AE_MTU_REG
, val
);
884 * Configure cut-through threshold.
886 AE_WRITE_4(sc
, AE_CUT_THRESH_REG
, AE_CUT_THRESH_DEFAULT
);
889 * Configure flow control.
891 AE_WRITE_2(sc
, AE_FLOW_THRESH_HI_REG
, (AE_RXD_COUNT_DEFAULT
/ 8) * 7);
892 AE_WRITE_2(sc
, AE_FLOW_THRESH_LO_REG
, (AE_RXD_COUNT_MIN
/ 8) >
893 (AE_RXD_COUNT_DEFAULT
/ 12) ? (AE_RXD_COUNT_MIN
/ 8) :
894 (AE_RXD_COUNT_DEFAULT
/ 12));
899 sc
->txd_cur
= sc
->rxd_cur
= 0;
900 sc
->txs_ack
= sc
->txd_ack
= 0;
902 AE_WRITE_2(sc
, AE_MB_TXD_IDX_REG
, sc
->txd_cur
);
903 AE_WRITE_2(sc
, AE_MB_RXD_IDX_REG
, sc
->rxd_cur
);
905 sc
->ae_flags
|= AE_FLAG_TXAVAIL
; /* Free Tx's available. */
910 AE_WRITE_1(sc
, AE_DMAREAD_REG
, AE_DMAREAD_EN
);
911 AE_WRITE_1(sc
, AE_DMAWRITE_REG
, AE_DMAWRITE_EN
);
914 * Check if everything is OK.
916 val
= AE_READ_4(sc
, AE_ISR_REG
);
917 if ((val
& AE_ISR_PHY_LINKDOWN
) != 0) {
918 device_printf(sc
->ae_dev
, "Initialization failed.\n");
923 * Clear interrupt status.
925 AE_WRITE_4(sc
, AE_ISR_REG
, 0x3fffffff);
926 AE_WRITE_4(sc
, AE_ISR_REG
, 0x0);
931 val
= AE_READ_4(sc
, AE_MASTER_REG
);
932 AE_WRITE_4(sc
, AE_MASTER_REG
, val
| AE_MASTER_MANUAL_INT
);
933 AE_WRITE_4(sc
, AE_IMR_REG
, AE_IMR_DEFAULT
);
938 AE_WRITE_4(sc
, AE_WOL_REG
, 0);
943 val
= AE_MAC_TX_CRC_EN
| AE_MAC_TX_AUTOPAD
|
944 AE_MAC_FULL_DUPLEX
| AE_MAC_CLK_PHY
|
945 AE_MAC_TX_FLOW_EN
| AE_MAC_RX_FLOW_EN
|
946 ((AE_HALFBUF_DEFAULT
<< AE_HALFBUF_SHIFT
) & AE_HALFBUF_MASK
) |
947 ((AE_MAC_PREAMBLE_DEFAULT
<< AE_MAC_PREAMBLE_SHIFT
) &
948 AE_MAC_PREAMBLE_MASK
);
949 AE_WRITE_4(sc
, AE_MAC_REG
, val
);
960 val
= AE_READ_4(sc
, AE_MAC_REG
);
961 AE_WRITE_4(sc
, AE_MAC_REG
, val
| AE_MAC_TX_EN
| AE_MAC_RX_EN
);
963 sc
->ae_flags
&= ~AE_FLAG_LINK
;
964 mii_mediachg(mii
); /* Switch to the current media. */
966 callout_reset(&sc
->ae_tick_ch
, hz
, ae_tick
, sc
);
967 ifp
->if_flags
|= IFF_RUNNING
;
968 ifq_clr_oactive(&ifp
->if_snd
);
972 ae_watchdog(struct ifnet
*ifp
)
974 struct ae_softc
*sc
= ifp
->if_softc
;
976 ASSERT_SERIALIZED(ifp
->if_serializer
);
978 if ((sc
->ae_flags
& AE_FLAG_LINK
) == 0)
979 if_printf(ifp
, "watchdog timeout (missed link).\n");
981 if_printf(ifp
, "watchdog timeout - resetting.\n");
982 IFNET_STAT_INC(ifp
, oerrors
, 1);
985 if (!ifq_is_empty(&ifp
->if_snd
))
992 struct ae_softc
*sc
= xsc
;
993 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
994 struct mii_data
*mii
= device_get_softc(sc
->ae_miibus
);
996 lwkt_serialize_enter(ifp
->if_serializer
);
998 callout_reset(&sc
->ae_tick_ch
, hz
, ae_tick
, sc
);
999 lwkt_serialize_exit(ifp
->if_serializer
);
1003 ae_rxvlan(struct ae_softc
*sc
)
1005 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1008 val
= AE_READ_4(sc
, AE_MAC_REG
);
1009 val
&= ~AE_MAC_RMVLAN_EN
;
1010 if (ifp
->if_capenable
& IFCAP_VLAN_HWTAGGING
)
1011 val
|= AE_MAC_RMVLAN_EN
;
1012 AE_WRITE_4(sc
, AE_MAC_REG
, val
);
1016 ae_rxfilter(struct ae_softc
*sc
)
1018 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1019 struct ifmultiaddr
*ifma
;
1024 rxcfg
= AE_READ_4(sc
, AE_MAC_REG
);
1025 rxcfg
&= ~(AE_MAC_MCAST_EN
| AE_MAC_BCAST_EN
| AE_MAC_PROMISC_EN
);
1026 rxcfg
|= AE_MAC_BCAST_EN
;
1027 if (ifp
->if_flags
& IFF_PROMISC
)
1028 rxcfg
|= AE_MAC_PROMISC_EN
;
1029 if (ifp
->if_flags
& IFF_ALLMULTI
)
1030 rxcfg
|= AE_MAC_MCAST_EN
;
1033 * Wipe old settings.
1035 AE_WRITE_4(sc
, AE_REG_MHT0
, 0);
1036 AE_WRITE_4(sc
, AE_REG_MHT1
, 0);
1037 if (ifp
->if_flags
& (IFF_PROMISC
| IFF_ALLMULTI
)) {
1038 AE_WRITE_4(sc
, AE_REG_MHT0
, 0xffffffff);
1039 AE_WRITE_4(sc
, AE_REG_MHT1
, 0xffffffff);
1040 AE_WRITE_4(sc
, AE_MAC_REG
, rxcfg
);
1045 * Load multicast tables.
1047 bzero(mchash
, sizeof(mchash
));
1048 TAILQ_FOREACH(ifma
, &ifp
->if_multiaddrs
, ifma_link
) {
1049 if (ifma
->ifma_addr
->sa_family
!= AF_LINK
)
1051 crc
= ether_crc32_le(LLADDR((struct sockaddr_dl
*)
1052 ifma
->ifma_addr
), ETHER_ADDR_LEN
);
1053 mchash
[crc
>> 31] |= 1 << ((crc
>> 26) & 0x1f);
1055 AE_WRITE_4(sc
, AE_REG_MHT0
, mchash
[0]);
1056 AE_WRITE_4(sc
, AE_REG_MHT1
, mchash
[1]);
1057 AE_WRITE_4(sc
, AE_MAC_REG
, rxcfg
);
1061 ae_tx_avail_size(struct ae_softc
*sc
)
1065 if (sc
->txd_cur
>= sc
->txd_ack
)
1066 avail
= AE_TXD_BUFSIZE_DEFAULT
- (sc
->txd_cur
- sc
->txd_ack
);
1068 avail
= sc
->txd_ack
- sc
->txd_cur
;
1069 return (avail
- 4); /* 4-byte header. */
1073 ae_encap(struct ae_softc
*sc
, struct mbuf
**m_head
)
1077 unsigned int to_end
;
1080 M_ASSERTPKTHDR((*m_head
));
1082 len
= m0
->m_pkthdr
.len
;
1083 if ((sc
->ae_flags
& AE_FLAG_TXAVAIL
) == 0 ||
1084 ae_tx_avail_size(sc
) < len
) {
1086 if_printf(sc
->ifp
, "No free Tx available.\n");
1091 hdr
= (struct ae_txd
*)(sc
->txd_base
+ sc
->txd_cur
);
1092 bzero(hdr
, sizeof(*hdr
));
1095 sc
->txd_cur
= (sc
->txd_cur
+ 4) % AE_TXD_BUFSIZE_DEFAULT
;
1097 /* Space available to the end of the ring */
1098 to_end
= AE_TXD_BUFSIZE_DEFAULT
- sc
->txd_cur
;
1100 if (to_end
>= len
) {
1101 m_copydata(m0
, 0, len
, (caddr_t
)(sc
->txd_base
+ sc
->txd_cur
));
1103 m_copydata(m0
, 0, to_end
, (caddr_t
)(sc
->txd_base
+
1105 m_copydata(m0
, to_end
, len
- to_end
, (caddr_t
)sc
->txd_base
);
1109 * Set TxD flags and parameters.
1111 if ((m0
->m_flags
& M_VLANTAG
) != 0) {
1112 hdr
->vlan
= htole16(AE_TXD_VLAN(m0
->m_pkthdr
.ether_vlantag
));
1113 hdr
->len
= htole16(len
| AE_TXD_INSERT_VTAG
);
1115 hdr
->len
= htole16(len
);
1119 * Set current TxD position and round up to a 4-byte boundary.
1121 sc
->txd_cur
= ((sc
->txd_cur
+ len
+ 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT
;
1122 if (sc
->txd_cur
== sc
->txd_ack
)
1123 sc
->ae_flags
&= ~AE_FLAG_TXAVAIL
;
1125 if_printf(sc
->ifp
, "New txd_cur = %d.\n", sc
->txd_cur
);
1129 * Update TxS position and check if there are empty TxS available.
1131 sc
->txs_base
[sc
->txs_cur
].flags
&= ~htole16(AE_TXS_UPDATE
);
1132 sc
->txs_cur
= (sc
->txs_cur
+ 1) % AE_TXS_COUNT_DEFAULT
;
1133 if (sc
->txs_cur
== sc
->txs_ack
)
1134 sc
->ae_flags
&= ~AE_FLAG_TXAVAIL
;
1137 * Synchronize DMA memory.
1139 bus_dmamap_sync(sc
->dma_txd_tag
, sc
->dma_txd_map
, BUS_DMASYNC_PREWRITE
);
1140 bus_dmamap_sync(sc
->dma_txs_tag
, sc
->dma_txs_map
, BUS_DMASYNC_PREWRITE
);
1146 ae_start(struct ifnet
*ifp
, struct ifaltq_subque
*ifsq
)
1148 struct ae_softc
*sc
= ifp
->if_softc
;
1151 ASSERT_ALTQ_SQ_DEFAULT(ifp
, ifsq
);
1152 ASSERT_SERIALIZED(ifp
->if_serializer
);
1155 if_printf(ifp
, "Start called.\n");
1157 if ((sc
->ae_flags
& AE_FLAG_LINK
) == 0) {
1158 ifq_purge(&ifp
->if_snd
);
1161 if ((ifp
->if_flags
& IFF_RUNNING
) == 0 || ifq_is_oactive(&ifp
->if_snd
))
1165 while (!ifq_is_empty(&ifp
->if_snd
)) {
1168 m0
= ifq_dequeue(&ifp
->if_snd
);
1170 break; /* Nothing to do. */
1172 error
= ae_encap(sc
, &m0
);
1175 ifq_prepend(&ifp
->if_snd
, m0
);
1176 ifq_set_oactive(&ifp
->if_snd
);
1178 if_printf(ifp
, "Setting OACTIVE.\n");
1186 /* Bounce a copy of the frame to BPF. */
1187 ETHER_BPF_MTAP(ifp
, m0
);
1190 if (trans
) { /* Something was dequeued. */
1191 AE_WRITE_2(sc
, AE_MB_TXD_IDX_REG
, sc
->txd_cur
/ 4);
1192 ifp
->if_timer
= AE_TX_TIMEOUT
; /* Load watchdog. */
1194 if_printf(ifp
, "%d packets dequeued.\n", count
);
1195 if_printf(ifp
, "Tx pos now is %d.\n", sc
->txd_cur
);
1201 ae_ioctl(struct ifnet
*ifp
, u_long cmd
, caddr_t data
, struct ucred
*cr
)
1203 struct ae_softc
*sc
= ifp
->if_softc
;
1205 struct mii_data
*mii
;
1206 int error
= 0, mask
;
1208 ASSERT_SERIALIZED(ifp
->if_serializer
);
1210 ifr
= (struct ifreq
*)data
;
1213 if (ifp
->if_flags
& IFF_UP
) {
1214 if (ifp
->if_flags
& IFF_RUNNING
) {
1215 if (((ifp
->if_flags
^ sc
->ae_if_flags
)
1216 & (IFF_PROMISC
| IFF_ALLMULTI
)) != 0)
1222 if (ifp
->if_flags
& IFF_RUNNING
)
1225 sc
->ae_if_flags
= ifp
->if_flags
;
1230 if (ifp
->if_flags
& IFF_RUNNING
)
1236 mii
= device_get_softc(sc
->ae_miibus
);
1237 error
= ifmedia_ioctl(ifp
, ifr
, &mii
->mii_media
, cmd
);
1241 mask
= ifr
->ifr_reqcap
^ ifp
->if_capenable
;
1242 if (mask
& IFCAP_VLAN_HWTAGGING
) {
1243 ifp
->if_capenable
^= IFCAP_VLAN_HWTAGGING
;
1249 error
= ether_ioctl(ifp
, cmd
, data
);
1256 ae_attach(device_t dev
)
1258 struct ae_softc
*sc
= device_get_softc(dev
);
1259 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1263 if_initname(ifp
, device_get_name(dev
), device_get_unit(dev
));
1264 callout_init(&sc
->ae_tick_ch
);
1266 /* Enable bus mastering */
1267 pci_enable_busmaster(dev
);
1270 * Allocate memory mapped IO
1272 sc
->ae_mem_rid
= PCIR_BAR(0);
1273 sc
->ae_mem_res
= bus_alloc_resource_any(dev
, SYS_RES_MEMORY
,
1274 &sc
->ae_mem_rid
, RF_ACTIVE
);
1275 if (sc
->ae_mem_res
== NULL
) {
1276 device_printf(dev
, "can't allocate IO memory\n");
1279 sc
->ae_mem_bt
= rman_get_bustag(sc
->ae_mem_res
);
1280 sc
->ae_mem_bh
= rman_get_bushandle(sc
->ae_mem_res
);
1286 sc
->ae_irq_res
= bus_alloc_resource_any(dev
, SYS_RES_IRQ
,
1288 RF_SHAREABLE
| RF_ACTIVE
);
1289 if (sc
->ae_irq_res
== NULL
) {
1290 device_printf(dev
, "can't allocate irq\n");
1295 /* Set PHY address. */
1296 sc
->ae_phyaddr
= AE_PHYADDR_DEFAULT
;
1298 /* Create sysctl tree */
1305 * Reset the ethernet controller.
1311 * Get PCI and chip id/revision.
1313 sc
->ae_rev
= pci_get_revid(dev
);
1315 (AE_READ_4(sc
, AE_MASTER_REG
) >> AE_MASTER_REVNUM_SHIFT
) &
1316 AE_MASTER_REVNUM_MASK
;
1318 device_printf(dev
, "PCI device revision : 0x%04x\n", sc
->ae_rev
);
1319 device_printf(dev
, "Chip id/revision : 0x%04x\n",
1325 * Unintialized hardware returns an invalid chip id/revision
1326 * as well as 0xFFFFFFFF for Tx/Rx fifo length. It seems that
1327 * unplugged cable results in putting hardware into automatic
1328 * power down mode which in turn returns invalld chip revision.
1330 if (sc
->ae_chip_rev
== 0xFFFF) {
1331 device_printf(dev
,"invalid chip revision : 0x%04x -- "
1332 "not initialized?\n", sc
->ae_chip_rev
);
1337 /* Get DMA parameters from PCIe device control register. */
1338 pcie_ptr
= pci_get_pciecap_ptr(dev
);
1341 sc
->ae_flags
|= AE_FLAG_PCIE
;
1342 devctl
= pci_read_config(dev
, pcie_ptr
+ PCIER_DEVCTRL
, 2);
1343 /* Max read request size. */
1344 sc
->ae_dma_rd_burst
= ((devctl
>> 12) & 0x07) <<
1345 DMA_CFG_RD_BURST_SHIFT
;
1346 /* Max payload size. */
1347 sc
->ae_dma_wr_burst
= ((devctl
>> 5) & 0x07) <<
1348 DMA_CFG_WR_BURST_SHIFT
;
1350 device_printf(dev
, "Read request size : %d bytes.\n",
1351 128 << ((devctl
>> 12) & 0x07));
1352 device_printf(dev
, "TLP payload size : %d bytes.\n",
1353 128 << ((devctl
>> 5) & 0x07));
1356 sc
->ae_dma_rd_burst
= DMA_CFG_RD_BURST_128
;
1357 sc
->ae_dma_wr_burst
= DMA_CFG_WR_BURST_128
;
1361 /* Create DMA stuffs */
1362 error
= ae_dma_alloc(sc
);
1366 /* Load station address. */
1370 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
1371 ifp
->if_ioctl
= ae_ioctl
;
1372 ifp
->if_start
= ae_start
;
1373 ifp
->if_init
= ae_init
;
1374 ifp
->if_watchdog
= ae_watchdog
;
1375 ifq_set_maxlen(&ifp
->if_snd
, IFQ_MAXLEN
- 1);
1376 ifq_set_ready(&ifp
->if_snd
);
1377 ifp
->if_capabilities
= IFCAP_VLAN_MTU
|
1378 IFCAP_VLAN_HWTAGGING
;
1379 ifp
->if_hwassist
= 0;
1380 ifp
->if_capenable
= ifp
->if_capabilities
;
1382 /* Set up MII bus. */
1383 error
= mii_phy_probe(dev
, &sc
->ae_miibus
,
1384 ae_mediachange
, ae_mediastatus
);
1386 device_printf(dev
, "no PHY found!\n");
1389 ether_ifattach(ifp
, sc
->ae_eaddr
, NULL
);
1391 /* Tell the upper layer(s) we support long frames. */
1392 ifp
->if_data
.ifi_hdrlen
= sizeof(struct ether_vlan_header
);
1394 ifq_set_cpuid(&ifp
->if_snd
, rman_get_cpuid(sc
->ae_irq_res
));
1396 error
= bus_setup_intr(dev
, sc
->ae_irq_res
, INTR_MPSAFE
, ae_intr
, sc
,
1397 &sc
->ae_irq_handle
, ifp
->if_serializer
);
1399 device_printf(dev
, "could not set up interrupt handler.\n");
1400 ether_ifdetach(ifp
);
1410 ae_detach(device_t dev
)
1412 struct ae_softc
*sc
= device_get_softc(dev
);
1414 if (device_is_attached(dev
)) {
1415 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1417 lwkt_serialize_enter(ifp
->if_serializer
);
1418 sc
->ae_flags
|= AE_FLAG_DETACH
;
1420 bus_teardown_intr(dev
, sc
->ae_irq_res
, sc
->ae_irq_handle
);
1421 lwkt_serialize_exit(ifp
->if_serializer
);
1423 ether_ifdetach(ifp
);
1426 if (sc
->ae_miibus
!= NULL
)
1427 device_delete_child(dev
, sc
->ae_miibus
);
1428 bus_generic_detach(dev
);
1430 if (sc
->ae_irq_res
!= NULL
) {
1431 bus_release_resource(dev
, SYS_RES_IRQ
, sc
->ae_irq_rid
,
1434 if (sc
->ae_mem_res
!= NULL
) {
1435 bus_release_resource(dev
, SYS_RES_MEMORY
, sc
->ae_mem_rid
,
1444 ae_dma_free(struct ae_softc
*sc
)
1446 if (sc
->dma_txd_tag
!= NULL
) {
1447 bus_dmamap_unload(sc
->dma_txd_tag
, sc
->dma_txd_map
);
1448 bus_dmamem_free(sc
->dma_txd_tag
, sc
->txd_base
,
1450 bus_dma_tag_destroy(sc
->dma_txd_tag
);
1452 if (sc
->dma_txs_tag
!= NULL
) {
1453 bus_dmamap_unload(sc
->dma_txs_tag
, sc
->dma_txs_map
);
1454 bus_dmamem_free(sc
->dma_txs_tag
, sc
->txs_base
,
1456 bus_dma_tag_destroy(sc
->dma_txs_tag
);
1458 if (sc
->dma_rxd_tag
!= NULL
) {
1459 bus_dmamap_unload(sc
->dma_rxd_tag
, sc
->dma_rxd_map
);
1460 bus_dmamem_free(sc
->dma_rxd_tag
,
1461 sc
->rxd_base_dma
, sc
->dma_rxd_map
);
1462 bus_dma_tag_destroy(sc
->dma_rxd_tag
);
1464 if (sc
->dma_parent_tag
!= NULL
)
1465 bus_dma_tag_destroy(sc
->dma_parent_tag
);
1469 ae_pcie_init(struct ae_softc
*sc
)
1471 AE_WRITE_4(sc
, AE_PCIE_LTSSM_TESTMODE_REG
,
1472 AE_PCIE_LTSSM_TESTMODE_DEFAULT
);
1473 AE_WRITE_4(sc
, AE_PCIE_DLL_TX_CTRL_REG
,
1474 AE_PCIE_DLL_TX_CTRL_DEFAULT
);
1478 ae_phy_reset(struct ae_softc
*sc
)
1480 AE_WRITE_4(sc
, AE_PHY_ENABLE_REG
, AE_PHY_ENABLE
);
1481 DELAY(1000); /* XXX: pause(9) ? */
1485 ae_reset(struct ae_softc
*sc
)
1490 * Issue a soft reset.
1492 AE_WRITE_4(sc
, AE_MASTER_REG
, AE_MASTER_SOFT_RESET
);
1493 bus_space_barrier(sc
->ae_mem_bt
, sc
->ae_mem_bh
, AE_MASTER_REG
, 4,
1494 BUS_SPACE_BARRIER_READ
| BUS_SPACE_BARRIER_WRITE
);
1497 * Wait for reset to complete.
1499 for (i
= 0; i
< AE_RESET_TIMEOUT
; i
++) {
1500 if ((AE_READ_4(sc
, AE_MASTER_REG
) & AE_MASTER_SOFT_RESET
) == 0)
1504 if (i
== AE_RESET_TIMEOUT
) {
1505 device_printf(sc
->ae_dev
, "reset timeout.\n");
1510 * Wait for everything to enter idle state.
1512 for (i
= 0; i
< AE_IDLE_TIMEOUT
; i
++) {
1513 if (AE_READ_4(sc
, AE_IDLE_REG
) == 0)
1517 if (i
== AE_IDLE_TIMEOUT
) {
1518 device_printf(sc
->ae_dev
, "could not enter idle state.\n");
1525 ae_check_eeprom_present(struct ae_softc
*sc
, int *vpdc
)
1531 * Not sure why, but Linux does this.
1533 val
= AE_READ_4(sc
, AE_SPICTL_REG
);
1534 if ((val
& AE_SPICTL_VPD_EN
) != 0) {
1535 val
&= ~AE_SPICTL_VPD_EN
;
1536 AE_WRITE_4(sc
, AE_SPICTL_REG
, val
);
1538 error
= pci_find_extcap(sc
->ae_dev
, PCIY_VPD
, vpdc
);
1543 ae_vpd_read_word(struct ae_softc
*sc
, int reg
, uint32_t *word
)
1548 AE_WRITE_4(sc
, AE_VPD_DATA_REG
, 0); /* Clear register value. */
1551 * VPD registers start at offset 0x100. Read them.
1553 val
= 0x100 + reg
* 4;
1554 AE_WRITE_4(sc
, AE_VPD_CAP_REG
, (val
<< AE_VPD_CAP_ADDR_SHIFT
) &
1555 AE_VPD_CAP_ADDR_MASK
);
1556 for (i
= 0; i
< AE_VPD_TIMEOUT
; i
++) {
1558 val
= AE_READ_4(sc
, AE_VPD_CAP_REG
);
1559 if ((val
& AE_VPD_CAP_DONE
) != 0)
1562 if (i
== AE_VPD_TIMEOUT
) {
1563 device_printf(sc
->ae_dev
, "timeout reading VPD register %d.\n",
1567 *word
= AE_READ_4(sc
, AE_VPD_DATA_REG
);
1572 ae_get_vpd_eaddr(struct ae_softc
*sc
, uint32_t *eaddr
)
1574 uint32_t word
, reg
, val
;
1583 error
= ae_check_eeprom_present(sc
, &vpdc
);
1588 * Read the VPD configuration space.
1589 * Each register is prefixed with signature,
1590 * so we can check if it is valid.
1592 for (i
= 0, found
= 0; i
< AE_VPD_NREGS
; i
++) {
1593 error
= ae_vpd_read_word(sc
, i
, &word
);
1600 if ((word
& AE_VPD_SIG_MASK
) != AE_VPD_SIG
)
1602 reg
= word
>> AE_VPD_REG_SHIFT
;
1603 i
++; /* Move to the next word. */
1604 if (reg
!= AE_EADDR0_REG
&& reg
!= AE_EADDR1_REG
)
1607 error
= ae_vpd_read_word(sc
, i
, &val
);
1610 if (reg
== AE_EADDR0_REG
)
1619 eaddr
[1] &= 0xffff; /* Only last 2 bytes are used. */
1620 if (AE_CHECK_EADDR_VALID(eaddr
) != 0) {
1622 device_printf(sc
->ae_dev
,
1623 "VPD ethernet address registers are invalid.\n");
1630 ae_get_reg_eaddr(struct ae_softc
*sc
, uint32_t *eaddr
)
1633 * BIOS is supposed to set this.
1635 eaddr
[0] = AE_READ_4(sc
, AE_EADDR0_REG
);
1636 eaddr
[1] = AE_READ_4(sc
, AE_EADDR1_REG
);
1637 eaddr
[1] &= 0xffff; /* Only last 2 bytes are used. */
1638 if (AE_CHECK_EADDR_VALID(eaddr
) != 0) {
1640 device_printf(sc
->ae_dev
,
1641 "Ethetnet address registers are invalid.\n");
1648 ae_get_eaddr(struct ae_softc
*sc
)
1650 uint32_t eaddr
[2] = {0, 0};
1656 error
= ae_get_vpd_eaddr(sc
, eaddr
);
1658 error
= ae_get_reg_eaddr(sc
, eaddr
);
1661 device_printf(sc
->ae_dev
,
1662 "Generating random ethernet address.\n");
1663 eaddr
[0] = karc4random();
1665 * Set OUI to ASUSTek COMPUTER INC.
1667 sc
->ae_eaddr
[0] = 0x02; /* U/L bit set. */
1668 sc
->ae_eaddr
[1] = 0x1f;
1669 sc
->ae_eaddr
[2] = 0xc6;
1670 sc
->ae_eaddr
[3] = (eaddr
[0] >> 16) & 0xff;
1671 sc
->ae_eaddr
[4] = (eaddr
[0] >> 8) & 0xff;
1672 sc
->ae_eaddr
[5] = (eaddr
[0] >> 0) & 0xff;
1674 sc
->ae_eaddr
[0] = (eaddr
[1] >> 8) & 0xff;
1675 sc
->ae_eaddr
[1] = (eaddr
[1] >> 0) & 0xff;
1676 sc
->ae_eaddr
[2] = (eaddr
[0] >> 24) & 0xff;
1677 sc
->ae_eaddr
[3] = (eaddr
[0] >> 16) & 0xff;
1678 sc
->ae_eaddr
[4] = (eaddr
[0] >> 8) & 0xff;
1679 sc
->ae_eaddr
[5] = (eaddr
[0] >> 0) & 0xff;
1684 ae_mediachange(struct ifnet
*ifp
)
1686 struct ae_softc
*sc
= ifp
->if_softc
;
1687 struct mii_data
*mii
= device_get_softc(sc
->ae_miibus
);
1690 ASSERT_SERIALIZED(ifp
->if_serializer
);
1691 if (mii
->mii_instance
!= 0) {
1692 struct mii_softc
*miisc
;
1693 LIST_FOREACH(miisc
, &mii
->mii_phys
, mii_list
)
1694 mii_phy_reset(miisc
);
1696 error
= mii_mediachg(mii
);
1701 ae_mediastatus(struct ifnet
*ifp
, struct ifmediareq
*ifmr
)
1703 struct ae_softc
*sc
= ifp
->if_softc
;
1704 struct mii_data
*mii
= device_get_softc(sc
->ae_miibus
);
1706 ASSERT_SERIALIZED(ifp
->if_serializer
);
1708 ifmr
->ifm_status
= mii
->mii_media_status
;
1709 ifmr
->ifm_active
= mii
->mii_media_active
;
1713 ae_update_stats_tx(uint16_t flags
, struct ae_stats
*stats
)
1715 if ((flags
& AE_TXS_BCAST
) != 0)
1717 if ((flags
& AE_TXS_MCAST
) != 0)
1719 if ((flags
& AE_TXS_PAUSE
) != 0)
1721 if ((flags
& AE_TXS_CTRL
) != 0)
1723 if ((flags
& AE_TXS_DEFER
) != 0)
1725 if ((flags
& AE_TXS_EXCDEFER
) != 0)
1726 stats
->tx_excdefer
++;
1727 if ((flags
& AE_TXS_SINGLECOL
) != 0)
1728 stats
->tx_singlecol
++;
1729 if ((flags
& AE_TXS_MULTICOL
) != 0)
1730 stats
->tx_multicol
++;
1731 if ((flags
& AE_TXS_LATECOL
) != 0)
1732 stats
->tx_latecol
++;
1733 if ((flags
& AE_TXS_ABORTCOL
) != 0)
1734 stats
->tx_abortcol
++;
1735 if ((flags
& AE_TXS_UNDERRUN
) != 0)
1736 stats
->tx_underrun
++;
1740 ae_update_stats_rx(uint16_t flags
, struct ae_stats
*stats
)
1742 if ((flags
& AE_RXD_BCAST
) != 0)
1744 if ((flags
& AE_RXD_MCAST
) != 0)
1746 if ((flags
& AE_RXD_PAUSE
) != 0)
1748 if ((flags
& AE_RXD_CTRL
) != 0)
1750 if ((flags
& AE_RXD_CRCERR
) != 0)
1752 if ((flags
& AE_RXD_CODEERR
) != 0)
1753 stats
->rx_codeerr
++;
1754 if ((flags
& AE_RXD_RUNT
) != 0)
1756 if ((flags
& AE_RXD_FRAG
) != 0)
1758 if ((flags
& AE_RXD_TRUNC
) != 0)
1760 if ((flags
& AE_RXD_ALIGN
) != 0)
1765 ae_resume(device_t dev
)
1767 struct ae_softc
*sc
= device_get_softc(dev
);
1768 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1770 lwkt_serialize_enter(ifp
->if_serializer
);
1772 AE_READ_4(sc
, AE_WOL_REG
); /* Clear WOL status. */
1775 if ((ifp
->if_flags
& IFF_UP
) != 0)
1777 lwkt_serialize_exit(ifp
->if_serializer
);
1782 ae_suspend(device_t dev
)
1784 struct ae_softc
*sc
= device_get_softc(dev
);
1785 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1787 lwkt_serialize_enter(ifp
->if_serializer
);
1790 /* we don't use ae_pm_init because we don't want WOL */
1793 lwkt_serialize_exit(ifp
->if_serializer
);
1798 ae_shutdown(device_t dev
)
1800 struct ae_softc
*sc
= device_get_softc(dev
);
1801 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1805 lwkt_serialize_enter(ifp
->if_serializer
);
1806 ae_powersave_enable(sc
);
1807 lwkt_serialize_exit(ifp
->if_serializer
);
1813 ae_powersave_disable(struct ae_softc
*sc
)
1817 AE_PHY_WRITE(sc
, AE_PHY_DBG_ADDR
, 0);
1818 val
= AE_PHY_READ(sc
, AE_PHY_DBG_DATA
);
1819 if (val
& AE_PHY_DBG_POWERSAVE
) {
1820 val
&= ~AE_PHY_DBG_POWERSAVE
;
1821 AE_PHY_WRITE(sc
, AE_PHY_DBG_DATA
, val
);
1827 ae_powersave_enable(struct ae_softc
*sc
)
1832 * XXX magic numbers.
1834 AE_PHY_WRITE(sc
, AE_PHY_DBG_ADDR
, 0);
1835 val
= AE_PHY_READ(sc
, AE_PHY_DBG_DATA
);
1836 AE_PHY_WRITE(sc
, AE_PHY_DBG_ADDR
, val
| 0x1000);
1837 AE_PHY_WRITE(sc
, AE_PHY_DBG_ADDR
, 2);
1838 AE_PHY_WRITE(sc
, AE_PHY_DBG_DATA
, 0x3000);
1839 AE_PHY_WRITE(sc
, AE_PHY_DBG_ADDR
, 3);
1840 AE_PHY_WRITE(sc
, AE_PHY_DBG_DATA
, 0);