/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.17 2008/09/17 08:51:29 sephe Exp $
 */
#include <sys/param.h>
#include <sys/bitops.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/et/if_etreg.h>
#include <dev/netif/et/if_etvar.h>

#include "miibus_if.h"
static int	et_probe(device_t);
static int	et_attach(device_t);
static int	et_detach(device_t);
static int	et_shutdown(device_t);

static int	et_miibus_readreg(device_t, int, int);
static int	et_miibus_writereg(device_t, int, int, int);
static void	et_miibus_statchg(device_t);

static void	et_init(void *);
static int	et_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	et_start(struct ifnet *);
static void	et_watchdog(struct ifnet *);
static int	et_ifmedia_upd(struct ifnet *);
static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);

static void	et_intr(void *);
static void	et_enable_intrs(struct et_softc *, uint32_t);
static void	et_disable_intrs(struct et_softc *);
static void	et_rxeof(struct et_softc *);
static void	et_txeof(struct et_softc *, int);

static int	et_dma_alloc(device_t);
static void	et_dma_free(device_t);
static void	et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
static int	et_dma_mbuf_create(device_t);
static void	et_dma_mbuf_destroy(device_t, int, const int[]);
static int	et_jumbo_mem_alloc(device_t);
static void	et_jumbo_mem_free(device_t);
static int	et_init_tx_ring(struct et_softc *);
static int	et_init_rx_ring(struct et_softc *);
static void	et_free_tx_ring(struct et_softc *);
static void	et_free_rx_ring(struct et_softc *);
static int	et_encap(struct et_softc *, struct mbuf **);
static struct et_jslot *
		et_jalloc(struct et_jumbo_data *);
static void	et_jfree(void *);
static void	et_jref(void *);
static int	et_newbuf(struct et_rxbuf_data *, int, int, int);
static int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
static int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);
static int	et_newbuf_jumbo(struct et_rxbuf_data *, int, int);

static void	et_stop(struct et_softc *);
static int	et_chip_init(struct et_softc *);
static void	et_chip_attach(struct et_softc *);
static void	et_init_mac(struct et_softc *);
static void	et_init_rxmac(struct et_softc *);
static void	et_init_txmac(struct et_softc *);
static int	et_init_rxdma(struct et_softc *);
static int	et_init_txdma(struct et_softc *);
static int	et_start_rxdma(struct et_softc *);
static int	et_start_txdma(struct et_softc *);
static int	et_stop_rxdma(struct et_softc *);
static int	et_stop_txdma(struct et_softc *);
static int	et_enable_txrx(struct et_softc *, int);
static void	et_reset(struct et_softc *);
static int	et_bus_config(device_t);
static void	et_get_eaddr(device_t, uint8_t[]);
static void	et_setmulti(struct et_softc *);
static void	et_tick(void *);
static void	et_setmedia(struct et_softc *);
static void	et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t);
static const struct et_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
	  "Agere ET1310 Gigabit Ethernet" },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
	  "Agere ET1310 Fast Ethernet" },
	{ 0, 0, NULL }
};
static device_method_t et_methods[] = {
	DEVMETHOD(device_probe,		et_probe),
	DEVMETHOD(device_attach,	et_attach),
	DEVMETHOD(device_detach,	et_detach),
	DEVMETHOD(device_shutdown,	et_shutdown),
#if 0
	DEVMETHOD(device_suspend,	et_suspend),
	DEVMETHOD(device_resume,	et_resume),
#endif

	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
	DEVMETHOD(miibus_statchg,	et_miibus_statchg),

	{ 0, 0 }
};

static driver_t et_driver = {
	"et",
	et_methods,
	sizeof(struct et_softc)
};

static devclass_t et_devclass;
DECLARE_DUMMY_MODULE(if_et);
MODULE_DEPEND(if_et, miibus, 1, 1, 1);
DRIVER_MODULE(if_et, pci, et_driver, et_devclass, 0, 0);
DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);

static int	et_rx_intr_npkts = 129;
static int	et_rx_intr_delay = 25;		/* x4 usec */
static int	et_tx_intr_nsegs = 256;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */

TUNABLE_INT("hw.et.timer", &et_timer);
TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
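/*
 * The four knobs above are standard loader tunables and can be overridden
 * at boot time, e.g. from /boot/loader.conf (the values below are only
 * examples, not recommendations):
 *
 *	hw.et.rx_intr_npkts="64"
 *	hw.et.rx_intr_delay="25"
 *	hw.et.tx_intr_nsegs="128"
 *	hw.et.timer="1000000000"
 *
 * The defaults are copied into the per-device softc in et_attach(), which
 * also creates matching hw.<devname>.* sysctl nodes for runtime tuning.
 */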
static const struct et_bsize	et_bufsize_std[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128,	.jumbo = 0,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_2048,	.jumbo = 0,
	  .newbuf = et_newbuf_cluster },
};

static const struct et_bsize	et_bufsize_jumbo[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128,	.jumbo = 0,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_16384,	.jumbo = 1,
	  .newbuf = et_newbuf_jumbo },
};
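/*
 * Background for the tables above: the ET1310 services two RX rings.
 * Ring 0 is stocked with small header mbufs (et_newbuf_hdr), ring 1 with
 * standard 2KB clusters (et_newbuf_cluster) or, when the configured MTU
 * no longer fits in a cluster, with 16KB jumbo slots (et_newbuf_jumbo).
 * et_init() selects one of the two tables based on ET_FRAMELEN(ifp->if_mtu).
 */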
static int
et_probe(device_t dev)
{
	const struct et_dev *d;
	uint16_t did, vid;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = et_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}
static int
et_attach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	callout_init(&sc->sc_tick);

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

#ifndef BURN_BRIDGE
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, ET_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, ET_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGE */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->sc_mem_rid = ET_PCIR_BAR;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res);
	sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
					     SYSCTL_STATIC_CHILDREN(_hw),
					     OID_AUTO,
					     device_get_nameunit(dev),
					     CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
			SYSCTL_CHILDREN(sc->sc_sysctl_tree),
			OID_AUTO, "rx_intr_npkts", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, et_sysctl_rx_intr_npkts, "I",
			"RX IM, # packets per RX interrupt");
	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
			SYSCTL_CHILDREN(sc->sc_sysctl_tree),
			OID_AUTO, "rx_intr_delay", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, et_sysctl_rx_intr_delay, "I",
			"RX IM, RX interrupt delay (x10 usec)");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "tx_intr_nsegs", CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
		       "TX IM, # segments per TX interrupt");
	SYSCTL_ADD_UINT(&sc->sc_sysctl_ctx,
			SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
			"timer", CTLFLAG_RW, &sc->sc_timer, 0,
			"TX timer");

	error = et_bus_config(dev);
	if (error)
		goto fail;

	et_get_eaddr(dev, eaddr);

	CSR_WRITE_4(sc, ET_PM,
		    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);

	et_reset(sc);

	et_disable_intrs(sc);

	error = et_dma_alloc(dev);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = et_init;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_watchdog = et_watchdog;
	ifp->if_mtu = ETHERMTU;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifq_set_maxlen(&ifp->if_snd, ET_TX_NDESC);
	ifq_set_ready(&ifp->if_snd);

	et_chip_attach(sc);

	error = mii_phy_probe(dev, &sc->sc_miibus,
			      et_ifmedia_upd, et_ifmedia_sts);
	if (error) {
		device_printf(dev, "can't probe any PHY\n");
		goto fail;
	}

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, et_intr, sc,
			       &sc->sc_irq_handle, ifp->if_serializer);
	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "can't setup intr\n");
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->sc_irq_res));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return 0;
fail:
	et_detach(dev);
	return error;
}
static int
et_detach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		et_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sc_sysctl_ctx);

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	et_dma_free(dev);

	return 0;
}
static int
et_shutdown(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	et_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
	return 0;
}
static int
et_miibus_readreg(device_t dev, int phy, int reg)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	      __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(&sc->arpcom.ac_if,
			  "read phy %d, reg %d timed out\n", phy, reg);
		ret = 0;
		goto back;
	}

#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return ret;
}
static int
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	      __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(&sc->arpcom.ac_if,
			  "write phy %d, reg %d timed out\n", phy, reg);
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return 0;
}
static void
et_miibus_statchg(device_t dev)
{
	et_setmedia(device_get_softc(dev));
}
static int
et_ifmedia_upd(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}
static void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}
static void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc->sc_tick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
static int
et_bus_config(device_t dev)
{
	uint32_t val, max_plsz;
	uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	val = pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		device_printf(dev, "EEPROM status error 0x%02x\n", val);
		return ENXIO;
	}

	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_read_config(dev, ET_PCIR_DEVICE_CAPS, 4);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_read_config(dev, ET_PCIR_ACK_LATENCY, 2);
		replay_timer = pci_read_config(dev, ET_PCIR_REPLAY_TIMER, 2);
		device_printf(dev, "ack latency %u, replay timer %u\n",
			      ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_write_config(dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
		pci_write_config(dev, ET_PCIR_REPLAY_TIMER, replay_timer, 2);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
	pci_write_config(dev, ET_PCIR_L0S_L1_LATENCY, val, 1);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_read_config(dev, ET_PCIR_DEVICE_CTRL, 2);
	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_write_config(dev, ET_PCIR_DEVICE_CTRL, val, 2);

	return 0;
}
static void
et_get_eaddr(device_t dev, uint8_t eaddr[])
{
	uint32_t val;
	int i;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
	for (i = 0; i < 4; ++i)
		eaddr[i] = (val >> (8 * i)) & 0xff;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
	for (; i < ETHER_ADDR_LEN; ++i)
		eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
}
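/*
 * The factory-programmed station address read above lives in PCI config
 * space, least significant byte first: ET_PCIR_MAC_ADDR0 supplies bytes
 * 0-3 and ET_PCIR_MAC_ADDR1 supplies bytes 4-5.
 */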
static void
et_reset(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
		    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
		    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
		    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}
static void
et_disable_intrs(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

static void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}
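/*
 * ET_INTR_MASK uses "1 = blocked" semantics, so et_disable_intrs() masks
 * every source with 0xffffffff while et_enable_intrs() writes the bitwise
 * complement of the interrupt sources it wants delivered.
 */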
static int
et_dma_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Create top level DMA tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &sc->sc_dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return error;
	}

	/*
	 * Create TX ring DMA stuffs
	 */
	tx_ring->tr_desc = bus_dmamem_coherent_any(sc->sc_dtag,
				ET_ALIGN, ET_TX_RING_SIZE,
				BUS_DMA_WAITOK | BUS_DMA_ZERO,
				&tx_ring->tr_dtag, &tx_ring->tr_dmap,
				&tx_ring->tr_paddr);
	if (tx_ring->tr_desc == NULL) {
		device_printf(dev, "can't create TX ring DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create TX status DMA stuffs
	 */
	txsd->txsd_status = bus_dmamem_coherent_any(sc->sc_dtag,
				ET_ALIGN, sizeof(uint32_t),
				BUS_DMA_WAITOK | BUS_DMA_ZERO,
				&txsd->txsd_dtag, &txsd->txsd_dmap,
				&txsd->txsd_paddr);
	if (txsd->txsd_status == NULL) {
		device_printf(dev, "can't create TX status DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		{ ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		rx_ring->rr_desc = bus_dmamem_coherent_any(sc->sc_dtag,
					ET_ALIGN, ET_RX_RING_SIZE,
					BUS_DMA_WAITOK | BUS_DMA_ZERO,
					&rx_ring->rr_dtag, &rx_ring->rr_dmap,
					&rx_ring->rr_paddr);
		if (rx_ring->rr_desc == NULL) {
			device_printf(dev, "can't create DMA stuffs for "
				      "the %d RX ring\n", i);
			return ENOMEM;
		}
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA stuffs
	 */
	rxst_ring->rsr_stat = bus_dmamem_coherent_any(sc->sc_dtag,
				ET_ALIGN, ET_RXSTAT_RING_SIZE,
				BUS_DMA_WAITOK | BUS_DMA_ZERO,
				&rxst_ring->rsr_dtag, &rxst_ring->rsr_dmap,
				&rxst_ring->rsr_paddr);
	if (rxst_ring->rsr_stat == NULL) {
		device_printf(dev, "can't create RX stat ring DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create RX status DMA stuffs
	 */
	rxsd->rxsd_status = bus_dmamem_coherent_any(sc->sc_dtag,
				ET_ALIGN, sizeof(struct et_rxstatus),
				BUS_DMA_WAITOK | BUS_DMA_ZERO,
				&rxsd->rxsd_dtag, &rxsd->rxsd_dmap,
				&rxsd->rxsd_paddr);
	if (rxsd->rxsd_status == NULL) {
		device_printf(dev, "can't create RX status DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create mbuf DMA stuffs
	 */
	error = et_dma_mbuf_create(dev);
	if (error)
		return error;

	/*
	 * Create jumbo buffer DMA stuffs
	 * NOTE: Allow it to fail
	 */
	if (et_jumbo_mem_alloc(dev) == 0)
		sc->sc_flags |= ET_FLAG_JUMBO;

	return 0;
}
static void
et_dma_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, rx_done[ET_RX_NRING];

	/*
	 * Destroy TX ring DMA stuffs
	 */
	et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc,
			   tx_ring->tr_dmap);

	/*
	 * Destroy TX status DMA stuffs
	 */
	et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status,
			   txsd->txsd_dmap);

	/*
	 * Destroy DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc,
				   rx_ring->rr_dmap);
	}

	/*
	 * Destroy RX stat ring DMA stuffs
	 */
	et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat,
			   rxst_ring->rsr_dmap);

	/*
	 * Destroy RX status DMA stuffs
	 */
	et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status,
			   rxsd->rxsd_dmap);

	/*
	 * Destroy mbuf DMA stuffs
	 */
	for (i = 0; i < ET_RX_NRING; ++i)
		rx_done[i] = ET_RX_NDESC;
	et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done);

	/*
	 * Destroy jumbo buffer DMA stuffs
	 */
	if (sc->sc_flags & ET_FLAG_JUMBO)
		et_jumbo_mem_free(dev);

	/*
	 * Destroy top level DMA tag
	 */
	if (sc->sc_dtag != NULL)
		bus_dma_tag_destroy(sc->sc_dtag);
}
static int
et_dma_mbuf_create(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create RX mbuf DMA tag
	 */
	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
				   &sc->sc_rxbuf_dtag);
	if (error) {
		device_printf(dev, "can't create RX mbuf DMA tag\n");
		return error;
	}

	/*
	 * Create spare DMA map for RX mbufs
	 */
	error = bus_dmamap_create(sc->sc_rxbuf_dtag, BUS_DMA_WAITOK,
				  &sc->sc_rxbuf_tmp_dmap);
	if (error) {
		device_printf(dev, "can't create spare mbuf DMA map\n");
		bus_dma_tag_destroy(sc->sc_rxbuf_dtag);
		sc->sc_rxbuf_dtag = NULL;
		return error;
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
	bzero(rx_done, sizeof(rx_done));
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < ET_RX_NDESC; ++j) {
			error = bus_dmamap_create(sc->sc_rxbuf_dtag,
						  BUS_DMA_WAITOK,
						  &rbd->rbd_buf[j].rb_dmap);
			if (error) {
				device_printf(dev, "can't create %d RX mbuf "
					      "for %d RX ring\n", j, i);
				rx_done[i] = j;
				et_dma_mbuf_destroy(dev, 0, rx_done);
				return error;
			}
		}
		rx_done[i] = ET_RX_NDESC;

		rbd->rbd_softc = sc;
		rbd->rbd_ring = &sc->sc_rx_ring[i];
	}

	/*
	 * Create TX mbuf DMA tag
	 */
	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   ET_JUMBO_FRAMELEN, ET_NSEG_MAX, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
				   BUS_DMA_ONEBPAGE,
				   &sc->sc_txbuf_dtag);
	if (error) {
		device_printf(dev, "can't create TX mbuf DMA tag\n");
		return error;
	}

	/*
	 * Create DMA maps for TX mbufs
	 */
	for (i = 0; i < ET_TX_NDESC; ++i) {
		error = bus_dmamap_create(sc->sc_txbuf_dtag,
					  BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
					  &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			device_printf(dev, "can't create %d TX mbuf "
				      "DMA map\n", i);
			et_dma_mbuf_destroy(dev, i, rx_done);
			return error;
		}
	}

	return 0;
}
static void
et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[])
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i;

	/*
	 * Destroy DMA tag and maps for RX mbufs
	 */
	if (sc->sc_rxbuf_dtag) {
		for (i = 0; i < ET_RX_NRING; ++i) {
			struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
			int j;

			for (j = 0; j < rx_done[i]; ++j) {
				struct et_rxbuf *rb = &rbd->rbd_buf[j];

				KASSERT(rb->rb_mbuf == NULL,
					("RX mbuf in %d RX ring is "
					 "not freed yet\n", i));
				bus_dmamap_destroy(sc->sc_rxbuf_dtag,
						   rb->rb_dmap);
			}
		}
		bus_dmamap_destroy(sc->sc_rxbuf_dtag, sc->sc_rxbuf_tmp_dmap);
		bus_dma_tag_destroy(sc->sc_rxbuf_dtag);
		sc->sc_rxbuf_dtag = NULL;
	}

	/*
	 * Destroy DMA tag and maps for TX mbufs
	 */
	if (sc->sc_txbuf_dtag) {
		for (i = 0; i < tx_done; ++i) {
			struct et_txbuf *tb = &tbd->tbd_buf[i];

			KASSERT(tb->tb_mbuf == NULL,
				("TX mbuf is not freed yet\n"));
			bus_dmamap_destroy(sc->sc_txbuf_dtag, tb->tb_dmap);
		}
		bus_dma_tag_destroy(sc->sc_txbuf_dtag);
		sc->sc_txbuf_dtag = NULL;
	}
}
static void
et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
	bus_dmamap_unload(dtag, dmap);
	bus_dmamem_free(dtag, addr, dmap);
	bus_dma_tag_destroy(dtag);
}
static void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	      ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}
static void
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t intrs;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	et_disable_intrs(sc);

	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc, 1);
	if (intrs & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
	et_enable_intrs(sc, ET_INTRS);
}
static void
et_init(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const struct et_bsize *arr;
	int error, i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	et_stop(sc);

	arr = ET_FRAMELEN(ifp->if_mtu) < MCLBYTES ?
	      et_bufsize_std : et_bufsize_jumbo;
	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf;
		sc->sc_rx_data[i].rbd_jumbo = arr[i].jumbo;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_enable_txrx(sc, 1);
	if (error)
		goto back;

	et_enable_intrs(sc, ET_INTRS);

	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
back:
	if (error)
		et_stop(sc);
}
static int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, max_framelen;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->sc_if_flags) &
				    (IFF_ALLMULTI | IFF_PROMISC))
					et_setmulti(sc);
			} else {
				et_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			et_setmulti(sc);
		break;

	case SIOCSIFMTU:
		if (sc->sc_flags & ET_FLAG_JUMBO)
			max_framelen = ET_JUMBO_FRAMELEN;
		else
			max_framelen = MCLBYTES - 1;

		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
			error = EOPNOTSUPP;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		if (ifp->if_flags & IFF_RUNNING)
			et_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}
static void
et_start(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		struct mbuf *m;
		int error;

		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m = ifq_dequeue(&ifp->if_snd, NULL);
		if (m == NULL)
			break;

		error = et_encap(sc, &m);
		if (error) {
			KKASSERT(m == NULL);

			if (error == EFBIG) {
				/*
				 * Excessive fragmented packets
				 */
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
			continue;
		}
	}
}
static void
et_watchdog(struct ifnet *ifp)
{
	ASSERT_SERIALIZED(ifp->if_serializer);

	if_printf(ifp, "watchdog timed out\n");

	ifp->if_init(ifp->if_softc);
}
static int
et_stop_rxdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
		    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		if_printf(&sc->arpcom.ac_if, "can't stop RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}
static int
et_stop_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
		    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}
static void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	int i;

	for (i = 0; i < ET_TX_NDESC; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_txbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
}
static void
et_free_rx_ring(struct et_softc *sc)
{
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
		int i;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			struct et_rxbuf *rb = &rbd->rbd_buf[i];

			if (rb->rb_mbuf != NULL) {
				if (!rbd->rbd_jumbo) {
					bus_dmamap_unload(sc->sc_rxbuf_dtag,
							  rb->rb_dmap);
				}
				m_freem(rb->rb_mbuf);
				rb->rb_mbuf = NULL;
			}
		}

		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
	}
}
static void
et_setmulti(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ifmultiaddr *ifma;
	int i;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		uint32_t *hp, h;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
				   ifma->ifma_addr), ETHER_ADDR_LEN);
		h = (h & 0x3f800000) >> 23;

		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);
	}

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}
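/*
 * et_setmulti() reduces each multicast address to a 7-bit value (bits
 * 23-29 of the big-endian CRC32), which selects one of the 128 hash bits
 * spread across the four 32-bit ET_MULTI_HASH registers programmed above.
 */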
static int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t rxq_end;
	int error, frame_len, rxmem_size;

	/*
	 * Split 16Kbytes internal memory between TX and RX
	 * according to frame length.
	 */
	frame_len = ET_FRAMELEN(ifp->if_mtu);
	if (frame_len < 2048) {
		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
		rxmem_size = ET_MEM_SIZE / 2;
	} else {
		rxmem_size = ET_MEM_SIZE -
		    roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
	}
	rxq_end = ET_QUEUE_ADDR(rxmem_size);

	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);

	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return error;

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return error;

	return 0;
}
static int
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);

	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	bzero(txsd->txsd_status, sizeof(uint32_t));

	return 0;
}
static int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		int i, error;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			error = rbd->rbd_newbuf(rbd, i, 1);
			if (error) {
				if_printf(&sc->arpcom.ac_if, "%d ring %d buf, "
					  "newbuf failed: %d\n", n, i, error);
				return error;
			}
		}
	}

	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);

	return 0;
}
static int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		if_printf(&sc->arpcom.ac_if, "can't init RX DMA engine\n");
		return error;
	}

	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return 0;
}
static int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		if_printf(&sc->arpcom.ac_if, "can't init TX DMA engine\n");
		return error;
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return 0;
}
static void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;

	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
	      __SHIFTIN(88, ET_IPG_NONB2B_2) |
	      __SHIFTIN(80, ET_IPG_MINIFG) |
	      __SHIFTIN(96, ET_IPG_B2B);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	      ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}
static void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
		/*
		 * In order to transmit jumbo packets greater than
		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
		 * RX MAC and RX DMA needs to be reduced in size to
		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
		 * order to implement this, we must use "cut through"
		 * mode in the RX MAC, which chops packets down into
		 * segments.  In this case we selected 256 bytes,
		 * since this is the size of the PCI-Express TLP's
		 * that the ET1310 uses.
		 */
		val = __SHIFTIN(ET_RXMAC_SEGSZ(256), ET_RXMAC_MC_SEGSZ_MAX) |
		      ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
		    ET_RXMAC_MGT_PASS_ECRC |
		    ET_RXMAC_MGT_PASS_ELEN |
		    ET_RXMAC_MGT_PASS_ETRUNC |
		    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generation)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
		    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}
static void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
		    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}
static int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
			 ET_RXDMA_CTRL_RING0_SIZE) |
	       ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
			 ET_RXDMA_CTRL_RING1_SIZE) |
	       ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);
	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		if_printf(&sc->arpcom.ac_if, "can't start RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}
static int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}
static int
et_enable_txrx(struct et_softc *sc, int media_upd)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;
	int i, error;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
		 ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	if (media_upd)
		et_ifmedia_upd(ifp);
	else
		et_setmedia(sc);

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	if (i == NRETRY) {
		if_printf(ifp, "can't enable RX/TX\n");
		return 0;
	}
	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;

#undef NRETRY

	/*
	 * Start TX/RX DMA engine
	 */
	error = et_start_rxdma(sc);
	if (error)
		return error;

	error = et_start_txdma(sc);
	if (error)
		return error;

	return 0;
}
static void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;
	struct mbuf_chain chain[MAXCPU];

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

	ether_input_chain_init(chain);

	while (rxst_index != rxst_ring->rsr_index ||
	       rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
				       ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			if_printf(ifp, "invalid ring index %d\n", ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			if_printf(ifp, "invalid buf index %d\n", buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		m = rbd->rbd_buf[buf_idx].rb_mbuf;

		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
			} else {
				m->m_pkthdr.len = m->m_len = buflen;
				m->m_pkthdr.rcvif = ifp;

				m_adj(m, -ETHER_CRC_LEN);

				ether_input_chain(ifp, m, NULL, chain);
			}
		}
		m = NULL;	/* Catch invalid reference */

		rx_ring = &sc->sc_rx_ring[ring_idx];

		if (buf_idx != rx_ring->rr_index) {
			if_printf(ifp, "WARNING!! ring %d, "
				  "buf_idx %d, rr_idx %d\n",
				  ring_idx, buf_idx, rx_ring->rr_index);
		}

		KKASSERT(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}

	ether_input_dispatch(chain);
}
static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	bus_dma_segment_t segs[ET_NSEG_MAX];
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, nsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERT(maxsegs >= ET_NSEG_SPARE,
		("not enough spare TX desc (%d)\n", maxsegs));

	KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf_defrag(sc->sc_txbuf_dtag, map, m0,
					    segs, maxsegs, &nsegs,
					    BUS_DMA_NOWAIT);
	if (error)
		goto back;
	bus_dmamap_sync(sc->sc_txbuf_dtag, map, BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}
	last_idx = -1;
	for (i = 0; i < nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(segs[i].ds_addr);
		td->td_ctrl1 = __SHIFTIN(segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

	KKASSERT(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = *m0;

	tbd->tbd_used += nsegs;
	KKASSERT(tbd->tbd_used <= ET_TX_NDESC);

	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
				 ET_TX_READY_POS_INDEX);
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(*m0);
		*m0 = NULL;
	}
	return error;
}
static void
et_txeof(struct et_softc *sc, int start)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	uint32_t tx_done;
	int end, wrap;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		struct et_txbuf *tb;

		KKASSERT(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];

		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
		      sizeof(struct et_txdesc));

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_txbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		KKASSERT(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0)
		ifp->if_timer = 0;
	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
		ifp->if_flags &= ~IFF_OACTIVE;

	if (start)
		if_devstart(ifp);
}
static void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 &&
	    (mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		if_printf(ifp, "Link up, enable TX/RX\n");
		if (et_enable_txrx(sc, 0) == 0)
			if_devstart(ifp);
	}
	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}
static int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
}

static int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MHLEN);
}
static int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t dmap;
	int error, len, nseg;

	KASSERT(!rbd->rbd_jumbo, ("calling %s with jumbo ring\n", __func__));

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	m = m_getl(len0, init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR, &len);
	if (m == NULL) {
		error = ENOBUFS;
		if (init) {
			if_printf(&sc->arpcom.ac_if,
				  "m_getl failed, size %d\n", len0);
			return error;
		}
		goto back;
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try load RX mbuf into temporary DMA tag
	 */
	error = bus_dmamap_load_mbuf_segment(sc->sc_rxbuf_dtag,
					     sc->sc_rxbuf_tmp_dmap, m,
					     &seg, 1, &nseg,
					     BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init) {
			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
			return error;
		}
		goto back;
	}

	if (!init) {
		bus_dmamap_sync(sc->sc_rxbuf_dtag, rb->rb_dmap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rxbuf_dtag, rb->rb_dmap);
	}
	rb->rb_mbuf = m;
	rb->rb_paddr = seg.ds_addr;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_rxbuf_tmp_dmap;
	sc->sc_rxbuf_tmp_dmap = dmap;

	error = 0;
back:
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return error;
}
static int
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->sc_rx_intr_npkts;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_npkts != v) {
		if (ifp->if_flags & IFF_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
		sc->sc_rx_intr_npkts = v;
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}
static int
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->sc_rx_intr_delay;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_delay != v) {
		if (ifp->if_flags & IFF_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
		sc->sc_rx_intr_delay = v;
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}
static void
et_setmedia(struct et_softc *sc)
{
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t cfg2, ctrl;

	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
		  ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
		__SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	} else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		cfg2 |= ET_MAC_CFG2_FDX;
	else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}
static int
et_jumbo_mem_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_jumbo_data *jd = &sc->sc_jumbo_data;
	bus_addr_t paddr;
	uint8_t *buf;
	int i;

	jd->jd_buf = bus_dmamem_coherent_any(sc->sc_dtag,
			ET_JUMBO_ALIGN, ET_JUMBO_MEM_SIZE, BUS_DMA_WAITOK,
			&jd->jd_dtag, &jd->jd_dmap, &paddr);
	if (jd->jd_buf == NULL) {
		device_printf(dev, "can't create jumbo DMA stuffs\n");
		return ENOMEM;
	}

	jd->jd_slots = kmalloc(sizeof(*jd->jd_slots) * ET_JSLOTS, M_DEVBUF,
			       M_WAITOK | M_ZERO);
	lwkt_serialize_init(&jd->jd_serializer);
	SLIST_INIT(&jd->jd_free_slots);

	buf = jd->jd_buf;
	for (i = 0; i < ET_JSLOTS; ++i) {
		struct et_jslot *jslot = &jd->jd_slots[i];

		jslot->jslot_data = jd;
		jslot->jslot_buf = buf;
		jslot->jslot_paddr = paddr;
		jslot->jslot_inuse = 0;
		jslot->jslot_index = i;
		SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot, jslot_link);

		buf += ET_JLEN;
		paddr += ET_JLEN;
	}
	return 0;
}
static void
et_jumbo_mem_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_jumbo_data *jd = &sc->sc_jumbo_data;

	KKASSERT(sc->sc_flags & ET_FLAG_JUMBO);

	kfree(jd->jd_slots, M_DEVBUF);
	et_dma_mem_destroy(jd->jd_dtag, jd->jd_buf, jd->jd_dmap);
}
static struct et_jslot *
et_jalloc(struct et_jumbo_data *jd)
{
	struct et_jslot *jslot;

	lwkt_serialize_enter(&jd->jd_serializer);

	jslot = SLIST_FIRST(&jd->jd_free_slots);
	if (jslot) {
		SLIST_REMOVE_HEAD(&jd->jd_free_slots, jslot_link);
		jslot->jslot_inuse = 1;
	}

	lwkt_serialize_exit(&jd->jd_serializer);
	return jslot;
}
static void
et_jfree(void *xjslot)
{
	struct et_jslot *jslot = xjslot;
	struct et_jumbo_data *jd = jslot->jslot_data;

	if (&jd->jd_slots[jslot->jslot_index] != jslot) {
		panic("%s wrong jslot!?\n", __func__);
	} else if (jslot->jslot_inuse == 0) {
		panic("%s jslot already freed\n", __func__);
	} else {
		lwkt_serialize_enter(&jd->jd_serializer);

		atomic_subtract_int(&jslot->jslot_inuse, 1);
		if (jslot->jslot_inuse == 0) {
			SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot,
					  jslot_link);
		}

		lwkt_serialize_exit(&jd->jd_serializer);
	}
}
static void
et_jref(void *xjslot)
{
	struct et_jslot *jslot = xjslot;
	struct et_jumbo_data *jd = jslot->jslot_data;

	if (&jd->jd_slots[jslot->jslot_index] != jslot)
		panic("%s wrong jslot!?\n", __func__);
	else if (jslot->jslot_inuse == 0)
		panic("%s jslot already freed\n", __func__);

	atomic_add_int(&jslot->jslot_inuse, 1);
}
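/*
 * The jumbo slots above are reference counted: et_jref() bumps
 * jslot_inuse for each additional mbuf referencing the external buffer,
 * and et_jfree() only returns the slot to jd_free_slots once the count
 * drops back to zero (under jd_serializer).
 */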
static int
et_newbuf_jumbo(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	struct et_jslot *jslot;
	int error;

	KASSERT(rbd->rbd_jumbo, ("calling %s with non-jumbo ring\n", __func__));

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	error = ENOBUFS;

	MGETHDR(m, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL) {
		if (init)
			if_printf(&sc->arpcom.ac_if, "MGETHDR failed\n");
		goto back;
	}

	jslot = et_jalloc(&sc->sc_jumbo_data);
	if (jslot == NULL) {
		m_freem(m);

		if (init) {
			if_printf(&sc->arpcom.ac_if,
				  "jslot allocation failed\n");
		}
		goto back;
	}

	m->m_ext.ext_arg = jslot;
	m->m_ext.ext_buf = jslot->jslot_buf;
	m->m_ext.ext_free = et_jfree;
	m->m_ext.ext_ref = et_jref;
	m->m_ext.ext_size = ET_JUMBO_FRAMELEN;
	m->m_flags |= M_EXT;
	m->m_data = m->m_ext.ext_buf;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	rb->rb_mbuf = m;
	rb->rb_paddr = jslot->jslot_paddr;

	error = 0;
back:
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return error;
}
static void
et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr)
{
	struct et_rxdesc_ring *rx_ring = rbd->rbd_ring;
	struct et_rxdesc *desc;

	KKASSERT(buf_idx < ET_RX_NDESC);
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(paddr);
	desc->rd_addr_lo = ET_ADDR_LO(paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);
}