/*	$OpenBSD: if_txp.c,v 1.48 2001/06/27 06:34:50 kjc Exp $	*/
/*	$FreeBSD: src/sys/dev/txp/if_txp.c,v 1.4.2.4 2001/12/14 19:50:43 jlemon Exp $ */
/*	$DragonFly: src/sys/dev/netif/txp/if_txp.c,v 1.49 2008/05/16 13:19:12 sephe Exp $ */
 * Jason L. Wright <jason@thought.net>, Theo de Raadt, and
 * Aaron Campbell <aaron@monkey.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Jason L. Wright,
 *      Theo de Raadt and Aaron Campbell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for 3c990 (Typhoon) Ethernet ASIC
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/serialize.h>
#include <sys/thread2.h>
#include <sys/interrupt.h>

#include <net/ifq_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <sys/in_cksum.h>

#include <net/if_media.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */

#include "../mii_layer/mii.h"
#include "../mii_layer/miivar.h"

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#define TXP_USEIOSPACE
#define __STRICT_ALIGNMENT

#include "if_txpreg.h"
/*
 * Various supported device vendors/types and their names.
 */
static struct txp_type txp_devs[] = {
        { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX95,
            "3Com 3cR990-TX-95 Etherlink with 3XP Processor" },
        { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX97,
            "3Com 3cR990-TX-97 Etherlink with 3XP Processor" },
        { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990B,
            "3Com 3cR990B-TXM Etherlink with 3XP Processor" },
        { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR95,
            "3Com 3cR990-SRV-95 Etherlink Server with 3XP Processor" },
        { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR97,
            "3Com 3cR990-SRV-97 Etherlink Server with 3XP Processor" },
        { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990BSVR,
            "3Com 3cR990B-SRV Etherlink Server with 3XP Processor" },
static int txp_probe (device_t);
static int txp_attach (device_t);
static int txp_detach (device_t);
static void txp_intr (void *);
static void txp_tick (void *);
static int txp_shutdown (device_t);
static int txp_ioctl (struct ifnet *, u_long, caddr_t, struct ucred *);
static void txp_start (struct ifnet *);
static void txp_stop (struct txp_softc *);
static void txp_init (void *);
static void txp_watchdog (struct ifnet *);

static void txp_release_resources (device_t);
static int txp_chip_init (struct txp_softc *);
static int txp_reset_adapter (struct txp_softc *);
static int txp_download_fw (struct txp_softc *);
static int txp_download_fw_wait (struct txp_softc *);
static int txp_download_fw_section (struct txp_softc *,
    struct txp_fw_section_header *, int);
static int txp_alloc_rings (struct txp_softc *);
static int txp_rxring_fill (struct txp_softc *);
static void txp_rxring_empty (struct txp_softc *);
static void txp_set_filter (struct txp_softc *);

static int txp_cmd_desc_numfree (struct txp_softc *);
static int txp_command (struct txp_softc *, u_int16_t, u_int16_t, u_int32_t,
    u_int32_t, u_int16_t *, u_int32_t *, u_int32_t *, int);
static int txp_command2 (struct txp_softc *, u_int16_t, u_int16_t,
    u_int32_t, u_int32_t, struct txp_ext_desc *, u_int8_t,
    struct txp_rsp_desc **, int);
static int txp_response (struct txp_softc *, u_int32_t, u_int16_t, u_int16_t,
    struct txp_rsp_desc **);
static void txp_rsp_fixup (struct txp_softc *, struct txp_rsp_desc *,
    struct txp_rsp_desc *);
static void txp_capabilities (struct txp_softc *);

static void txp_ifmedia_sts (struct ifnet *, struct ifmediareq *);
static int txp_ifmedia_upd (struct ifnet *);

static void txp_show_descriptor (void *);

static void txp_tx_reclaim (struct txp_softc *, struct txp_tx_ring *);
static void txp_rxbuf_reclaim (struct txp_softc *);
static void txp_rx_reclaim (struct txp_softc *, struct txp_rx_ring *);
#ifdef TXP_USEIOSPACE
#define TXP_RES		SYS_RES_IOPORT
#define TXP_RID		TXP_PCI_LOIO
#else
#define TXP_RES		SYS_RES_MEMORY
#define TXP_RID		TXP_PCI_LOMEM
#endif
static device_method_t txp_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,		txp_probe),
        DEVMETHOD(device_attach,	txp_attach),
        DEVMETHOD(device_detach,	txp_detach),
        DEVMETHOD(device_shutdown,	txp_shutdown),

static driver_t txp_driver = {
        sizeof(struct txp_softc)

static devclass_t txp_devclass;

DECLARE_DUMMY_MODULE(if_txp);
DRIVER_MODULE(if_txp, pci, txp_driver, txp_devclass, 0, 0);
txp_probe(device_t dev)

        vid = pci_get_vendor(dev);
        did = pci_get_device(dev);

        for (t = txp_devs; t->txp_name != NULL; ++t) {
                if ((vid == t->txp_vid) && (did == t->txp_did)) {
                        device_set_desc(dev, t->txp_name);
txp_attach(device_t dev)

        struct txp_softc *sc;
        uint8_t enaddr[ETHER_ADDR_LEN];

        sc = device_get_softc(dev);
        callout_init(&sc->txp_stat_timer);

        ifp = &sc->sc_arpcom.ac_if;
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));

        pci_enable_busmaster(dev);

        sc->sc_res = bus_alloc_resource_any(dev, TXP_RES, &rid, RF_ACTIVE);
        if (sc->sc_res == NULL) {
                device_printf(dev, "couldn't map ports/memory\n");

        sc->sc_bt = rman_get_bustag(sc->sc_res);
        sc->sc_bh = rman_get_bushandle(sc->sc_res);

        /* Allocate interrupt */
        sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (sc->sc_irq == NULL) {
                device_printf(dev, "couldn't map interrupt\n");

        if (txp_chip_init(sc)) {

        sc->sc_fwbuf = contigmalloc(32768, M_DEVBUF,
            M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);
        error = txp_download_fw(sc);
        contigfree(sc->sc_fwbuf, 32768, M_DEVBUF);

        sc->sc_ldata = contigmalloc(sizeof(struct txp_ldata), M_DEVBUF,
            M_WAITOK | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);

        if (txp_alloc_rings(sc)) {

        if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
            NULL, NULL, NULL, 1)) {

        if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
            &p1, &p2, NULL, 1)) {

        enaddr[0] = ((uint8_t *)&p1)[1];
        enaddr[1] = ((uint8_t *)&p1)[0];
        enaddr[2] = ((uint8_t *)&p2)[3];
        enaddr[3] = ((uint8_t *)&p2)[2];
        enaddr[4] = ((uint8_t *)&p2)[1];
        enaddr[5] = ((uint8_t *)&p2)[0];

        ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
        ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
        ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
        ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
        ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
        ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
        ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
        ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);

        sc->sc_xcvr = TXP_XCVR_AUTO;
        txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
            NULL, NULL, NULL, 0);
        ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

        ifp->if_mtu = ETHERMTU;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = txp_ioctl;
        ifp->if_start = txp_start;
        ifp->if_watchdog = txp_watchdog;
        ifp->if_init = txp_init;
        ifp->if_baudrate = 100000000;
        ifq_set_maxlen(&ifp->if_snd, TX_ENTRIES);
        ifq_set_ready(&ifp->if_snd);
        ifp->if_hwassist = 0;
        txp_capabilities(sc);

        ether_ifattach(ifp, enaddr, NULL);

        error = bus_setup_intr(dev, sc->sc_irq, INTR_NETSAFE,
            txp_intr, sc, &sc->sc_intrhand,
                device_printf(dev, "couldn't set up irq\n");

        ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->sc_irq));
        KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

        txp_release_resources(dev);
txp_detach(device_t dev)

        struct txp_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->sc_arpcom.ac_if;

        lwkt_serialize_enter(ifp->if_serializer);

        bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);

        lwkt_serialize_exit(ifp->if_serializer);

        ifmedia_removeall(&sc->sc_ifmedia);

        for (i = 0; i < RXBUF_ENTRIES; i++)
                kfree(sc->sc_rxbufs[i].rb_sd, M_DEVBUF);

        txp_release_resources(dev);
txp_release_resources(device_t dev)

        struct txp_softc *sc;

        sc = device_get_softc(dev);

        if (sc->sc_irq != NULL)
                bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

        if (sc->sc_res != NULL)
                bus_release_resource(dev, TXP_RES, TXP_RID, sc->sc_res);

        if (sc->sc_ldata != NULL)
                contigfree(sc->sc_ldata, sizeof(struct txp_ldata), M_DEVBUF);
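/*
 * txp_chip_init: quiet the chip before firmware download -- mask and ack
 * every interrupt source, soft-reset the 3XP, then mask and ack again so
 * the adapter comes back up with interrupts disabled.
 */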
txp_chip_init(struct txp_softc *sc)

        /* disable interrupts */
        WRITE_REG(sc, TXP_IER, 0);
        WRITE_REG(sc, TXP_IMR,
            TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
            TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |

        /* ack all interrupts */
        WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
            TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
            TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
            TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
            TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);

        if (txp_reset_adapter(sc))

        /* disable interrupts */
        WRITE_REG(sc, TXP_IER, 0);
        WRITE_REG(sc, TXP_IMR,
            TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
            TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |

        /* ack all interrupts */
        WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
            TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
            TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
            TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
            TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);
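/*
 * txp_reset_adapter: pulse the software reset register and poll A2H_0
 * (up to roughly 6 seconds) until the boot ROM reports that it is waiting
 * for a host request.
 */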
txp_reset_adapter(struct txp_softc *sc)

        WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
        WRITE_REG(sc, TXP_SRR, 0);

        /* Should wait max 6 seconds */
        for (i = 0; i < 6000; i++) {
                r = READ_REG(sc, TXP_A2H_0);
                if (r == STAT_WAITING_FOR_HOST_REQUEST)

        if (r != STAT_WAITING_FOR_HOST_REQUEST) {
                if_printf(&sc->sc_arpcom.ac_if, "reset hung\n");
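/*
 * txp_download_fw: hand the embedded tc990image runtime firmware to the
 * boot ROM -- validate the "TYPHOON" magic, announce the image, then feed
 * each section through txp_download_fw_section() and wait for the ROM to
 * return to its boot prompt.
 */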
txp_download_fw(struct txp_softc *sc)

        struct txp_fw_file_header *fileheader;
        struct txp_fw_section_header *secthead;
        u_int32_t r, i, ier, imr;

        ier = READ_REG(sc, TXP_IER);
        WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);

        imr = READ_REG(sc, TXP_IMR);
        WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);

        for (i = 0; i < 10000; i++) {
                r = READ_REG(sc, TXP_A2H_0);
                if (r == STAT_WAITING_FOR_HOST_REQUEST)

        if (r != STAT_WAITING_FOR_HOST_REQUEST) {
                if_printf(&sc->sc_arpcom.ac_if,
                    "not waiting for host request\n");

        WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

        fileheader = (struct txp_fw_file_header *)tc990image;
        if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
                if_printf(&sc->sc_arpcom.ac_if, "fw invalid magic\n");

        /* Tell boot firmware to get ready for image */
        WRITE_REG(sc, TXP_H2A_1, fileheader->addr);
        WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);

        if (txp_download_fw_wait(sc)) {
                if_printf(&sc->sc_arpcom.ac_if, "fw wait failed, initial\n");

        secthead = (struct txp_fw_section_header *)(((u_int8_t *)tc990image) +
            sizeof(struct txp_fw_file_header));

        for (sect = 0; sect < fileheader->nsections; sect++) {
                if (txp_download_fw_section(sc, secthead, sect))
                secthead = (struct txp_fw_section_header *)
                    (((u_int8_t *)secthead) + secthead->nbytes +

        WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);

        for (i = 0; i < 10000; i++) {
                r = READ_REG(sc, TXP_A2H_0);
                if (r == STAT_WAITING_FOR_BOOT)

        if (r != STAT_WAITING_FOR_BOOT) {
                if_printf(&sc->sc_arpcom.ac_if, "not waiting for boot\n");

        WRITE_REG(sc, TXP_IER, ier);
        WRITE_REG(sc, TXP_IMR, imr);
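/*
 * txp_download_fw_wait: poll the ISR for the A2H_0 doorbell, ack it, and
 * confirm that the boot ROM is asking for the next firmware segment.
 */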
txp_download_fw_wait(struct txp_softc *sc)

        for (i = 0; i < 10000; i++) {
                r = READ_REG(sc, TXP_ISR);
                if (r & TXP_INT_A2H_0)

        if (!(r & TXP_INT_A2H_0)) {
                if_printf(&sc->sc_arpcom.ac_if, "fw wait failed comm0\n");

        WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

        r = READ_REG(sc, TXP_A2H_0);
        if (r != STAT_WAITING_FOR_SEGMENT) {
                if_printf(&sc->sc_arpcom.ac_if, "fw not waiting for segment\n");
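/*
 * txp_download_fw_section: bounce one firmware section into the DMA-able
 * scratch buffer, verify its checksum with in_cksum() on a dummied-up mbuf,
 * then give the boot ROM the section's length, checksum, load address and
 * physical source address.
 */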
txp_download_fw_section(struct txp_softc *sc,
    struct txp_fw_section_header *sect, int sectnum)

        /* Skip zero length sections */
        if (sect->nbytes == 0)

        /* Make sure we aren't past the end of the image */
        rseg = ((u_int8_t *)sect) - ((u_int8_t *)tc990image);
        if (rseg >= sizeof(tc990image)) {
                if_printf(&sc->sc_arpcom.ac_if, "fw invalid section address, "
                    "section %d\n", sectnum);

        /* Make sure this section doesn't go past the end */
        rseg += sect->nbytes;
        if (rseg >= sizeof(tc990image)) {
                if_printf(&sc->sc_arpcom.ac_if, "fw truncated section %d\n",

        bcopy(((u_int8_t *)sect) + sizeof(*sect), sc->sc_fwbuf, sect->nbytes);
        dma = vtophys(sc->sc_fwbuf);

        /*
         * dummy up mbuf and verify section checksum
         */
        m.m_next = m.m_nextpkt = NULL;
        m.m_len = sect->nbytes;
        m.m_data = sc->sc_fwbuf;

        csum = in_cksum(&m, sect->nbytes);
        if (csum != sect->cksum) {
                if_printf(&sc->sc_arpcom.ac_if, "fw section %d, bad "
                    "cksum (expected 0x%x got 0x%x)\n",
                    sectnum, sect->cksum, csum);

        WRITE_REG(sc, TXP_H2A_1, sect->nbytes);
        WRITE_REG(sc, TXP_H2A_2, sect->cksum);
        WRITE_REG(sc, TXP_H2A_3, sect->addr);
        WRITE_REG(sc, TXP_H2A_4, 0);
        WRITE_REG(sc, TXP_H2A_5, dma & 0xffffffff);
        WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);

        if (txp_download_fw_wait(sc)) {
                if_printf(&sc->sc_arpcom.ac_if, "fw wait failed, "
                    "section %d\n", sectnum);
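/*
 * Interrupt handler (txp_intr): mask everything, ack the latched status,
 * drain both RX rings, refill the RX buffer ring, reclaim both TX rings,
 * then unmask interrupts and restart transmission.
 */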
        struct txp_softc *sc = vsc;
        struct txp_hostvar *hv = sc->sc_hostvar;

        /* mask all interrupts */
        WRITE_REG(sc, TXP_IMR, TXP_INT_RESERVED | TXP_INT_SELF |
            TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
            TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
            TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
            TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH);

        isr = READ_REG(sc, TXP_ISR);
        WRITE_REG(sc, TXP_ISR, isr);

        if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
                txp_rx_reclaim(sc, &sc->sc_rxhir);
        if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
                txp_rx_reclaim(sc, &sc->sc_rxlor);

        if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
                txp_rxbuf_reclaim(sc);

        if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
            TXP_OFFSET2IDX(*(sc->sc_txhir.r_off))))
                txp_tx_reclaim(sc, &sc->sc_txhir);

        if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
            TXP_OFFSET2IDX(*(sc->sc_txlor.r_off))))
                txp_tx_reclaim(sc, &sc->sc_txlor);

        isr = READ_REG(sc, TXP_ISR);

        /* unmask all interrupts */
        WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

        if_devstart(&sc->sc_arpcom.ac_if);
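/*
 * txp_rx_reclaim: walk a receive ring from the read offset to the firmware's
 * write offset, copying each packet into a fresh mbuf when strict alignment
 * is required, translating the hardware checksum/VLAN status into mbuf
 * flags, and handing the result to ifp->if_input().
 */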
txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r)

        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        struct txp_rx_desc *rxd;
        struct txp_swdesc *sd = NULL;
        u_int32_t roff, woff;

        rxd = r->r_desc + (roff / sizeof(struct txp_rx_desc));

        while (roff != woff) {

                if (rxd->rx_flags & RX_FLAGS_ERROR) {
                        if_printf(ifp, "error 0x%x\n", rxd->rx_stat);

                /* retrieve stashed pointer */

                m->m_pkthdr.len = m->m_len = rxd->rx_len;

#ifdef __STRICT_ALIGNMENT
                /*
                 * XXX Nice chip, except it won't accept "off by 2"
                 * buffers, so we're forced to copy.  Supposedly
                 * this will be fixed in a newer firmware rev
                 * and this will be temporary.
                 */
                MGETHDR(mnew, MB_DONTWAIT, MT_DATA);

                if (m->m_len > (MHLEN - 2)) {
                        MCLGET(mnew, MB_DONTWAIT);
                        if (!(mnew->m_flags & M_EXT)) {

                mnew->m_pkthdr.rcvif = ifp;

                mnew->m_pkthdr.len = mnew->m_len = m->m_len;
                m_copydata(m, 0, m->m_pkthdr.len, mtod(mnew, caddr_t));

                if (rxd->rx_stat & RX_STAT_IPCKSUMBAD)
                        m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                else if (rxd->rx_stat & RX_STAT_IPCKSUMGOOD)
                        m->m_pkthdr.csum_flags |=
                            CSUM_IP_CHECKED|CSUM_IP_VALID;

                if ((rxd->rx_stat & RX_STAT_TCPCKSUMGOOD) ||
                    (rxd->rx_stat & RX_STAT_UDPCKSUMGOOD)) {
                        m->m_pkthdr.csum_flags |=
                            CSUM_DATA_VALID|CSUM_PSEUDO_HDR|
                            CSUM_FRAG_NOT_CHECKED;
                        m->m_pkthdr.csum_data = 0xffff;

                if (rxd->rx_stat & RX_STAT_VLAN) {
                        m->m_flags |= M_VLANTAG;
                        m->m_pkthdr.ether_vlantag = htons(rxd->rx_vlan >> 16);

                ifp->if_input(ifp, m);

                roff += sizeof(struct txp_rx_desc);
                if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
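/*
 * txp_rxbuf_reclaim: replenish the free-buffer ring with fresh mbuf
 * clusters and advance the host's write index so the firmware can use them.
 */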
txp_rxbuf_reclaim(struct txp_softc *sc)

        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        struct txp_hostvar *hv = sc->sc_hostvar;
        struct txp_rxbuf_desc *rbd;
        struct txp_swdesc *sd;

        if (!(ifp->if_flags & IFF_RUNNING))

        i = sc->sc_rxbufprod;
        rbd = sc->sc_rxbufs + i;

        if (sd->sd_mbuf != NULL)

        MGETHDR(sd->sd_mbuf, MB_DONTWAIT, MT_DATA);
        if (sd->sd_mbuf == NULL)

        MCLGET(sd->sd_mbuf, MB_DONTWAIT);
        if ((sd->sd_mbuf->m_flags & M_EXT) == 0)

        sd->sd_mbuf->m_pkthdr.rcvif = ifp;
        sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;

        rbd->rb_paddrlo = vtophys(mtod(sd->sd_mbuf, vm_offset_t))

        hv->hv_rx_buf_write_idx = TXP_IDX2OFFSET(i);

        if (++i == RXBUF_ENTRIES) {

        sc->sc_rxbufprod = i;

        m_freem(sd->sd_mbuf);
/*
 * Reclaim mbufs and entries from a transmit ring.
 */
txp_tx_reclaim(struct txp_softc *sc, struct txp_tx_ring *r)

        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        u_int32_t idx = TXP_OFFSET2IDX(*(r->r_off));
        u_int32_t cons = r->r_cons, cnt = r->r_cnt;
        struct txp_tx_desc *txd = r->r_desc + cons;
        struct txp_swdesc *sd = sc->sc_txd + cons;

        while (cons != idx) {

                if ((txd->tx_flags & TX_FLAGS_TYPE_M) ==
                    TX_FLAGS_TYPE_DATA) {

                ifp->if_flags &= ~IFF_OACTIVE;

                if (++cons == TX_ENTRIES) {
txp_shutdown(device_t dev)

        struct txp_softc *sc;

        sc = device_get_softc(dev);
        ifp = &sc->sc_arpcom.ac_if;
        lwkt_serialize_enter(ifp->if_serializer);

        /* mask all interrupts */
        WRITE_REG(sc, TXP_IMR,
            TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
            TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |

        txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
        txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
        txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL, 0);

        lwkt_serialize_exit(ifp->if_serializer);
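/*
 * txp_alloc_rings: fill in the boot record with the physical (vtophys)
 * addresses and sizes of the host variables, TX/RX/command/response rings
 * and the receive buffer ring, hand it to the firmware, and wait for the
 * 3XP to report STAT_RUNNING.
 */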
txp_alloc_rings(struct txp_softc *sc)

        struct txp_boot_record *boot;
        struct txp_ldata *ld;

        boot = &ld->txp_boot;

        bzero(&ld->txp_hostvar, sizeof(struct txp_hostvar));
        boot->br_hostvar_lo = vtophys(&ld->txp_hostvar);
        boot->br_hostvar_hi = 0;
        sc->sc_hostvar = (struct txp_hostvar *)&ld->txp_hostvar;

        /* hi priority tx ring */
        boot->br_txhipri_lo = vtophys(&ld->txp_txhiring);
        boot->br_txhipri_hi = 0;
        boot->br_txhipri_siz = TX_ENTRIES * sizeof(struct txp_tx_desc);
        sc->sc_txhir.r_reg = TXP_H2A_1;
        sc->sc_txhir.r_desc = (struct txp_tx_desc *)&ld->txp_txhiring;
        sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
        sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;

        /* lo priority tx ring */
        boot->br_txlopri_lo = vtophys(&ld->txp_txloring);
        boot->br_txlopri_hi = 0;
        boot->br_txlopri_siz = TX_ENTRIES * sizeof(struct txp_tx_desc);
        sc->sc_txlor.r_reg = TXP_H2A_3;
        sc->sc_txlor.r_desc = (struct txp_tx_desc *)&ld->txp_txloring;
        sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
        sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;

        /* high priority rx ring */
        boot->br_rxhipri_lo = vtophys(&ld->txp_rxhiring);
        boot->br_rxhipri_hi = 0;
        boot->br_rxhipri_siz = RX_ENTRIES * sizeof(struct txp_rx_desc);
        sc->sc_rxhir.r_desc = (struct txp_rx_desc *)&ld->txp_rxhiring;
        sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
        sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;

        /* low priority rx ring */
        boot->br_rxlopri_lo = vtophys(&ld->txp_rxloring);
        boot->br_rxlopri_hi = 0;
        boot->br_rxlopri_siz = RX_ENTRIES * sizeof(struct txp_rx_desc);
        sc->sc_rxlor.r_desc = (struct txp_rx_desc *)&ld->txp_rxloring;
        sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
        sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;

        bzero(&ld->txp_cmdring, sizeof(struct txp_cmd_desc) * CMD_ENTRIES);
        boot->br_cmd_lo = vtophys(&ld->txp_cmdring);
        boot->br_cmd_siz = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
        sc->sc_cmdring.base = (struct txp_cmd_desc *)&ld->txp_cmdring;
        sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
        sc->sc_cmdring.lastwrite = 0;

        bzero(&ld->txp_rspring, sizeof(struct txp_rsp_desc) * RSP_ENTRIES);
        boot->br_resp_lo = vtophys(&ld->txp_rspring);
        boot->br_resp_hi = 0;
        boot->br_resp_siz = CMD_ENTRIES * sizeof(struct txp_rsp_desc);
        sc->sc_rspring.base = (struct txp_rsp_desc *)&ld->txp_rspring;
        sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
        sc->sc_rspring.lastwrite = 0;

        /* receive buffer ring */
        boot->br_rxbuf_lo = vtophys(&ld->txp_rxbufs);
        boot->br_rxbuf_hi = 0;
        boot->br_rxbuf_siz = RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc);
        sc->sc_rxbufs = (struct txp_rxbuf_desc *)&ld->txp_rxbufs;

        for (i = 0; i < RXBUF_ENTRIES; i++) {
                struct txp_swdesc *sd;
                if (sc->sc_rxbufs[i].rb_sd != NULL)
                sc->sc_rxbufs[i].rb_sd = kmalloc(sizeof(struct txp_swdesc),
                sd = sc->sc_rxbufs[i].rb_sd;

        sc->sc_rxbufprod = 0;

        bzero(&ld->txp_zero, sizeof(u_int32_t));
        boot->br_zero_lo = vtophys(&ld->txp_zero);
        boot->br_zero_hi = 0;

        /* See if it's waiting for boot, and try to boot it */
        for (i = 0; i < 10000; i++) {
                r = READ_REG(sc, TXP_A2H_0);
                if (r == STAT_WAITING_FOR_BOOT)

        if (r != STAT_WAITING_FOR_BOOT) {
                if_printf(&sc->sc_arpcom.ac_if, "not waiting for boot\n");

        WRITE_REG(sc, TXP_H2A_2, 0);
        WRITE_REG(sc, TXP_H2A_1, vtophys(sc->sc_boot));
        WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);

        /* See if it booted */
        for (i = 0; i < 10000; i++) {
                r = READ_REG(sc, TXP_A2H_0);
                if (r == STAT_RUNNING)

        if (r != STAT_RUNNING) {
                if_printf(&sc->sc_arpcom.ac_if, "fw not running\n");

        /* Clear TX and CMD ring write registers */
        WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
        WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
        WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
        WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);
txp_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)

        struct txp_softc *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;

        if (ifp->if_flags & IFF_UP) {
                if (ifp->if_flags & IFF_RUNNING)

                /*
                 * Multicast list has changed; set the hardware
                 * filter accordingly.
                 */

                error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);

                error = ether_ioctl(ifp, command, data);
txp_rxring_fill(struct txp_softc *sc)

        struct txp_swdesc *sd;

        ifp = &sc->sc_arpcom.ac_if;

        for (i = 0; i < RXBUF_ENTRIES; i++) {
                sd = sc->sc_rxbufs[i].rb_sd;
                MGETHDR(sd->sd_mbuf, MB_DONTWAIT, MT_DATA);
                if (sd->sd_mbuf == NULL)

                MCLGET(sd->sd_mbuf, MB_DONTWAIT);
                if ((sd->sd_mbuf->m_flags & M_EXT) == 0) {
                        m_freem(sd->sd_mbuf);

                sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
                sd->sd_mbuf->m_pkthdr.rcvif = ifp;

                sc->sc_rxbufs[i].rb_paddrlo =
                    vtophys(mtod(sd->sd_mbuf, vm_offset_t));
                sc->sc_rxbufs[i].rb_paddrhi = 0;

        sc->sc_hostvar->hv_rx_buf_write_idx = (RXBUF_ENTRIES - 1) *
            sizeof(struct txp_rxbuf_desc);
txp_rxring_empty(struct txp_softc *sc)

        struct txp_swdesc *sd;

        if (sc->sc_rxbufs == NULL)

        for (i = 0; i < RXBUF_ENTRIES; i++) {
                if (&sc->sc_rxbufs[i] == NULL)
                sd = sc->sc_rxbufs[i].rb_sd;

                if (sd->sd_mbuf != NULL) {
                        m_freem(sd->sd_mbuf);
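/*
 * txp_init: program the maximum packet size and station address, refill the
 * receive buffer ring, enable TX/RX on the 3XP and unmask the interrupts the
 * driver cares about, then mark the interface running and start the
 * one-second statistics timer.
 */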
        struct txp_softc *sc;

        ifp = &sc->sc_arpcom.ac_if;

        if (ifp->if_flags & IFF_RUNNING)

        txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
            NULL, NULL, NULL, 1);

        /* Set station address. */
        ((u_int8_t *)&p1)[1] = sc->sc_arpcom.ac_enaddr[0];
        ((u_int8_t *)&p1)[0] = sc->sc_arpcom.ac_enaddr[1];
        ((u_int8_t *)&p2)[3] = sc->sc_arpcom.ac_enaddr[2];
        ((u_int8_t *)&p2)[2] = sc->sc_arpcom.ac_enaddr[3];
        ((u_int8_t *)&p2)[1] = sc->sc_arpcom.ac_enaddr[4];
        ((u_int8_t *)&p2)[0] = sc->sc_arpcom.ac_enaddr[5];
        txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0,
            NULL, NULL, NULL, 1);

        txp_rxring_fill(sc);

        txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);
        txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);

        WRITE_REG(sc, TXP_IER, TXP_INT_RESERVED | TXP_INT_SELF |
            TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
            TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
            TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
            TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH);
        WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;

        callout_reset(&sc->txp_stat_timer, hz, txp_tick, sc);
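/*
 * txp_tick: once a second, replenish receive buffers and pull the firmware
 * statistics (READ_STATISTICS/CLEAR_STATISTICS) into the interface error,
 * collision and packet counters.
 */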
        struct txp_softc *sc = vsc;
        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        struct txp_rsp_desc *rsp = NULL;
        struct txp_ext_desc *ext;

        lwkt_serialize_enter(ifp->if_serializer);
        txp_rxbuf_reclaim(sc);

        if (txp_command2(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,

        if (rsp->rsp_numdesc != 6)

        if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
            NULL, NULL, NULL, 1))

        ext = (struct txp_ext_desc *)(rsp + 1);

        ifp->if_ierrors += ext[3].ext_2 + ext[3].ext_3 + ext[3].ext_4 +
            ext[4].ext_1 + ext[4].ext_4;
        ifp->if_oerrors += ext[0].ext_1 + ext[1].ext_1 + ext[1].ext_4 +
        ifp->if_collisions += ext[0].ext_2 + ext[0].ext_3 + ext[1].ext_2 +
        ifp->if_opackets += rsp->rsp_par2;
        ifp->if_ipackets += ext[2].ext_3;

        kfree(rsp, M_DEVBUF);

        callout_reset(&sc->txp_stat_timer, hz, txp_tick, sc);
        lwkt_serialize_exit(ifp->if_serializer);
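/*
 * txp_start: if_start handler.  Dequeue packets, defragment them with
 * m_defrag() when they would need more TX descriptors than are free, then
 * build one data descriptor plus one fragment descriptor per mbuf in the
 * high-priority TX ring and ring the doorbell register.
 */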
txp_start(struct ifnet *ifp)

        struct txp_softc *sc = ifp->if_softc;
        struct txp_tx_ring *r = &sc->sc_txhir;
        struct txp_tx_desc *txd;
        struct txp_frag_desc *fxd;
        struct mbuf *m, *m0, *m_defragged;
        struct txp_swdesc *sd;
        u_int32_t firstprod, firstcnt, prod, cnt;

        if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)

        if ((TX_ENTRIES - cnt) < 4)

        m = ifq_dequeue(&ifp->if_snd, NULL);

        frag = 1;	/* Extra desc */
        for (m0 = m; m0 != NULL; m0 = m0->m_next)

        if ((cnt + frag) >= (TX_ENTRIES - 4)) {
                if (m_defragged != NULL) {
                        /*
                         * Even after defragmentation, there
                         * are still too many fragments, so
                         */

                m_defragged = m_defrag(m, MB_DONTWAIT);
                if (m_defragged == NULL) {

                /* Recount # of fragments */

        sd = sc->sc_txd + prod;

        txd = r->r_desc + prod;
        txd->tx_flags = TX_FLAGS_TYPE_DATA;
        txd->tx_numdesc = 0;

        if (++prod == TX_ENTRIES)

        KASSERT(cnt < (TX_ENTRIES - 4), ("too many frag\n"));

        if (m->m_flags & M_VLANTAG) {
                txd->tx_pflags = TX_PFLAGS_VLAN |
                    (htons(m->m_pkthdr.ether_vlantag) <<
                    TX_PFLAGS_VLANTAG_S);

        if (m->m_pkthdr.csum_flags & CSUM_IP)
                txd->tx_pflags |= TX_PFLAGS_IPCKSUM;

        if (m->m_pkthdr.csum_flags & CSUM_TCP)
                txd->tx_pflags |= TX_PFLAGS_TCPCKSUM;
        if (m->m_pkthdr.csum_flags & CSUM_UDP)
                txd->tx_pflags |= TX_PFLAGS_UDPCKSUM;

        fxd = (struct txp_frag_desc *)(r->r_desc + prod);
        for (m0 = m; m0 != NULL; m0 = m0->m_next) {

                KASSERT(cnt < (TX_ENTRIES - 4), ("too many frag\n"));

                fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG;
                fxd->frag_rsvd1 = 0;
                fxd->frag_len = m0->m_len;
                fxd->frag_addrlo = vtophys(mtod(m0, vm_offset_t));
                fxd->frag_addrhi = 0;
                fxd->frag_rsvd2 = 0;

                if (++prod == TX_ENTRIES) {
                        fxd = (struct txp_frag_desc *)r->r_desc;

        ETHER_BPF_MTAP(ifp, m);
        WRITE_REG(sc, r->r_reg, TXP_IDX2OFFSET(prod));

        ifp->if_flags |= IFF_OACTIVE;
        r->r_prod = firstprod;
        r->r_cnt = firstcnt;
/*
 * Handle simple commands sent to the typhoon
 */
txp_command(struct txp_softc *sc, u_int16_t id, u_int16_t in1, u_int32_t in2,
    u_int32_t in3, u_int16_t *out1, u_int32_t *out2, u_int32_t *out3,

        struct txp_rsp_desc *rsp = NULL;

        if (txp_command2(sc, id, in1, in2, in3, NULL, 0, &rsp, wait))

        *out1 = rsp->rsp_par1;
        *out2 = rsp->rsp_par2;
        *out3 = rsp->rsp_par3;
        kfree(rsp, M_DEVBUF);
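/*
 * txp_command2: queue a command descriptor (plus optional extension
 * descriptors) on the command ring, ring the H2A_2 doorbell and, when a
 * response is requested, poll the host variables until txp_response()
 * returns the matching response descriptor.
 */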
txp_command2(struct txp_softc *sc, u_int16_t id, u_int16_t in1, u_int32_t in2,
    u_int32_t in3, struct txp_ext_desc *in_extp, u_int8_t in_extn,
    struct txp_rsp_desc **rspp, int wait)

        struct txp_hostvar *hv = sc->sc_hostvar;
        struct txp_cmd_desc *cmd;
        struct txp_ext_desc *ext;

        if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
                if_printf(&sc->sc_arpcom.ac_if, "no free cmd descriptors\n");

        idx = sc->sc_cmdring.lastwrite;
        cmd = (struct txp_cmd_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
        bzero(cmd, sizeof(*cmd));

        cmd->cmd_numdesc = in_extn;
        cmd->cmd_seq = seq = sc->sc_seq++;

        cmd->cmd_par1 = in1;
        cmd->cmd_par2 = in2;
        cmd->cmd_par3 = in3;
        cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
            (wait ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;

        idx += sizeof(struct txp_cmd_desc);
        if (idx == sc->sc_cmdring.size)

        for (i = 0; i < in_extn; i++) {
                ext = (struct txp_ext_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
                bcopy(in_extp, ext, sizeof(struct txp_ext_desc));

                idx += sizeof(struct txp_cmd_desc);
                if (idx == sc->sc_cmdring.size)

        sc->sc_cmdring.lastwrite = idx;

        WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);

        for (i = 0; i < 10000; i++) {
                idx = hv->hv_resp_read_idx;
                if (idx != hv->hv_resp_write_idx) {
                        if (txp_response(sc, idx, id, seq, rspp))

        if (i == 1000 || (*rspp) == NULL) {
                if_printf(&sc->sc_arpcom.ac_if, "0x%x command failed\n", id);
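/*
 * txp_response: scan the response ring from ridx to the firmware's write
 * index; the response matching (id, seq) is copied out through
 * txp_rsp_fixup() into freshly allocated memory, while unsolicited
 * responses are logged and consumed in place.
 */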
txp_response(struct txp_softc *sc, u_int32_t ridx, u_int16_t id, u_int16_t seq,
    struct txp_rsp_desc **rspp)

        struct txp_hostvar *hv = sc->sc_hostvar;
        struct txp_rsp_desc *rsp;

        while (ridx != hv->hv_resp_write_idx) {
                rsp = (struct txp_rsp_desc *)(((u_int8_t *)sc->sc_rspring.base) + ridx);

                if (id == rsp->rsp_id && rsp->rsp_seq == seq) {
                        *rspp = (struct txp_rsp_desc *)kmalloc(
                            sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
                            M_DEVBUF, M_INTWAIT);
                        if ((*rspp) == NULL)
                        txp_rsp_fixup(sc, rsp, *rspp);

                if (rsp->rsp_flags & RSP_FLAGS_ERROR) {
                        if_printf(&sc->sc_arpcom.ac_if, "response error!\n");
                        txp_rsp_fixup(sc, rsp, NULL);
                        ridx = hv->hv_resp_read_idx;

                switch (rsp->rsp_id) {
                case TXP_CMD_CYCLE_STATISTICS:
                case TXP_CMD_MEDIA_STATUS_READ:
                case TXP_CMD_HELLO_RESPONSE:
                        if_printf(&sc->sc_arpcom.ac_if, "hello\n");
                        if_printf(&sc->sc_arpcom.ac_if, "unknown id(0x%x)\n",

                txp_rsp_fixup(sc, rsp, NULL);
                ridx = hv->hv_resp_read_idx;
                hv->hv_resp_read_idx = ridx;
txp_rsp_fixup(struct txp_softc *sc, struct txp_rsp_desc *rsp,
    struct txp_rsp_desc *dst)

        struct txp_rsp_desc *src = rsp;
        struct txp_hostvar *hv = sc->sc_hostvar;

        ridx = hv->hv_resp_read_idx;

        for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
                bcopy(src, dst++, sizeof(struct txp_rsp_desc));
                ridx += sizeof(struct txp_rsp_desc);
                if (ridx == sc->sc_rspring.size) {
                        src = sc->sc_rspring.base;

        sc->sc_rspring.lastwrite = hv->hv_resp_read_idx = ridx;

        hv->hv_resp_read_idx = ridx;
txp_cmd_desc_numfree(struct txp_softc *sc)

        struct txp_hostvar *hv = sc->sc_hostvar;
        struct txp_boot_record *br = sc->sc_boot;
        u_int32_t widx, ridx, nfree;

        widx = sc->sc_cmdring.lastwrite;
        ridx = hv->hv_cmd_read_idx;

        /* Ring is completely free */
        nfree = br->br_cmd_siz - sizeof(struct txp_cmd_desc);

        nfree = br->br_cmd_siz -
            (widx - ridx + sizeof(struct txp_cmd_desc));

        nfree = ridx - widx - sizeof(struct txp_cmd_desc);

        return (nfree / sizeof(struct txp_cmd_desc));
txp_stop(struct txp_softc *sc)

        ifp = &sc->sc_arpcom.ac_if;

        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

        callout_stop(&sc->txp_stat_timer);

        txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);
        txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);

        txp_rxring_empty(sc);
txp_watchdog(struct ifnet *ifp)

txp_ifmedia_upd(struct ifnet *ifp)

        struct txp_softc *sc = ifp->if_softc;
        struct ifmedia *ifm = &sc->sc_ifmedia;

        if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)

        if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
                if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
                        new_xcvr = TXP_XCVR_10_FDX;
                else
                        new_xcvr = TXP_XCVR_10_HDX;
        } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
                if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
                        new_xcvr = TXP_XCVR_100_FDX;
                else
                        new_xcvr = TXP_XCVR_100_HDX;
        } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
                new_xcvr = TXP_XCVR_AUTO;

        if (sc->sc_xcvr == new_xcvr)

        txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
            NULL, NULL, NULL, 0);
        sc->sc_xcvr = new_xcvr;
txp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)

        struct txp_softc *sc = ifp->if_softc;
        struct ifmedia *ifm = &sc->sc_ifmedia;
        u_int16_t bmsr, bmcr, anlpar;

        ifmr->ifm_status = IFM_AVALID;
        ifmr->ifm_active = IFM_ETHER;

        if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
            &bmsr, NULL, NULL, 1))
        if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
            &bmsr, NULL, NULL, 1))

        if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
            &bmcr, NULL, NULL, 1))

        if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
            &anlpar, NULL, NULL, 1))

        if (bmsr & BMSR_LINK)
                ifmr->ifm_status |= IFM_ACTIVE;

        if (bmcr & BMCR_ISO) {
                ifmr->ifm_active |= IFM_NONE;
                ifmr->ifm_status = 0;

        if (bmcr & BMCR_LOOP)
                ifmr->ifm_active |= IFM_LOOP;

        if (bmcr & BMCR_AUTOEN) {
                if ((bmsr & BMSR_ACOMP) == 0) {
                        ifmr->ifm_active |= IFM_NONE;

                if (anlpar & ANLPAR_T4)
                        ifmr->ifm_active |= IFM_100_T4;
                else if (anlpar & ANLPAR_TX_FD)
                        ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
                else if (anlpar & ANLPAR_TX)
                        ifmr->ifm_active |= IFM_100_TX;
                else if (anlpar & ANLPAR_10_FD)
                        ifmr->ifm_active |= IFM_10_T|IFM_FDX;
                else if (anlpar & ANLPAR_10)
                        ifmr->ifm_active |= IFM_10_T;
                else
                        ifmr->ifm_active |= IFM_NONE;
        } else
                ifmr->ifm_active = ifm->ifm_cur->ifm_media;

        ifmr->ifm_active |= IFM_NONE;
        ifmr->ifm_status &= ~IFM_AVALID;
txp_show_descriptor(void *d)

        struct txp_cmd_desc *cmd = d;
        struct txp_rsp_desc *rsp = d;
        struct txp_tx_desc *txd = d;
        struct txp_frag_desc *frgd = d;

        switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
        case CMD_FLAGS_TYPE_CMD:
                /* command descriptor */
                kprintf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
                    cmd->cmd_flags, cmd->cmd_numdesc, cmd->cmd_id, cmd->cmd_seq,
                    cmd->cmd_par1, cmd->cmd_par2, cmd->cmd_par3);
        case CMD_FLAGS_TYPE_RESP:
                /* response descriptor */
                kprintf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
                    rsp->rsp_flags, rsp->rsp_numdesc, rsp->rsp_id, rsp->rsp_seq,
                    rsp->rsp_par1, rsp->rsp_par2, rsp->rsp_par3);
        case CMD_FLAGS_TYPE_DATA:
                /* data header (assuming tx for now) */
                kprintf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
                    txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
                    txd->tx_addrlo, txd->tx_addrhi, txd->tx_pflags);
        case CMD_FLAGS_TYPE_FRAG:
                /* fragment descriptor */
                kprintf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
                    frgd->frag_flags, frgd->frag_rsvd1, frgd->frag_len,
                    frgd->frag_addrlo, frgd->frag_addrhi, frgd->frag_rsvd2);
                kprintf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
                    cmd->cmd_flags & CMD_FLAGS_TYPE_M,
                    cmd->cmd_flags, cmd->cmd_numdesc, cmd->cmd_id, cmd->cmd_seq,
                    cmd->cmd_par1, cmd->cmd_par2, cmd->cmd_par3);
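/*
 * txp_set_filter: build the receive filter from the interface flags
 * (promiscuous/broadcast/allmulti) and, for plain multicast, program a
 * 64-bit hash computed with ether_crc32_be() before writing the filter
 * to the firmware.
 */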
txp_set_filter(struct txp_softc *sc)

        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        struct ifmultiaddr *ifma;

        if (ifp->if_flags & IFF_PROMISC) {
                filter = TXP_RXFILT_PROMISC;

        filter = TXP_RXFILT_DIRECT;

        if (ifp->if_flags & IFF_BROADCAST)
                filter |= TXP_RXFILT_BROADCAST;

        if (ifp->if_flags & IFF_ALLMULTI) {
                filter |= TXP_RXFILT_ALLMULTI;

                uint32_t hashbit, hash[2];

                hash[0] = hash[1] = 0;

                LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                        if (ifma->ifma_addr->sa_family != AF_LINK)

                        hashbit = (uint16_t)(ether_crc32_be(
                            LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
                            ETHER_ADDR_LEN) & (64 - 1));
                        hash[hashbit / 32] |= (1 << hashbit % 32);

                filter |= TXP_RXFILT_HASHMULTI;
                txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE,
                    2, hash[0], hash[1], NULL, NULL, NULL, 0);

        txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
            NULL, NULL, NULL, 1);
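/*
 * txp_capabilities: read the offload capabilities the firmware advertises
 * (OFFLOAD_READ), translate them into if_capabilities/if_hwassist bits for
 * VLAN tagging, IPsec and IP/TCP/UDP checksums, and write the chosen set
 * back with OFFLOAD_WRITE.
 */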
txp_capabilities(struct txp_softc *sc)

        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        struct txp_rsp_desc *rsp = NULL;
        struct txp_ext_desc *ext;

        if (txp_command2(sc, TXP_CMD_OFFLOAD_READ, 0, 0, 0, NULL, 0, &rsp, 1))

        if (rsp->rsp_numdesc != 1)

        ext = (struct txp_ext_desc *)(rsp + 1);

        sc->sc_tx_capability = ext->ext_1 & OFFLOAD_MASK;
        sc->sc_rx_capability = ext->ext_2 & OFFLOAD_MASK;
        ifp->if_capabilities = 0;

        if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_VLAN) {
                sc->sc_tx_capability |= OFFLOAD_VLAN;
                sc->sc_rx_capability |= OFFLOAD_VLAN;
                ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

        if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPSEC) {
                sc->sc_tx_capability |= OFFLOAD_IPSEC;
                sc->sc_rx_capability |= OFFLOAD_IPSEC;
                ifp->if_capabilities |= IFCAP_IPSEC;

        if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPCKSUM) {
                sc->sc_tx_capability |= OFFLOAD_IPCKSUM;
                sc->sc_rx_capability |= OFFLOAD_IPCKSUM;
                ifp->if_capabilities |= IFCAP_HWCSUM;
                ifp->if_hwassist |= CSUM_IP;

        if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_TCPCKSUM) {
                sc->sc_tx_capability |= OFFLOAD_TCPCKSUM;
                sc->sc_rx_capability |= OFFLOAD_TCPCKSUM;
                ifp->if_capabilities |= IFCAP_HWCSUM;

        if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_UDPCKSUM) {
                sc->sc_tx_capability |= OFFLOAD_UDPCKSUM;
                sc->sc_rx_capability |= OFFLOAD_UDPCKSUM;
                ifp->if_capabilities |= IFCAP_HWCSUM;

        ifp->if_capenable = ifp->if_capabilities;

        if (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0,
            sc->sc_tx_capability, sc->sc_rx_capability, NULL, NULL, NULL, 1))

        kfree(rsp, M_DEVBUF);