1 /* $OpenBSD: if_txp.c,v 1.48 2001/06/27 06:34:50 kjc Exp $ */
2 /* $FreeBSD: src/sys/dev/txp/if_txp.c,v 1.4.2.4 2001/12/14 19:50:43 jlemon Exp $ */
3 /* $DragonFly: src/sys/dev/netif/txp/if_txp.c,v 1.45 2008/01/06 16:55:50 swildner Exp $ */
7 * Jason L. Wright <jason@thought.net>, Theo de Raadt, and
8 * Aaron Campbell <aaron@monkey.org>. All rights reserved.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by Jason L. Wright,
21 * Theo de Raadt and Aaron Campbell.
22 * 4. Neither the name of the author nor the names of any co-contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
27 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
28 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
36 * THE POSSIBILITY OF SUCH DAMAGE.
40 * Driver for 3c990 (Typhoon) Ethernet ASIC
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/sockio.h>
47 #include <sys/malloc.h>
48 #include <sys/kernel.h>
49 #include <sys/socket.h>
50 #include <sys/serialize.h>
53 #include <sys/thread2.h>
56 #include <net/ifq_var.h>
57 #include <net/if_arp.h>
58 #include <net/ethernet.h>
59 #include <net/if_dl.h>
60 #include <net/if_types.h>
61 #include <net/vlan/if_vlan_var.h>
63 #include <netinet/in.h>
64 #include <netinet/in_systm.h>
65 #include <netinet/in_var.h>
66 #include <netinet/ip.h>
67 #include <netinet/if_ether.h>
68 #include <sys/in_cksum.h>
70 #include <net/if_media.h>
74 #include <vm/vm.h> /* for vtophys */
75 #include <vm/pmap.h> /* for vtophys */
77 #include "../mii_layer/mii.h"
78 #include "../mii_layer/miivar.h"
80 #include <bus/pci/pcidevs.h>
81 #include <bus/pci/pcireg.h>
82 #include <bus/pci/pcivar.h>
84 #define TXP_USEIOSPACE
85 #define __STRICT_ALIGNMENT
87 #include "if_txpreg.h"
91 * Various supported device vendors/types and their names.
93 static struct txp_type txp_devs
[] = {
94 { PCI_VENDOR_3COM
, PCI_PRODUCT_3COM_3CR990TX95
,
95 "3Com 3cR990-TX-95 Etherlink with 3XP Processor" },
96 { PCI_VENDOR_3COM
, PCI_PRODUCT_3COM_3CR990TX97
,
97 "3Com 3cR990-TX-97 Etherlink with 3XP Processor" },
98 { PCI_VENDOR_3COM
, PCI_PRODUCT_3COM_3C990B
,
99 "3Com 3cR990B-TXM Etherlink with 3XP Processor" },
100 { PCI_VENDOR_3COM
, PCI_PRODUCT_3COM_3CR990SVR95
,
101 "3Com 3cR990-SRV-95 Etherlink Server with 3XP Processor" },
102 { PCI_VENDOR_3COM
, PCI_PRODUCT_3COM_3CR990SVR97
,
103 "3Com 3cR990-SRV-97 Etherlink Server with 3XP Processor" },
104 { PCI_VENDOR_3COM
, PCI_PRODUCT_3COM_3C990BSVR
,
105 "3Com 3cR990B-SRV Etherlink Server with 3XP Processor" },
109 static int txp_probe (device_t
);
110 static int txp_attach (device_t
);
111 static int txp_detach (device_t
);
112 static void txp_intr (void *);
113 static void txp_tick (void *);
114 static int txp_shutdown (device_t
);
115 static int txp_ioctl (struct ifnet
*, u_long
, caddr_t
, struct ucred
*);
116 static void txp_start (struct ifnet
*);
117 static void txp_stop (struct txp_softc
*);
118 static void txp_init (void *);
119 static void txp_watchdog (struct ifnet
*);
121 static void txp_release_resources (device_t
);
122 static int txp_chip_init (struct txp_softc
*);
123 static int txp_reset_adapter (struct txp_softc
*);
124 static int txp_download_fw (struct txp_softc
*);
125 static int txp_download_fw_wait (struct txp_softc
*);
126 static int txp_download_fw_section (struct txp_softc
*,
127 struct txp_fw_section_header
*, int);
128 static int txp_alloc_rings (struct txp_softc
*);
129 static int txp_rxring_fill (struct txp_softc
*);
130 static void txp_rxring_empty (struct txp_softc
*);
131 static void txp_set_filter (struct txp_softc
*);
133 static int txp_cmd_desc_numfree (struct txp_softc
*);
134 static int txp_command (struct txp_softc
*, u_int16_t
, u_int16_t
, u_int32_t
,
135 u_int32_t
, u_int16_t
*, u_int32_t
*, u_int32_t
*, int);
136 static int txp_command2 (struct txp_softc
*, u_int16_t
, u_int16_t
,
137 u_int32_t
, u_int32_t
, struct txp_ext_desc
*, u_int8_t
,
138 struct txp_rsp_desc
**, int);
139 static int txp_response (struct txp_softc
*, u_int32_t
, u_int16_t
, u_int16_t
,
140 struct txp_rsp_desc
**);
141 static void txp_rsp_fixup (struct txp_softc
*, struct txp_rsp_desc
*,
142 struct txp_rsp_desc
*);
143 static void txp_capabilities (struct txp_softc
*);
145 static void txp_ifmedia_sts (struct ifnet
*, struct ifmediareq
*);
146 static int txp_ifmedia_upd (struct ifnet
*);
148 static void txp_show_descriptor (void *);
150 static void txp_tx_reclaim (struct txp_softc
*, struct txp_tx_ring
*);
151 static void txp_rxbuf_reclaim (struct txp_softc
*);
152 static void txp_rx_reclaim (struct txp_softc
*, struct txp_rx_ring
*);
154 #ifdef TXP_USEIOSPACE
155 #define TXP_RES SYS_RES_IOPORT
156 #define TXP_RID TXP_PCI_LOIO
158 #define TXP_RES SYS_RES_MEMORY
159 #define TXP_RID TXP_PCI_LOMEM
162 static device_method_t txp_methods
[] = {
163 /* Device interface */
164 DEVMETHOD(device_probe
, txp_probe
),
165 DEVMETHOD(device_attach
, txp_attach
),
166 DEVMETHOD(device_detach
, txp_detach
),
167 DEVMETHOD(device_shutdown
, txp_shutdown
),
171 static driver_t txp_driver
= {
174 sizeof(struct txp_softc
)
177 static devclass_t txp_devclass
;
179 DECLARE_DUMMY_MODULE(if_txp
);
180 DRIVER_MODULE(if_txp
, pci
, txp_driver
, txp_devclass
, 0, 0);
183 txp_probe(device_t dev
)
188 vid
= pci_get_vendor(dev
);
189 did
= pci_get_device(dev
);
191 for (t
= txp_devs
; t
->txp_name
!= NULL
; ++t
) {
192 if ((vid
== t
->txp_vid
) && (did
== t
->txp_did
)) {
193 device_set_desc(dev
, t
->txp_name
);
202 txp_attach(device_t dev
)
204 struct txp_softc
*sc
;
208 uint8_t enaddr
[ETHER_ADDR_LEN
];
211 sc
= device_get_softc(dev
);
212 callout_init(&sc
->txp_stat_timer
);
214 ifp
= &sc
->sc_arpcom
.ac_if
;
215 if_initname(ifp
, device_get_name(dev
), device_get_unit(dev
));
217 pci_enable_busmaster(dev
);
220 sc
->sc_res
= bus_alloc_resource_any(dev
, TXP_RES
, &rid
, RF_ACTIVE
);
222 if (sc
->sc_res
== NULL
) {
223 device_printf(dev
, "couldn't map ports/memory\n");
227 sc
->sc_bt
= rman_get_bustag(sc
->sc_res
);
228 sc
->sc_bh
= rman_get_bushandle(sc
->sc_res
);
230 /* Allocate interrupt */
232 sc
->sc_irq
= bus_alloc_resource_any(dev
, SYS_RES_IRQ
, &rid
,
233 RF_SHAREABLE
| RF_ACTIVE
);
235 if (sc
->sc_irq
== NULL
) {
236 device_printf(dev
, "couldn't map interrupt\n");
241 if (txp_chip_init(sc
)) {
246 sc
->sc_fwbuf
= contigmalloc(32768, M_DEVBUF
,
247 M_WAITOK
, 0, 0xffffffff, PAGE_SIZE
, 0);
248 error
= txp_download_fw(sc
);
249 contigfree(sc
->sc_fwbuf
, 32768, M_DEVBUF
);
255 sc
->sc_ldata
= contigmalloc(sizeof(struct txp_ldata
), M_DEVBUF
,
256 M_WAITOK
| M_ZERO
, 0, 0xffffffff, PAGE_SIZE
, 0);
258 if (txp_alloc_rings(sc
)) {
263 if (txp_command(sc
, TXP_CMD_MAX_PKT_SIZE_WRITE
, TXP_MAX_PKTLEN
, 0, 0,
264 NULL
, NULL
, NULL
, 1)) {
269 if (txp_command(sc
, TXP_CMD_STATION_ADDRESS_READ
, 0, 0, 0,
270 &p1
, &p2
, NULL
, 1)) {
277 enaddr
[0] = ((uint8_t *)&p1
)[1];
278 enaddr
[1] = ((uint8_t *)&p1
)[0];
279 enaddr
[2] = ((uint8_t *)&p2
)[3];
280 enaddr
[3] = ((uint8_t *)&p2
)[2];
281 enaddr
[4] = ((uint8_t *)&p2
)[1];
282 enaddr
[5] = ((uint8_t *)&p2
)[0];
284 ifmedia_init(&sc
->sc_ifmedia
, 0, txp_ifmedia_upd
, txp_ifmedia_sts
);
285 ifmedia_add(&sc
->sc_ifmedia
, IFM_ETHER
|IFM_10_T
, 0, NULL
);
286 ifmedia_add(&sc
->sc_ifmedia
, IFM_ETHER
|IFM_10_T
|IFM_HDX
, 0, NULL
);
287 ifmedia_add(&sc
->sc_ifmedia
, IFM_ETHER
|IFM_10_T
|IFM_FDX
, 0, NULL
);
288 ifmedia_add(&sc
->sc_ifmedia
, IFM_ETHER
|IFM_100_TX
, 0, NULL
);
289 ifmedia_add(&sc
->sc_ifmedia
, IFM_ETHER
|IFM_100_TX
|IFM_HDX
, 0, NULL
);
290 ifmedia_add(&sc
->sc_ifmedia
, IFM_ETHER
|IFM_100_TX
|IFM_FDX
, 0, NULL
);
291 ifmedia_add(&sc
->sc_ifmedia
, IFM_ETHER
|IFM_AUTO
, 0, NULL
);
293 sc
->sc_xcvr
= TXP_XCVR_AUTO
;
294 txp_command(sc
, TXP_CMD_XCVR_SELECT
, TXP_XCVR_AUTO
, 0, 0,
295 NULL
, NULL
, NULL
, 0);
296 ifmedia_set(&sc
->sc_ifmedia
, IFM_ETHER
|IFM_AUTO
);
299 ifp
->if_mtu
= ETHERMTU
;
300 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
301 ifp
->if_ioctl
= txp_ioctl
;
302 ifp
->if_start
= txp_start
;
303 ifp
->if_watchdog
= txp_watchdog
;
304 ifp
->if_init
= txp_init
;
305 ifp
->if_baudrate
= 100000000;
306 ifq_set_maxlen(&ifp
->if_snd
, TX_ENTRIES
);
307 ifq_set_ready(&ifp
->if_snd
);
308 ifp
->if_hwassist
= 0;
309 txp_capabilities(sc
);
311 ether_ifattach(ifp
, enaddr
, NULL
);
313 error
= bus_setup_intr(dev
, sc
->sc_irq
, INTR_NETSAFE
,
314 txp_intr
, sc
, &sc
->sc_intrhand
,
317 device_printf(dev
, "couldn't set up irq\n");
325 txp_release_resources(dev
);
330 txp_detach(device_t dev
)
332 struct txp_softc
*sc
= device_get_softc(dev
);
333 struct ifnet
*ifp
= &sc
->sc_arpcom
.ac_if
;
336 lwkt_serialize_enter(ifp
->if_serializer
);
340 bus_teardown_intr(dev
, sc
->sc_irq
, sc
->sc_intrhand
);
342 lwkt_serialize_exit(ifp
->if_serializer
);
344 ifmedia_removeall(&sc
->sc_ifmedia
);
347 for (i
= 0; i
< RXBUF_ENTRIES
; i
++)
348 kfree(sc
->sc_rxbufs
[i
].rb_sd
, M_DEVBUF
);
350 txp_release_resources(dev
);
356 txp_release_resources(device_t dev
)
358 struct txp_softc
*sc
;
360 sc
= device_get_softc(dev
);
362 if (sc
->sc_irq
!= NULL
)
363 bus_release_resource(dev
, SYS_RES_IRQ
, 0, sc
->sc_irq
);
365 if (sc
->sc_res
!= NULL
)
366 bus_release_resource(dev
, TXP_RES
, TXP_RID
, sc
->sc_res
);
368 if (sc
->sc_ldata
!= NULL
)
369 contigfree(sc
->sc_ldata
, sizeof(struct txp_ldata
), M_DEVBUF
);
375 txp_chip_init(struct txp_softc
*sc
)
377 /* disable interrupts */
378 WRITE_REG(sc
, TXP_IER
, 0);
379 WRITE_REG(sc
, TXP_IMR
,
380 TXP_INT_SELF
| TXP_INT_PCI_TABORT
| TXP_INT_PCI_MABORT
|
381 TXP_INT_DMA3
| TXP_INT_DMA2
| TXP_INT_DMA1
| TXP_INT_DMA0
|
384 /* ack all interrupts */
385 WRITE_REG(sc
, TXP_ISR
, TXP_INT_RESERVED
| TXP_INT_LATCH
|
386 TXP_INT_A2H_7
| TXP_INT_A2H_6
| TXP_INT_A2H_5
| TXP_INT_A2H_4
|
387 TXP_INT_SELF
| TXP_INT_PCI_TABORT
| TXP_INT_PCI_MABORT
|
388 TXP_INT_DMA3
| TXP_INT_DMA2
| TXP_INT_DMA1
| TXP_INT_DMA0
|
389 TXP_INT_A2H_3
| TXP_INT_A2H_2
| TXP_INT_A2H_1
| TXP_INT_A2H_0
);
391 if (txp_reset_adapter(sc
))
394 /* disable interrupts */
395 WRITE_REG(sc
, TXP_IER
, 0);
396 WRITE_REG(sc
, TXP_IMR
,
397 TXP_INT_SELF
| TXP_INT_PCI_TABORT
| TXP_INT_PCI_MABORT
|
398 TXP_INT_DMA3
| TXP_INT_DMA2
| TXP_INT_DMA1
| TXP_INT_DMA0
|
401 /* ack all interrupts */
402 WRITE_REG(sc
, TXP_ISR
, TXP_INT_RESERVED
| TXP_INT_LATCH
|
403 TXP_INT_A2H_7
| TXP_INT_A2H_6
| TXP_INT_A2H_5
| TXP_INT_A2H_4
|
404 TXP_INT_SELF
| TXP_INT_PCI_TABORT
| TXP_INT_PCI_MABORT
|
405 TXP_INT_DMA3
| TXP_INT_DMA2
| TXP_INT_DMA1
| TXP_INT_DMA0
|
406 TXP_INT_A2H_3
| TXP_INT_A2H_2
| TXP_INT_A2H_1
| TXP_INT_A2H_0
);
412 txp_reset_adapter(struct txp_softc
*sc
)
417 WRITE_REG(sc
, TXP_SRR
, TXP_SRR_ALL
);
419 WRITE_REG(sc
, TXP_SRR
, 0);
421 /* Should wait max 6 seconds */
422 for (i
= 0; i
< 6000; i
++) {
423 r
= READ_REG(sc
, TXP_A2H_0
);
424 if (r
== STAT_WAITING_FOR_HOST_REQUEST
)
429 if (r
!= STAT_WAITING_FOR_HOST_REQUEST
) {
430 if_printf(&sc
->sc_arpcom
.ac_if
, "reset hung\n");
438 txp_download_fw(struct txp_softc
*sc
)
440 struct txp_fw_file_header
*fileheader
;
441 struct txp_fw_section_header
*secthead
;
443 u_int32_t r
, i
, ier
, imr
;
445 ier
= READ_REG(sc
, TXP_IER
);
446 WRITE_REG(sc
, TXP_IER
, ier
| TXP_INT_A2H_0
);
448 imr
= READ_REG(sc
, TXP_IMR
);
449 WRITE_REG(sc
, TXP_IMR
, imr
| TXP_INT_A2H_0
);
451 for (i
= 0; i
< 10000; i
++) {
452 r
= READ_REG(sc
, TXP_A2H_0
);
453 if (r
== STAT_WAITING_FOR_HOST_REQUEST
)
457 if (r
!= STAT_WAITING_FOR_HOST_REQUEST
) {
458 if_printf(&sc
->sc_arpcom
.ac_if
,
459 "not waiting for host request\n");
464 WRITE_REG(sc
, TXP_ISR
, TXP_INT_A2H_0
);
466 fileheader
= (struct txp_fw_file_header
*)tc990image
;
467 if (bcmp("TYPHOON", fileheader
->magicid
, sizeof(fileheader
->magicid
))) {
468 if_printf(&sc
->sc_arpcom
.ac_if
, "fw invalid magic\n");
472 /* Tell boot firmware to get ready for image */
473 WRITE_REG(sc
, TXP_H2A_1
, fileheader
->addr
);
474 WRITE_REG(sc
, TXP_H2A_0
, TXP_BOOTCMD_RUNTIME_IMAGE
);
476 if (txp_download_fw_wait(sc
)) {
477 if_printf(&sc
->sc_arpcom
.ac_if
, "fw wait failed, initial\n");
481 secthead
= (struct txp_fw_section_header
*)(((u_int8_t
*)tc990image
) +
482 sizeof(struct txp_fw_file_header
));
484 for (sect
= 0; sect
< fileheader
->nsections
; sect
++) {
485 if (txp_download_fw_section(sc
, secthead
, sect
))
487 secthead
= (struct txp_fw_section_header
*)
488 (((u_int8_t
*)secthead
) + secthead
->nbytes
+
492 WRITE_REG(sc
, TXP_H2A_0
, TXP_BOOTCMD_DOWNLOAD_COMPLETE
);
494 for (i
= 0; i
< 10000; i
++) {
495 r
= READ_REG(sc
, TXP_A2H_0
);
496 if (r
== STAT_WAITING_FOR_BOOT
)
500 if (r
!= STAT_WAITING_FOR_BOOT
) {
501 if_printf(&sc
->sc_arpcom
.ac_if
, "not waiting for boot\n");
505 WRITE_REG(sc
, TXP_IER
, ier
);
506 WRITE_REG(sc
, TXP_IMR
, imr
);
512 txp_download_fw_wait(struct txp_softc
*sc
)
516 for (i
= 0; i
< 10000; i
++) {
517 r
= READ_REG(sc
, TXP_ISR
);
518 if (r
& TXP_INT_A2H_0
)
523 if (!(r
& TXP_INT_A2H_0
)) {
524 if_printf(&sc
->sc_arpcom
.ac_if
, "fw wait failed comm0\n");
528 WRITE_REG(sc
, TXP_ISR
, TXP_INT_A2H_0
);
530 r
= READ_REG(sc
, TXP_A2H_0
);
531 if (r
!= STAT_WAITING_FOR_SEGMENT
) {
532 if_printf(&sc
->sc_arpcom
.ac_if
, "fw not waiting for segment\n");
539 txp_download_fw_section(struct txp_softc
*sc
,
540 struct txp_fw_section_header
*sect
, int sectnum
)
547 /* Skip zero length sections */
548 if (sect
->nbytes
== 0)
551 /* Make sure we aren't past the end of the image */
552 rseg
= ((u_int8_t
*)sect
) - ((u_int8_t
*)tc990image
);
553 if (rseg
>= sizeof(tc990image
)) {
554 if_printf(&sc
->sc_arpcom
.ac_if
, "fw invalid section address, "
555 "section %d\n", sectnum
);
559 /* Make sure this section doesn't go past the end */
560 rseg
+= sect
->nbytes
;
561 if (rseg
>= sizeof(tc990image
)) {
562 if_printf(&sc
->sc_arpcom
.ac_if
, "fw truncated section %d\n",
567 bcopy(((u_int8_t
*)sect
) + sizeof(*sect
), sc
->sc_fwbuf
, sect
->nbytes
);
568 dma
= vtophys(sc
->sc_fwbuf
);
571 * dummy up mbuf and verify section checksum
574 m
.m_next
= m
.m_nextpkt
= NULL
;
575 m
.m_len
= sect
->nbytes
;
576 m
.m_data
= sc
->sc_fwbuf
;
578 csum
= in_cksum(&m
, sect
->nbytes
);
579 if (csum
!= sect
->cksum
) {
580 if_printf(&sc
->sc_arpcom
.ac_if
, "fw section %d, bad "
581 "cksum (expected 0x%x got 0x%x)\n",
582 sectnum
, sect
->cksum
, csum
);
587 WRITE_REG(sc
, TXP_H2A_1
, sect
->nbytes
);
588 WRITE_REG(sc
, TXP_H2A_2
, sect
->cksum
);
589 WRITE_REG(sc
, TXP_H2A_3
, sect
->addr
);
590 WRITE_REG(sc
, TXP_H2A_4
, 0);
591 WRITE_REG(sc
, TXP_H2A_5
, dma
& 0xffffffff);
592 WRITE_REG(sc
, TXP_H2A_0
, TXP_BOOTCMD_SEGMENT_AVAILABLE
);
594 if (txp_download_fw_wait(sc
)) {
595 if_printf(&sc
->sc_arpcom
.ac_if
, "fw wait failed, "
596 "section %d\n", sectnum
);
607 struct txp_softc
*sc
= vsc
;
608 struct txp_hostvar
*hv
= sc
->sc_hostvar
;
611 /* mask all interrupts */
612 WRITE_REG(sc
, TXP_IMR
, TXP_INT_RESERVED
| TXP_INT_SELF
|
613 TXP_INT_A2H_7
| TXP_INT_A2H_6
| TXP_INT_A2H_5
| TXP_INT_A2H_4
|
614 TXP_INT_A2H_2
| TXP_INT_A2H_1
| TXP_INT_A2H_0
|
615 TXP_INT_DMA3
| TXP_INT_DMA2
| TXP_INT_DMA1
| TXP_INT_DMA0
|
616 TXP_INT_PCI_TABORT
| TXP_INT_PCI_MABORT
| TXP_INT_LATCH
);
618 isr
= READ_REG(sc
, TXP_ISR
);
620 WRITE_REG(sc
, TXP_ISR
, isr
);
622 if ((*sc
->sc_rxhir
.r_roff
) != (*sc
->sc_rxhir
.r_woff
))
623 txp_rx_reclaim(sc
, &sc
->sc_rxhir
);
624 if ((*sc
->sc_rxlor
.r_roff
) != (*sc
->sc_rxlor
.r_woff
))
625 txp_rx_reclaim(sc
, &sc
->sc_rxlor
);
627 if (hv
->hv_rx_buf_write_idx
== hv
->hv_rx_buf_read_idx
)
628 txp_rxbuf_reclaim(sc
);
630 if (sc
->sc_txhir
.r_cnt
&& (sc
->sc_txhir
.r_cons
!=
631 TXP_OFFSET2IDX(*(sc
->sc_txhir
.r_off
))))
632 txp_tx_reclaim(sc
, &sc
->sc_txhir
);
634 if (sc
->sc_txlor
.r_cnt
&& (sc
->sc_txlor
.r_cons
!=
635 TXP_OFFSET2IDX(*(sc
->sc_txlor
.r_off
))))
636 txp_tx_reclaim(sc
, &sc
->sc_txlor
);
638 isr
= READ_REG(sc
, TXP_ISR
);
641 /* unmask all interrupts */
642 WRITE_REG(sc
, TXP_IMR
, TXP_INT_A2H_3
);
644 txp_start(&sc
->sc_arpcom
.ac_if
);
650 txp_rx_reclaim(struct txp_softc
*sc
, struct txp_rx_ring
*r
)
652 struct ifnet
*ifp
= &sc
->sc_arpcom
.ac_if
;
653 struct txp_rx_desc
*rxd
;
655 struct txp_swdesc
*sd
= NULL
;
656 u_int32_t roff
, woff
;
660 rxd
= r
->r_desc
+ (roff
/ sizeof(struct txp_rx_desc
));
662 while (roff
!= woff
) {
664 if (rxd
->rx_flags
& RX_FLAGS_ERROR
) {
665 if_printf(ifp
, "error 0x%x\n", rxd
->rx_stat
);
670 /* retrieve stashed pointer */
676 m
->m_pkthdr
.len
= m
->m_len
= rxd
->rx_len
;
678 #ifdef __STRICT_ALIGNMENT
681 * XXX Nice chip, except it won't accept "off by 2"
682 * buffers, so we're force to copy. Supposedly
683 * this will be fixed in a newer firmware rev
684 * and this will be temporary.
688 MGETHDR(mnew
, MB_DONTWAIT
, MT_DATA
);
693 if (m
->m_len
> (MHLEN
- 2)) {
694 MCLGET(mnew
, MB_DONTWAIT
);
695 if (!(mnew
->m_flags
& M_EXT
)) {
701 mnew
->m_pkthdr
.rcvif
= ifp
;
703 mnew
->m_pkthdr
.len
= mnew
->m_len
= m
->m_len
;
704 m_copydata(m
, 0, m
->m_pkthdr
.len
, mtod(mnew
, caddr_t
));
710 if (rxd
->rx_stat
& RX_STAT_IPCKSUMBAD
)
711 m
->m_pkthdr
.csum_flags
|= CSUM_IP_CHECKED
;
712 else if (rxd
->rx_stat
& RX_STAT_IPCKSUMGOOD
)
713 m
->m_pkthdr
.csum_flags
|=
714 CSUM_IP_CHECKED
|CSUM_IP_VALID
;
716 if ((rxd
->rx_stat
& RX_STAT_TCPCKSUMGOOD
) ||
717 (rxd
->rx_stat
& RX_STAT_UDPCKSUMGOOD
)) {
718 m
->m_pkthdr
.csum_flags
|=
719 CSUM_DATA_VALID
|CSUM_PSEUDO_HDR
|
720 CSUM_FRAG_NOT_CHECKED
;
721 m
->m_pkthdr
.csum_data
= 0xffff;
724 lwkt_serialize_enter(ifp
->if_serializer
);
725 if (rxd
->rx_stat
& RX_STAT_VLAN
)
726 VLAN_INPUT_TAG(m
, htons(rxd
->rx_vlan
>> 16));
728 ifp
->if_input(ifp
, m
);
729 lwkt_serialize_exit(ifp
->if_serializer
);
733 roff
+= sizeof(struct txp_rx_desc
);
734 if (roff
== (RX_ENTRIES
* sizeof(struct txp_rx_desc
))) {
748 txp_rxbuf_reclaim(struct txp_softc
*sc
)
750 struct ifnet
*ifp
= &sc
->sc_arpcom
.ac_if
;
751 struct txp_hostvar
*hv
= sc
->sc_hostvar
;
752 struct txp_rxbuf_desc
*rbd
;
753 struct txp_swdesc
*sd
;
756 if (!(ifp
->if_flags
& IFF_RUNNING
))
759 i
= sc
->sc_rxbufprod
;
760 rbd
= sc
->sc_rxbufs
+ i
;
764 if (sd
->sd_mbuf
!= NULL
)
767 MGETHDR(sd
->sd_mbuf
, MB_DONTWAIT
, MT_DATA
);
768 if (sd
->sd_mbuf
== NULL
)
771 MCLGET(sd
->sd_mbuf
, MB_DONTWAIT
);
772 if ((sd
->sd_mbuf
->m_flags
& M_EXT
) == 0)
774 sd
->sd_mbuf
->m_pkthdr
.rcvif
= ifp
;
775 sd
->sd_mbuf
->m_pkthdr
.len
= sd
->sd_mbuf
->m_len
= MCLBYTES
;
777 rbd
->rb_paddrlo
= vtophys(mtod(sd
->sd_mbuf
, vm_offset_t
))
781 hv
->hv_rx_buf_write_idx
= TXP_IDX2OFFSET(i
);
783 if (++i
== RXBUF_ENTRIES
) {
790 sc
->sc_rxbufprod
= i
;
795 m_freem(sd
->sd_mbuf
);
801 * Reclaim mbufs and entries from a transmit ring.
804 txp_tx_reclaim(struct txp_softc
*sc
, struct txp_tx_ring
*r
)
806 struct ifnet
*ifp
= &sc
->sc_arpcom
.ac_if
;
807 u_int32_t idx
= TXP_OFFSET2IDX(*(r
->r_off
));
808 u_int32_t cons
= r
->r_cons
, cnt
= r
->r_cnt
;
809 struct txp_tx_desc
*txd
= r
->r_desc
+ cons
;
810 struct txp_swdesc
*sd
= sc
->sc_txd
+ cons
;
813 while (cons
!= idx
) {
817 if ((txd
->tx_flags
& TX_FLAGS_TYPE_M
) ==
818 TX_FLAGS_TYPE_DATA
) {
827 ifp
->if_flags
&= ~IFF_OACTIVE
;
829 if (++cons
== TX_ENTRIES
) {
848 txp_shutdown(device_t dev
)
850 struct txp_softc
*sc
;
853 sc
= device_get_softc(dev
);
854 ifp
= &sc
->sc_arpcom
.ac_if
;
855 lwkt_serialize_enter(ifp
->if_serializer
);
857 /* mask all interrupts */
858 WRITE_REG(sc
, TXP_IMR
,
859 TXP_INT_SELF
| TXP_INT_PCI_TABORT
| TXP_INT_PCI_MABORT
|
860 TXP_INT_DMA3
| TXP_INT_DMA2
| TXP_INT_DMA1
| TXP_INT_DMA0
|
863 txp_command(sc
, TXP_CMD_TX_DISABLE
, 0, 0, 0, NULL
, NULL
, NULL
, 0);
864 txp_command(sc
, TXP_CMD_RX_DISABLE
, 0, 0, 0, NULL
, NULL
, NULL
, 0);
865 txp_command(sc
, TXP_CMD_HALT
, 0, 0, 0, NULL
, NULL
, NULL
, 0);
867 lwkt_serialize_exit(ifp
->if_serializer
);
872 txp_alloc_rings(struct txp_softc
*sc
)
874 struct txp_boot_record
*boot
;
875 struct txp_ldata
*ld
;
880 boot
= &ld
->txp_boot
;
886 bzero(&ld
->txp_hostvar
, sizeof(struct txp_hostvar
));
887 boot
->br_hostvar_lo
= vtophys(&ld
->txp_hostvar
);
888 boot
->br_hostvar_hi
= 0;
889 sc
->sc_hostvar
= (struct txp_hostvar
*)&ld
->txp_hostvar
;
891 /* hi priority tx ring */
892 boot
->br_txhipri_lo
= vtophys(&ld
->txp_txhiring
);
893 boot
->br_txhipri_hi
= 0;
894 boot
->br_txhipri_siz
= TX_ENTRIES
* sizeof(struct txp_tx_desc
);
895 sc
->sc_txhir
.r_reg
= TXP_H2A_1
;
896 sc
->sc_txhir
.r_desc
= (struct txp_tx_desc
*)&ld
->txp_txhiring
;
897 sc
->sc_txhir
.r_cons
= sc
->sc_txhir
.r_prod
= sc
->sc_txhir
.r_cnt
= 0;
898 sc
->sc_txhir
.r_off
= &sc
->sc_hostvar
->hv_tx_hi_desc_read_idx
;
900 /* lo priority tx ring */
901 boot
->br_txlopri_lo
= vtophys(&ld
->txp_txloring
);
902 boot
->br_txlopri_hi
= 0;
903 boot
->br_txlopri_siz
= TX_ENTRIES
* sizeof(struct txp_tx_desc
);
904 sc
->sc_txlor
.r_reg
= TXP_H2A_3
;
905 sc
->sc_txlor
.r_desc
= (struct txp_tx_desc
*)&ld
->txp_txloring
;
906 sc
->sc_txlor
.r_cons
= sc
->sc_txlor
.r_prod
= sc
->sc_txlor
.r_cnt
= 0;
907 sc
->sc_txlor
.r_off
= &sc
->sc_hostvar
->hv_tx_lo_desc_read_idx
;
909 /* high priority rx ring */
910 boot
->br_rxhipri_lo
= vtophys(&ld
->txp_rxhiring
);
911 boot
->br_rxhipri_hi
= 0;
912 boot
->br_rxhipri_siz
= RX_ENTRIES
* sizeof(struct txp_rx_desc
);
913 sc
->sc_rxhir
.r_desc
= (struct txp_rx_desc
*)&ld
->txp_rxhiring
;
914 sc
->sc_rxhir
.r_roff
= &sc
->sc_hostvar
->hv_rx_hi_read_idx
;
915 sc
->sc_rxhir
.r_woff
= &sc
->sc_hostvar
->hv_rx_hi_write_idx
;
917 /* low priority rx ring */
918 boot
->br_rxlopri_lo
= vtophys(&ld
->txp_rxloring
);
919 boot
->br_rxlopri_hi
= 0;
920 boot
->br_rxlopri_siz
= RX_ENTRIES
* sizeof(struct txp_rx_desc
);
921 sc
->sc_rxlor
.r_desc
= (struct txp_rx_desc
*)&ld
->txp_rxloring
;
922 sc
->sc_rxlor
.r_roff
= &sc
->sc_hostvar
->hv_rx_lo_read_idx
;
923 sc
->sc_rxlor
.r_woff
= &sc
->sc_hostvar
->hv_rx_lo_write_idx
;
926 bzero(&ld
->txp_cmdring
, sizeof(struct txp_cmd_desc
) * CMD_ENTRIES
);
927 boot
->br_cmd_lo
= vtophys(&ld
->txp_cmdring
);
929 boot
->br_cmd_siz
= CMD_ENTRIES
* sizeof(struct txp_cmd_desc
);
930 sc
->sc_cmdring
.base
= (struct txp_cmd_desc
*)&ld
->txp_cmdring
;
931 sc
->sc_cmdring
.size
= CMD_ENTRIES
* sizeof(struct txp_cmd_desc
);
932 sc
->sc_cmdring
.lastwrite
= 0;
935 bzero(&ld
->txp_rspring
, sizeof(struct txp_rsp_desc
) * RSP_ENTRIES
);
936 boot
->br_resp_lo
= vtophys(&ld
->txp_rspring
);
937 boot
->br_resp_hi
= 0;
938 boot
->br_resp_siz
= CMD_ENTRIES
* sizeof(struct txp_rsp_desc
);
939 sc
->sc_rspring
.base
= (struct txp_rsp_desc
*)&ld
->txp_rspring
;
940 sc
->sc_rspring
.size
= RSP_ENTRIES
* sizeof(struct txp_rsp_desc
);
941 sc
->sc_rspring
.lastwrite
= 0;
943 /* receive buffer ring */
944 boot
->br_rxbuf_lo
= vtophys(&ld
->txp_rxbufs
);
945 boot
->br_rxbuf_hi
= 0;
946 boot
->br_rxbuf_siz
= RXBUF_ENTRIES
* sizeof(struct txp_rxbuf_desc
);
947 sc
->sc_rxbufs
= (struct txp_rxbuf_desc
*)&ld
->txp_rxbufs
;
949 for (i
= 0; i
< RXBUF_ENTRIES
; i
++) {
950 struct txp_swdesc
*sd
;
951 if (sc
->sc_rxbufs
[i
].rb_sd
!= NULL
)
953 sc
->sc_rxbufs
[i
].rb_sd
= kmalloc(sizeof(struct txp_swdesc
),
955 sd
= sc
->sc_rxbufs
[i
].rb_sd
;
958 sc
->sc_rxbufprod
= 0;
961 bzero(&ld
->txp_zero
, sizeof(u_int32_t
));
962 boot
->br_zero_lo
= vtophys(&ld
->txp_zero
);
963 boot
->br_zero_hi
= 0;
965 /* See if it's waiting for boot, and try to boot it */
966 for (i
= 0; i
< 10000; i
++) {
967 r
= READ_REG(sc
, TXP_A2H_0
);
968 if (r
== STAT_WAITING_FOR_BOOT
)
973 if (r
!= STAT_WAITING_FOR_BOOT
) {
974 if_printf(&sc
->sc_arpcom
.ac_if
, "not waiting for boot\n");
978 WRITE_REG(sc
, TXP_H2A_2
, 0);
979 WRITE_REG(sc
, TXP_H2A_1
, vtophys(sc
->sc_boot
));
980 WRITE_REG(sc
, TXP_H2A_0
, TXP_BOOTCMD_REGISTER_BOOT_RECORD
);
982 /* See if it booted */
983 for (i
= 0; i
< 10000; i
++) {
984 r
= READ_REG(sc
, TXP_A2H_0
);
985 if (r
== STAT_RUNNING
)
989 if (r
!= STAT_RUNNING
) {
990 if_printf(&sc
->sc_arpcom
.ac_if
, "fw not running\n");
994 /* Clear TX and CMD ring write registers */
995 WRITE_REG(sc
, TXP_H2A_1
, TXP_BOOTCMD_NULL
);
996 WRITE_REG(sc
, TXP_H2A_2
, TXP_BOOTCMD_NULL
);
997 WRITE_REG(sc
, TXP_H2A_3
, TXP_BOOTCMD_NULL
);
998 WRITE_REG(sc
, TXP_H2A_0
, TXP_BOOTCMD_NULL
);
1004 txp_ioctl(struct ifnet
*ifp
, u_long command
, caddr_t data
, struct ucred
*cr
)
1006 struct txp_softc
*sc
= ifp
->if_softc
;
1007 struct ifreq
*ifr
= (struct ifreq
*)data
;
1012 if (ifp
->if_flags
& IFF_UP
) {
1015 if (ifp
->if_flags
& IFF_RUNNING
)
1022 * Multicast list has changed; set the hardware
1023 * filter accordingly.
1030 error
= ifmedia_ioctl(ifp
, ifr
, &sc
->sc_ifmedia
, command
);
1033 error
= ether_ioctl(ifp
, command
, data
);
1040 txp_rxring_fill(struct txp_softc
*sc
)
1044 struct txp_swdesc
*sd
;
1046 ifp
= &sc
->sc_arpcom
.ac_if
;
1048 for (i
= 0; i
< RXBUF_ENTRIES
; i
++) {
1049 sd
= sc
->sc_rxbufs
[i
].rb_sd
;
1050 MGETHDR(sd
->sd_mbuf
, MB_DONTWAIT
, MT_DATA
);
1051 if (sd
->sd_mbuf
== NULL
)
1054 MCLGET(sd
->sd_mbuf
, MB_DONTWAIT
);
1055 if ((sd
->sd_mbuf
->m_flags
& M_EXT
) == 0) {
1056 m_freem(sd
->sd_mbuf
);
1059 sd
->sd_mbuf
->m_pkthdr
.len
= sd
->sd_mbuf
->m_len
= MCLBYTES
;
1060 sd
->sd_mbuf
->m_pkthdr
.rcvif
= ifp
;
1062 sc
->sc_rxbufs
[i
].rb_paddrlo
=
1063 vtophys(mtod(sd
->sd_mbuf
, vm_offset_t
));
1064 sc
->sc_rxbufs
[i
].rb_paddrhi
= 0;
1067 sc
->sc_hostvar
->hv_rx_buf_write_idx
= (RXBUF_ENTRIES
- 1) *
1068 sizeof(struct txp_rxbuf_desc
);
1074 txp_rxring_empty(struct txp_softc
*sc
)
1077 struct txp_swdesc
*sd
;
1079 if (sc
->sc_rxbufs
== NULL
)
1082 for (i
= 0; i
< RXBUF_ENTRIES
; i
++) {
1083 if (&sc
->sc_rxbufs
[i
] == NULL
)
1085 sd
= sc
->sc_rxbufs
[i
].rb_sd
;
1088 if (sd
->sd_mbuf
!= NULL
) {
1089 m_freem(sd
->sd_mbuf
);
1100 struct txp_softc
*sc
;
1106 ifp
= &sc
->sc_arpcom
.ac_if
;
1108 if (ifp
->if_flags
& IFF_RUNNING
)
1113 txp_command(sc
, TXP_CMD_MAX_PKT_SIZE_WRITE
, TXP_MAX_PKTLEN
, 0, 0,
1114 NULL
, NULL
, NULL
, 1);
1116 /* Set station address. */
1117 ((u_int8_t
*)&p1
)[1] = sc
->sc_arpcom
.ac_enaddr
[0];
1118 ((u_int8_t
*)&p1
)[0] = sc
->sc_arpcom
.ac_enaddr
[1];
1119 ((u_int8_t
*)&p2
)[3] = sc
->sc_arpcom
.ac_enaddr
[2];
1120 ((u_int8_t
*)&p2
)[2] = sc
->sc_arpcom
.ac_enaddr
[3];
1121 ((u_int8_t
*)&p2
)[1] = sc
->sc_arpcom
.ac_enaddr
[4];
1122 ((u_int8_t
*)&p2
)[0] = sc
->sc_arpcom
.ac_enaddr
[5];
1123 txp_command(sc
, TXP_CMD_STATION_ADDRESS_WRITE
, p1
, p2
, 0,
1124 NULL
, NULL
, NULL
, 1);
1128 txp_rxring_fill(sc
);
1130 txp_command(sc
, TXP_CMD_TX_ENABLE
, 0, 0, 0, NULL
, NULL
, NULL
, 1);
1131 txp_command(sc
, TXP_CMD_RX_ENABLE
, 0, 0, 0, NULL
, NULL
, NULL
, 1);
1133 WRITE_REG(sc
, TXP_IER
, TXP_INT_RESERVED
| TXP_INT_SELF
|
1134 TXP_INT_A2H_7
| TXP_INT_A2H_6
| TXP_INT_A2H_5
| TXP_INT_A2H_4
|
1135 TXP_INT_A2H_2
| TXP_INT_A2H_1
| TXP_INT_A2H_0
|
1136 TXP_INT_DMA3
| TXP_INT_DMA2
| TXP_INT_DMA1
| TXP_INT_DMA0
|
1137 TXP_INT_PCI_TABORT
| TXP_INT_PCI_MABORT
| TXP_INT_LATCH
);
1138 WRITE_REG(sc
, TXP_IMR
, TXP_INT_A2H_3
);
1140 ifp
->if_flags
|= IFF_RUNNING
;
1141 ifp
->if_flags
&= ~IFF_OACTIVE
;
1144 callout_reset(&sc
->txp_stat_timer
, hz
, txp_tick
, sc
);
1150 struct txp_softc
*sc
= vsc
;
1151 struct ifnet
*ifp
= &sc
->sc_arpcom
.ac_if
;
1152 struct txp_rsp_desc
*rsp
= NULL
;
1153 struct txp_ext_desc
*ext
;
1155 lwkt_serialize_enter(ifp
->if_serializer
);
1156 txp_rxbuf_reclaim(sc
);
1158 if (txp_command2(sc
, TXP_CMD_READ_STATISTICS
, 0, 0, 0, NULL
, 0,
1161 if (rsp
->rsp_numdesc
!= 6)
1163 if (txp_command(sc
, TXP_CMD_CLEAR_STATISTICS
, 0, 0, 0,
1164 NULL
, NULL
, NULL
, 1))
1166 ext
= (struct txp_ext_desc
*)(rsp
+ 1);
1168 ifp
->if_ierrors
+= ext
[3].ext_2
+ ext
[3].ext_3
+ ext
[3].ext_4
+
1169 ext
[4].ext_1
+ ext
[4].ext_4
;
1170 ifp
->if_oerrors
+= ext
[0].ext_1
+ ext
[1].ext_1
+ ext
[1].ext_4
+
1172 ifp
->if_collisions
+= ext
[0].ext_2
+ ext
[0].ext_3
+ ext
[1].ext_2
+
1174 ifp
->if_opackets
+= rsp
->rsp_par2
;
1175 ifp
->if_ipackets
+= ext
[2].ext_3
;
1179 kfree(rsp
, M_DEVBUF
);
1181 callout_reset(&sc
->txp_stat_timer
, hz
, txp_tick
, sc
);
1182 lwkt_serialize_exit(ifp
->if_serializer
);
1186 txp_start(struct ifnet
*ifp
)
1188 struct txp_softc
*sc
= ifp
->if_softc
;
1189 struct txp_tx_ring
*r
= &sc
->sc_txhir
;
1190 struct txp_tx_desc
*txd
;
1191 struct txp_frag_desc
*fxd
;
1192 struct mbuf
*m
, *m0
;
1193 struct txp_swdesc
*sd
;
1194 u_int32_t firstprod
, firstcnt
, prod
, cnt
;
1197 if ((ifp
->if_flags
& (IFF_RUNNING
| IFF_OACTIVE
)) != IFF_RUNNING
)
1204 m
= ifq_poll(&ifp
->if_snd
);
1211 sd
= sc
->sc_txd
+ prod
;
1214 if ((TX_ENTRIES
- cnt
) < 4)
1217 txd
= r
->r_desc
+ prod
;
1219 txd
->tx_flags
= TX_FLAGS_TYPE_DATA
;
1220 txd
->tx_numdesc
= 0;
1226 if (++prod
== TX_ENTRIES
)
1229 if (++cnt
>= (TX_ENTRIES
- 4))
1232 if ((m
->m_flags
& (M_PROTO1
|M_PKTHDR
)) == (M_PROTO1
|M_PKTHDR
) &&
1233 m
->m_pkthdr
.rcvif
!= NULL
) {
1234 ifv
= m
->m_pkthdr
.rcvif
->if_softc
;
1235 txd
->tx_pflags
= TX_PFLAGS_VLAN
|
1236 (htons(ifv
->ifv_tag
) << TX_PFLAGS_VLANTAG_S
);
1239 if (m
->m_pkthdr
.csum_flags
& CSUM_IP
)
1240 txd
->tx_pflags
|= TX_PFLAGS_IPCKSUM
;
1243 if (m
->m_pkthdr
.csum_flags
& CSUM_TCP
)
1244 txd
->tx_pflags
|= TX_PFLAGS_TCPCKSUM
;
1245 if (m
->m_pkthdr
.csum_flags
& CSUM_UDP
)
1246 txd
->tx_pflags
|= TX_PFLAGS_UDPCKSUM
;
1249 fxd
= (struct txp_frag_desc
*)(r
->r_desc
+ prod
);
1250 for (m0
= m
; m0
!= NULL
; m0
= m0
->m_next
) {
1253 if (++cnt
>= (TX_ENTRIES
- 4))
1258 fxd
->frag_flags
= FRAG_FLAGS_TYPE_FRAG
;
1259 fxd
->frag_rsvd1
= 0;
1260 fxd
->frag_len
= m0
->m_len
;
1261 fxd
->frag_addrlo
= vtophys(mtod(m0
, vm_offset_t
));
1262 fxd
->frag_addrhi
= 0;
1263 fxd
->frag_rsvd2
= 0;
1265 if (++prod
== TX_ENTRIES
) {
1266 fxd
= (struct txp_frag_desc
*)r
->r_desc
;
1275 ifq_dequeue(&ifp
->if_snd
, m
);
1277 WRITE_REG(sc
, r
->r_reg
, TXP_IDX2OFFSET(prod
));
1285 ifp
->if_flags
|= IFF_OACTIVE
;
1286 r
->r_prod
= firstprod
;
1287 r
->r_cnt
= firstcnt
;
1292 * Handle simple commands sent to the typhoon
1295 txp_command(struct txp_softc
*sc
, u_int16_t id
, u_int16_t in1
, u_int32_t in2
,
1296 u_int32_t in3
, u_int16_t
*out1
, u_int32_t
*out2
, u_int32_t
*out3
,
1299 struct txp_rsp_desc
*rsp
= NULL
;
1301 if (txp_command2(sc
, id
, in1
, in2
, in3
, NULL
, 0, &rsp
, wait
))
1308 *out1
= rsp
->rsp_par1
;
1310 *out2
= rsp
->rsp_par2
;
1312 *out3
= rsp
->rsp_par3
;
1313 kfree(rsp
, M_DEVBUF
);
/*
 * txp_command2(): full command-submission path.  Builds a command descriptor
 * (plus in_extn optional extended descriptors) in the host->card command
 * ring, rings the TXP_H2A_2 doorbell, and -- when the caller waits -- polls
 * the host-variable response indices for a response matching (id, seq),
 * returning it via *rspp.
 *
 * NOTE(review): lines are missing from this excerpt (gaps at 1321, 1325-1327,
 * 1330-1332, 1336, 1339, 1345, 1348-1349, 1353, 1356-1358, 1360, 1362-1365,
 * 1369, 1371-1376, 1379+): the local declarations for idx/seq/i, the
 * error returns, the ring-wrap resets after the size checks, and the
 * presumable in_extp advance inside the copy loop are not visible.
 */
1318 txp_command2(struct txp_softc
*sc
, u_int16_t id
, u_int16_t in1
, u_int32_t in2
,
1319 u_int32_t in3
, struct txp_ext_desc
*in_extp
, u_int8_t in_extn
,
1320 struct txp_rsp_desc
**rspp
, int wait
)
1322 struct txp_hostvar
*hv
= sc
->sc_hostvar
;
1323 struct txp_cmd_desc
*cmd
;
1324 struct txp_ext_desc
*ext
;
/* Refuse if the ring cannot hold the command plus its ext descriptors. */
1328 if (txp_cmd_desc_numfree(sc
) < (in_extn
+ 1)) {
1329 if_printf(&sc
->sc_arpcom
.ac_if
, "no free cmd descriptors\n");
/* Next free slot is at the ring's last write offset. */
1333 idx
= sc
->sc_cmdring
.lastwrite
;
1334 cmd
= (struct txp_cmd_desc
*)(((u_int8_t
*)sc
->sc_cmdring
.base
) + idx
);
1335 bzero(cmd
, sizeof(*cmd
));
1337 cmd
->cmd_numdesc
= in_extn
;
/* Tag with the next sequence number so the response can be matched. */
1338 cmd
->cmd_seq
= seq
= sc
->sc_seq
++;
1340 cmd
->cmd_par1
= in1
;
1341 cmd
->cmd_par2
= in2
;
1342 cmd
->cmd_par3
= in3
;
/* Mark valid; request a response descriptor only when the caller waits. */
1343 cmd
->cmd_flags
= CMD_FLAGS_TYPE_CMD
|
1344 (wait
? CMD_FLAGS_RESP
: 0) | CMD_FLAGS_VALID
;
1346 idx
+= sizeof(struct txp_cmd_desc
);
1347 if (idx
== sc
->sc_cmdring
.size
)
/*
 * Copy the caller's extended descriptors into the ring after the command.
 * NOTE(review): idx advances by sizeof(struct txp_cmd_desc) even for ext
 * descriptors -- presumably the two descriptor types are the same size;
 * confirm against the hardware structure definitions.
 */
1350 for (i
= 0; i
< in_extn
; i
++) {
1351 ext
= (struct txp_ext_desc
*)(((u_int8_t
*)sc
->sc_cmdring
.base
) + idx
);
1352 bcopy(in_extp
, ext
, sizeof(struct txp_ext_desc
));
1354 idx
+= sizeof(struct txp_cmd_desc
);
1355 if (idx
== sc
->sc_cmdring
.size
)
1359 sc
->sc_cmdring
.lastwrite
= idx
;
/* Doorbell: tell the card where the command ring write pointer now is. */
1361 WRITE_REG(sc
, TXP_H2A_2
, sc
->sc_cmdring
.lastwrite
);
/* Busy-poll for the response (the wait branch; guard lines are in a gap). */
1366 for (i
= 0; i
< 10000; i
++) {
1367 idx
= hv
->hv_resp_read_idx
;
1368 if (idx
!= hv
->hv_resp_write_idx
) {
1370 if (txp_response(sc
, idx
, id
, seq
, rspp
))
/*
 * NOTE(review): the loop above iterates up to 10000 but this failure test
 * compares i == 1000 -- a poll that times out at i == 10000 slips past this
 * check unless *rspp is also NULL.  Looks like a latent bug inherited from
 * the original driver; cannot be fixed safely here because the surrounding
 * lines (loop body exits, DELAY, return paths) are missing from the excerpt.
 */
1377 if (i
== 1000 || (*rspp
) == NULL
) {
1378 if_printf(&sc
->sc_arpcom
.ac_if
, "0x%x command failed\n", id
);
/*
 * txp_response(): walk the card->host response ring from ridx toward the
 * card's write index looking for a response matching (id, seq).  On a match,
 * allocate rsp_numdesc+1 descriptors and copy them out via txp_rsp_fixup()
 * into *rspp for the caller.  Non-matching responses (errors, statistics,
 * media status, hello, unknown ids) are consumed in place with
 * txp_rsp_fixup(sc, rsp, NULL), which also advances the read index.
 *
 * NOTE(review): gap lines (1388, 1391, 1394, 1400, 1402-1404, 1409-1411,
 * 1415, 1418-1419, 1421-1423, 1427+) hide the return statements, the
 * continue after the error branch, the break/continue for the switch cases,
 * and the final return -- verify control flow against the full source.
 */
1386 txp_response(struct txp_softc
*sc
, u_int32_t ridx
, u_int16_t id
, u_int16_t seq
,
1387 struct txp_rsp_desc
**rspp
)
1389 struct txp_hostvar
*hv
= sc
->sc_hostvar
;
1390 struct txp_rsp_desc
*rsp
;
/* Scan until we catch up with the card's response write pointer. */
1392 while (ridx
!= hv
->hv_resp_write_idx
) {
1393 rsp
= (struct txp_rsp_desc
*)(((u_int8_t
*)sc
->sc_rspring
.base
) + ridx
);
/* The response we were asked to find: copy it out for the caller. */
1395 if (id
== rsp
->rsp_id
&& rsp
->rsp_seq
== seq
) {
1396 *rspp
= (struct txp_rsp_desc
*)kmalloc(
1397 sizeof(struct txp_rsp_desc
) * (rsp
->rsp_numdesc
+ 1),
1398 M_DEVBUF
, M_INTWAIT
);
1399 if ((*rspp
) == NULL
)
1401 txp_rsp_fixup(sc
, rsp
, *rspp
);
/* Card flagged an error: log it and consume the response in place. */
1405 if (rsp
->rsp_flags
& RSP_FLAGS_ERROR
) {
1406 if_printf(&sc
->sc_arpcom
.ac_if
, "response error!\n");
1407 txp_rsp_fixup(sc
, rsp
, NULL
);
1408 ridx
= hv
->hv_resp_read_idx
;
/* Unsolicited responses: dispatch on id, then consume below. */
1412 switch (rsp
->rsp_id
) {
1413 case TXP_CMD_CYCLE_STATISTICS
:
1414 case TXP_CMD_MEDIA_STATUS_READ
:
1416 case TXP_CMD_HELLO_RESPONSE
:
1417 if_printf(&sc
->sc_arpcom
.ac_if
, "hello\n");
1420 if_printf(&sc
->sc_arpcom
.ac_if
, "unknown id(0x%x)\n",
/* Consume the unsolicited response and resync our scan index. */
1424 txp_rsp_fixup(sc
, rsp
, NULL
);
1425 ridx
= hv
->hv_resp_read_idx
;
1426 hv
->hv_resp_read_idx
= ridx
;
/*
 * txp_rsp_fixup(): copy rsp_numdesc+1 response descriptors out of the
 * (wrapping) response ring, starting at rsp, into the caller-supplied dst
 * buffer, advancing the host's response read index as descriptors are
 * consumed.  Callers pass dst == NULL to consume a response without
 * copying it (see txp_response()).
 *
 * NOTE(review): callers do pass dst == NULL, yet the visible line 1444
 * dereferences dst unconditionally -- the guard (presumably an
 * "if (dst != NULL)" on the dropped line 1443) and the declarations of
 * i/ridx, the else branch advancing src, and the loop's closing brace all
 * sit in gaps (1435, 1438-1439, 1441, 1443, 1448-1450, 1452-1453, 1455+).
 * Confirm against the full source.
 */
1433 txp_rsp_fixup(struct txp_softc
*sc
, struct txp_rsp_desc
*rsp
,
1434 struct txp_rsp_desc
*dst
)
1436 struct txp_rsp_desc
*src
= rsp
;
1437 struct txp_hostvar
*hv
= sc
->sc_hostvar
;
/* Start from the host's current read position in the response ring. */
1440 ridx
= hv
->hv_resp_read_idx
;
1442 for (i
= 0; i
< rsp
->rsp_numdesc
+ 1; i
++) {
1444 bcopy(src
, dst
++, sizeof(struct txp_rsp_desc
));
1445 ridx
+= sizeof(struct txp_rsp_desc
);
/* Wrap the source pointer back to the ring base at the end of the ring. */
1446 if (ridx
== sc
->sc_rspring
.size
) {
1447 src
= sc
->sc_rspring
.base
;
/* Publish the consumed position to both our bookkeeping and the card. */
1451 sc
->sc_rspring
.lastwrite
= hv
->hv_resp_read_idx
= ridx
;
1454 hv
->hv_resp_read_idx
= ridx
;
/*
 * txp_cmd_desc_numfree(): return how many command descriptors are free in
 * the host->card command ring, computed from the host write offset (widx)
 * and the card's read offset (ridx).  One descriptor's worth of space is
 * always kept back so a full ring is distinguishable from an empty one.
 *
 * NOTE(review): the if/else conditions selecting among the three cases
 * (ring empty; writer ahead of reader; reader ahead of writer) are in gap
 * lines (1459, 1463, 1466-1467, 1470-1471, 1474, 1476-1477) -- only the
 * three nfree computations and the return are visible here.
 */
1458 txp_cmd_desc_numfree(struct txp_softc
*sc
)
1460 struct txp_hostvar
*hv
= sc
->sc_hostvar
;
1461 struct txp_boot_record
*br
= sc
->sc_boot
;
1462 u_int32_t widx
, ridx
, nfree
;
/* Host's next write position vs. the card's current read position. */
1464 widx
= sc
->sc_cmdring
.lastwrite
;
1465 ridx
= hv
->hv_cmd_read_idx
;
1468 /* Ring is completely free */
1469 nfree
= br
->br_cmd_siz
- sizeof(struct txp_cmd_desc
);
/* Writer ahead of reader: free space is what's left outside [ridx, widx). */
1472 nfree
= br
->br_cmd_siz
-
1473 (widx
- ridx
+ sizeof(struct txp_cmd_desc
));
/* Reader ahead of writer: free space is the gap between them. */
1475 nfree
= ridx
- widx
- sizeof(struct txp_cmd_desc
);
/* Convert the byte count to a descriptor count. */
1478 return (nfree
/ sizeof(struct txp_cmd_desc
));
/*
 * txp_stop(): quiesce the interface.  Clears RUNNING/OACTIVE, stops the
 * statistics callout, synchronously (wait == 1) tells the firmware to
 * disable TX and RX, then drains the receive rings.
 *
 * NOTE(review): local declarations and the closing brace are in gap lines
 * (1483-1485, 1487, 1489, 1491, 1494, 1496+) of this excerpt.
 */
1482 txp_stop(struct txp_softc
*sc
)
1486 ifp
= &sc
->sc_arpcom
.ac_if
;
/* Mark the interface down for the network stack. */
1488 ifp
->if_flags
&= ~(IFF_RUNNING
| IFF_OACTIVE
);
1490 callout_stop(&sc
->txp_stat_timer
);
/* Synchronously disable both data paths in the firmware (wait == 1). */
1492 txp_command(sc
, TXP_CMD_TX_DISABLE
, 0, 0, 0, NULL
, NULL
, NULL
, 1);
1493 txp_command(sc
, TXP_CMD_RX_DISABLE
, 0, 0, 0, NULL
, NULL
, NULL
, 1);
1495 txp_rxring_empty(sc
);
/*
 * txp_watchdog(): interface watchdog hook.  Only the signature is visible
 * in this excerpt; the body is absent (presumably an empty stub -- confirm
 * against the full source).
 */
1501 txp_watchdog(struct ifnet
*ifp
)
/*
 * txp_ifmedia_upd(): ifmedia "set media" callback.  Maps the requested
 * media word to one of the firmware transceiver selections (TXP_XCVR_*)
 * and issues TXP_CMD_XCVR_SELECT (fire-and-forget, wait == 0) only when
 * the selection actually changes.
 *
 * NOTE(review): gap lines (1508, 1511-1512, 1514-1515, 1519, 1524,
 * 1528-1531, 1533-1534, 1538+) hide the new_xcvr declaration, the error
 * returns for non-Ethernet/unsupported media, the else keywords, and the
 * final return -- confirm against the full source.
 */
1507 txp_ifmedia_upd(struct ifnet
*ifp
)
1509 struct txp_softc
*sc
= ifp
->if_softc
;
1510 struct ifmedia
*ifm
= &sc
->sc_ifmedia
;
/* Only Ethernet media is meaningful for this device. */
1513 if (IFM_TYPE(ifm
->ifm_media
) != IFM_ETHER
)
/* 10BASE-T: pick full- or half-duplex transceiver setting. */
1516 if (IFM_SUBTYPE(ifm
->ifm_media
) == IFM_10_T
) {
1517 if ((ifm
->ifm_media
& IFM_GMASK
) == IFM_FDX
)
1518 new_xcvr
= TXP_XCVR_10_FDX
;
1520 new_xcvr
= TXP_XCVR_10_HDX
;
/* 100BASE-TX: likewise. */
1521 } else if (IFM_SUBTYPE(ifm
->ifm_media
) == IFM_100_TX
) {
1522 if ((ifm
->ifm_media
& IFM_GMASK
) == IFM_FDX
)
1523 new_xcvr
= TXP_XCVR_100_FDX
;
1525 new_xcvr
= TXP_XCVR_100_HDX
;
/* Autoselect: let the firmware autonegotiate. */
1526 } else if (IFM_SUBTYPE(ifm
->ifm_media
) == IFM_AUTO
) {
1527 new_xcvr
= TXP_XCVR_AUTO
;
/* Nothing to do if the transceiver selection is unchanged. */
1532 if (sc
->sc_xcvr
== new_xcvr
)
1535 txp_command(sc
, TXP_CMD_XCVR_SELECT
, new_xcvr
, 0, 0,
1536 NULL
, NULL
, NULL
, 0);
1537 sc
->sc_xcvr
= new_xcvr
;
/*
 * txp_ifmedia_sts(): ifmedia "report status" callback.  Reads the PHY's
 * BMSR, BMCR and ANLPAR registers through the firmware
 * (TXP_CMD_PHY_MGMT_READ, synchronous) and translates them into
 * ifmr->ifm_status / ifmr->ifm_active for ifconfig.
 *
 * NOTE(review): gap lines (1544, 1548, 1551, 1554, 1557-1558, 1561-1562,
 * 1565-1566, 1569, 1573-1575, 1578, 1582-1584, 1595, 1597, 1599-1601,
 * 1604+) hide the goto/bail labels for failed PHY reads, the return after
 * the isolate branch, and the else keywords structuring the autoneg
 * decode -- confirm control flow against the full source.
 */
1543 txp_ifmedia_sts(struct ifnet
*ifp
, struct ifmediareq
*ifmr
)
1545 struct txp_softc
*sc
= ifp
->if_softc
;
1546 struct ifmedia
*ifm
= &sc
->sc_ifmedia
;
1547 u_int16_t bmsr
, bmcr
, anlpar
;
1549 ifmr
->ifm_status
= IFM_AVALID
;
1550 ifmr
->ifm_active
= IFM_ETHER
;
/*
 * BMSR is read twice -- presumably because the link-status bit is latched
 * low and the first read clears a stale failure (standard MII practice);
 * confirm against the PHY documentation.
 */
1552 if (txp_command(sc
, TXP_CMD_PHY_MGMT_READ
, 0, MII_BMSR
, 0,
1553 &bmsr
, NULL
, NULL
, 1))
1555 if (txp_command(sc
, TXP_CMD_PHY_MGMT_READ
, 0, MII_BMSR
, 0,
1556 &bmsr
, NULL
, NULL
, 1))
1559 if (txp_command(sc
, TXP_CMD_PHY_MGMT_READ
, 0, MII_BMCR
, 0,
1560 &bmcr
, NULL
, NULL
, 1))
1563 if (txp_command(sc
, TXP_CMD_PHY_MGMT_READ
, 0, MII_ANLPAR
, 0,
1564 &anlpar
, NULL
, NULL
, 1))
/* Link up? */
1567 if (bmsr
& BMSR_LINK
)
1568 ifmr
->ifm_status
|= IFM_ACTIVE
;
/* PHY electrically isolated: report no media. */
1570 if (bmcr
& BMCR_ISO
) {
1571 ifmr
->ifm_active
|= IFM_NONE
;
1572 ifmr
->ifm_status
= 0;
1576 if (bmcr
& BMCR_LOOP
)
1577 ifmr
->ifm_active
|= IFM_LOOP
;
/* Autonegotiation enabled: decode the link partner's abilities. */
1579 if (bmcr
& BMCR_AUTOEN
) {
1580 if ((bmsr
& BMSR_ACOMP
) == 0) {
/* Autonegotiation not complete yet. */
1581 ifmr
->ifm_active
|= IFM_NONE
;
/* Highest common denominator, best first: T4, 100/FDX, 100, 10/FDX, 10. */
1585 if (anlpar
& ANLPAR_T4
)
1586 ifmr
->ifm_active
|= IFM_100_T4
;
1587 else if (anlpar
& ANLPAR_TX_FD
)
1588 ifmr
->ifm_active
|= IFM_100_TX
|IFM_FDX
;
1589 else if (anlpar
& ANLPAR_TX
)
1590 ifmr
->ifm_active
|= IFM_100_TX
;
1591 else if (anlpar
& ANLPAR_10_FD
)
1592 ifmr
->ifm_active
|= IFM_10_T
|IFM_FDX
;
1593 else if (anlpar
& ANLPAR_10
)
1594 ifmr
->ifm_active
|= IFM_10_T
;
1596 ifmr
->ifm_active
|= IFM_NONE
;
/* Autonegotiation disabled: report whatever was manually configured. */
1598 ifmr
->ifm_active
= ifm
->ifm_cur
->ifm_media
;
/* PHY read failure path: no valid media information available. */
1602 ifmr
->ifm_active
|= IFM_NONE
;
1603 ifmr
->ifm_status
&= ~IFM_AVALID
;
/*
 * txp_show_descriptor(): debug helper.  Interprets the 16-byte descriptor
 * at d according to the type bits in its flags word and kprintf()s a
 * human-readable dump.  All four overlay pointers alias the same memory;
 * only the one matching the type field is meaningful.
 *
 * NOTE(review): gap lines (1609, 1614, 1621, 1627, 1633, 1639-1640, 1645+)
 * presumably hold the break statements between cases and the closing
 * braces -- confirm against the full source.
 */
1608 txp_show_descriptor(void *d
)
/* Overlay views of the same descriptor memory, one per descriptor type. */
1610 struct txp_cmd_desc
*cmd
= d
;
1611 struct txp_rsp_desc
*rsp
= d
;
1612 struct txp_tx_desc
*txd
= d
;
1613 struct txp_frag_desc
*frgd
= d
;
/* Dispatch on the descriptor-type bits, common to all layouts. */
1615 switch (cmd
->cmd_flags
& CMD_FLAGS_TYPE_M
) {
1616 case CMD_FLAGS_TYPE_CMD
:
1617 /* command descriptor */
1618 kprintf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
1619 cmd
->cmd_flags
, cmd
->cmd_numdesc
, cmd
->cmd_id
, cmd
->cmd_seq
,
1620 cmd
->cmd_par1
, cmd
->cmd_par2
, cmd
->cmd_par3
);
1622 case CMD_FLAGS_TYPE_RESP
:
1623 /* response descriptor */
1624 kprintf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
1625 rsp
->rsp_flags
, rsp
->rsp_numdesc
, rsp
->rsp_id
, rsp
->rsp_seq
,
1626 rsp
->rsp_par1
, rsp
->rsp_par2
, rsp
->rsp_par3
);
1628 case CMD_FLAGS_TYPE_DATA
:
1629 /* data header (assuming tx for now) */
1630 kprintf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
1631 txd
->tx_flags
, txd
->tx_numdesc
, txd
->tx_totlen
,
1632 txd
->tx_addrlo
, txd
->tx_addrhi
, txd
->tx_pflags
);
1634 case CMD_FLAGS_TYPE_FRAG
:
1635 /* fragment descriptor */
1636 kprintf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
1637 frgd
->frag_flags
, frgd
->frag_rsvd1
, frgd
->frag_len
,
1638 frgd
->frag_addrlo
, frgd
->frag_addrhi
, frgd
->frag_rsvd2
);
/* Unknown type: dump the raw fields using the command layout. */
1641 kprintf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
1642 cmd
->cmd_flags
& CMD_FLAGS_TYPE_M
,
1643 cmd
->cmd_flags
, cmd
->cmd_numdesc
, cmd
->cmd_id
, cmd
->cmd_seq
,
1644 cmd
->cmd_par1
, cmd
->cmd_par2
, cmd
->cmd_par3
);
/*
 * txp_set_filter(): program the card's receive filter from the interface
 * flags and multicast list.  Builds a TXP_RXFILT_* bitmask (promiscuous
 * short-circuits everything else), hashes multicast addresses into a
 * 64-bit table with ether_crc32_be(), writes the hash mask
 * (fire-and-forget), then synchronously writes the filter word.
 *
 * NOTE(review): gap lines (1652, 1654, 1656, 1659-1661, 1663, 1666, 1669,
 * 1671-1672, 1674, 1677-1679, 1684-1686, 1690-1693, 1696+) hide the filter
 * variable declaration, the else keywords, the empty-multicast-list branch,
 * and the continue inside the LIST_FOREACH -- confirm against the full
 * source.
 */
1651 txp_set_filter(struct txp_softc
*sc
)
1653 struct ifnet
*ifp
= &sc
->sc_arpcom
.ac_if
;
1655 struct ifmultiaddr
*ifma
;
/* Promiscuous mode overrides all the finer-grained filter bits. */
1657 if (ifp
->if_flags
& IFF_PROMISC
) {
1658 filter
= TXP_RXFILT_PROMISC
;
/* Base case: accept frames addressed directly to us. */
1662 filter
= TXP_RXFILT_DIRECT
;
1664 if (ifp
->if_flags
& IFF_BROADCAST
)
1665 filter
|= TXP_RXFILT_BROADCAST
;
1667 if (ifp
->if_flags
& IFF_ALLMULTI
) {
1668 filter
|= TXP_RXFILT_ALLMULTI
;
/* Otherwise hash each multicast address into a 64-entry (2 x 32) table. */
1670 uint32_t hashbit
, hash
[2];
1673 hash
[0] = hash
[1] = 0;
1675 LIST_FOREACH(ifma
, &ifp
->if_multiaddrs
, ifma_link
) {
/* Skip non-link-layer entries in the multicast list. */
1676 if (ifma
->ifma_addr
->sa_family
!= AF_LINK
)
/* Big-endian CRC32 of the MAC, low 6 bits select one of 64 hash bits. */
1680 hashbit
= (uint16_t)(ether_crc32_be(
1681 LLADDR((struct sockaddr_dl
*)ifma
->ifma_addr
),
1682 ETHER_ADDR_LEN
) & (64 - 1));
1683 hash
[hashbit
/ 32] |= (1 << hashbit
% 32);
1687 filter
|= TXP_RXFILT_HASHMULTI
;
/* Push the hash table to the card (fire-and-forget, wait == 0). */
1688 txp_command(sc
, TXP_CMD_MCAST_HASH_MASK_WRITE
,
1689 2, hash
[0], hash
[1], NULL
, NULL
, NULL
, 0);
/* Finally, program the assembled filter word (synchronous, wait == 1). */
1694 txp_command(sc
, TXP_CMD_RX_FILTER_WRITE
, filter
, 0, 0,
1695 NULL
, NULL
, NULL
, 1);
1699 txp_capabilities(struct txp_softc
*sc
)
1701 struct ifnet
*ifp
= &sc
->sc_arpcom
.ac_if
;
1702 struct txp_rsp_desc
*rsp
= NULL
;
1703 struct txp_ext_desc
*ext
;
1705 if (txp_command2(sc
, TXP_CMD_OFFLOAD_READ
, 0, 0, 0, NULL
, 0, &rsp
, 1))
1708 if (rsp
->rsp_numdesc
!= 1)
1710 ext
= (struct txp_ext_desc
*)(rsp
+ 1);
1712 sc
->sc_tx_capability
= ext
->ext_1
& OFFLOAD_MASK
;
1713 sc
->sc_rx_capability
= ext
->ext_2
& OFFLOAD_MASK
;
1714 ifp
->if_capabilities
= 0;
1716 if (rsp
->rsp_par2
& rsp
->rsp_par3
& OFFLOAD_VLAN
) {
1717 sc
->sc_tx_capability
|= OFFLOAD_VLAN
;
1718 sc
->sc_rx_capability
|= OFFLOAD_VLAN
;
1719 ifp
->if_capabilities
|= IFCAP_VLAN_HWTAGGING
;
1724 if (rsp
->rsp_par2
& rsp
->rsp_par3
& OFFLOAD_IPSEC
) {
1725 sc
->sc_tx_capability
|= OFFLOAD_IPSEC
;
1726 sc
->sc_rx_capability
|= OFFLOAD_IPSEC
;
1727 ifp
->if_capabilities
|= IFCAP_IPSEC
;
1731 if (rsp
->rsp_par2
& rsp
->rsp_par3
& OFFLOAD_IPCKSUM
) {
1732 sc
->sc_tx_capability
|= OFFLOAD_IPCKSUM
;
1733 sc
->sc_rx_capability
|= OFFLOAD_IPCKSUM
;
1734 ifp
->if_capabilities
|= IFCAP_HWCSUM
;
1735 ifp
->if_hwassist
|= CSUM_IP
;
1738 if (rsp
->rsp_par2
& rsp
->rsp_par3
& OFFLOAD_TCPCKSUM
) {
1740 sc
->sc_tx_capability
|= OFFLOAD_TCPCKSUM
;
1742 sc
->sc_rx_capability
|= OFFLOAD_TCPCKSUM
;
1743 ifp
->if_capabilities
|= IFCAP_HWCSUM
;
1746 if (rsp
->rsp_par2
& rsp
->rsp_par3
& OFFLOAD_UDPCKSUM
) {
1748 sc
->sc_tx_capability
|= OFFLOAD_UDPCKSUM
;
1750 sc
->sc_rx_capability
|= OFFLOAD_UDPCKSUM
;
1751 ifp
->if_capabilities
|= IFCAP_HWCSUM
;
1753 ifp
->if_capenable
= ifp
->if_capabilities
;
1755 if (txp_command(sc
, TXP_CMD_OFFLOAD_WRITE
, 0,
1756 sc
->sc_tx_capability
, sc
->sc_rx_capability
, NULL
, NULL
, NULL
, 1))
1761 kfree(rsp
, M_DEVBUF
);