2 * Copyright (c) 1997, 1998
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
32 * $FreeBSD: src/sys/pci/if_vr.c,v 1.26.2.13 2003/02/06 04:46:20 silby Exp $
36 * VIA Rhine fast ethernet PCI NIC driver
38 * Supports various network adapters based on the VIA Rhine
39 * and Rhine II PCI controllers, including the D-Link DFE530TX.
40 * Datasheets are available at http://www.via.com.tw.
42 * Written by Bill Paul <wpaul@ctr.columbia.edu>
43 * Electrical Engineering Department
44 * Columbia University, New York City
48 * The VIA Rhine controllers are similar in some respects to the
49 * DEC tulip chips, except less complicated. The controller
50 * uses an MII bus and an external physical layer interface. The
51 * receiver has a one entry perfect filter and a 64-bit hash table
52 * multicast filter. Transmit and receive descriptors are similar
55 * The Rhine has a serious flaw in its transmit DMA mechanism:
56 * transmit buffers must be longword aligned. Unfortunately,
57 * FreeBSD doesn't guarantee that mbufs will be filled in starting
58 * at longword boundaries, so we have to do a buffer copy before
62 #include "opt_ifpoll.h"
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/sockio.h>
68 #include <sys/malloc.h>
69 #include <sys/kernel.h>
70 #include <sys/socket.h>
71 #include <sys/serialize.h>
74 #include <sys/interrupt.h>
77 #include <net/ifq_var.h>
78 #include <net/if_arp.h>
79 #include <net/ethernet.h>
80 #include <net/if_dl.h>
81 #include <net/if_media.h>
82 #include <net/if_poll.h>
86 #include <vm/vm.h> /* for vtophys */
87 #include <vm/pmap.h> /* for vtophys */
89 #include <dev/netif/mii_layer/mii.h>
90 #include <dev/netif/mii_layer/miivar.h>
93 #include <bus/pci/pcireg.h>
94 #include <bus/pci/pcivar.h>
98 #include <dev/netif/vr/if_vrreg.h>
100 /* "controller miibus0" required. See GENERIC if you get errors here. */
101 #include "miibus_if.h"
106 * Various supported device vendors/types and their names.
108 static struct vr_type vr_devs
[] = {
109 { PCI_VENDOR_VIATECH
, PCI_PRODUCT_VIATECH_VT3043
,
110 "VIA VT3043 Rhine I 10/100BaseTX" },
111 { PCI_VENDOR_VIATECH
, PCI_PRODUCT_VIATECH_VT86C100A
,
112 "VIA VT86C100A Rhine II 10/100BaseTX" },
113 { PCI_VENDOR_VIATECH
, PCI_PRODUCT_VIATECH_VT6102
,
114 "VIA VT6102 Rhine II 10/100BaseTX" },
115 { PCI_VENDOR_VIATECH
, PCI_PRODUCT_VIATECH_VT6105
,
116 "VIA VT6105 Rhine III 10/100BaseTX" },
117 { PCI_VENDOR_VIATECH
, PCI_PRODUCT_VIATECH_VT6105M
,
118 "VIA VT6105M Rhine III 10/100BaseTX" },
119 { PCI_VENDOR_DELTA
, PCI_PRODUCT_DELTA_RHINEII
,
120 "Delta Electronics Rhine II 10/100BaseTX" },
121 { PCI_VENDOR_ADDTRON
, PCI_PRODUCT_ADDTRON_RHINEII
,
122 "Addtron Technology Rhine II 10/100BaseTX" },
126 static int vr_probe(device_t
);
127 static int vr_attach(device_t
);
128 static int vr_detach(device_t
);
130 static int vr_newbuf(struct vr_softc
*, struct vr_chain_onefrag
*,
132 static int vr_encap(struct vr_softc
*, int, struct mbuf
* );
134 static void vr_rxeof(struct vr_softc
*);
135 static void vr_rxeoc(struct vr_softc
*);
136 static void vr_txeof(struct vr_softc
*);
137 static void vr_txeoc(struct vr_softc
*);
138 static void vr_tick(void *);
139 static void vr_intr(void *);
140 static void vr_start(struct ifnet
*, struct ifaltq_subque
*);
141 static int vr_ioctl(struct ifnet
*, u_long
, caddr_t
, struct ucred
*);
142 static void vr_init(void *);
143 static void vr_stop(struct vr_softc
*);
144 static void vr_watchdog(struct ifnet
*);
145 static void vr_shutdown(device_t
);
146 static int vr_ifmedia_upd(struct ifnet
*);
147 static void vr_ifmedia_sts(struct ifnet
*, struct ifmediareq
*);
150 static void vr_mii_sync(struct vr_softc
*);
151 static void vr_mii_send(struct vr_softc
*, uint32_t, int);
153 static int vr_mii_readreg(struct vr_softc
*, struct vr_mii_frame
*);
154 static int vr_mii_writereg(struct vr_softc
*, struct vr_mii_frame
*);
155 static int vr_miibus_readreg(device_t
, int, int);
156 static int vr_miibus_writereg(device_t
, int, int, int);
157 static void vr_miibus_statchg(device_t
);
159 static void vr_setcfg(struct vr_softc
*, int);
160 static void vr_setmulti(struct vr_softc
*);
161 static void vr_reset(struct vr_softc
*);
162 static int vr_list_rx_init(struct vr_softc
*);
163 static int vr_list_tx_init(struct vr_softc
*);
165 static void vr_npoll(struct ifnet
*, struct ifpoll_info
*);
166 static void vr_npoll_compat(struct ifnet
*, void *, int);
170 #define VR_RES SYS_RES_IOPORT
171 #define VR_RID VR_PCI_LOIO
173 #define VR_RES SYS_RES_MEMORY
174 #define VR_RID VR_PCI_LOMEM
177 static device_method_t vr_methods
[] = {
178 /* Device interface */
179 DEVMETHOD(device_probe
, vr_probe
),
180 DEVMETHOD(device_attach
, vr_attach
),
181 DEVMETHOD(device_detach
, vr_detach
),
182 DEVMETHOD(device_shutdown
, vr_shutdown
),
185 DEVMETHOD(bus_print_child
, bus_generic_print_child
),
186 DEVMETHOD(bus_driver_added
, bus_generic_driver_added
),
189 DEVMETHOD(miibus_readreg
, vr_miibus_readreg
),
190 DEVMETHOD(miibus_writereg
, vr_miibus_writereg
),
191 DEVMETHOD(miibus_statchg
, vr_miibus_statchg
),
196 static driver_t vr_driver
= {
199 sizeof(struct vr_softc
)
202 static devclass_t vr_devclass
;
/* Module glue: declare the if_vr module and register the vr driver on the
 * PCI bus; the second DRIVER_MODULE hangs a miibus instance off vr so the
 * MII PHY layer attaches beneath this driver. */
204 DECLARE_DUMMY_MODULE(if_vr
);
205 DRIVER_MODULE(if_vr
, pci
, vr_driver
, vr_devclass
, NULL
, NULL
);
206 DRIVER_MODULE(miibus
, vr
, miibus_driver
, miibus_devclass
, NULL
, NULL
);
/* Read-modify-write helpers for CSR bit manipulation, one pair per access
 * width (8/16/32 bit). Each reads the register, ORs in (SETBIT) or masks
 * out (CLRBIT) the given bits, and writes the result back. */

/* Set bit(s) x in an 8-bit CSR. */
208 #define VR_SETBIT(sc, reg, x) \
209 CSR_WRITE_1(sc, reg, \
210 CSR_READ_1(sc, reg) | (x))
/* Clear bit(s) x in an 8-bit CSR. */
212 #define VR_CLRBIT(sc, reg, x) \
213 CSR_WRITE_1(sc, reg, \
214 CSR_READ_1(sc, reg) & ~(x))
/* Set bit(s) x in a 16-bit CSR. */
216 #define VR_SETBIT16(sc, reg, x) \
217 CSR_WRITE_2(sc, reg, \
218 CSR_READ_2(sc, reg) | (x))
/* Clear bit(s) x in a 16-bit CSR. */
220 #define VR_CLRBIT16(sc, reg, x) \
221 CSR_WRITE_2(sc, reg, \
222 CSR_READ_2(sc, reg) & ~(x))
/* Set bit(s) x in a 32-bit CSR. */
224 #define VR_SETBIT32(sc, reg, x) \
225 CSR_WRITE_4(sc, reg, \
226 CSR_READ_4(sc, reg) | (x))
/* Clear bit(s) x in a 32-bit CSR. */
228 #define VR_CLRBIT32(sc, reg, x) \
229 CSR_WRITE_4(sc, reg, \
230 CSR_READ_4(sc, reg) & ~(x))
233 CSR_WRITE_1(sc, VR_MIICMD, \
234 CSR_READ_1(sc, VR_MIICMD) | (x))
237 CSR_WRITE_1(sc, VR_MIICMD, \
238 CSR_READ_1(sc, VR_MIICMD) & ~(x))
242 * Sync the PHYs by setting data bit and strobing the clock 32 times.
245 vr_mii_sync(struct vr_softc
*sc
)
249 SIO_SET(VR_MIICMD_DIR
|VR_MIICMD_DATAIN
);
251 for (i
= 0; i
< 32; i
++) {
252 SIO_SET(VR_MIICMD_CLK
);
254 SIO_CLR(VR_MIICMD_CLK
);
260 * Clock a series of bits through the MII.
263 vr_mii_send(struct vr_softc
*sc
, uint32_t bits
, int cnt
)
267 SIO_CLR(VR_MIICMD_CLK
);
269 for (i
= (0x1 << (cnt
- 1)); i
; i
>>= 1) {
271 SIO_SET(VR_MIICMD_DATAIN
);
273 SIO_CLR(VR_MIICMD_DATAIN
);
275 SIO_CLR(VR_MIICMD_CLK
);
277 SIO_SET(VR_MIICMD_CLK
);
283 * Read a PHY register through the MII.
286 vr_mii_readreg(struct vr_softc
*sc
, struct vr_mii_frame
*frame
)
291 /* Set up frame for RX. */
292 frame
->mii_stdelim
= VR_MII_STARTDELIM
;
293 frame
->mii_opcode
= VR_MII_READOP
;
294 frame
->mii_turnaround
= 0;
297 CSR_WRITE_1(sc
, VR_MIICMD
, 0);
298 VR_SETBIT(sc
, VR_MIICMD
, VR_MIICMD_DIRECTPGM
);
300 /* Turn on data xmit. */
301 SIO_SET(VR_MIICMD_DIR
);
305 /* Send command/address info. */
306 vr_mii_send(sc
, frame
->mii_stdelim
, 2);
307 vr_mii_send(sc
, frame
->mii_opcode
, 2);
308 vr_mii_send(sc
, frame
->mii_phyaddr
, 5);
309 vr_mii_send(sc
, frame
->mii_regaddr
, 5);
312 SIO_CLR((VR_MIICMD_CLK
|VR_MIICMD_DATAIN
));
314 SIO_SET(VR_MIICMD_CLK
);
318 SIO_CLR(VR_MIICMD_DIR
);
321 SIO_CLR(VR_MIICMD_CLK
);
323 ack
= CSR_READ_4(sc
, VR_MIICMD
) & VR_MIICMD_DATAOUT
;
324 SIO_SET(VR_MIICMD_CLK
);
328 * Now try reading data bits. If the ack failed, we still
329 * need to clock through 16 cycles to keep the PHY(s) in sync.
332 for(i
= 0; i
< 16; i
++) {
333 SIO_CLR(VR_MIICMD_CLK
);
335 SIO_SET(VR_MIICMD_CLK
);
341 for (i
= 0x8000; i
; i
>>= 1) {
342 SIO_CLR(VR_MIICMD_CLK
);
345 if (CSR_READ_4(sc
, VR_MIICMD
) & VR_MIICMD_DATAOUT
)
346 frame
->mii_data
|= i
;
349 SIO_SET(VR_MIICMD_CLK
);
354 SIO_CLR(VR_MIICMD_CLK
);
356 SIO_SET(VR_MIICMD_CLK
);
367 /* Set the PHY address. */
368 CSR_WRITE_1(sc
, VR_PHYADDR
, (CSR_READ_1(sc
, VR_PHYADDR
)& 0xe0)|
371 /* Set the register address. */
372 CSR_WRITE_1(sc
, VR_MIIADDR
, frame
->mii_regaddr
);
373 VR_SETBIT(sc
, VR_MIICMD
, VR_MIICMD_READ_ENB
);
375 for (i
= 0; i
< 10000; i
++) {
376 if ((CSR_READ_1(sc
, VR_MIICMD
) & VR_MIICMD_READ_ENB
) == 0)
380 frame
->mii_data
= CSR_READ_2(sc
, VR_MIIDATA
);
388 * Write to a PHY register through the MII.
391 vr_mii_writereg(struct vr_softc
*sc
, struct vr_mii_frame
*frame
)
394 CSR_WRITE_1(sc
, VR_MIICMD
, 0);
395 VR_SETBIT(sc
, VR_MIICMD
, VR_MIICMD_DIRECTPGM
);
397 /* Set up frame for TX. */
398 frame
->mii_stdelim
= VR_MII_STARTDELIM
;
399 frame
->mii_opcode
= VR_MII_WRITEOP
;
400 frame
->mii_turnaround
= VR_MII_TURNAROUND
;
402 /* Turn on data output. */
403 SIO_SET(VR_MIICMD_DIR
);
407 vr_mii_send(sc
, frame
->mii_stdelim
, 2);
408 vr_mii_send(sc
, frame
->mii_opcode
, 2);
409 vr_mii_send(sc
, frame
->mii_phyaddr
, 5);
410 vr_mii_send(sc
, frame
->mii_regaddr
, 5);
411 vr_mii_send(sc
, frame
->mii_turnaround
, 2);
412 vr_mii_send(sc
, frame
->mii_data
, 16);
415 SIO_SET(VR_MIICMD_CLK
);
417 SIO_CLR(VR_MIICMD_CLK
);
421 SIO_CLR(VR_MIICMD_DIR
);
429 /* Set the PHY address */
430 CSR_WRITE_1(sc
, VR_PHYADDR
, (CSR_READ_1(sc
, VR_PHYADDR
)& 0xe0)|
433 /* Set the register address and data to write. */
434 CSR_WRITE_1(sc
, VR_MIIADDR
, frame
->mii_regaddr
);
435 CSR_WRITE_2(sc
, VR_MIIDATA
, frame
->mii_data
);
437 VR_SETBIT(sc
, VR_MIICMD
, VR_MIICMD_WRITE_ENB
);
439 for (i
= 0; i
< 10000; i
++) {
440 if ((CSR_READ_1(sc
, VR_MIICMD
) & VR_MIICMD_WRITE_ENB
) == 0)
449 vr_miibus_readreg(device_t dev
, int phy
, int reg
)
451 struct vr_mii_frame frame
;
454 sc
= device_get_softc(dev
);
456 switch (sc
->vr_revid
) {
457 case REV_ID_VT6102_APOLLO
:
465 bzero(&frame
, sizeof(frame
));
467 frame
.mii_phyaddr
= phy
;
468 frame
.mii_regaddr
= reg
;
469 vr_mii_readreg(sc
, &frame
);
471 return(frame
.mii_data
);
475 vr_miibus_writereg(device_t dev
, int phy
, int reg
, int data
)
477 struct vr_mii_frame frame
;
480 sc
= device_get_softc(dev
);
482 switch (sc
->vr_revid
) {
483 case REV_ID_VT6102_APOLLO
:
491 bzero(&frame
, sizeof(frame
));
493 frame
.mii_phyaddr
= phy
;
494 frame
.mii_regaddr
= reg
;
495 frame
.mii_data
= data
;
497 vr_mii_writereg(sc
, &frame
);
503 vr_miibus_statchg(device_t dev
)
505 struct mii_data
*mii
;
508 sc
= device_get_softc(dev
);
509 mii
= device_get_softc(sc
->vr_miibus
);
510 vr_setcfg(sc
, mii
->mii_media_active
);
514 * Program the 64-bit multicast hash filter.
517 vr_setmulti(struct vr_softc
*sc
)
520 uint32_t hashes
[2] = { 0, 0 };
521 struct ifmultiaddr
*ifma
;
525 ifp
= &sc
->arpcom
.ac_if
;
527 rxfilt
= CSR_READ_1(sc
, VR_RXCFG
);
529 if (ifp
->if_flags
& IFF_ALLMULTI
|| ifp
->if_flags
& IFF_PROMISC
) {
530 rxfilt
|= VR_RXCFG_RX_MULTI
;
531 CSR_WRITE_1(sc
, VR_RXCFG
, rxfilt
);
532 CSR_WRITE_4(sc
, VR_MAR0
, 0xFFFFFFFF);
533 CSR_WRITE_4(sc
, VR_MAR1
, 0xFFFFFFFF);
537 /* First, zero out all the existing hash bits. */
538 CSR_WRITE_4(sc
, VR_MAR0
, 0);
539 CSR_WRITE_4(sc
, VR_MAR1
, 0);
541 /* Now program new ones. */
542 TAILQ_FOREACH(ifma
, &ifp
->if_multiaddrs
, ifma_link
) {
545 if (ifma
->ifma_addr
->sa_family
!= AF_LINK
)
548 /* use the lower 6 bits */
550 LLADDR((struct sockaddr_dl
*)ifma
->ifma_addr
),
551 ETHER_ADDR_LEN
) >> 26) & 0x0000003F;
553 hashes
[0] |= (1 << h
);
555 hashes
[1] |= (1 << (h
- 32));
560 rxfilt
|= VR_RXCFG_RX_MULTI
;
562 rxfilt
&= ~VR_RXCFG_RX_MULTI
;
564 CSR_WRITE_4(sc
, VR_MAR0
, hashes
[0]);
565 CSR_WRITE_4(sc
, VR_MAR1
, hashes
[1]);
566 CSR_WRITE_1(sc
, VR_RXCFG
, rxfilt
);
570 * In order to fiddle with the
571 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
572 * first have to put the transmit and/or receive logic in the idle state.
575 vr_setcfg(struct vr_softc
*sc
, int media
)
579 if (CSR_READ_2(sc
, VR_COMMAND
) & (VR_CMD_TX_ON
|VR_CMD_RX_ON
)) {
581 VR_CLRBIT16(sc
, VR_COMMAND
, (VR_CMD_TX_ON
|VR_CMD_RX_ON
));
584 if ((media
& IFM_GMASK
) == IFM_FDX
)
585 VR_SETBIT16(sc
, VR_COMMAND
, VR_CMD_FULLDUPLEX
);
587 VR_CLRBIT16(sc
, VR_COMMAND
, VR_CMD_FULLDUPLEX
);
590 VR_SETBIT16(sc
, VR_COMMAND
, VR_CMD_TX_ON
|VR_CMD_RX_ON
);
594 vr_reset(struct vr_softc
*sc
)
598 VR_SETBIT16(sc
, VR_COMMAND
, VR_CMD_RESET
);
600 for (i
= 0; i
< VR_TIMEOUT
; i
++) {
602 if (!(CSR_READ_2(sc
, VR_COMMAND
) & VR_CMD_RESET
))
605 if (i
== VR_TIMEOUT
) {
606 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
608 if (sc
->vr_revid
< REV_ID_VT3065_A
) {
609 if_printf(ifp
, "reset never completed!\n");
611 /* Use newer force reset command */
612 if_printf(ifp
, "Using force reset command.\n");
613 VR_SETBIT(sc
, VR_MISC_CR1
, VR_MISCCR1_FORSRST
);
617 /* Wait a little while for the chip to get its brains in order. */
622 * Probe for a VIA Rhine chip. Check the PCI vendor and device
623 * IDs against our list and return a device name if we find a match.
626 vr_probe(device_t dev
)
631 vid
= pci_get_vendor(dev
);
632 did
= pci_get_device(dev
);
634 for (t
= vr_devs
; t
->vr_name
!= NULL
; ++t
) {
635 if (vid
== t
->vr_vid
&& did
== t
->vr_did
) {
636 device_set_desc(dev
, t
->vr_name
);
645 * Attach the interface. Allocate softc structures, do ifmedia
646 * setup and ethernet/BPF attach.
649 vr_attach(device_t dev
)
652 uint8_t eaddr
[ETHER_ADDR_LEN
];
657 sc
= device_get_softc(dev
);
658 callout_init(&sc
->vr_stat_timer
);
661 * Handle power management nonsense.
663 if (pci_get_powerstate(dev
) != PCI_POWERSTATE_D0
) {
664 uint32_t iobase
, membase
, irq
;
666 /* Save important PCI config data. */
667 iobase
= pci_read_config(dev
, VR_PCI_LOIO
, 4);
668 membase
= pci_read_config(dev
, VR_PCI_LOMEM
, 4);
669 irq
= pci_read_config(dev
, VR_PCI_INTLINE
, 4);
671 /* Reset the power state. */
672 device_printf(dev
, "chip is in D%d power mode "
673 "-- setting to D0\n", pci_get_powerstate(dev
));
674 pci_set_powerstate(dev
, PCI_POWERSTATE_D0
);
676 /* Restore PCI config data. */
677 pci_write_config(dev
, VR_PCI_LOIO
, iobase
, 4);
678 pci_write_config(dev
, VR_PCI_LOMEM
, membase
, 4);
679 pci_write_config(dev
, VR_PCI_INTLINE
, irq
, 4);
682 pci_enable_busmaster(dev
);
684 sc
->vr_revid
= pci_get_revid(dev
);
687 sc
->vr_res
= bus_alloc_resource_any(dev
, VR_RES
, &rid
, RF_ACTIVE
);
689 if (sc
->vr_res
== NULL
) {
690 device_printf(dev
, "couldn't map ports/memory\n");
694 sc
->vr_btag
= rman_get_bustag(sc
->vr_res
);
695 sc
->vr_bhandle
= rman_get_bushandle(sc
->vr_res
);
697 /* Allocate interrupt */
699 sc
->vr_irq
= bus_alloc_resource_any(dev
, SYS_RES_IRQ
, &rid
,
700 RF_SHAREABLE
| RF_ACTIVE
);
702 if (sc
->vr_irq
== NULL
) {
703 device_printf(dev
, "couldn't map interrupt\n");
709 * Windows may put the chip in suspend mode when it
710 * shuts down. Be sure to kick it in the head to wake it
713 VR_CLRBIT(sc
, VR_STICKHW
, (VR_STICKHW_DS0
|VR_STICKHW_DS1
));
715 ifp
= &sc
->arpcom
.ac_if
;
716 if_initname(ifp
, device_get_name(dev
), device_get_unit(dev
));
718 /* Reset the adapter. */
722 * Turn on bit2 (MIION) in PCI configuration register 0x53 during
723 * initialization and disable AUTOPOLL.
725 pci_write_config(dev
, VR_PCI_MODE
,
726 pci_read_config(dev
, VR_PCI_MODE
, 4) | (VR_MODE3_MIION
<< 24), 4);
727 VR_CLRBIT(sc
, VR_MIICMD
, VR_MIICMD_AUTOPOLL
);
730 * Get station address. The way the Rhine chips work,
731 * you're not allowed to directly access the EEPROM once
732 * they've been programmed a special way. Consequently,
733 * we need to read the node address from the PAR0 and PAR1
736 VR_SETBIT(sc
, VR_EECSR
, VR_EECSR_LOAD
);
738 for (i
= 0; i
< ETHER_ADDR_LEN
; i
++)
739 eaddr
[i
] = CSR_READ_1(sc
, VR_PAR0
+ i
);
741 sc
->vr_ldata
= contigmalloc(sizeof(struct vr_list_data
), M_DEVBUF
,
742 M_WAITOK
| M_ZERO
, 0, 0xffffffff, PAGE_SIZE
, 0);
744 if (sc
->vr_ldata
== NULL
) {
745 device_printf(dev
, "no memory for list buffers!\n");
750 /* Initialize TX buffer */
751 sc
->vr_cdata
.vr_tx_buf
= contigmalloc(VR_TX_BUF_SIZE
, M_DEVBUF
,
752 M_WAITOK
, 0, 0xffffffff, PAGE_SIZE
, 0);
753 if (sc
->vr_cdata
.vr_tx_buf
== NULL
) {
754 device_printf(dev
, "can't allocate tx buffer!\n");
759 /* Set various TX indexes to invalid value */
760 sc
->vr_cdata
.vr_tx_free_idx
= -1;
761 sc
->vr_cdata
.vr_tx_tail_idx
= -1;
762 sc
->vr_cdata
.vr_tx_head_idx
= -1;
766 ifp
->if_mtu
= ETHERMTU
;
767 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
768 ifp
->if_ioctl
= vr_ioctl
;
769 ifp
->if_start
= vr_start
;
771 ifp
->if_npoll
= vr_npoll
;
773 ifp
->if_watchdog
= vr_watchdog
;
774 ifp
->if_init
= vr_init
;
775 ifp
->if_baudrate
= 10000000;
776 ifq_set_maxlen(&ifp
->if_snd
, VR_TX_LIST_CNT
- 1);
777 ifq_set_ready(&ifp
->if_snd
);
782 if (mii_phy_probe(dev
, &sc
->vr_miibus
,
783 vr_ifmedia_upd
, vr_ifmedia_sts
)) {
784 if_printf(ifp
, "MII without any phy!\n");
789 /* Call MI attach routine. */
790 ether_ifattach(ifp
, eaddr
, NULL
);
792 ifq_set_cpuid(&ifp
->if_snd
, rman_get_cpuid(sc
->vr_irq
));
795 ifpoll_compat_setup(&sc
->vr_npoll
, NULL
, NULL
, device_get_unit(dev
),
799 error
= bus_setup_intr(dev
, sc
->vr_irq
, INTR_MPSAFE
,
800 vr_intr
, sc
, &sc
->vr_intrhand
,
803 device_printf(dev
, "couldn't set up irq\n");
816 vr_detach(device_t dev
)
818 struct vr_softc
*sc
= device_get_softc(dev
);
819 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
821 if (device_is_attached(dev
)) {
822 lwkt_serialize_enter(ifp
->if_serializer
);
824 bus_teardown_intr(dev
, sc
->vr_irq
, sc
->vr_intrhand
);
825 lwkt_serialize_exit(ifp
->if_serializer
);
829 if (sc
->vr_miibus
!= NULL
)
830 device_delete_child(dev
, sc
->vr_miibus
);
831 bus_generic_detach(dev
);
833 if (sc
->vr_irq
!= NULL
)
834 bus_release_resource(dev
, SYS_RES_IRQ
, 0, sc
->vr_irq
);
835 if (sc
->vr_res
!= NULL
)
836 bus_release_resource(dev
, VR_RES
, VR_RID
, sc
->vr_res
);
837 if (sc
->vr_ldata
!= NULL
)
838 contigfree(sc
->vr_ldata
, sizeof(struct vr_list_data
), M_DEVBUF
);
839 if (sc
->vr_cdata
.vr_tx_buf
!= NULL
)
840 contigfree(sc
->vr_cdata
.vr_tx_buf
, VR_TX_BUF_SIZE
, M_DEVBUF
);
846 * Initialize the transmit descriptors.
849 vr_list_tx_init(struct vr_softc
*sc
)
851 struct vr_chain_data
*cd
;
852 struct vr_list_data
*ld
;
853 struct vr_chain
*tx_chain
;
858 tx_chain
= cd
->vr_tx_chain
;
860 for (i
= 0; i
< VR_TX_LIST_CNT
; i
++) {
861 tx_chain
[i
].vr_ptr
= &ld
->vr_tx_list
[i
];
862 if (i
== (VR_TX_LIST_CNT
- 1))
863 tx_chain
[i
].vr_next_idx
= 0;
865 tx_chain
[i
].vr_next_idx
= i
+ 1;
868 for (i
= 0; i
< VR_TX_LIST_CNT
; ++i
) {
872 tx_buf
= VR_TX_BUF(sc
, i
);
873 next_idx
= tx_chain
[i
].vr_next_idx
;
875 tx_chain
[i
].vr_next_desc_paddr
=
876 vtophys(tx_chain
[next_idx
].vr_ptr
);
877 tx_chain
[i
].vr_buf_paddr
= vtophys(tx_buf
);
880 cd
->vr_tx_free_idx
= 0;
881 cd
->vr_tx_tail_idx
= cd
->vr_tx_head_idx
= -1;
888 * Initialize the RX descriptors and allocate mbufs for them. Note that
889 * we arrange the descriptors in a closed ring, so that the last descriptor
890 * points back to the first.
893 vr_list_rx_init(struct vr_softc
*sc
)
895 struct vr_chain_data
*cd
;
896 struct vr_list_data
*ld
;
902 for (i
= 0; i
< VR_RX_LIST_CNT
; i
++) {
903 cd
->vr_rx_chain
[i
].vr_ptr
= (struct vr_desc
*)&ld
->vr_rx_list
[i
];
904 if (vr_newbuf(sc
, &cd
->vr_rx_chain
[i
], NULL
) == ENOBUFS
)
906 if (i
== (VR_RX_LIST_CNT
- 1))
910 cd
->vr_rx_chain
[i
].vr_nextdesc
= &cd
->vr_rx_chain
[nexti
];
911 ld
->vr_rx_list
[i
].vr_next
= vtophys(&ld
->vr_rx_list
[nexti
]);
914 cd
->vr_rx_head
= &cd
->vr_rx_chain
[0];
920 * Initialize an RX descriptor and attach an MBUF cluster.
921 * Note: the length fields are only 11 bits wide, which means the
922 * largest size we can specify is 2047. This is important because
923 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
924 * overflow the field and make a mess.
927 vr_newbuf(struct vr_softc
*sc
, struct vr_chain_onefrag
*c
, struct mbuf
*m
)
929 struct mbuf
*m_new
= NULL
;
932 m_new
= m_getcl(M_NOWAIT
, MT_DATA
, M_PKTHDR
);
935 m_new
->m_len
= m_new
->m_pkthdr
.len
= MCLBYTES
;
938 m_new
->m_len
= m_new
->m_pkthdr
.len
= MCLBYTES
;
939 m_new
->m_data
= m_new
->m_ext
.ext_buf
;
942 m_adj(m_new
, sizeof(uint64_t));
945 c
->vr_ptr
->vr_status
= VR_RXSTAT
;
946 c
->vr_ptr
->vr_data
= vtophys(mtod(m_new
, caddr_t
));
947 c
->vr_ptr
->vr_ctl
= VR_RXCTL
| VR_RXLEN
;
953 * A frame has been uploaded: pass the resulting mbuf chain up to
954 * the higher level protocols.
957 vr_rxeof(struct vr_softc
*sc
)
961 struct vr_chain_onefrag
*cur_rx
;
965 ifp
= &sc
->arpcom
.ac_if
;
967 while(!((rxstat
= sc
->vr_cdata
.vr_rx_head
->vr_ptr
->vr_status
) &
969 struct mbuf
*m0
= NULL
;
971 cur_rx
= sc
->vr_cdata
.vr_rx_head
;
972 sc
->vr_cdata
.vr_rx_head
= cur_rx
->vr_nextdesc
;
976 * If an error occurs, update stats, clear the
977 * status word and leave the mbuf cluster in place:
978 * it should simply get re-used next time this descriptor
979 * comes up in the ring.
981 if (rxstat
& VR_RXSTAT_RXERR
) {
982 IFNET_STAT_INC(ifp
, ierrors
, 1);
983 if_printf(ifp
, "rx error (%02x):", rxstat
& 0x000000ff);
984 if (rxstat
& VR_RXSTAT_CRCERR
)
985 kprintf(" crc error");
986 if (rxstat
& VR_RXSTAT_FRAMEALIGNERR
)
987 kprintf(" frame alignment error\n");
988 if (rxstat
& VR_RXSTAT_FIFOOFLOW
)
989 kprintf(" FIFO overflow");
990 if (rxstat
& VR_RXSTAT_GIANT
)
991 kprintf(" received giant packet");
992 if (rxstat
& VR_RXSTAT_RUNT
)
993 kprintf(" received runt packet");
994 if (rxstat
& VR_RXSTAT_BUSERR
)
995 kprintf(" system bus error");
996 if (rxstat
& VR_RXSTAT_BUFFERR
)
997 kprintf("rx buffer error");
999 vr_newbuf(sc
, cur_rx
, m
);
1003 /* No errors; receive the packet. */
1004 total_len
= VR_RXBYTES(cur_rx
->vr_ptr
->vr_status
);
1007 * XXX The VIA Rhine chip includes the CRC with every
1008 * received frame, and there's no way to turn this
1009 * behavior off (at least, I can't find anything in
1010 * the manual that explains how to do it) so we have
1011 * to trim off the CRC manually.
1013 total_len
-= ETHER_CRC_LEN
;
1015 m0
= m_devget(mtod(m
, char *) - ETHER_ALIGN
,
1016 total_len
+ ETHER_ALIGN
, 0, ifp
);
1017 vr_newbuf(sc
, cur_rx
, m
);
1019 IFNET_STAT_INC(ifp
, ierrors
, 1);
1022 m_adj(m0
, ETHER_ALIGN
);
1025 IFNET_STAT_INC(ifp
, ipackets
, 1);
1026 ifp
->if_input(ifp
, m
, NULL
, -1);
1031 vr_rxeoc(struct vr_softc
*sc
)
1036 ifp
= &sc
->arpcom
.ac_if
;
1038 IFNET_STAT_INC(ifp
, ierrors
, 1);
1040 VR_CLRBIT16(sc
, VR_COMMAND
, VR_CMD_RX_ON
);
1043 /* Wait for receiver to stop */
1045 i
&& (CSR_READ_2(sc
, VR_COMMAND
) & VR_CMD_RX_ON
);
1047 ; /* Wait for receiver to stop */
1050 if_printf(ifp
, "rx shutdown error!\n");
1051 sc
->vr_flags
|= VR_F_RESTART
;
1057 CSR_WRITE_4(sc
, VR_RXADDR
, vtophys(sc
->vr_cdata
.vr_rx_head
->vr_ptr
));
1058 VR_SETBIT16(sc
, VR_COMMAND
, VR_CMD_RX_ON
);
1059 VR_SETBIT16(sc
, VR_COMMAND
, VR_CMD_RX_GO
);
1063 * A frame was downloaded to the chip. It's safe for us to clean up
1067 vr_txeof(struct vr_softc
*sc
)
1069 struct vr_chain_data
*cd
;
1070 struct vr_chain
*tx_chain
;
1073 ifp
= &sc
->arpcom
.ac_if
;
1076 /* Reset the timeout timer; vr_txeoc will clear it. */
1080 if (cd
->vr_tx_head_idx
== -1)
1083 tx_chain
= cd
->vr_tx_chain
;
1086 * Go through our tx list and free mbufs for those
1087 * frames that have been transmitted.
1089 while(tx_chain
[cd
->vr_tx_head_idx
].vr_buf
!= NULL
) {
1090 struct vr_chain
*cur_tx
;
1094 cur_tx
= &tx_chain
[cd
->vr_tx_head_idx
];
1095 txstat
= cur_tx
->vr_ptr
->vr_status
;
1097 if ((txstat
& VR_TXSTAT_ABRT
) ||
1098 (txstat
& VR_TXSTAT_UDF
)) {
1100 i
&& (CSR_READ_2(sc
, VR_COMMAND
) & VR_CMD_TX_ON
);
1102 ; /* Wait for chip to shutdown */
1104 if_printf(ifp
, "tx shutdown timeout\n");
1105 sc
->vr_flags
|= VR_F_RESTART
;
1108 VR_TXOWN(cur_tx
) = VR_TXSTAT_OWN
;
1109 CSR_WRITE_4(sc
, VR_TXADDR
, vtophys(cur_tx
->vr_ptr
));
1113 if (txstat
& VR_TXSTAT_OWN
)
1116 if (txstat
& VR_TXSTAT_ERRSUM
) {
1117 IFNET_STAT_INC(ifp
, oerrors
, 1);
1118 if (txstat
& VR_TXSTAT_DEFER
)
1119 IFNET_STAT_INC(ifp
, collisions
, 1);
1120 if (txstat
& VR_TXSTAT_LATECOLL
)
1121 IFNET_STAT_INC(ifp
, collisions
, 1);
1124 IFNET_STAT_INC(ifp
, collisions
,
1125 (txstat
& VR_TXSTAT_COLLCNT
) >> 3);
1127 IFNET_STAT_INC(ifp
, opackets
, 1);
1128 cur_tx
->vr_buf
= NULL
;
1130 if (cd
->vr_tx_head_idx
== cd
->vr_tx_tail_idx
) {
1131 cd
->vr_tx_head_idx
= -1;
1132 cd
->vr_tx_tail_idx
= -1;
1136 cd
->vr_tx_head_idx
= cur_tx
->vr_next_idx
;
1141 * TX 'end of channel' interrupt handler.
1144 vr_txeoc(struct vr_softc
*sc
)
1148 ifp
= &sc
->arpcom
.ac_if
;
1150 if (sc
->vr_cdata
.vr_tx_head_idx
== -1) {
1151 ifq_clr_oactive(&ifp
->if_snd
);
1152 sc
->vr_cdata
.vr_tx_tail_idx
= -1;
1160 struct vr_softc
*sc
= xsc
;
1161 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1162 struct mii_data
*mii
;
1164 lwkt_serialize_enter(ifp
->if_serializer
);
1166 if (sc
->vr_flags
& VR_F_RESTART
) {
1167 if_printf(&sc
->arpcom
.ac_if
, "restarting\n");
1171 sc
->vr_flags
&= ~VR_F_RESTART
;
1174 mii
= device_get_softc(sc
->vr_miibus
);
1177 callout_reset(&sc
->vr_stat_timer
, hz
, vr_tick
, sc
);
1179 lwkt_serialize_exit(ifp
->if_serializer
);
1185 struct vr_softc
*sc
;
1190 ifp
= &sc
->arpcom
.ac_if
;
1192 /* Suppress unwanted interrupts. */
1193 if (!(ifp
->if_flags
& IFF_UP
)) {
1198 /* Disable interrupts. */
1199 if ((ifp
->if_flags
& IFF_NPOLLING
) == 0)
1200 CSR_WRITE_2(sc
, VR_IMR
, 0x0000);
1203 status
= CSR_READ_2(sc
, VR_ISR
);
1205 CSR_WRITE_2(sc
, VR_ISR
, status
);
1207 if ((status
& VR_INTRS
) == 0)
1210 if (status
& VR_ISR_RX_OK
)
1213 if (status
& VR_ISR_RX_DROPPED
) {
1214 if_printf(ifp
, "rx packet lost\n");
1215 IFNET_STAT_INC(ifp
, ierrors
, 1);
1218 if ((status
& VR_ISR_RX_ERR
) || (status
& VR_ISR_RX_NOBUF
) ||
1219 (status
& VR_ISR_RX_OFLOW
)) {
1220 if_printf(ifp
, "receive error (%04x)", status
);
1221 if (status
& VR_ISR_RX_NOBUF
)
1222 kprintf(" no buffers");
1223 if (status
& VR_ISR_RX_OFLOW
)
1224 kprintf(" overflow");
1225 if (status
& VR_ISR_RX_DROPPED
)
1226 kprintf(" packet lost");
1231 if ((status
& VR_ISR_BUSERR
) || (status
& VR_ISR_TX_UNDERRUN
)) {
1237 if ((status
& VR_ISR_TX_OK
) || (status
& VR_ISR_TX_ABRT
) ||
1238 (status
& VR_ISR_TX_ABRT2
) || (status
& VR_ISR_UDFI
)) {
1240 if ((status
& VR_ISR_UDFI
) ||
1241 (status
& VR_ISR_TX_ABRT2
) ||
1242 (status
& VR_ISR_TX_ABRT
)) {
1243 IFNET_STAT_INC(ifp
, oerrors
, 1);
1244 if (sc
->vr_cdata
.vr_tx_head_idx
!= -1) {
1245 VR_SETBIT16(sc
, VR_COMMAND
,
1247 VR_SETBIT16(sc
, VR_COMMAND
,
1257 /* Re-enable interrupts. */
1258 if ((ifp
->if_flags
& IFF_NPOLLING
) == 0)
1259 CSR_WRITE_2(sc
, VR_IMR
, VR_INTRS
);
1261 if (!ifq_is_empty(&ifp
->if_snd
))
1266 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1267 * pointers to the fragment pointers.
1270 vr_encap(struct vr_softc
*sc
, int chain_idx
, struct mbuf
*m_head
)
1277 KASSERT(chain_idx
>= 0 && chain_idx
< VR_TX_LIST_CNT
,
1278 ("%s: chain idx(%d) out of range 0-%d",
1279 sc
->arpcom
.ac_if
.if_xname
, chain_idx
, VR_TX_LIST_CNT
));
1282 * The VIA Rhine wants packet buffers to be longword
1283 * aligned, but very often our mbufs aren't. Rather than
1284 * waste time trying to decide when to copy and when not
1285 * to copy, just do it all the time.
1287 tx_buf
= VR_TX_BUF(sc
, chain_idx
);
1288 m_copydata(m_head
, 0, m_head
->m_pkthdr
.len
, tx_buf
);
1289 len
= m_head
->m_pkthdr
.len
;
1292 * The Rhine chip doesn't auto-pad, so we have to make
1293 * sure to pad short frames out to the minimum frame length
1296 if (len
< VR_MIN_FRAMELEN
) {
1297 bzero(tx_buf
+ len
, VR_MIN_FRAMELEN
- len
);
1298 len
= VR_MIN_FRAMELEN
;
1301 c
= &sc
->vr_cdata
.vr_tx_chain
[chain_idx
];
1305 f
->vr_data
= c
->vr_buf_paddr
;
1307 f
->vr_ctl
|= (VR_TXCTL_TLINK
| VR_TXCTL_FIRSTFRAG
);
1308 f
->vr_ctl
|= (VR_TXCTL_LASTFRAG
| VR_TXCTL_FINT
);
1310 f
->vr_next
= c
->vr_next_desc_paddr
;
1316 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1317 * to the mbuf data regions directly in the transmit lists. We also save a
1318 * copy of the pointers since the transmit list fragment pointers are
1319 * physical addresses.
1322 vr_start(struct ifnet
*ifp
, struct ifaltq_subque
*ifsq
)
1324 struct vr_softc
*sc
;
1325 struct vr_chain_data
*cd
;
1326 struct vr_chain
*tx_chain
;
1327 int cur_tx_idx
, start_tx_idx
, prev_tx_idx
;
1329 ASSERT_ALTQ_SQ_DEFAULT(ifp
, ifsq
);
1331 if ((ifp
->if_flags
& IFF_RUNNING
) == 0 || ifq_is_oactive(&ifp
->if_snd
))
1336 tx_chain
= cd
->vr_tx_chain
;
1338 start_tx_idx
= cd
->vr_tx_free_idx
;
1339 cur_tx_idx
= prev_tx_idx
= -1;
1341 /* Check for an available queue slot. If there are none, punt. */
1342 if (tx_chain
[start_tx_idx
].vr_buf
!= NULL
) {
1343 ifq_set_oactive(&ifp
->if_snd
);
1347 while (tx_chain
[cd
->vr_tx_free_idx
].vr_buf
== NULL
) {
1348 struct mbuf
*m_head
;
1349 struct vr_chain
*cur_tx
;
1351 m_head
= ifq_dequeue(&ifp
->if_snd
);
1355 /* Pick a descriptor off the free list. */
1356 cur_tx_idx
= cd
->vr_tx_free_idx
;
1357 cur_tx
= &tx_chain
[cur_tx_idx
];
1359 /* Pack the data into the descriptor. */
1360 if (vr_encap(sc
, cur_tx_idx
, m_head
)) {
1361 ifq_set_oactive(&ifp
->if_snd
);
1362 cur_tx_idx
= prev_tx_idx
;
1367 if (cur_tx_idx
!= start_tx_idx
)
1368 VR_TXOWN(cur_tx
) = VR_TXSTAT_OWN
;
1370 BPF_MTAP(ifp
, m_head
);
1373 VR_TXOWN(cur_tx
) = VR_TXSTAT_OWN
;
1374 VR_SETBIT16(sc
, VR_COMMAND
, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO
);
1376 /* Iff everything went OK, we bump up free index. */
1377 prev_tx_idx
= cur_tx_idx
;
1378 cd
->vr_tx_free_idx
= cur_tx
->vr_next_idx
;
1381 /* If there are no frames queued, bail. */
1382 if (cur_tx_idx
== -1)
1385 sc
->vr_cdata
.vr_tx_tail_idx
= cur_tx_idx
;
1387 if (sc
->vr_cdata
.vr_tx_head_idx
== -1)
1388 sc
->vr_cdata
.vr_tx_head_idx
= start_tx_idx
;
1391 * Set a timeout in case the chip goes out to lunch.
1399 struct vr_softc
*sc
= xsc
;
1400 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1401 struct mii_data
*mii
;
1404 mii
= device_get_softc(sc
->vr_miibus
);
1406 /* Cancel pending I/O and free all RX/TX buffers. */
1410 /* Set our station address. */
1411 for (i
= 0; i
< ETHER_ADDR_LEN
; i
++)
1412 CSR_WRITE_1(sc
, VR_PAR0
+ i
, sc
->arpcom
.ac_enaddr
[i
]);
1415 VR_CLRBIT(sc
, VR_BCR0
, VR_BCR0_DMA_LENGTH
);
1416 VR_SETBIT(sc
, VR_BCR0
, VR_BCR0_DMA_STORENFWD
);
1419 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
1420 * so we must set both.
1422 VR_CLRBIT(sc
, VR_BCR0
, VR_BCR0_RX_THRESH
);
1423 VR_SETBIT(sc
, VR_BCR0
, VR_BCR0_RXTHRESH128BYTES
);
1425 VR_CLRBIT(sc
, VR_BCR1
, VR_BCR1_TX_THRESH
);
1426 VR_SETBIT(sc
, VR_BCR1
, VR_BCR1_TXTHRESHSTORENFWD
);
1428 VR_CLRBIT(sc
, VR_RXCFG
, VR_RXCFG_RX_THRESH
);
1429 VR_SETBIT(sc
, VR_RXCFG
, VR_RXTHRESH_128BYTES
);
1431 VR_CLRBIT(sc
, VR_TXCFG
, VR_TXCFG_TX_THRESH
);
1432 VR_SETBIT(sc
, VR_TXCFG
, VR_TXTHRESH_STORENFWD
);
1434 /* Init circular RX list. */
1435 if (vr_list_rx_init(sc
) == ENOBUFS
) {
1437 if_printf(ifp
, "initialization failed: no memory for rx buffers\n");
1441 /* Init tx descriptors. */
1442 vr_list_tx_init(sc
);
1444 /* If we want promiscuous mode, set the allframes bit. */
1445 if (ifp
->if_flags
& IFF_PROMISC
)
1446 VR_SETBIT(sc
, VR_RXCFG
, VR_RXCFG_RX_PROMISC
);
1448 VR_CLRBIT(sc
, VR_RXCFG
, VR_RXCFG_RX_PROMISC
);
1450 /* Set capture broadcast bit to capture broadcast frames. */
1451 if (ifp
->if_flags
& IFF_BROADCAST
)
1452 VR_SETBIT(sc
, VR_RXCFG
, VR_RXCFG_RX_BROAD
);
1454 VR_CLRBIT(sc
, VR_RXCFG
, VR_RXCFG_RX_BROAD
);
1457 * Program the multicast filter, if necessary.
1462 * Load the address of the RX list.
1464 CSR_WRITE_4(sc
, VR_RXADDR
, vtophys(sc
->vr_cdata
.vr_rx_head
->vr_ptr
));
1466 /* Enable receiver and transmitter. */
1467 CSR_WRITE_2(sc
, VR_COMMAND
, VR_CMD_TX_NOPOLL
|VR_CMD_START
|
1468 VR_CMD_TX_ON
|VR_CMD_RX_ON
|
1471 CSR_WRITE_4(sc
, VR_TXADDR
, vtophys(&sc
->vr_ldata
->vr_tx_list
[0]));
1474 * Enable interrupts, unless we are polling.
1476 CSR_WRITE_2(sc
, VR_ISR
, 0xFFFF);
1477 #ifdef IFPOLL_ENABLE
1478 if ((ifp
->if_flags
& IFF_NPOLLING
) == 0)
1480 CSR_WRITE_2(sc
, VR_IMR
, VR_INTRS
);
1484 ifp
->if_flags
|= IFF_RUNNING
;
1485 ifq_clr_oactive(&ifp
->if_snd
);
1487 callout_reset(&sc
->vr_stat_timer
, hz
, vr_tick
, sc
);
1491 * Set media options.
1494 vr_ifmedia_upd(struct ifnet
*ifp
)
1496 struct vr_softc
*sc
;
1500 if (ifp
->if_flags
& IFF_UP
)
1507 * Report current media status.
1510 vr_ifmedia_sts(struct ifnet
*ifp
, struct ifmediareq
*ifmr
)
1512 struct vr_softc
*sc
;
1513 struct mii_data
*mii
;
1516 mii
= device_get_softc(sc
->vr_miibus
);
1518 ifmr
->ifm_active
= mii
->mii_media_active
;
1519 ifmr
->ifm_status
= mii
->mii_media_status
;
1523 vr_ioctl(struct ifnet
*ifp
, u_long command
, caddr_t data
, struct ucred
*cr
)
1525 struct vr_softc
*sc
= ifp
->if_softc
;
1526 struct ifreq
*ifr
= (struct ifreq
*) data
;
1527 struct mii_data
*mii
;
1532 if (ifp
->if_flags
& IFF_UP
) {
1535 if (ifp
->if_flags
& IFF_RUNNING
)
1547 mii
= device_get_softc(sc
->vr_miibus
);
1548 error
= ifmedia_ioctl(ifp
, ifr
, &mii
->mii_media
, command
);
1551 error
= ether_ioctl(ifp
, command
, data
);
1557 #ifdef IFPOLL_ENABLE
1560 vr_npoll_compat(struct ifnet
*ifp
, void *arg __unused
, int count __unused
)
1562 struct vr_softc
*sc
= ifp
->if_softc
;
1564 ASSERT_SERIALIZED(ifp
->if_serializer
);
1569 vr_npoll(struct ifnet
*ifp
, struct ifpoll_info
*info
)
1571 struct vr_softc
*sc
= ifp
->if_softc
;
1573 ASSERT_SERIALIZED(ifp
->if_serializer
);
1576 int cpuid
= sc
->vr_npoll
.ifpc_cpuid
;
1578 info
->ifpi_rx
[cpuid
].poll_func
= vr_npoll_compat
;
1579 info
->ifpi_rx
[cpuid
].arg
= NULL
;
1580 info
->ifpi_rx
[cpuid
].serializer
= ifp
->if_serializer
;
1582 if (ifp
->if_flags
& IFF_RUNNING
) {
1583 /* disable interrupts */
1584 CSR_WRITE_2(sc
, VR_IMR
, 0x0000);
1586 ifq_set_cpuid(&ifp
->if_snd
, cpuid
);
1588 if (ifp
->if_flags
& IFF_RUNNING
) {
1589 /* enable interrupts */
1590 CSR_WRITE_2(sc
, VR_IMR
, VR_INTRS
);
1592 ifq_set_cpuid(&ifp
->if_snd
, rman_get_cpuid(sc
->vr_irq
));
1596 #endif /* IFPOLL_ENABLE */
1599 vr_watchdog(struct ifnet
*ifp
)
1601 struct vr_softc
*sc
;
1605 IFNET_STAT_INC(ifp
, oerrors
, 1);
1606 if_printf(ifp
, "watchdog timeout\n");
1612 if (!ifq_is_empty(&ifp
->if_snd
))
1617 * Stop the adapter and free any mbufs allocated to the
1621 vr_stop(struct vr_softc
*sc
)
1626 ifp
= &sc
->arpcom
.ac_if
;
1629 callout_stop(&sc
->vr_stat_timer
);
1631 VR_SETBIT16(sc
, VR_COMMAND
, VR_CMD_STOP
);
1632 VR_CLRBIT16(sc
, VR_COMMAND
, (VR_CMD_RX_ON
|VR_CMD_TX_ON
));
1633 CSR_WRITE_2(sc
, VR_IMR
, 0x0000);
1634 CSR_WRITE_4(sc
, VR_TXADDR
, 0x00000000);
1635 CSR_WRITE_4(sc
, VR_RXADDR
, 0x00000000);
1638 * Free data in the RX lists.
1640 for (i
= 0; i
< VR_RX_LIST_CNT
; i
++) {
1641 if (sc
->vr_cdata
.vr_rx_chain
[i
].vr_mbuf
!= NULL
) {
1642 m_freem(sc
->vr_cdata
.vr_rx_chain
[i
].vr_mbuf
);
1643 sc
->vr_cdata
.vr_rx_chain
[i
].vr_mbuf
= NULL
;
1646 bzero(&sc
->vr_ldata
->vr_rx_list
, sizeof(sc
->vr_ldata
->vr_rx_list
));
1649 * Reset the TX list buffer pointers.
1651 for (i
= 0; i
< VR_TX_LIST_CNT
; i
++)
1652 sc
->vr_cdata
.vr_tx_chain
[i
].vr_buf
= NULL
;
1654 bzero(&sc
->vr_ldata
->vr_tx_list
, sizeof(sc
->vr_ldata
->vr_tx_list
));
1656 ifp
->if_flags
&= ~IFF_RUNNING
;
1657 ifq_clr_oactive(&ifp
->if_snd
);
1661 * Stop all chip I/O so that the kernel's probe routines don't
1662 * get confused by errant DMAs when rebooting.
1665 vr_shutdown(device_t dev
)
1667 struct vr_softc
*sc
;
1669 sc
= device_get_softc(dev
);