2 * Copyright (c) 1997, 1998
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
32 * $FreeBSD: src/sys/pci/if_vr.c,v 1.26.2.13 2003/02/06 04:46:20 silby Exp $
36 * VIA Rhine fast ethernet PCI NIC driver
38 * Supports various network adapters based on the VIA Rhine
39 * and Rhine II PCI controllers, including the D-Link DFE530TX.
40 * Datasheets are available at http://www.via.com.tw.
42 * Written by Bill Paul <wpaul@ctr.columbia.edu>
43 * Electrical Engineering Department
44 * Columbia University, New York City
48 * The VIA Rhine controllers are similar in some respects to the
49 * DEC tulip chips, except less complicated. The controller
50 * uses an MII bus and an external physical layer interface. The
51 * receiver has a one entry perfect filter and a 64-bit hash table
52 * multicast filter. Transmit and receive descriptors are similar
55 * The Rhine has a serious flaw in its transmit DMA mechanism:
56 * transmit buffers must be longword aligned. Unfortunately,
57 * FreeBSD doesn't guarantee that mbufs will be filled in starting
58 * at longword boundaries, so we have to do a buffer copy before
62 #include "opt_ifpoll.h"
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/sockio.h>
68 #include <sys/malloc.h>
69 #include <sys/kernel.h>
70 #include <sys/socket.h>
71 #include <sys/serialize.h>
74 #include <sys/thread2.h>
75 #include <sys/interrupt.h>
78 #include <net/ifq_var.h>
79 #include <net/if_arp.h>
80 #include <net/ethernet.h>
81 #include <net/if_dl.h>
82 #include <net/if_media.h>
83 #include <net/if_poll.h>
87 #include <vm/vm.h> /* for vtophys */
88 #include <vm/pmap.h> /* for vtophys */
90 #include <dev/netif/mii_layer/mii.h>
91 #include <dev/netif/mii_layer/miivar.h>
94 #include <bus/pci/pcireg.h>
95 #include <bus/pci/pcivar.h>
99 #include <dev/netif/vr/if_vrreg.h>
101 /* "controller miibus0" required. See GENERIC if you get errors here. */
102 #include "miibus_if.h"
107 * Various supported device vendors/types and their names.
109 static struct vr_type vr_devs
[] = {
110 { PCI_VENDOR_VIATECH
, PCI_PRODUCT_VIATECH_VT3043
,
111 "VIA VT3043 Rhine I 10/100BaseTX" },
112 { PCI_VENDOR_VIATECH
, PCI_PRODUCT_VIATECH_VT86C100A
,
113 "VIA VT86C100A Rhine II 10/100BaseTX" },
114 { PCI_VENDOR_VIATECH
, PCI_PRODUCT_VIATECH_VT6102
,
115 "VIA VT6102 Rhine II 10/100BaseTX" },
116 { PCI_VENDOR_VIATECH
, PCI_PRODUCT_VIATECH_VT6105
,
117 "VIA VT6105 Rhine III 10/100BaseTX" },
118 { PCI_VENDOR_VIATECH
, PCI_PRODUCT_VIATECH_VT6105M
,
119 "VIA VT6105M Rhine III 10/100BaseTX" },
120 { PCI_VENDOR_DELTA
, PCI_PRODUCT_DELTA_RHINEII
,
121 "Delta Electronics Rhine II 10/100BaseTX" },
122 { PCI_VENDOR_ADDTRON
, PCI_PRODUCT_ADDTRON_RHINEII
,
123 "Addtron Technology Rhine II 10/100BaseTX" },
127 static int vr_probe(device_t
);
128 static int vr_attach(device_t
);
129 static int vr_detach(device_t
);
131 static int vr_newbuf(struct vr_softc
*, struct vr_chain_onefrag
*,
133 static int vr_encap(struct vr_softc
*, int, struct mbuf
* );
135 static void vr_rxeof(struct vr_softc
*);
136 static void vr_rxeoc(struct vr_softc
*);
137 static void vr_txeof(struct vr_softc
*);
138 static void vr_txeoc(struct vr_softc
*);
139 static void vr_tick(void *);
140 static void vr_intr(void *);
141 static void vr_start(struct ifnet
*, struct ifaltq_subque
*);
142 static int vr_ioctl(struct ifnet
*, u_long
, caddr_t
, struct ucred
*);
143 static void vr_init(void *);
144 static void vr_stop(struct vr_softc
*);
145 static void vr_watchdog(struct ifnet
*);
146 static void vr_shutdown(device_t
);
147 static int vr_ifmedia_upd(struct ifnet
*);
148 static void vr_ifmedia_sts(struct ifnet
*, struct ifmediareq
*);
151 static void vr_mii_sync(struct vr_softc
*);
152 static void vr_mii_send(struct vr_softc
*, uint32_t, int);
154 static int vr_mii_readreg(struct vr_softc
*, struct vr_mii_frame
*);
155 static int vr_mii_writereg(struct vr_softc
*, struct vr_mii_frame
*);
156 static int vr_miibus_readreg(device_t
, int, int);
157 static int vr_miibus_writereg(device_t
, int, int, int);
158 static void vr_miibus_statchg(device_t
);
160 static void vr_setcfg(struct vr_softc
*, int);
161 static void vr_setmulti(struct vr_softc
*);
162 static void vr_reset(struct vr_softc
*);
163 static int vr_list_rx_init(struct vr_softc
*);
164 static int vr_list_tx_init(struct vr_softc
*);
166 static void vr_npoll(struct ifnet
*, struct ifpoll_info
*);
167 static void vr_npoll_compat(struct ifnet
*, void *, int);
171 #define VR_RES SYS_RES_IOPORT
172 #define VR_RID VR_PCI_LOIO
174 #define VR_RES SYS_RES_MEMORY
175 #define VR_RID VR_PCI_LOMEM
178 static device_method_t vr_methods
[] = {
179 /* Device interface */
180 DEVMETHOD(device_probe
, vr_probe
),
181 DEVMETHOD(device_attach
, vr_attach
),
182 DEVMETHOD(device_detach
, vr_detach
),
183 DEVMETHOD(device_shutdown
, vr_shutdown
),
186 DEVMETHOD(bus_print_child
, bus_generic_print_child
),
187 DEVMETHOD(bus_driver_added
, bus_generic_driver_added
),
190 DEVMETHOD(miibus_readreg
, vr_miibus_readreg
),
191 DEVMETHOD(miibus_writereg
, vr_miibus_writereg
),
192 DEVMETHOD(miibus_statchg
, vr_miibus_statchg
),
197 static driver_t vr_driver
= {
200 sizeof(struct vr_softc
)
203 static devclass_t vr_devclass
;
205 DECLARE_DUMMY_MODULE(if_vr
);
206 DRIVER_MODULE(if_vr
, pci
, vr_driver
, vr_devclass
, NULL
, NULL
);
207 DRIVER_MODULE(miibus
, vr
, miibus_driver
, miibus_devclass
, NULL
, NULL
);
209 #define VR_SETBIT(sc, reg, x) \
210 CSR_WRITE_1(sc, reg, \
211 CSR_READ_1(sc, reg) | (x))
213 #define VR_CLRBIT(sc, reg, x) \
214 CSR_WRITE_1(sc, reg, \
215 CSR_READ_1(sc, reg) & ~(x))
217 #define VR_SETBIT16(sc, reg, x) \
218 CSR_WRITE_2(sc, reg, \
219 CSR_READ_2(sc, reg) | (x))
221 #define VR_CLRBIT16(sc, reg, x) \
222 CSR_WRITE_2(sc, reg, \
223 CSR_READ_2(sc, reg) & ~(x))
225 #define VR_SETBIT32(sc, reg, x) \
226 CSR_WRITE_4(sc, reg, \
227 CSR_READ_4(sc, reg) | (x))
229 #define VR_CLRBIT32(sc, reg, x) \
230 CSR_WRITE_4(sc, reg, \
231 CSR_READ_4(sc, reg) & ~(x))
234 CSR_WRITE_1(sc, VR_MIICMD, \
235 CSR_READ_1(sc, VR_MIICMD) | (x))
238 CSR_WRITE_1(sc, VR_MIICMD, \
239 CSR_READ_1(sc, VR_MIICMD) & ~(x))
243 * Sync the PHYs by setting data bit and strobing the clock 32 times.
246 vr_mii_sync(struct vr_softc
*sc
)
250 SIO_SET(VR_MIICMD_DIR
|VR_MIICMD_DATAIN
);
252 for (i
= 0; i
< 32; i
++) {
253 SIO_SET(VR_MIICMD_CLK
);
255 SIO_CLR(VR_MIICMD_CLK
);
261 * Clock a series of bits through the MII.
264 vr_mii_send(struct vr_softc
*sc
, uint32_t bits
, int cnt
)
268 SIO_CLR(VR_MIICMD_CLK
);
270 for (i
= (0x1 << (cnt
- 1)); i
; i
>>= 1) {
272 SIO_SET(VR_MIICMD_DATAIN
);
274 SIO_CLR(VR_MIICMD_DATAIN
);
276 SIO_CLR(VR_MIICMD_CLK
);
278 SIO_SET(VR_MIICMD_CLK
);
284 * Read an PHY register through the MII.
287 vr_mii_readreg(struct vr_softc
*sc
, struct vr_mii_frame
*frame
)
292 /* Set up frame for RX. */
293 frame
->mii_stdelim
= VR_MII_STARTDELIM
;
294 frame
->mii_opcode
= VR_MII_READOP
;
295 frame
->mii_turnaround
= 0;
298 CSR_WRITE_1(sc
, VR_MIICMD
, 0);
299 VR_SETBIT(sc
, VR_MIICMD
, VR_MIICMD_DIRECTPGM
);
301 /* Turn on data xmit. */
302 SIO_SET(VR_MIICMD_DIR
);
306 /* Send command/address info. */
307 vr_mii_send(sc
, frame
->mii_stdelim
, 2);
308 vr_mii_send(sc
, frame
->mii_opcode
, 2);
309 vr_mii_send(sc
, frame
->mii_phyaddr
, 5);
310 vr_mii_send(sc
, frame
->mii_regaddr
, 5);
313 SIO_CLR((VR_MIICMD_CLK
|VR_MIICMD_DATAIN
));
315 SIO_SET(VR_MIICMD_CLK
);
319 SIO_CLR(VR_MIICMD_DIR
);
322 SIO_CLR(VR_MIICMD_CLK
);
324 ack
= CSR_READ_4(sc
, VR_MIICMD
) & VR_MIICMD_DATAOUT
;
325 SIO_SET(VR_MIICMD_CLK
);
329 * Now try reading data bits. If the ack failed, we still
330 * need to clock through 16 cycles to keep the PHY(s) in sync.
333 for(i
= 0; i
< 16; i
++) {
334 SIO_CLR(VR_MIICMD_CLK
);
336 SIO_SET(VR_MIICMD_CLK
);
342 for (i
= 0x8000; i
; i
>>= 1) {
343 SIO_CLR(VR_MIICMD_CLK
);
346 if (CSR_READ_4(sc
, VR_MIICMD
) & VR_MIICMD_DATAOUT
)
347 frame
->mii_data
|= i
;
350 SIO_SET(VR_MIICMD_CLK
);
355 SIO_CLR(VR_MIICMD_CLK
);
357 SIO_SET(VR_MIICMD_CLK
);
368 /* Set the PHY address. */
369 CSR_WRITE_1(sc
, VR_PHYADDR
, (CSR_READ_1(sc
, VR_PHYADDR
)& 0xe0)|
372 /* Set the register address. */
373 CSR_WRITE_1(sc
, VR_MIIADDR
, frame
->mii_regaddr
);
374 VR_SETBIT(sc
, VR_MIICMD
, VR_MIICMD_READ_ENB
);
376 for (i
= 0; i
< 10000; i
++) {
377 if ((CSR_READ_1(sc
, VR_MIICMD
) & VR_MIICMD_READ_ENB
) == 0)
381 frame
->mii_data
= CSR_READ_2(sc
, VR_MIIDATA
);
389 * Write to a PHY register through the MII.
392 vr_mii_writereg(struct vr_softc
*sc
, struct vr_mii_frame
*frame
)
395 CSR_WRITE_1(sc
, VR_MIICMD
, 0);
396 VR_SETBIT(sc
, VR_MIICMD
, VR_MIICMD_DIRECTPGM
);
398 /* Set up frame for TX. */
399 frame
->mii_stdelim
= VR_MII_STARTDELIM
;
400 frame
->mii_opcode
= VR_MII_WRITEOP
;
401 frame
->mii_turnaround
= VR_MII_TURNAROUND
;
403 /* Turn on data output. */
404 SIO_SET(VR_MIICMD_DIR
);
408 vr_mii_send(sc
, frame
->mii_stdelim
, 2);
409 vr_mii_send(sc
, frame
->mii_opcode
, 2);
410 vr_mii_send(sc
, frame
->mii_phyaddr
, 5);
411 vr_mii_send(sc
, frame
->mii_regaddr
, 5);
412 vr_mii_send(sc
, frame
->mii_turnaround
, 2);
413 vr_mii_send(sc
, frame
->mii_data
, 16);
416 SIO_SET(VR_MIICMD_CLK
);
418 SIO_CLR(VR_MIICMD_CLK
);
422 SIO_CLR(VR_MIICMD_DIR
);
430 /* Set the PHY address. */
431 CSR_WRITE_1(sc
, VR_PHYADDR
, (CSR_READ_1(sc
, VR_PHYADDR
)& 0xe0)|
434 /* Set the register address and data to write. */
435 CSR_WRITE_1(sc
, VR_MIIADDR
, frame
->mii_regaddr
);
436 CSR_WRITE_2(sc
, VR_MIIDATA
, frame
->mii_data
);
438 VR_SETBIT(sc
, VR_MIICMD
, VR_MIICMD_WRITE_ENB
);
440 for (i
= 0; i
< 10000; i
++) {
441 if ((CSR_READ_1(sc
, VR_MIICMD
) & VR_MIICMD_WRITE_ENB
) == 0)
450 vr_miibus_readreg(device_t dev
, int phy
, int reg
)
452 struct vr_mii_frame frame
;
455 sc
= device_get_softc(dev
);
457 switch (sc
->vr_revid
) {
458 case REV_ID_VT6102_APOLLO
:
466 bzero(&frame
, sizeof(frame
));
468 frame
.mii_phyaddr
= phy
;
469 frame
.mii_regaddr
= reg
;
470 vr_mii_readreg(sc
, &frame
);
472 return(frame
.mii_data
);
476 vr_miibus_writereg(device_t dev
, int phy
, int reg
, int data
)
478 struct vr_mii_frame frame
;
481 sc
= device_get_softc(dev
);
483 switch (sc
->vr_revid
) {
484 case REV_ID_VT6102_APOLLO
:
492 bzero(&frame
, sizeof(frame
));
494 frame
.mii_phyaddr
= phy
;
495 frame
.mii_regaddr
= reg
;
496 frame
.mii_data
= data
;
498 vr_mii_writereg(sc
, &frame
);
504 vr_miibus_statchg(device_t dev
)
506 struct mii_data
*mii
;
509 sc
= device_get_softc(dev
);
510 mii
= device_get_softc(sc
->vr_miibus
);
511 vr_setcfg(sc
, mii
->mii_media_active
);
515 * Program the 64-bit multicast hash filter.
518 vr_setmulti(struct vr_softc
*sc
)
521 uint32_t hashes
[2] = { 0, 0 };
522 struct ifmultiaddr
*ifma
;
526 ifp
= &sc
->arpcom
.ac_if
;
528 rxfilt
= CSR_READ_1(sc
, VR_RXCFG
);
530 if (ifp
->if_flags
& IFF_ALLMULTI
|| ifp
->if_flags
& IFF_PROMISC
) {
531 rxfilt
|= VR_RXCFG_RX_MULTI
;
532 CSR_WRITE_1(sc
, VR_RXCFG
, rxfilt
);
533 CSR_WRITE_4(sc
, VR_MAR0
, 0xFFFFFFFF);
534 CSR_WRITE_4(sc
, VR_MAR1
, 0xFFFFFFFF);
538 /* First, zero out all the existing hash bits. */
539 CSR_WRITE_4(sc
, VR_MAR0
, 0);
540 CSR_WRITE_4(sc
, VR_MAR1
, 0);
542 /* Now program new ones. */
543 TAILQ_FOREACH(ifma
, &ifp
->if_multiaddrs
, ifma_link
) {
546 if (ifma
->ifma_addr
->sa_family
!= AF_LINK
)
549 /* use the lower 6 bits */
551 LLADDR((struct sockaddr_dl
*)ifma
->ifma_addr
),
552 ETHER_ADDR_LEN
) >> 26) & 0x0000003F;
554 hashes
[0] |= (1 << h
);
556 hashes
[1] |= (1 << (h
- 32));
561 rxfilt
|= VR_RXCFG_RX_MULTI
;
563 rxfilt
&= ~VR_RXCFG_RX_MULTI
;
565 CSR_WRITE_4(sc
, VR_MAR0
, hashes
[0]);
566 CSR_WRITE_4(sc
, VR_MAR1
, hashes
[1]);
567 CSR_WRITE_1(sc
, VR_RXCFG
, rxfilt
);
571 * In order to fiddle with the
572 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
573 * first have to put the transmit and/or receive logic in the idle state.
576 vr_setcfg(struct vr_softc
*sc
, int media
)
580 if (CSR_READ_2(sc
, VR_COMMAND
) & (VR_CMD_TX_ON
|VR_CMD_RX_ON
)) {
582 VR_CLRBIT16(sc
, VR_COMMAND
, (VR_CMD_TX_ON
|VR_CMD_RX_ON
));
585 if ((media
& IFM_GMASK
) == IFM_FDX
)
586 VR_SETBIT16(sc
, VR_COMMAND
, VR_CMD_FULLDUPLEX
);
588 VR_CLRBIT16(sc
, VR_COMMAND
, VR_CMD_FULLDUPLEX
);
591 VR_SETBIT16(sc
, VR_COMMAND
, VR_CMD_TX_ON
|VR_CMD_RX_ON
);
595 vr_reset(struct vr_softc
*sc
)
599 VR_SETBIT16(sc
, VR_COMMAND
, VR_CMD_RESET
);
601 for (i
= 0; i
< VR_TIMEOUT
; i
++) {
603 if (!(CSR_READ_2(sc
, VR_COMMAND
) & VR_CMD_RESET
))
606 if (i
== VR_TIMEOUT
) {
607 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
609 if (sc
->vr_revid
< REV_ID_VT3065_A
) {
610 if_printf(ifp
, "reset never completed!\n");
612 /* Use newer force reset command */
613 if_printf(ifp
, "Using force reset command.\n");
614 VR_SETBIT(sc
, VR_MISC_CR1
, VR_MISCCR1_FORSRST
);
618 /* Wait a little while for the chip to get its brains in order. */
623 * Probe for a VIA Rhine chip. Check the PCI vendor and device
624 * IDs against our list and return a device name if we find a match.
627 vr_probe(device_t dev
)
632 vid
= pci_get_vendor(dev
);
633 did
= pci_get_device(dev
);
635 for (t
= vr_devs
; t
->vr_name
!= NULL
; ++t
) {
636 if (vid
== t
->vr_vid
&& did
== t
->vr_did
) {
637 device_set_desc(dev
, t
->vr_name
);
646 * Attach the interface. Allocate softc structures, do ifmedia
647 * setup and ethernet/BPF attach.
650 vr_attach(device_t dev
)
653 uint8_t eaddr
[ETHER_ADDR_LEN
];
658 sc
= device_get_softc(dev
);
659 callout_init(&sc
->vr_stat_timer
);
662 * Handle power management nonsense.
664 if (pci_get_powerstate(dev
) != PCI_POWERSTATE_D0
) {
665 uint32_t iobase
, membase
, irq
;
667 /* Save important PCI config data. */
668 iobase
= pci_read_config(dev
, VR_PCI_LOIO
, 4);
669 membase
= pci_read_config(dev
, VR_PCI_LOMEM
, 4);
670 irq
= pci_read_config(dev
, VR_PCI_INTLINE
, 4);
672 /* Reset the power state. */
673 device_printf(dev
, "chip is in D%d power mode "
674 "-- setting to D0\n", pci_get_powerstate(dev
));
675 pci_set_powerstate(dev
, PCI_POWERSTATE_D0
);
677 /* Restore PCI config data. */
678 pci_write_config(dev
, VR_PCI_LOIO
, iobase
, 4);
679 pci_write_config(dev
, VR_PCI_LOMEM
, membase
, 4);
680 pci_write_config(dev
, VR_PCI_INTLINE
, irq
, 4);
683 pci_enable_busmaster(dev
);
685 sc
->vr_revid
= pci_get_revid(dev
);
688 sc
->vr_res
= bus_alloc_resource_any(dev
, VR_RES
, &rid
, RF_ACTIVE
);
690 if (sc
->vr_res
== NULL
) {
691 device_printf(dev
, "couldn't map ports/memory\n");
695 sc
->vr_btag
= rman_get_bustag(sc
->vr_res
);
696 sc
->vr_bhandle
= rman_get_bushandle(sc
->vr_res
);
698 /* Allocate interrupt */
700 sc
->vr_irq
= bus_alloc_resource_any(dev
, SYS_RES_IRQ
, &rid
,
701 RF_SHAREABLE
| RF_ACTIVE
);
703 if (sc
->vr_irq
== NULL
) {
704 device_printf(dev
, "couldn't map interrupt\n");
710 * Windows may put the chip in suspend mode when it
711 * shuts down. Be sure to kick it in the head to wake it
714 VR_CLRBIT(sc
, VR_STICKHW
, (VR_STICKHW_DS0
|VR_STICKHW_DS1
));
716 ifp
= &sc
->arpcom
.ac_if
;
717 if_initname(ifp
, device_get_name(dev
), device_get_unit(dev
));
719 /* Reset the adapter. */
723 * Turn on bit2 (MIION) in PCI configuration register 0x53 during
724 * initialization and disable AUTOPOLL.
726 pci_write_config(dev
, VR_PCI_MODE
,
727 pci_read_config(dev
, VR_PCI_MODE
, 4) | (VR_MODE3_MIION
<< 24), 4);
728 VR_CLRBIT(sc
, VR_MIICMD
, VR_MIICMD_AUTOPOLL
);
731 * Get station address. The way the Rhine chips work,
732 * you're not allowed to directly access the EEPROM once
733 * they've been programmed a special way. Consequently,
734 * we need to read the node address from the PAR0 and PAR1
737 VR_SETBIT(sc
, VR_EECSR
, VR_EECSR_LOAD
);
739 for (i
= 0; i
< ETHER_ADDR_LEN
; i
++)
740 eaddr
[i
] = CSR_READ_1(sc
, VR_PAR0
+ i
);
742 sc
->vr_ldata
= contigmalloc(sizeof(struct vr_list_data
), M_DEVBUF
,
743 M_WAITOK
| M_ZERO
, 0, 0xffffffff, PAGE_SIZE
, 0);
745 if (sc
->vr_ldata
== NULL
) {
746 device_printf(dev
, "no memory for list buffers!\n");
751 /* Initialize TX buffer */
752 sc
->vr_cdata
.vr_tx_buf
= contigmalloc(VR_TX_BUF_SIZE
, M_DEVBUF
,
753 M_WAITOK
, 0, 0xffffffff, PAGE_SIZE
, 0);
754 if (sc
->vr_cdata
.vr_tx_buf
== NULL
) {
755 device_printf(dev
, "can't allocate tx buffer!\n");
760 /* Set various TX indexes to invalid value */
761 sc
->vr_cdata
.vr_tx_free_idx
= -1;
762 sc
->vr_cdata
.vr_tx_tail_idx
= -1;
763 sc
->vr_cdata
.vr_tx_head_idx
= -1;
767 ifp
->if_mtu
= ETHERMTU
;
768 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
769 ifp
->if_ioctl
= vr_ioctl
;
770 ifp
->if_start
= vr_start
;
772 ifp
->if_npoll
= vr_npoll
;
774 ifp
->if_watchdog
= vr_watchdog
;
775 ifp
->if_init
= vr_init
;
776 ifp
->if_baudrate
= 10000000;
777 ifq_set_maxlen(&ifp
->if_snd
, VR_TX_LIST_CNT
- 1);
778 ifq_set_ready(&ifp
->if_snd
);
783 if (mii_phy_probe(dev
, &sc
->vr_miibus
,
784 vr_ifmedia_upd
, vr_ifmedia_sts
)) {
785 if_printf(ifp
, "MII without any phy!\n");
790 /* Call MI attach routine. */
791 ether_ifattach(ifp
, eaddr
, NULL
);
793 ifq_set_cpuid(&ifp
->if_snd
, rman_get_cpuid(sc
->vr_irq
));
796 ifpoll_compat_setup(&sc
->vr_npoll
, NULL
, NULL
, device_get_unit(dev
),
800 error
= bus_setup_intr(dev
, sc
->vr_irq
, INTR_MPSAFE
,
801 vr_intr
, sc
, &sc
->vr_intrhand
,
804 device_printf(dev
, "couldn't set up irq\n");
817 vr_detach(device_t dev
)
819 struct vr_softc
*sc
= device_get_softc(dev
);
820 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
822 if (device_is_attached(dev
)) {
823 lwkt_serialize_enter(ifp
->if_serializer
);
825 bus_teardown_intr(dev
, sc
->vr_irq
, sc
->vr_intrhand
);
826 lwkt_serialize_exit(ifp
->if_serializer
);
830 if (sc
->vr_miibus
!= NULL
)
831 device_delete_child(dev
, sc
->vr_miibus
);
832 bus_generic_detach(dev
);
834 if (sc
->vr_irq
!= NULL
)
835 bus_release_resource(dev
, SYS_RES_IRQ
, 0, sc
->vr_irq
);
836 if (sc
->vr_res
!= NULL
)
837 bus_release_resource(dev
, VR_RES
, VR_RID
, sc
->vr_res
);
838 if (sc
->vr_ldata
!= NULL
)
839 contigfree(sc
->vr_ldata
, sizeof(struct vr_list_data
), M_DEVBUF
);
840 if (sc
->vr_cdata
.vr_tx_buf
!= NULL
)
841 contigfree(sc
->vr_cdata
.vr_tx_buf
, VR_TX_BUF_SIZE
, M_DEVBUF
);
847 * Initialize the transmit descriptors.
850 vr_list_tx_init(struct vr_softc
*sc
)
852 struct vr_chain_data
*cd
;
853 struct vr_list_data
*ld
;
854 struct vr_chain
*tx_chain
;
859 tx_chain
= cd
->vr_tx_chain
;
861 for (i
= 0; i
< VR_TX_LIST_CNT
; i
++) {
862 tx_chain
[i
].vr_ptr
= &ld
->vr_tx_list
[i
];
863 if (i
== (VR_TX_LIST_CNT
- 1))
864 tx_chain
[i
].vr_next_idx
= 0;
866 tx_chain
[i
].vr_next_idx
= i
+ 1;
869 for (i
= 0; i
< VR_TX_LIST_CNT
; ++i
) {
873 tx_buf
= VR_TX_BUF(sc
, i
);
874 next_idx
= tx_chain
[i
].vr_next_idx
;
876 tx_chain
[i
].vr_next_desc_paddr
=
877 vtophys(tx_chain
[next_idx
].vr_ptr
);
878 tx_chain
[i
].vr_buf_paddr
= vtophys(tx_buf
);
881 cd
->vr_tx_free_idx
= 0;
882 cd
->vr_tx_tail_idx
= cd
->vr_tx_head_idx
= -1;
889 * Initialize the RX descriptors and allocate mbufs for them. Note that
890 * we arrange the descriptors in a closed ring, so that the last descriptor
891 * points back to the first.
894 vr_list_rx_init(struct vr_softc
*sc
)
896 struct vr_chain_data
*cd
;
897 struct vr_list_data
*ld
;
903 for (i
= 0; i
< VR_RX_LIST_CNT
; i
++) {
904 cd
->vr_rx_chain
[i
].vr_ptr
= (struct vr_desc
*)&ld
->vr_rx_list
[i
];
905 if (vr_newbuf(sc
, &cd
->vr_rx_chain
[i
], NULL
) == ENOBUFS
)
907 if (i
== (VR_RX_LIST_CNT
- 1))
911 cd
->vr_rx_chain
[i
].vr_nextdesc
= &cd
->vr_rx_chain
[nexti
];
912 ld
->vr_rx_list
[i
].vr_next
= vtophys(&ld
->vr_rx_list
[nexti
]);
915 cd
->vr_rx_head
= &cd
->vr_rx_chain
[0];
921 * Initialize an RX descriptor and attach an MBUF cluster.
922 * Note: the length fields are only 11 bits wide, which means the
923 * largest size we can specify is 2047. This is important because
924 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
925 * overflow the field and make a mess.
928 vr_newbuf(struct vr_softc
*sc
, struct vr_chain_onefrag
*c
, struct mbuf
*m
)
930 struct mbuf
*m_new
= NULL
;
933 m_new
= m_getcl(M_NOWAIT
, MT_DATA
, M_PKTHDR
);
936 m_new
->m_len
= m_new
->m_pkthdr
.len
= MCLBYTES
;
939 m_new
->m_len
= m_new
->m_pkthdr
.len
= MCLBYTES
;
940 m_new
->m_data
= m_new
->m_ext
.ext_buf
;
943 m_adj(m_new
, sizeof(uint64_t));
946 c
->vr_ptr
->vr_status
= VR_RXSTAT
;
947 c
->vr_ptr
->vr_data
= vtophys(mtod(m_new
, caddr_t
));
948 c
->vr_ptr
->vr_ctl
= VR_RXCTL
| VR_RXLEN
;
954 * A frame has been uploaded: pass the resulting mbuf chain up to
955 * the higher level protocols.
958 vr_rxeof(struct vr_softc
*sc
)
962 struct vr_chain_onefrag
*cur_rx
;
966 ifp
= &sc
->arpcom
.ac_if
;
968 while(!((rxstat
= sc
->vr_cdata
.vr_rx_head
->vr_ptr
->vr_status
) &
970 struct mbuf
*m0
= NULL
;
972 cur_rx
= sc
->vr_cdata
.vr_rx_head
;
973 sc
->vr_cdata
.vr_rx_head
= cur_rx
->vr_nextdesc
;
977 * If an error occurs, update stats, clear the
978 * status word and leave the mbuf cluster in place:
979 * it should simply get re-used next time this descriptor
980 * comes up in the ring.
982 if (rxstat
& VR_RXSTAT_RXERR
) {
983 IFNET_STAT_INC(ifp
, ierrors
, 1);
984 if_printf(ifp
, "rx error (%02x):", rxstat
& 0x000000ff);
985 if (rxstat
& VR_RXSTAT_CRCERR
)
986 kprintf(" crc error");
987 if (rxstat
& VR_RXSTAT_FRAMEALIGNERR
)
988 kprintf(" frame alignment error\n");
989 if (rxstat
& VR_RXSTAT_FIFOOFLOW
)
990 kprintf(" FIFO overflow");
991 if (rxstat
& VR_RXSTAT_GIANT
)
992 kprintf(" received giant packet");
993 if (rxstat
& VR_RXSTAT_RUNT
)
994 kprintf(" received runt packet");
995 if (rxstat
& VR_RXSTAT_BUSERR
)
996 kprintf(" system bus error");
997 if (rxstat
& VR_RXSTAT_BUFFERR
)
998 kprintf("rx buffer error");
1000 vr_newbuf(sc
, cur_rx
, m
);
1004 /* No errors; receive the packet. */
1005 total_len
= VR_RXBYTES(cur_rx
->vr_ptr
->vr_status
);
1008 * XXX The VIA Rhine chip includes the CRC with every
1009 * received frame, and there's no way to turn this
1010 * behavior off (at least, I can't find anything in
1011 * the manual that explains how to do it) so we have
1012 * to trim off the CRC manually.
1014 total_len
-= ETHER_CRC_LEN
;
1016 m0
= m_devget(mtod(m
, char *) - ETHER_ALIGN
,
1017 total_len
+ ETHER_ALIGN
, 0, ifp
, NULL
);
1018 vr_newbuf(sc
, cur_rx
, m
);
1020 IFNET_STAT_INC(ifp
, ierrors
, 1);
1023 m_adj(m0
, ETHER_ALIGN
);
1026 IFNET_STAT_INC(ifp
, ipackets
, 1);
1027 ifp
->if_input(ifp
, m
, NULL
, -1);
1032 vr_rxeoc(struct vr_softc
*sc
)
1037 ifp
= &sc
->arpcom
.ac_if
;
1039 IFNET_STAT_INC(ifp
, ierrors
, 1);
1041 VR_CLRBIT16(sc
, VR_COMMAND
, VR_CMD_RX_ON
);
1044 /* Wait for receiver to stop */
1046 i
&& (CSR_READ_2(sc
, VR_COMMAND
) & VR_CMD_RX_ON
);
1048 ; /* Wait for receiver to stop */
1051 if_printf(ifp
, "rx shutdown error!\n");
1052 sc
->vr_flags
|= VR_F_RESTART
;
1058 CSR_WRITE_4(sc
, VR_RXADDR
, vtophys(sc
->vr_cdata
.vr_rx_head
->vr_ptr
));
1059 VR_SETBIT16(sc
, VR_COMMAND
, VR_CMD_RX_ON
);
1060 VR_SETBIT16(sc
, VR_COMMAND
, VR_CMD_RX_GO
);
1064 * A frame was downloaded to the chip. It's safe for us to clean up
1068 vr_txeof(struct vr_softc
*sc
)
1070 struct vr_chain_data
*cd
;
1071 struct vr_chain
*tx_chain
;
1074 ifp
= &sc
->arpcom
.ac_if
;
1077 /* Reset the timeout timer; if_txeoc will clear it. */
1081 if (cd
->vr_tx_head_idx
== -1)
1084 tx_chain
= cd
->vr_tx_chain
;
1087 * Go through our tx list and free mbufs for those
1088 * frames that have been transmitted.
1090 while(tx_chain
[cd
->vr_tx_head_idx
].vr_buf
!= NULL
) {
1091 struct vr_chain
*cur_tx
;
1095 cur_tx
= &tx_chain
[cd
->vr_tx_head_idx
];
1096 txstat
= cur_tx
->vr_ptr
->vr_status
;
1098 if ((txstat
& VR_TXSTAT_ABRT
) ||
1099 (txstat
& VR_TXSTAT_UDF
)) {
1101 i
&& (CSR_READ_2(sc
, VR_COMMAND
) & VR_CMD_TX_ON
);
1103 ; /* Wait for chip to shutdown */
1105 if_printf(ifp
, "tx shutdown timeout\n");
1106 sc
->vr_flags
|= VR_F_RESTART
;
1109 VR_TXOWN(cur_tx
) = VR_TXSTAT_OWN
;
1110 CSR_WRITE_4(sc
, VR_TXADDR
, vtophys(cur_tx
->vr_ptr
));
1114 if (txstat
& VR_TXSTAT_OWN
)
1117 if (txstat
& VR_TXSTAT_ERRSUM
) {
1118 IFNET_STAT_INC(ifp
, oerrors
, 1);
1119 if (txstat
& VR_TXSTAT_DEFER
)
1120 IFNET_STAT_INC(ifp
, collisions
, 1);
1121 if (txstat
& VR_TXSTAT_LATECOLL
)
1122 IFNET_STAT_INC(ifp
, collisions
, 1);
1125 IFNET_STAT_INC(ifp
, collisions
,
1126 (txstat
& VR_TXSTAT_COLLCNT
) >> 3);
1128 IFNET_STAT_INC(ifp
, opackets
, 1);
1129 cur_tx
->vr_buf
= NULL
;
1131 if (cd
->vr_tx_head_idx
== cd
->vr_tx_tail_idx
) {
1132 cd
->vr_tx_head_idx
= -1;
1133 cd
->vr_tx_tail_idx
= -1;
1137 cd
->vr_tx_head_idx
= cur_tx
->vr_next_idx
;
1142 * TX 'end of channel' interrupt handler.
1145 vr_txeoc(struct vr_softc
*sc
)
1149 ifp
= &sc
->arpcom
.ac_if
;
1151 if (sc
->vr_cdata
.vr_tx_head_idx
== -1) {
1152 ifq_clr_oactive(&ifp
->if_snd
);
1153 sc
->vr_cdata
.vr_tx_tail_idx
= -1;
1161 struct vr_softc
*sc
= xsc
;
1162 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1163 struct mii_data
*mii
;
1165 lwkt_serialize_enter(ifp
->if_serializer
);
1167 if (sc
->vr_flags
& VR_F_RESTART
) {
1168 if_printf(&sc
->arpcom
.ac_if
, "restarting\n");
1172 sc
->vr_flags
&= ~VR_F_RESTART
;
1175 mii
= device_get_softc(sc
->vr_miibus
);
1178 callout_reset(&sc
->vr_stat_timer
, hz
, vr_tick
, sc
);
1180 lwkt_serialize_exit(ifp
->if_serializer
);
1186 struct vr_softc
*sc
;
1191 ifp
= &sc
->arpcom
.ac_if
;
1193 /* Suppress unwanted interrupts. */
1194 if (!(ifp
->if_flags
& IFF_UP
)) {
1199 /* Disable interrupts. */
1200 if ((ifp
->if_flags
& IFF_NPOLLING
) == 0)
1201 CSR_WRITE_2(sc
, VR_IMR
, 0x0000);
1204 status
= CSR_READ_2(sc
, VR_ISR
);
1206 CSR_WRITE_2(sc
, VR_ISR
, status
);
1208 if ((status
& VR_INTRS
) == 0)
1211 if (status
& VR_ISR_RX_OK
)
1214 if (status
& VR_ISR_RX_DROPPED
) {
1215 if_printf(ifp
, "rx packet lost\n");
1216 IFNET_STAT_INC(ifp
, ierrors
, 1);
1219 if ((status
& VR_ISR_RX_ERR
) || (status
& VR_ISR_RX_NOBUF
) ||
1220 (status
& VR_ISR_RX_OFLOW
)) {
1221 if_printf(ifp
, "receive error (%04x)", status
);
1222 if (status
& VR_ISR_RX_NOBUF
)
1223 kprintf(" no buffers");
1224 if (status
& VR_ISR_RX_OFLOW
)
1225 kprintf(" overflow");
1226 if (status
& VR_ISR_RX_DROPPED
)
1227 kprintf(" packet lost");
1232 if ((status
& VR_ISR_BUSERR
) || (status
& VR_ISR_TX_UNDERRUN
)) {
1238 if ((status
& VR_ISR_TX_OK
) || (status
& VR_ISR_TX_ABRT
) ||
1239 (status
& VR_ISR_TX_ABRT2
) || (status
& VR_ISR_UDFI
)) {
1241 if ((status
& VR_ISR_UDFI
) ||
1242 (status
& VR_ISR_TX_ABRT2
) ||
1243 (status
& VR_ISR_TX_ABRT
)) {
1244 IFNET_STAT_INC(ifp
, oerrors
, 1);
1245 if (sc
->vr_cdata
.vr_tx_head_idx
!= -1) {
1246 VR_SETBIT16(sc
, VR_COMMAND
,
1248 VR_SETBIT16(sc
, VR_COMMAND
,
1258 /* Re-enable interrupts. */
1259 if ((ifp
->if_flags
& IFF_NPOLLING
) == 0)
1260 CSR_WRITE_2(sc
, VR_IMR
, VR_INTRS
);
1262 if (!ifq_is_empty(&ifp
->if_snd
))
1267 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1268 * pointers to the fragment pointers.
1271 vr_encap(struct vr_softc
*sc
, int chain_idx
, struct mbuf
*m_head
)
1278 KASSERT(chain_idx
>= 0 && chain_idx
< VR_TX_LIST_CNT
,
1279 ("%s: chain idx(%d) out of range 0-%d",
1280 sc
->arpcom
.ac_if
.if_xname
, chain_idx
, VR_TX_LIST_CNT
));
1283 * The VIA Rhine wants packet buffers to be longword
1284 * aligned, but very often our mbufs aren't. Rather than
1285 * waste time trying to decide when to copy and when not
1286 * to copy, just do it all the time.
1288 tx_buf
= VR_TX_BUF(sc
, chain_idx
);
1289 m_copydata(m_head
, 0, m_head
->m_pkthdr
.len
, tx_buf
);
1290 len
= m_head
->m_pkthdr
.len
;
1293 * The Rhine chip doesn't auto-pad, so we have to make
1294 * sure to pad short frames out to the minimum frame length
1297 if (len
< VR_MIN_FRAMELEN
) {
1298 bzero(tx_buf
+ len
, VR_MIN_FRAMELEN
- len
);
1299 len
= VR_MIN_FRAMELEN
;
1302 c
= &sc
->vr_cdata
.vr_tx_chain
[chain_idx
];
1306 f
->vr_data
= c
->vr_buf_paddr
;
1308 f
->vr_ctl
|= (VR_TXCTL_TLINK
| VR_TXCTL_FIRSTFRAG
);
1309 f
->vr_ctl
|= (VR_TXCTL_LASTFRAG
| VR_TXCTL_FINT
);
1311 f
->vr_next
= c
->vr_next_desc_paddr
;
1317 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1318 * to the mbuf data regions directly in the transmit lists. We also save a
1319 * copy of the pointers since the transmit list fragment pointers are
1320 * physical addresses.
1323 vr_start(struct ifnet
*ifp
, struct ifaltq_subque
*ifsq
)
1325 struct vr_softc
*sc
;
1326 struct vr_chain_data
*cd
;
1327 struct vr_chain
*tx_chain
;
1328 int cur_tx_idx
, start_tx_idx
, prev_tx_idx
;
1330 ASSERT_ALTQ_SQ_DEFAULT(ifp
, ifsq
);
1332 if ((ifp
->if_flags
& IFF_RUNNING
) == 0 || ifq_is_oactive(&ifp
->if_snd
))
1337 tx_chain
= cd
->vr_tx_chain
;
1339 start_tx_idx
= cd
->vr_tx_free_idx
;
1340 cur_tx_idx
= prev_tx_idx
= -1;
1342 /* Check for an available queue slot. If there are none, punt. */
1343 if (tx_chain
[start_tx_idx
].vr_buf
!= NULL
) {
1344 ifq_set_oactive(&ifp
->if_snd
);
1348 while (tx_chain
[cd
->vr_tx_free_idx
].vr_buf
== NULL
) {
1349 struct mbuf
*m_head
;
1350 struct vr_chain
*cur_tx
;
1352 m_head
= ifq_dequeue(&ifp
->if_snd
);
1356 /* Pick a descriptor off the free list. */
1357 cur_tx_idx
= cd
->vr_tx_free_idx
;
1358 cur_tx
= &tx_chain
[cur_tx_idx
];
1360 /* Pack the data into the descriptor. */
1361 if (vr_encap(sc
, cur_tx_idx
, m_head
)) {
1362 ifq_set_oactive(&ifp
->if_snd
);
1363 cur_tx_idx
= prev_tx_idx
;
1368 if (cur_tx_idx
!= start_tx_idx
)
1369 VR_TXOWN(cur_tx
) = VR_TXSTAT_OWN
;
1371 BPF_MTAP(ifp
, m_head
);
1374 VR_TXOWN(cur_tx
) = VR_TXSTAT_OWN
;
1375 VR_SETBIT16(sc
, VR_COMMAND
, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO
);
1377 /* Iff everything went OK, we bump up free index. */
1378 prev_tx_idx
= cur_tx_idx
;
1379 cd
->vr_tx_free_idx
= cur_tx
->vr_next_idx
;
1382 /* If there are no frames queued, bail. */
1383 if (cur_tx_idx
== -1)
1386 sc
->vr_cdata
.vr_tx_tail_idx
= cur_tx_idx
;
1388 if (sc
->vr_cdata
.vr_tx_head_idx
== -1)
1389 sc
->vr_cdata
.vr_tx_head_idx
= start_tx_idx
;
1392 * Set a timeout in case the chip goes out to lunch.
1400 struct vr_softc
*sc
= xsc
;
1401 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1402 struct mii_data
*mii
;
1405 mii
= device_get_softc(sc
->vr_miibus
);
1407 /* Cancel pending I/O and free all RX/TX buffers. */
1411 /* Set our station address. */
1412 for (i
= 0; i
< ETHER_ADDR_LEN
; i
++)
1413 CSR_WRITE_1(sc
, VR_PAR0
+ i
, sc
->arpcom
.ac_enaddr
[i
]);
1416 VR_CLRBIT(sc
, VR_BCR0
, VR_BCR0_DMA_LENGTH
);
1417 VR_SETBIT(sc
, VR_BCR0
, VR_BCR0_DMA_STORENFWD
);
1420 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
1421 * so we must set both.
1423 VR_CLRBIT(sc
, VR_BCR0
, VR_BCR0_RX_THRESH
);
1424 VR_SETBIT(sc
, VR_BCR0
, VR_BCR0_RXTHRESH128BYTES
);
1426 VR_CLRBIT(sc
, VR_BCR1
, VR_BCR1_TX_THRESH
);
1427 VR_SETBIT(sc
, VR_BCR1
, VR_BCR1_TXTHRESHSTORENFWD
);
1429 VR_CLRBIT(sc
, VR_RXCFG
, VR_RXCFG_RX_THRESH
);
1430 VR_SETBIT(sc
, VR_RXCFG
, VR_RXTHRESH_128BYTES
);
1432 VR_CLRBIT(sc
, VR_TXCFG
, VR_TXCFG_TX_THRESH
);
1433 VR_SETBIT(sc
, VR_TXCFG
, VR_TXTHRESH_STORENFWD
);
1435 /* Init circular RX list. */
1436 if (vr_list_rx_init(sc
) == ENOBUFS
) {
1438 if_printf(ifp
, "initialization failed: no memory for rx buffers\n");
1442 /* Init tx descriptors. */
1443 vr_list_tx_init(sc
);
1445 /* If we want promiscuous mode, set the allframes bit. */
1446 if (ifp
->if_flags
& IFF_PROMISC
)
1447 VR_SETBIT(sc
, VR_RXCFG
, VR_RXCFG_RX_PROMISC
);
1449 VR_CLRBIT(sc
, VR_RXCFG
, VR_RXCFG_RX_PROMISC
);
1451 /* Set capture broadcast bit to capture broadcast frames. */
1452 if (ifp
->if_flags
& IFF_BROADCAST
)
1453 VR_SETBIT(sc
, VR_RXCFG
, VR_RXCFG_RX_BROAD
);
1455 VR_CLRBIT(sc
, VR_RXCFG
, VR_RXCFG_RX_BROAD
);
1458 * Program the multicast filter, if necessary.
1463 * Load the address of the RX list.
1465 CSR_WRITE_4(sc
, VR_RXADDR
, vtophys(sc
->vr_cdata
.vr_rx_head
->vr_ptr
));
1467 /* Enable receiver and transmitter. */
1468 CSR_WRITE_2(sc
, VR_COMMAND
, VR_CMD_TX_NOPOLL
|VR_CMD_START
|
1469 VR_CMD_TX_ON
|VR_CMD_RX_ON
|
1472 CSR_WRITE_4(sc
, VR_TXADDR
, vtophys(&sc
->vr_ldata
->vr_tx_list
[0]));
1475 * Enable interrupts, unless we are polling.
1477 CSR_WRITE_2(sc
, VR_ISR
, 0xFFFF);
1478 #ifdef IFPOLL_ENABLE
1479 if ((ifp
->if_flags
& IFF_NPOLLING
) == 0)
1481 CSR_WRITE_2(sc
, VR_IMR
, VR_INTRS
);
1485 ifp
->if_flags
|= IFF_RUNNING
;
1486 ifq_clr_oactive(&ifp
->if_snd
);
1488 callout_reset(&sc
->vr_stat_timer
, hz
, vr_tick
, sc
);
/*
 * Set media options.
 */
vr_ifmedia_upd(struct ifnet *ifp)
/* NOTE(review): the return type (presumably "static int"), opening
 * brace, and most of the body were elided by the extraction. */
	struct vr_softc *sc;

	/* Only touch the hardware if the interface is administratively up. */
	if (ifp->if_flags & IFF_UP)
	/* NOTE(review): the consequent (presumably a re-init / media change
	 * call) and the return statement were elided. */
/*
 * Report current media status.
 */
vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
/* NOTE(review): the return type (presumably "static void") and braces
 * were elided by the extraction. */
	struct vr_softc *sc;
	struct mii_data *mii;

	/* NOTE(review): the "sc = ifp->if_softc;" assignment that must
	 * precede this use of sc was elided, as was the PHY poll call. */
	mii = device_get_softc(sc->vr_miibus);

	/* Copy the PHY's current media state into the caller's request. */
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
/* NOTE(review): the return type (presumably "static int"), braces, the
 * declaration of "error", and the "switch (command)" skeleton were all
 * elided; only fragments of the case bodies survive below. */
	struct vr_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;	/* request args, reused for media ioctls */
	struct mii_data *mii;

	/* Fragment of the interface-flags handling: bring the chip up/down
	 * based on IFF_UP vs. IFF_RUNNING.  The interleaved statements and
	 * closing braces were elided. */
	if (ifp->if_flags & IFF_UP) {
	if (ifp->if_flags & IFF_RUNNING)

	/* Media ioctls are delegated to the MII layer. */
	mii = device_get_softc(sc->vr_miibus);
	error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);

	/* Everything unrecognized goes to the generic Ethernet handler. */
	error = ether_ioctl(ifp, command, data);
1558 #ifdef IFPOLL_ENABLE
vr_npoll_compat(struct ifnet *ifp, void *arg __unused, int count __unused)
/* NOTE(review): the return type (presumably "static void"), braces, and
 * the actual polling work (presumably a call into the interrupt service
 * path) were elided by the extraction. */
	struct vr_softc *sc = ifp->if_softc;

	/* Must run with the per-ifnet serializer held. */
	ASSERT_SERIALIZED(ifp->if_serializer);
vr_npoll(struct ifnet *ifp, struct ifpoll_info *info)
/* NOTE(review): the return type (presumably "static void"), braces, and
 * the "info != NULL" / "else" branch structure were elided; the register
 * and deregister paths therefore appear back-to-back below. */
	struct vr_softc *sc = ifp->if_softc;

	/* Must run with the per-ifnet serializer held. */
	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Register path: hook our poll handler on the configured CPU. */
	int cpuid = sc->vr_npoll.ifpc_cpuid;

	info->ifpi_rx[cpuid].poll_func = vr_npoll_compat;
	info->ifpi_rx[cpuid].arg = NULL;
	info->ifpi_rx[cpuid].serializer = ifp->if_serializer;

	if (ifp->if_flags & IFF_RUNNING) {
		/* disable interrupts */
		CSR_WRITE_2(sc, VR_IMR, 0x0000);
	/* NOTE(review): closing brace elided. */

	/* Steer the send queue to the polling CPU. */
	ifq_set_cpuid(&ifp->if_snd, cpuid);

	/* Deregister path: restore hardware interrupt delivery. */
	if (ifp->if_flags & IFF_RUNNING) {
		/* enable interrupts */
		CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
	/* NOTE(review): closing brace elided. */

	/* Send queue goes back to the CPU handling the IRQ. */
	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->vr_irq));
1597 #endif /* IFPOLL_ENABLE */
vr_watchdog(struct ifnet *ifp)
/* NOTE(review): the return type (presumably "static void"), braces, the
 * "sc = ifp->if_softc;" assignment, and the recovery sequence
 * (stop/reset/re-init) were elided by the extraction. */
	struct vr_softc *sc;

	/* Count the lost transmission and log the event. */
	IFNET_STAT_INC(ifp, oerrors, 1);
	if_printf(ifp, "watchdog timeout\n");

	/* If frames remain queued after recovery ... (consequent elided --
	 * presumably the transmit path is kicked again). */
	if (!ifq_is_empty(&ifp->if_snd))
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX/TX lists.  (NOTE(review): tail of this comment elided.)
 */
vr_stop(struct vr_softc *sc)
/* NOTE(review): the return type (presumably "static void"), braces, and
 * the declarations of "ifp" and "i" were elided by the extraction. */
	ifp = &sc->arpcom.ac_if;

	/* Stop the periodic housekeeping callout. */
	callout_stop(&sc->vr_stat_timer);

	/* Quiesce the chip: issue STOP, drop the RX/TX enables, mask all
	 * interrupt sources, and clear the descriptor base addresses so no
	 * further DMA can occur. */
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
			/* Clear the pointer so it cannot be double-freed. */
			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
	/* NOTE(review): closing braces of the if/for elided. */

	bzero(&sc->vr_ldata->vr_rx_list, sizeof(sc->vr_ldata->vr_rx_list));

	/*
	 * Reset the TX list buffer pointers.
	 */
	for (i = 0; i < VR_TX_LIST_CNT; i++)
		sc->vr_cdata.vr_tx_chain[i].vr_buf = NULL;

	bzero(&sc->vr_ldata->vr_tx_list, sizeof(sc->vr_ldata->vr_tx_list));

	/* Mark the interface down and clear the transmit-busy flag. */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
1662 * Stop all chip I/O so that the kernel's probe routines don't
1663 * get confused by errant DMAs when rebooting.
1666 vr_shutdown(device_t dev
)
1668 struct vr_softc
*sc
;
1670 sc
= device_get_softc(dev
);