1 /* $OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $ */
2 /* $DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.46 2008/10/28 07:30:49 sephe Exp $ */
5 * Copyright (c) 2006 The DragonFly Project. All rights reserved.
7 * This code is derived from software contributed to The DragonFly Project
8 * by Sepherosa Ziehau <sepherosa@gmail.com> and
9 * Matthew Dillon <dillon@apollo.backplane.com>
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
21 * 3. Neither the name of The DragonFly Project nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific, prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
28 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
29 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
30 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
31 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
32 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
33 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
34 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
35 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
41 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
43 * Permission to use, copy, modify, and distribute this software for any
44 * purpose with or without fee is hereby granted, provided that the above
45 * copyright notice and this permission notice appear in all copies.
47 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
48 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
49 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
50 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
51 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
52 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
53 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
56 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
58 #include "opt_polling.h"
60 #include <sys/param.h>
61 #include <sys/endian.h>
62 #include <sys/kernel.h>
64 #include <sys/interrupt.h>
67 #include <sys/serialize.h>
68 #include <sys/socket.h>
69 #include <sys/sockio.h>
70 #include <sys/sysctl.h>
72 #include <net/ethernet.h>
75 #include <net/if_arp.h>
76 #include <net/if_dl.h>
77 #include <net/if_media.h>
78 #include <net/ifq_var.h>
79 #include <net/if_types.h>
80 #include <net/if_var.h>
81 #include <net/vlan/if_vlan_var.h>
82 #include <net/vlan/if_vlan_ether.h>
84 #include <bus/pci/pcireg.h>
85 #include <bus/pci/pcivar.h>
86 #include <bus/pci/pcidevs.h>
88 #include <dev/netif/mii_layer/mii.h>
89 #include <dev/netif/mii_layer/miivar.h>
91 #include "miibus_if.h"
93 #include <dev/netif/nfe/if_nfereg.h>
94 #include <dev/netif/nfe/if_nfevar.h>
97 #define NFE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
99 static int nfe_probe(device_t
);
100 static int nfe_attach(device_t
);
101 static int nfe_detach(device_t
);
102 static void nfe_shutdown(device_t
);
103 static int nfe_resume(device_t
);
104 static int nfe_suspend(device_t
);
106 static int nfe_miibus_readreg(device_t
, int, int);
107 static void nfe_miibus_writereg(device_t
, int, int, int);
108 static void nfe_miibus_statchg(device_t
);
110 #ifdef DEVICE_POLLING
111 static void nfe_poll(struct ifnet
*, enum poll_cmd
, int);
113 static void nfe_intr(void *);
114 static int nfe_ioctl(struct ifnet
*, u_long
, caddr_t
, struct ucred
*);
115 static int nfe_rxeof(struct nfe_softc
*);
116 static int nfe_txeof(struct nfe_softc
*, int);
117 static int nfe_encap(struct nfe_softc
*, struct nfe_tx_ring
*,
119 static void nfe_start(struct ifnet
*);
120 static void nfe_watchdog(struct ifnet
*);
121 static void nfe_init(void *);
122 static void nfe_stop(struct nfe_softc
*);
123 static struct nfe_jbuf
*nfe_jalloc(struct nfe_softc
*);
124 static void nfe_jfree(void *);
125 static void nfe_jref(void *);
126 static int nfe_jpool_alloc(struct nfe_softc
*, struct nfe_rx_ring
*);
127 static void nfe_jpool_free(struct nfe_softc
*, struct nfe_rx_ring
*);
128 static int nfe_alloc_rx_ring(struct nfe_softc
*, struct nfe_rx_ring
*);
129 static void nfe_reset_rx_ring(struct nfe_softc
*, struct nfe_rx_ring
*);
130 static int nfe_init_rx_ring(struct nfe_softc
*, struct nfe_rx_ring
*);
131 static void nfe_free_rx_ring(struct nfe_softc
*, struct nfe_rx_ring
*);
132 static int nfe_alloc_tx_ring(struct nfe_softc
*, struct nfe_tx_ring
*);
133 static void nfe_reset_tx_ring(struct nfe_softc
*, struct nfe_tx_ring
*);
134 static int nfe_init_tx_ring(struct nfe_softc
*, struct nfe_tx_ring
*);
135 static void nfe_free_tx_ring(struct nfe_softc
*, struct nfe_tx_ring
*);
136 static int nfe_ifmedia_upd(struct ifnet
*);
137 static void nfe_ifmedia_sts(struct ifnet
*, struct ifmediareq
*);
138 static void nfe_setmulti(struct nfe_softc
*);
139 static void nfe_get_macaddr(struct nfe_softc
*, uint8_t *);
140 static void nfe_set_macaddr(struct nfe_softc
*, const uint8_t *);
141 static void nfe_powerup(device_t
);
142 static void nfe_mac_reset(struct nfe_softc
*);
143 static void nfe_tick(void *);
144 static void nfe_set_paddr_rxdesc(struct nfe_softc
*, struct nfe_rx_ring
*,
146 static void nfe_set_ready_rxdesc(struct nfe_softc
*, struct nfe_rx_ring
*,
148 static int nfe_newbuf_std(struct nfe_softc
*, struct nfe_rx_ring
*, int,
150 static int nfe_newbuf_jumbo(struct nfe_softc
*, struct nfe_rx_ring
*, int,
152 static void nfe_enable_intrs(struct nfe_softc
*);
153 static void nfe_disable_intrs(struct nfe_softc
*);
155 static int nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS
);
160 static int nfe_debug
= 0;
161 static int nfe_rx_ring_count
= NFE_RX_RING_DEF_COUNT
;
162 static int nfe_tx_ring_count
= NFE_TX_RING_DEF_COUNT
;
164 * hw timer simulated interrupt moderation @4000Hz. Negative values
165 * disable the timer when the discrete interrupt rate falls below
166 * the moderation rate.
168 * XXX 8000Hz might be better but if the interrupt is shared it can
171 static int nfe_imtime
= -250; /* uS */
173 TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count
);
174 TUNABLE_INT("hw.nfe.tx_ring_count", &nfe_tx_ring_count
);
175 TUNABLE_INT("hw.nfe.imtimer", &nfe_imtime
);
176 TUNABLE_INT("hw.nfe.debug", &nfe_debug
);
/*
 * Debug printf helpers, gated on the per-device sc_debug level.
 * NOTE(review): the macro continuation lines and the opening
 * #ifdef NFE_DEBUG were lost in extraction (the matching #else/#endif
 * are original); the expansion below was reconstructed from the visible
 * if_printf(&(sc)->arpcom.ac_if, ...) fragments -- confirm upstream.
 */
#ifdef NFE_DEBUG

#define DPRINTF(sc, fmt, ...) do {			\
	if ((sc)->sc_debug) {				\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#define DPRINTFN(sc, lv, fmt, ...) do {			\
	if ((sc)->sc_debug >= (lv)) {			\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#else	/* !NFE_DEBUG */

#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, lv, fmt, ...)

#endif	/* NFE_DEBUG */
199 static const struct nfe_dev
{
204 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_NFORCE_LAN
,
205 "NVIDIA nForce Fast Ethernet" },
207 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_NFORCE2_LAN
,
208 "NVIDIA nForce2 Fast Ethernet" },
210 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1
,
211 "NVIDIA nForce3 Gigabit Ethernet" },
213 /* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
214 chipset, and possibly also the 400R; it might be both nForce2- and
215 nForce3-based boards can use the same MCPs (= southbridges) */
216 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2
,
217 "NVIDIA nForce3 Gigabit Ethernet" },
219 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3
,
220 "NVIDIA nForce3 Gigabit Ethernet" },
222 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4
,
223 "NVIDIA nForce3 Gigabit Ethernet" },
225 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5
,
226 "NVIDIA nForce3 Gigabit Ethernet" },
228 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_CK804_LAN1
,
229 "NVIDIA CK804 Gigabit Ethernet" },
231 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_CK804_LAN2
,
232 "NVIDIA CK804 Gigabit Ethernet" },
234 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP04_LAN1
,
235 "NVIDIA MCP04 Gigabit Ethernet" },
237 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP04_LAN2
,
238 "NVIDIA MCP04 Gigabit Ethernet" },
240 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP51_LAN1
,
241 "NVIDIA MCP51 Gigabit Ethernet" },
243 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP51_LAN2
,
244 "NVIDIA MCP51 Gigabit Ethernet" },
246 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP55_LAN1
,
247 "NVIDIA MCP55 Gigabit Ethernet" },
249 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP55_LAN2
,
250 "NVIDIA MCP55 Gigabit Ethernet" },
252 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP61_LAN1
,
253 "NVIDIA MCP61 Gigabit Ethernet" },
255 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP61_LAN2
,
256 "NVIDIA MCP61 Gigabit Ethernet" },
258 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP61_LAN3
,
259 "NVIDIA MCP61 Gigabit Ethernet" },
261 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP61_LAN4
,
262 "NVIDIA MCP61 Gigabit Ethernet" },
264 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP65_LAN1
,
265 "NVIDIA MCP65 Gigabit Ethernet" },
267 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP65_LAN2
,
268 "NVIDIA MCP65 Gigabit Ethernet" },
270 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP65_LAN3
,
271 "NVIDIA MCP65 Gigabit Ethernet" },
273 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP65_LAN4
,
274 "NVIDIA MCP65 Gigabit Ethernet" },
276 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP67_LAN1
,
277 "NVIDIA MCP67 Gigabit Ethernet" },
279 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP67_LAN2
,
280 "NVIDIA MCP67 Gigabit Ethernet" },
282 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP67_LAN3
,
283 "NVIDIA MCP67 Gigabit Ethernet" },
285 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP67_LAN4
,
286 "NVIDIA MCP67 Gigabit Ethernet" },
288 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP73_LAN1
,
289 "NVIDIA MCP73 Gigabit Ethernet" },
291 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP73_LAN2
,
292 "NVIDIA MCP73 Gigabit Ethernet" },
294 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP73_LAN3
,
295 "NVIDIA MCP73 Gigabit Ethernet" },
297 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP73_LAN4
,
298 "NVIDIA MCP73 Gigabit Ethernet" },
300 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP77_LAN1
,
301 "NVIDIA MCP77 Gigabit Ethernet" },
303 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP77_LAN2
,
304 "NVIDIA MCP77 Gigabit Ethernet" },
306 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP77_LAN3
,
307 "NVIDIA MCP77 Gigabit Ethernet" },
309 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP77_LAN4
,
310 "NVIDIA MCP77 Gigabit Ethernet" },
312 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP79_LAN1
,
313 "NVIDIA MCP79 Gigabit Ethernet" },
315 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP79_LAN2
,
316 "NVIDIA MCP79 Gigabit Ethernet" },
318 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP79_LAN3
,
319 "NVIDIA MCP79 Gigabit Ethernet" },
321 { PCI_VENDOR_NVIDIA
, PCI_PRODUCT_NVIDIA_MCP79_LAN4
,
322 "NVIDIA MCP79 Gigabit Ethernet" },
327 static device_method_t nfe_methods
[] = {
328 /* Device interface */
329 DEVMETHOD(device_probe
, nfe_probe
),
330 DEVMETHOD(device_attach
, nfe_attach
),
331 DEVMETHOD(device_detach
, nfe_detach
),
332 DEVMETHOD(device_suspend
, nfe_suspend
),
333 DEVMETHOD(device_resume
, nfe_resume
),
334 DEVMETHOD(device_shutdown
, nfe_shutdown
),
337 DEVMETHOD(bus_print_child
, bus_generic_print_child
),
338 DEVMETHOD(bus_driver_added
, bus_generic_driver_added
),
341 DEVMETHOD(miibus_readreg
, nfe_miibus_readreg
),
342 DEVMETHOD(miibus_writereg
, nfe_miibus_writereg
),
343 DEVMETHOD(miibus_statchg
, nfe_miibus_statchg
),
348 static driver_t nfe_driver
= {
351 sizeof(struct nfe_softc
)
354 static devclass_t nfe_devclass
;
356 DECLARE_DUMMY_MODULE(if_nfe
);
357 MODULE_DEPEND(if_nfe
, miibus
, 1, 1, 1);
358 DRIVER_MODULE(if_nfe
, pci
, nfe_driver
, nfe_devclass
, 0, 0);
359 DRIVER_MODULE(miibus
, nfe
, miibus_driver
, miibus_devclass
, 0, 0);
362 nfe_probe(device_t dev
)
364 const struct nfe_dev
*n
;
367 vid
= pci_get_vendor(dev
);
368 did
= pci_get_device(dev
);
369 for (n
= nfe_devices
; n
->desc
!= NULL
; ++n
) {
370 if (vid
== n
->vid
&& did
== n
->did
) {
371 struct nfe_softc
*sc
= device_get_softc(dev
);
374 case PCI_PRODUCT_NVIDIA_NFORCE_LAN
:
375 case PCI_PRODUCT_NVIDIA_NFORCE2_LAN
:
376 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN1
:
377 sc
->sc_caps
= NFE_NO_PWRCTL
|
380 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2
:
381 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3
:
382 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4
:
383 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5
:
384 sc
->sc_caps
= NFE_JUMBO_SUP
|
389 case PCI_PRODUCT_NVIDIA_MCP51_LAN1
:
390 case PCI_PRODUCT_NVIDIA_MCP51_LAN2
:
391 sc
->sc_caps
= NFE_FIX_EADDR
;
393 case PCI_PRODUCT_NVIDIA_MCP61_LAN1
:
394 case PCI_PRODUCT_NVIDIA_MCP61_LAN2
:
395 case PCI_PRODUCT_NVIDIA_MCP61_LAN3
:
396 case PCI_PRODUCT_NVIDIA_MCP61_LAN4
:
397 case PCI_PRODUCT_NVIDIA_MCP67_LAN1
:
398 case PCI_PRODUCT_NVIDIA_MCP67_LAN2
:
399 case PCI_PRODUCT_NVIDIA_MCP67_LAN3
:
400 case PCI_PRODUCT_NVIDIA_MCP67_LAN4
:
401 case PCI_PRODUCT_NVIDIA_MCP73_LAN1
:
402 case PCI_PRODUCT_NVIDIA_MCP73_LAN2
:
403 case PCI_PRODUCT_NVIDIA_MCP73_LAN3
:
404 case PCI_PRODUCT_NVIDIA_MCP73_LAN4
:
405 sc
->sc_caps
|= NFE_40BIT_ADDR
;
407 case PCI_PRODUCT_NVIDIA_CK804_LAN1
:
408 case PCI_PRODUCT_NVIDIA_CK804_LAN2
:
409 case PCI_PRODUCT_NVIDIA_MCP04_LAN1
:
410 case PCI_PRODUCT_NVIDIA_MCP04_LAN2
:
411 sc
->sc_caps
= NFE_JUMBO_SUP
|
417 case PCI_PRODUCT_NVIDIA_MCP65_LAN1
:
418 case PCI_PRODUCT_NVIDIA_MCP65_LAN2
:
419 case PCI_PRODUCT_NVIDIA_MCP65_LAN3
:
420 case PCI_PRODUCT_NVIDIA_MCP65_LAN4
:
421 sc
->sc_caps
= NFE_JUMBO_SUP
|
424 case PCI_PRODUCT_NVIDIA_MCP55_LAN1
:
425 case PCI_PRODUCT_NVIDIA_MCP55_LAN2
:
426 sc
->sc_caps
= NFE_JUMBO_SUP
|
432 case PCI_PRODUCT_NVIDIA_MCP77_LAN1
:
433 case PCI_PRODUCT_NVIDIA_MCP77_LAN2
:
434 case PCI_PRODUCT_NVIDIA_MCP77_LAN3
:
435 case PCI_PRODUCT_NVIDIA_MCP77_LAN4
:
436 case PCI_PRODUCT_NVIDIA_MCP79_LAN1
:
437 case PCI_PRODUCT_NVIDIA_MCP79_LAN2
:
438 case PCI_PRODUCT_NVIDIA_MCP79_LAN3
:
439 case PCI_PRODUCT_NVIDIA_MCP79_LAN4
:
440 sc
->sc_caps
= NFE_40BIT_ADDR
|
445 device_set_desc(dev
, n
->desc
);
446 device_set_async_attach(dev
, TRUE
);
454 nfe_attach(device_t dev
)
456 struct nfe_softc
*sc
= device_get_softc(dev
);
457 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
458 uint8_t eaddr
[ETHER_ADDR_LEN
];
462 if_initname(ifp
, device_get_name(dev
), device_get_unit(dev
));
463 lwkt_serialize_init(&sc
->sc_jbuf_serializer
);
466 * Initialize sysctl variables
468 sc
->sc_rx_ring_count
= nfe_rx_ring_count
;
469 sc
->sc_tx_ring_count
= nfe_tx_ring_count
;
470 sc
->sc_debug
= nfe_debug
;
471 if (nfe_imtime
< 0) {
472 sc
->sc_flags
|= NFE_F_DYN_IM
;
473 sc
->sc_imtime
= -nfe_imtime
;
475 sc
->sc_imtime
= nfe_imtime
;
477 sc
->sc_irq_enable
= NFE_IRQ_ENABLE(sc
);
479 sc
->sc_mem_rid
= PCIR_BAR(0);
481 if (sc
->sc_caps
& NFE_40BIT_ADDR
)
482 sc
->rxtxctl_desc
= NFE_RXTX_DESC_V3
;
483 else if (sc
->sc_caps
& NFE_JUMBO_SUP
)
484 sc
->rxtxctl_desc
= NFE_RXTX_DESC_V2
;
487 if (pci_get_powerstate(dev
) != PCI_POWERSTATE_D0
) {
490 mem
= pci_read_config(dev
, sc
->sc_mem_rid
, 4);
491 irq
= pci_read_config(dev
, PCIR_INTLINE
, 4);
493 device_printf(dev
, "chip is in D%d power mode "
494 "-- setting to D0\n", pci_get_powerstate(dev
));
496 pci_set_powerstate(dev
, PCI_POWERSTATE_D0
);
498 pci_write_config(dev
, sc
->sc_mem_rid
, mem
, 4);
499 pci_write_config(dev
, PCIR_INTLINE
, irq
, 4);
501 #endif /* !BURN_BRIDGE */
503 /* Enable bus mastering */
504 pci_enable_busmaster(dev
);
506 /* Allocate IO memory */
507 sc
->sc_mem_res
= bus_alloc_resource_any(dev
, SYS_RES_MEMORY
,
508 &sc
->sc_mem_rid
, RF_ACTIVE
);
509 if (sc
->sc_mem_res
== NULL
) {
510 device_printf(dev
, "could not allocate io memory\n");
513 sc
->sc_memh
= rman_get_bushandle(sc
->sc_mem_res
);
514 sc
->sc_memt
= rman_get_bustag(sc
->sc_mem_res
);
518 sc
->sc_irq_res
= bus_alloc_resource_any(dev
, SYS_RES_IRQ
,
520 RF_SHAREABLE
| RF_ACTIVE
);
521 if (sc
->sc_irq_res
== NULL
) {
522 device_printf(dev
, "could not allocate irq\n");
528 NFE_WRITE(sc
, NFE_WOL_CTL
, 0);
530 if ((sc
->sc_caps
& NFE_NO_PWRCTL
) == 0)
533 nfe_get_macaddr(sc
, eaddr
);
536 * Allocate top level DMA tag
538 if (sc
->sc_caps
& NFE_40BIT_ADDR
)
539 lowaddr
= NFE_BUS_SPACE_MAXADDR
;
541 lowaddr
= BUS_SPACE_MAXADDR_32BIT
;
542 error
= bus_dma_tag_create(NULL
, /* parent */
543 1, 0, /* alignment, boundary */
544 lowaddr
, /* lowaddr */
545 BUS_SPACE_MAXADDR
, /* highaddr */
546 NULL
, NULL
, /* filter, filterarg */
547 BUS_SPACE_MAXSIZE_32BIT
,/* maxsize */
549 BUS_SPACE_MAXSIZE_32BIT
,/* maxsegsize */
553 device_printf(dev
, "could not allocate parent dma tag\n");
558 * Allocate Tx and Rx rings.
560 error
= nfe_alloc_tx_ring(sc
, &sc
->txq
);
562 device_printf(dev
, "could not allocate Tx ring\n");
566 error
= nfe_alloc_rx_ring(sc
, &sc
->rxq
);
568 device_printf(dev
, "could not allocate Rx ring\n");
575 sysctl_ctx_init(&sc
->sc_sysctl_ctx
);
576 sc
->sc_sysctl_tree
= SYSCTL_ADD_NODE(&sc
->sc_sysctl_ctx
,
577 SYSCTL_STATIC_CHILDREN(_hw
),
579 device_get_nameunit(dev
),
581 if (sc
->sc_sysctl_tree
== NULL
) {
582 device_printf(dev
, "can't add sysctl node\n");
586 SYSCTL_ADD_PROC(&sc
->sc_sysctl_ctx
,
587 SYSCTL_CHILDREN(sc
->sc_sysctl_tree
),
588 OID_AUTO
, "imtimer", CTLTYPE_INT
| CTLFLAG_RW
,
589 sc
, 0, nfe_sysctl_imtime
, "I",
590 "Interrupt moderation time (usec). "
591 "0 to disable interrupt moderation.");
592 SYSCTL_ADD_INT(&sc
->sc_sysctl_ctx
,
593 SYSCTL_CHILDREN(sc
->sc_sysctl_tree
), OID_AUTO
,
594 "rx_ring_count", CTLFLAG_RD
, &sc
->sc_rx_ring_count
,
596 SYSCTL_ADD_INT(&sc
->sc_sysctl_ctx
,
597 SYSCTL_CHILDREN(sc
->sc_sysctl_tree
), OID_AUTO
,
598 "tx_ring_count", CTLFLAG_RD
, &sc
->sc_tx_ring_count
,
600 SYSCTL_ADD_INT(&sc
->sc_sysctl_ctx
,
601 SYSCTL_CHILDREN(sc
->sc_sysctl_tree
), OID_AUTO
,
602 "debug", CTLFLAG_RW
, &sc
->sc_debug
,
603 0, "control debugging printfs");
605 error
= mii_phy_probe(dev
, &sc
->sc_miibus
, nfe_ifmedia_upd
,
608 device_printf(dev
, "MII without any phy\n");
613 ifp
->if_mtu
= ETHERMTU
;
614 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
615 ifp
->if_ioctl
= nfe_ioctl
;
616 ifp
->if_start
= nfe_start
;
617 #ifdef DEVICE_POLLING
618 ifp
->if_poll
= nfe_poll
;
620 ifp
->if_watchdog
= nfe_watchdog
;
621 ifp
->if_init
= nfe_init
;
622 ifq_set_maxlen(&ifp
->if_snd
, sc
->sc_tx_ring_count
);
623 ifq_set_ready(&ifp
->if_snd
);
625 ifp
->if_capabilities
= IFCAP_VLAN_MTU
;
627 if (sc
->sc_caps
& NFE_HW_VLAN
)
628 ifp
->if_capabilities
|= IFCAP_VLAN_HWTAGGING
;
631 if (sc
->sc_caps
& NFE_HW_CSUM
) {
632 ifp
->if_capabilities
|= IFCAP_HWCSUM
;
633 ifp
->if_hwassist
= NFE_CSUM_FEATURES
;
636 sc
->sc_caps
&= ~NFE_HW_CSUM
;
638 ifp
->if_capenable
= ifp
->if_capabilities
;
640 callout_init(&sc
->sc_tick_ch
);
642 ether_ifattach(ifp
, eaddr
, NULL
);
644 error
= bus_setup_intr(dev
, sc
->sc_irq_res
, INTR_MPSAFE
, nfe_intr
, sc
,
645 &sc
->sc_ih
, ifp
->if_serializer
);
647 device_printf(dev
, "could not setup intr\n");
652 ifp
->if_cpuid
= ithread_cpuid(rman_get_start(sc
->sc_irq_res
));
653 KKASSERT(ifp
->if_cpuid
>= 0 && ifp
->if_cpuid
< ncpus
);
662 nfe_detach(device_t dev
)
664 struct nfe_softc
*sc
= device_get_softc(dev
);
666 if (device_is_attached(dev
)) {
667 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
669 lwkt_serialize_enter(ifp
->if_serializer
);
671 bus_teardown_intr(dev
, sc
->sc_irq_res
, sc
->sc_ih
);
672 lwkt_serialize_exit(ifp
->if_serializer
);
677 if (sc
->sc_miibus
!= NULL
)
678 device_delete_child(dev
, sc
->sc_miibus
);
679 bus_generic_detach(dev
);
681 if (sc
->sc_sysctl_tree
!= NULL
)
682 sysctl_ctx_free(&sc
->sc_sysctl_ctx
);
684 if (sc
->sc_irq_res
!= NULL
) {
685 bus_release_resource(dev
, SYS_RES_IRQ
, sc
->sc_irq_rid
,
689 if (sc
->sc_mem_res
!= NULL
) {
690 bus_release_resource(dev
, SYS_RES_MEMORY
, sc
->sc_mem_rid
,
694 nfe_free_tx_ring(sc
, &sc
->txq
);
695 nfe_free_rx_ring(sc
, &sc
->rxq
);
696 if (sc
->sc_dtag
!= NULL
)
697 bus_dma_tag_destroy(sc
->sc_dtag
);
703 nfe_shutdown(device_t dev
)
705 struct nfe_softc
*sc
= device_get_softc(dev
);
706 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
708 lwkt_serialize_enter(ifp
->if_serializer
);
710 lwkt_serialize_exit(ifp
->if_serializer
);
714 nfe_suspend(device_t dev
)
716 struct nfe_softc
*sc
= device_get_softc(dev
);
717 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
719 lwkt_serialize_enter(ifp
->if_serializer
);
721 lwkt_serialize_exit(ifp
->if_serializer
);
727 nfe_resume(device_t dev
)
729 struct nfe_softc
*sc
= device_get_softc(dev
);
730 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
732 lwkt_serialize_enter(ifp
->if_serializer
);
733 if (ifp
->if_flags
& IFF_UP
)
735 lwkt_serialize_exit(ifp
->if_serializer
);
741 nfe_miibus_statchg(device_t dev
)
743 struct nfe_softc
*sc
= device_get_softc(dev
);
744 struct mii_data
*mii
= device_get_softc(sc
->sc_miibus
);
745 uint32_t phy
, seed
, misc
= NFE_MISC1_MAGIC
, link
= NFE_MEDIA_SET
;
747 ASSERT_SERIALIZED(sc
->arpcom
.ac_if
.if_serializer
);
749 phy
= NFE_READ(sc
, NFE_PHY_IFACE
);
750 phy
&= ~(NFE_PHY_HDX
| NFE_PHY_100TX
| NFE_PHY_1000T
);
752 seed
= NFE_READ(sc
, NFE_RNDSEED
);
753 seed
&= ~NFE_SEED_MASK
;
755 if ((mii
->mii_media_active
& IFM_GMASK
) == IFM_HDX
) {
756 phy
|= NFE_PHY_HDX
; /* half-duplex */
757 misc
|= NFE_MISC1_HDX
;
760 switch (IFM_SUBTYPE(mii
->mii_media_active
)) {
761 case IFM_1000_T
: /* full-duplex only */
762 link
|= NFE_MEDIA_1000T
;
763 seed
|= NFE_SEED_1000T
;
764 phy
|= NFE_PHY_1000T
;
767 link
|= NFE_MEDIA_100TX
;
768 seed
|= NFE_SEED_100TX
;
769 phy
|= NFE_PHY_100TX
;
772 link
|= NFE_MEDIA_10T
;
773 seed
|= NFE_SEED_10T
;
777 NFE_WRITE(sc
, NFE_RNDSEED
, seed
); /* XXX: gigabit NICs only? */
779 NFE_WRITE(sc
, NFE_PHY_IFACE
, phy
);
780 NFE_WRITE(sc
, NFE_MISC1
, misc
);
781 NFE_WRITE(sc
, NFE_LINKSPEED
, link
);
785 nfe_miibus_readreg(device_t dev
, int phy
, int reg
)
787 struct nfe_softc
*sc
= device_get_softc(dev
);
791 NFE_WRITE(sc
, NFE_PHY_STATUS
, 0xf);
793 if (NFE_READ(sc
, NFE_PHY_CTL
) & NFE_PHY_BUSY
) {
794 NFE_WRITE(sc
, NFE_PHY_CTL
, NFE_PHY_BUSY
);
798 NFE_WRITE(sc
, NFE_PHY_CTL
, (phy
<< NFE_PHYADD_SHIFT
) | reg
);
800 for (ntries
= 0; ntries
< 1000; ntries
++) {
802 if (!(NFE_READ(sc
, NFE_PHY_CTL
) & NFE_PHY_BUSY
))
805 if (ntries
== 1000) {
806 DPRINTFN(sc
, 2, "timeout waiting for PHY %s\n", "");
810 if (NFE_READ(sc
, NFE_PHY_STATUS
) & NFE_PHY_ERROR
) {
811 DPRINTFN(sc
, 2, "could not read PHY %s\n", "");
815 val
= NFE_READ(sc
, NFE_PHY_DATA
);
816 if (val
!= 0xffffffff && val
!= 0)
817 sc
->mii_phyaddr
= phy
;
819 DPRINTFN(sc
, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy
, reg
, val
);
825 nfe_miibus_writereg(device_t dev
, int phy
, int reg
, int val
)
827 struct nfe_softc
*sc
= device_get_softc(dev
);
831 NFE_WRITE(sc
, NFE_PHY_STATUS
, 0xf);
833 if (NFE_READ(sc
, NFE_PHY_CTL
) & NFE_PHY_BUSY
) {
834 NFE_WRITE(sc
, NFE_PHY_CTL
, NFE_PHY_BUSY
);
838 NFE_WRITE(sc
, NFE_PHY_DATA
, val
);
839 ctl
= NFE_PHY_WRITE
| (phy
<< NFE_PHYADD_SHIFT
) | reg
;
840 NFE_WRITE(sc
, NFE_PHY_CTL
, ctl
);
842 for (ntries
= 0; ntries
< 1000; ntries
++) {
844 if (!(NFE_READ(sc
, NFE_PHY_CTL
) & NFE_PHY_BUSY
))
850 DPRINTFN(sc
, 2, "could not write to PHY %s\n", "");
854 #ifdef DEVICE_POLLING
857 nfe_poll(struct ifnet
*ifp
, enum poll_cmd cmd
, int count
)
859 struct nfe_softc
*sc
= ifp
->if_softc
;
861 ASSERT_SERIALIZED(ifp
->if_serializer
);
865 nfe_disable_intrs(sc
);
868 case POLL_DEREGISTER
:
869 nfe_enable_intrs(sc
);
872 case POLL_AND_CHECK_STATUS
:
875 if (ifp
->if_flags
& IFF_RUNNING
) {
888 struct nfe_softc
*sc
= arg
;
889 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
892 r
= NFE_READ(sc
, NFE_IRQ_STATUS
);
894 return; /* not for us */
895 NFE_WRITE(sc
, NFE_IRQ_STATUS
, r
);
897 if (sc
->sc_rate_second
!= time_second
) {
899 * Calculate sc_rate_avg - interrupts per second.
901 sc
->sc_rate_second
= time_second
;
902 if (sc
->sc_rate_avg
< sc
->sc_rate_acc
)
903 sc
->sc_rate_avg
= sc
->sc_rate_acc
;
905 sc
->sc_rate_avg
= (sc
->sc_rate_avg
* 3 +
906 sc
->sc_rate_acc
) / 4;
908 } else if (sc
->sc_rate_avg
< sc
->sc_rate_acc
) {
910 * Don't wait for a tick to roll over if we are taking
911 * a lot of interrupts.
913 sc
->sc_rate_avg
= sc
->sc_rate_acc
;
916 DPRINTFN(sc
, 5, "%s: interrupt register %x\n", __func__
, r
);
918 if (r
& NFE_IRQ_LINK
) {
919 NFE_READ(sc
, NFE_PHY_STATUS
);
920 NFE_WRITE(sc
, NFE_PHY_STATUS
, 0xf);
921 DPRINTF(sc
, "link state changed %s\n", "");
924 if (ifp
->if_flags
& IFF_RUNNING
) {
932 ret
|= nfe_txeof(sc
, 1);
934 /* update the rate accumulator */
938 if (sc
->sc_flags
& NFE_F_DYN_IM
) {
939 rate
= 1000000 / sc
->sc_imtime
;
940 if ((sc
->sc_flags
& NFE_F_IRQ_TIMER
) == 0 &&
941 sc
->sc_rate_avg
> rate
) {
943 * Use the hardware timer to reduce the
944 * interrupt rate if the discrete interrupt
945 * rate has exceeded our threshold.
947 NFE_WRITE(sc
, NFE_IRQ_MASK
, NFE_IRQ_IMTIMER
);
948 sc
->sc_flags
|= NFE_F_IRQ_TIMER
;
949 } else if ((sc
->sc_flags
& NFE_F_IRQ_TIMER
) &&
950 sc
->sc_rate_avg
<= rate
) {
952 * Use discrete TX/RX interrupts if the rate
953 * has fallen below our threshold.
955 NFE_WRITE(sc
, NFE_IRQ_MASK
, NFE_IRQ_NOIMTIMER
);
956 sc
->sc_flags
&= ~NFE_F_IRQ_TIMER
;
959 * Recollect, mainly to avoid the possible race
960 * introduced by changing interrupt masks.
970 nfe_ioctl(struct ifnet
*ifp
, u_long cmd
, caddr_t data
, struct ucred
*cr
)
972 struct nfe_softc
*sc
= ifp
->if_softc
;
973 struct ifreq
*ifr
= (struct ifreq
*)data
;
974 struct mii_data
*mii
;
975 int error
= 0, mask
, jumbo_cap
;
977 ASSERT_SERIALIZED(ifp
->if_serializer
);
981 if ((sc
->sc_caps
& NFE_JUMBO_SUP
) && sc
->rxq
.jbuf
!= NULL
)
986 if ((jumbo_cap
&& ifr
->ifr_mtu
> NFE_JUMBO_MTU
) ||
987 (!jumbo_cap
&& ifr
->ifr_mtu
> ETHERMTU
)) {
989 } else if (ifp
->if_mtu
!= ifr
->ifr_mtu
) {
990 ifp
->if_mtu
= ifr
->ifr_mtu
;
991 if (ifp
->if_flags
& IFF_RUNNING
)
996 if (ifp
->if_flags
& IFF_UP
) {
998 * If only the PROMISC or ALLMULTI flag changes, then
999 * don't do a full re-init of the chip, just update
1002 if ((ifp
->if_flags
& IFF_RUNNING
) &&
1003 ((ifp
->if_flags
^ sc
->sc_if_flags
) &
1004 (IFF_ALLMULTI
| IFF_PROMISC
)) != 0) {
1007 if (!(ifp
->if_flags
& IFF_RUNNING
))
1011 if (ifp
->if_flags
& IFF_RUNNING
)
1014 sc
->sc_if_flags
= ifp
->if_flags
;
1018 if (ifp
->if_flags
& IFF_RUNNING
)
1023 mii
= device_get_softc(sc
->sc_miibus
);
1024 error
= ifmedia_ioctl(ifp
, ifr
, &mii
->mii_media
, cmd
);
1027 mask
= (ifr
->ifr_reqcap
^ ifp
->if_capenable
) & IFCAP_HWCSUM
;
1028 if (mask
&& (ifp
->if_capabilities
& IFCAP_HWCSUM
)) {
1029 ifp
->if_capenable
^= mask
;
1030 if (IFCAP_TXCSUM
& ifp
->if_capenable
)
1031 ifp
->if_hwassist
= NFE_CSUM_FEATURES
;
1033 ifp
->if_hwassist
= 0;
1035 if (ifp
->if_flags
& IFF_RUNNING
)
1040 error
= ether_ioctl(ifp
, cmd
, data
);
1047 nfe_rxeof(struct nfe_softc
*sc
)
1049 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1050 struct nfe_rx_ring
*ring
= &sc
->rxq
;
1052 struct mbuf_chain chain
[MAXCPU
];
1055 ether_input_chain_init(chain
);
1058 struct nfe_rx_data
*data
= &ring
->data
[ring
->cur
];
1063 if (sc
->sc_caps
& NFE_40BIT_ADDR
) {
1064 struct nfe_desc64
*desc64
= &ring
->desc64
[ring
->cur
];
1066 flags
= le16toh(desc64
->flags
);
1067 len
= le16toh(desc64
->length
) & 0x3fff;
1069 struct nfe_desc32
*desc32
= &ring
->desc32
[ring
->cur
];
1071 flags
= le16toh(desc32
->flags
);
1072 len
= le16toh(desc32
->length
) & 0x3fff;
1075 if (flags
& NFE_RX_READY
)
1080 if ((sc
->sc_caps
& (NFE_JUMBO_SUP
| NFE_40BIT_ADDR
)) == 0) {
1081 if (!(flags
& NFE_RX_VALID_V1
))
1084 if ((flags
& NFE_RX_FIXME_V1
) == NFE_RX_FIXME_V1
) {
1085 flags
&= ~NFE_RX_ERROR
;
1086 len
--; /* fix buffer length */
1089 if (!(flags
& NFE_RX_VALID_V2
))
1092 if ((flags
& NFE_RX_FIXME_V2
) == NFE_RX_FIXME_V2
) {
1093 flags
&= ~NFE_RX_ERROR
;
1094 len
--; /* fix buffer length */
1098 if (flags
& NFE_RX_ERROR
) {
1105 if (sc
->sc_flags
& NFE_F_USE_JUMBO
)
1106 error
= nfe_newbuf_jumbo(sc
, ring
, ring
->cur
, 0);
1108 error
= nfe_newbuf_std(sc
, ring
, ring
->cur
, 0);
1115 m
->m_pkthdr
.len
= m
->m_len
= len
;
1116 m
->m_pkthdr
.rcvif
= ifp
;
1118 if ((ifp
->if_capenable
& IFCAP_RXCSUM
) &&
1119 (flags
& NFE_RX_CSUMOK
)) {
1120 if (flags
& NFE_RX_IP_CSUMOK_V2
) {
1121 m
->m_pkthdr
.csum_flags
|= CSUM_IP_CHECKED
|
1126 (NFE_RX_UDP_CSUMOK_V2
| NFE_RX_TCP_CSUMOK_V2
)) {
1127 m
->m_pkthdr
.csum_flags
|= CSUM_DATA_VALID
|
1129 CSUM_FRAG_NOT_CHECKED
;
1130 m
->m_pkthdr
.csum_data
= 0xffff;
1135 ether_input_chain(ifp
, m
, NULL
, chain
);
1137 nfe_set_ready_rxdesc(sc
, ring
, ring
->cur
);
1138 sc
->rxq
.cur
= (sc
->rxq
.cur
+ 1) % sc
->sc_rx_ring_count
;
1142 ether_input_dispatch(chain
);
1147 nfe_txeof(struct nfe_softc
*sc
, int start
)
1149 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1150 struct nfe_tx_ring
*ring
= &sc
->txq
;
1151 struct nfe_tx_data
*data
= NULL
;
1153 while (ring
->next
!= ring
->cur
) {
1156 if (sc
->sc_caps
& NFE_40BIT_ADDR
)
1157 flags
= le16toh(ring
->desc64
[ring
->next
].flags
);
1159 flags
= le16toh(ring
->desc32
[ring
->next
].flags
);
1161 if (flags
& NFE_TX_VALID
)
1164 data
= &ring
->data
[ring
->next
];
1166 if ((sc
->sc_caps
& (NFE_JUMBO_SUP
| NFE_40BIT_ADDR
)) == 0) {
1167 if (!(flags
& NFE_TX_LASTFRAG_V1
) && data
->m
== NULL
)
1170 if ((flags
& NFE_TX_ERROR_V1
) != 0) {
1171 if_printf(ifp
, "tx v1 error 0x%4b\n", flags
,
1178 if (!(flags
& NFE_TX_LASTFRAG_V2
) && data
->m
== NULL
)
1181 if ((flags
& NFE_TX_ERROR_V2
) != 0) {
1182 if_printf(ifp
, "tx v2 error 0x%4b\n", flags
,
1190 if (data
->m
== NULL
) { /* should not get there */
1192 "last fragment bit w/o associated mbuf!\n");
1196 /* last fragment of the mbuf chain transmitted */
1197 bus_dmamap_unload(ring
->data_tag
, data
->map
);
1202 KKASSERT(ring
->queued
>= 0);
1203 ring
->next
= (ring
->next
+ 1) % sc
->sc_tx_ring_count
;
1206 if (sc
->sc_tx_ring_count
- ring
->queued
>=
1207 sc
->sc_tx_spare
+ NFE_NSEG_RSVD
)
1208 ifp
->if_flags
&= ~IFF_OACTIVE
;
1210 if (ring
->queued
== 0)
1213 if (start
&& !ifq_is_empty(&ifp
->if_snd
))
1223 nfe_encap(struct nfe_softc
*sc
, struct nfe_tx_ring
*ring
, struct mbuf
*m0
)
1225 bus_dma_segment_t segs
[NFE_MAX_SCATTER
];
1226 struct nfe_tx_data
*data
, *data_map
;
1228 struct nfe_desc64
*desc64
= NULL
;
1229 struct nfe_desc32
*desc32
= NULL
;
1232 int error
, i
, j
, maxsegs
, nsegs
;
1234 data
= &ring
->data
[ring
->cur
];
1236 data_map
= data
; /* Remember who owns the DMA map */
1238 maxsegs
= (sc
->sc_tx_ring_count
- ring
->queued
) - NFE_NSEG_RSVD
;
1239 if (maxsegs
> NFE_MAX_SCATTER
)
1240 maxsegs
= NFE_MAX_SCATTER
;
1241 KASSERT(maxsegs
>= sc
->sc_tx_spare
,
1242 ("no enough segments %d,%d\n", maxsegs
, sc
->sc_tx_spare
));
1244 error
= bus_dmamap_load_mbuf_defrag(ring
->data_tag
, map
, &m0
,
1245 segs
, maxsegs
, &nsegs
, BUS_DMA_NOWAIT
);
1248 bus_dmamap_sync(ring
->data_tag
, map
, BUS_DMASYNC_PREWRITE
);
1252 /* setup h/w VLAN tagging */
1253 if (m0
->m_flags
& M_VLANTAG
)
1254 vtag
= m0
->m_pkthdr
.ether_vlantag
;
1256 if (sc
->arpcom
.ac_if
.if_capenable
& IFCAP_TXCSUM
) {
1257 if (m0
->m_pkthdr
.csum_flags
& CSUM_IP
)
1258 flags
|= NFE_TX_IP_CSUM
;
1259 if (m0
->m_pkthdr
.csum_flags
& (CSUM_TCP
| CSUM_UDP
))
1260 flags
|= NFE_TX_TCP_CSUM
;
1264 * XXX urm. somebody is unaware of how hardware works. You
1265 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
1266 * the ring until the entire chain is actually *VALID*. Otherwise
1267 * the hardware may encounter a partially initialized chain that
1268 * is marked as being ready to go when it in fact is not ready to
1272 for (i
= 0; i
< nsegs
; i
++) {
1273 j
= (ring
->cur
+ i
) % sc
->sc_tx_ring_count
;
1274 data
= &ring
->data
[j
];
1276 if (sc
->sc_caps
& NFE_40BIT_ADDR
) {
1277 desc64
= &ring
->desc64
[j
];
1278 desc64
->physaddr
[0] =
1279 htole32(NFE_ADDR_HI(segs
[i
].ds_addr
));
1280 desc64
->physaddr
[1] =
1281 htole32(NFE_ADDR_LO(segs
[i
].ds_addr
));
1282 desc64
->length
= htole16(segs
[i
].ds_len
- 1);
1283 desc64
->vtag
= htole32(vtag
);
1284 desc64
->flags
= htole16(flags
);
1286 desc32
= &ring
->desc32
[j
];
1287 desc32
->physaddr
= htole32(segs
[i
].ds_addr
);
1288 desc32
->length
= htole16(segs
[i
].ds_len
- 1);
1289 desc32
->flags
= htole16(flags
);
1292 /* csum flags and vtag belong to the first fragment only */
1293 flags
&= ~(NFE_TX_IP_CSUM
| NFE_TX_TCP_CSUM
);
1297 KKASSERT(ring
->queued
<= sc
->sc_tx_ring_count
);
1300 /* the whole mbuf chain has been DMA mapped, fix last descriptor */
1301 if (sc
->sc_caps
& NFE_40BIT_ADDR
) {
1302 desc64
->flags
|= htole16(NFE_TX_LASTFRAG_V2
);
1304 if (sc
->sc_caps
& NFE_JUMBO_SUP
)
1305 flags
= NFE_TX_LASTFRAG_V2
;
1307 flags
= NFE_TX_LASTFRAG_V1
;
1308 desc32
->flags
|= htole16(flags
);
1312 * Set NFE_TX_VALID backwards so the hardware doesn't see the
1313 * whole mess until the first descriptor in the map is flagged.
1315 for (i
= nsegs
- 1; i
>= 0; --i
) {
1316 j
= (ring
->cur
+ i
) % sc
->sc_tx_ring_count
;
1317 if (sc
->sc_caps
& NFE_40BIT_ADDR
) {
1318 desc64
= &ring
->desc64
[j
];
1319 desc64
->flags
|= htole16(NFE_TX_VALID
);
1321 desc32
= &ring
->desc32
[j
];
1322 desc32
->flags
|= htole16(NFE_TX_VALID
);
1325 ring
->cur
= (ring
->cur
+ nsegs
) % sc
->sc_tx_ring_count
;
1327 /* Exchange DMA map */
1328 data_map
->map
= data
->map
;
1338 nfe_start(struct ifnet
*ifp
)
1340 struct nfe_softc
*sc
= ifp
->if_softc
;
1341 struct nfe_tx_ring
*ring
= &sc
->txq
;
1342 int count
= 0, oactive
= 0;
1345 ASSERT_SERIALIZED(ifp
->if_serializer
);
1347 if ((ifp
->if_flags
& (IFF_OACTIVE
| IFF_RUNNING
)) != IFF_RUNNING
)
1353 if (sc
->sc_tx_ring_count
- ring
->queued
<
1354 sc
->sc_tx_spare
+ NFE_NSEG_RSVD
) {
1356 ifp
->if_flags
|= IFF_OACTIVE
;
1365 m0
= ifq_dequeue(&ifp
->if_snd
, NULL
);
1369 ETHER_BPF_MTAP(ifp
, m0
);
1371 error
= nfe_encap(sc
, ring
, m0
);
1374 if (error
== EFBIG
) {
1376 ifp
->if_flags
|= IFF_OACTIVE
;
1390 * `m0' may be freed in nfe_encap(), so
1391 * it should not be touched any more.
1395 if (count
== 0) /* nothing sent */
1399 NFE_WRITE(sc
, NFE_RXTX_CTL
, NFE_RXTX_KICKTX
| sc
->rxtxctl
);
1402 * Set a timeout in case the chip goes out to lunch.
1408 nfe_watchdog(struct ifnet
*ifp
)
1410 struct nfe_softc
*sc
= ifp
->if_softc
;
1412 ASSERT_SERIALIZED(ifp
->if_serializer
);
1414 if (ifp
->if_flags
& IFF_RUNNING
) {
1415 if_printf(ifp
, "watchdog timeout - lost interrupt recovered\n");
1420 if_printf(ifp
, "watchdog timeout\n");
1422 nfe_init(ifp
->if_softc
);
1430 struct nfe_softc
*sc
= xsc
;
1431 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1435 ASSERT_SERIALIZED(ifp
->if_serializer
);
1439 if ((sc
->sc_caps
& NFE_NO_PWRCTL
) == 0)
1444 * Switching between jumbo frames and normal frames should
1445 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring().
1447 if (ifp
->if_mtu
> ETHERMTU
) {
1448 sc
->sc_flags
|= NFE_F_USE_JUMBO
;
1449 sc
->rxq
.bufsz
= NFE_JBYTES
;
1450 sc
->sc_tx_spare
= NFE_NSEG_SPARE_JUMBO
;
1452 if_printf(ifp
, "use jumbo frames\n");
1454 sc
->sc_flags
&= ~NFE_F_USE_JUMBO
;
1455 sc
->rxq
.bufsz
= MCLBYTES
;
1456 sc
->sc_tx_spare
= NFE_NSEG_SPARE
;
1458 if_printf(ifp
, "use non-jumbo frames\n");
1461 error
= nfe_init_tx_ring(sc
, &sc
->txq
);
1467 error
= nfe_init_rx_ring(sc
, &sc
->rxq
);
1473 NFE_WRITE(sc
, NFE_TX_POLL
, 0);
1474 NFE_WRITE(sc
, NFE_STATUS
, 0);
1476 sc
->rxtxctl
= NFE_RXTX_BIT2
| sc
->rxtxctl_desc
;
1478 if (ifp
->if_capenable
& IFCAP_RXCSUM
)
1479 sc
->rxtxctl
|= NFE_RXTX_RXCSUM
;
1482 * Although the adapter is capable of stripping VLAN tags from received
1483 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
1484 * purpose. This will be done in software by our network stack.
1486 if (sc
->sc_caps
& NFE_HW_VLAN
)
1487 sc
->rxtxctl
|= NFE_RXTX_VTAG_INSERT
;
1489 NFE_WRITE(sc
, NFE_RXTX_CTL
, NFE_RXTX_RESET
| sc
->rxtxctl
);
1491 NFE_WRITE(sc
, NFE_RXTX_CTL
, sc
->rxtxctl
);
1493 if (sc
->sc_caps
& NFE_HW_VLAN
)
1494 NFE_WRITE(sc
, NFE_VTAG_CTL
, NFE_VTAG_ENABLE
);
1496 NFE_WRITE(sc
, NFE_SETUP_R6
, 0);
1498 /* set MAC address */
1499 nfe_set_macaddr(sc
, sc
->arpcom
.ac_enaddr
);
1501 /* tell MAC where rings are in memory */
1502 if (sc
->sc_caps
& NFE_40BIT_ADDR
) {
1503 NFE_WRITE(sc
, NFE_RX_RING_ADDR_HI
,
1504 NFE_ADDR_HI(sc
->rxq
.physaddr
));
1506 NFE_WRITE(sc
, NFE_RX_RING_ADDR_LO
, NFE_ADDR_LO(sc
->rxq
.physaddr
));
1508 if (sc
->sc_caps
& NFE_40BIT_ADDR
) {
1509 NFE_WRITE(sc
, NFE_TX_RING_ADDR_HI
,
1510 NFE_ADDR_HI(sc
->txq
.physaddr
));
1512 NFE_WRITE(sc
, NFE_TX_RING_ADDR_LO
, NFE_ADDR_LO(sc
->txq
.physaddr
));
1514 NFE_WRITE(sc
, NFE_RING_SIZE
,
1515 (sc
->sc_rx_ring_count
- 1) << 16 |
1516 (sc
->sc_tx_ring_count
- 1));
1518 NFE_WRITE(sc
, NFE_RXBUFSZ
, sc
->rxq
.bufsz
);
1520 /* force MAC to wakeup */
1521 tmp
= NFE_READ(sc
, NFE_PWR_STATE
);
1522 NFE_WRITE(sc
, NFE_PWR_STATE
, tmp
| NFE_PWR_WAKEUP
);
1524 tmp
= NFE_READ(sc
, NFE_PWR_STATE
);
1525 NFE_WRITE(sc
, NFE_PWR_STATE
, tmp
| NFE_PWR_VALID
);
1527 NFE_WRITE(sc
, NFE_SETUP_R1
, NFE_R1_MAGIC
);
1528 NFE_WRITE(sc
, NFE_SETUP_R2
, NFE_R2_MAGIC
);
1529 NFE_WRITE(sc
, NFE_SETUP_R6
, NFE_R6_MAGIC
);
1531 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
1532 NFE_WRITE(sc
, NFE_STATUS
, sc
->mii_phyaddr
<< 24 | NFE_STATUS_MAGIC
);
1534 NFE_WRITE(sc
, NFE_SETUP_R4
, NFE_R4_MAGIC
);
1536 sc
->rxtxctl
&= ~NFE_RXTX_BIT2
;
1537 NFE_WRITE(sc
, NFE_RXTX_CTL
, sc
->rxtxctl
);
1539 NFE_WRITE(sc
, NFE_RXTX_CTL
, NFE_RXTX_BIT1
| sc
->rxtxctl
);
1544 nfe_ifmedia_upd(ifp
);
1547 NFE_WRITE(sc
, NFE_RX_CTL
, NFE_RX_START
);
1550 NFE_WRITE(sc
, NFE_TX_CTL
, NFE_TX_START
);
1552 NFE_WRITE(sc
, NFE_PHY_STATUS
, 0xf);
1554 #ifdef DEVICE_POLLING
1555 if ((ifp
->if_flags
& IFF_POLLING
))
1556 nfe_disable_intrs(sc
);
1559 nfe_enable_intrs(sc
);
1561 callout_reset(&sc
->sc_tick_ch
, hz
, nfe_tick
, sc
);
1563 ifp
->if_flags
|= IFF_RUNNING
;
1564 ifp
->if_flags
&= ~IFF_OACTIVE
;
1567 * If we had stuff in the tx ring before its all cleaned out now
1568 * so we are not going to get an interrupt, jump-start any pending
1571 if (!ifq_is_empty(&ifp
->if_snd
))
1576 nfe_stop(struct nfe_softc
*sc
)
1578 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1579 uint32_t rxtxctl
= sc
->rxtxctl_desc
| NFE_RXTX_BIT2
;
1582 ASSERT_SERIALIZED(ifp
->if_serializer
);
1584 callout_stop(&sc
->sc_tick_ch
);
1587 ifp
->if_flags
&= ~(IFF_RUNNING
| IFF_OACTIVE
);
1588 sc
->sc_flags
&= ~NFE_F_IRQ_TIMER
;
1590 #define WAITMAX 50000
1595 NFE_WRITE(sc
, NFE_TX_CTL
, 0);
1596 for (i
= 0; i
< WAITMAX
; ++i
) {
1598 if ((NFE_READ(sc
, NFE_TX_STATUS
) & NFE_TX_STATUS_BUSY
) == 0)
1602 if_printf(ifp
, "can't stop TX\n");
1608 NFE_WRITE(sc
, NFE_RX_CTL
, 0);
1609 for (i
= 0; i
< WAITMAX
; ++i
) {
1611 if ((NFE_READ(sc
, NFE_RX_STATUS
) & NFE_RX_STATUS_BUSY
) == 0)
1615 if_printf(ifp
, "can't stop RX\n");
1620 NFE_WRITE(sc
, NFE_RXTX_CTL
, NFE_RXTX_RESET
| rxtxctl
);
1622 NFE_WRITE(sc
, NFE_RXTX_CTL
, rxtxctl
);
1624 /* Disable interrupts */
1625 NFE_WRITE(sc
, NFE_IRQ_MASK
, 0);
1627 /* Reset Tx and Rx rings */
1628 nfe_reset_tx_ring(sc
, &sc
->txq
);
1629 nfe_reset_rx_ring(sc
, &sc
->rxq
);
1633 nfe_alloc_rx_ring(struct nfe_softc
*sc
, struct nfe_rx_ring
*ring
)
1635 int i
, j
, error
, descsize
;
1639 if (sc
->sc_caps
& NFE_40BIT_ADDR
) {
1640 desc
= (void **)&ring
->desc64
;
1641 descsize
= sizeof(struct nfe_desc64
);
1643 desc
= (void **)&ring
->desc32
;
1644 descsize
= sizeof(struct nfe_desc32
);
1647 ring
->bufsz
= MCLBYTES
;
1648 ring
->cur
= ring
->next
= 0;
1650 error
= bus_dmamem_coherent(sc
->sc_dtag
, PAGE_SIZE
, 0,
1651 BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
,
1652 sc
->sc_rx_ring_count
* descsize
,
1653 BUS_DMA_WAITOK
| BUS_DMA_ZERO
, &dmem
);
1655 if_printf(&sc
->arpcom
.ac_if
,
1656 "could not create RX desc ring\n");
1659 ring
->tag
= dmem
.dmem_tag
;
1660 ring
->map
= dmem
.dmem_map
;
1661 *desc
= dmem
.dmem_addr
;
1662 ring
->physaddr
= dmem
.dmem_busaddr
;
1664 if (sc
->sc_caps
& NFE_JUMBO_SUP
) {
1666 kmalloc(sizeof(struct nfe_jbuf
) * NFE_JPOOL_COUNT(sc
),
1667 M_DEVBUF
, M_WAITOK
| M_ZERO
);
1669 error
= nfe_jpool_alloc(sc
, ring
);
1671 if_printf(&sc
->arpcom
.ac_if
,
1672 "could not allocate jumbo frames\n");
1673 kfree(ring
->jbuf
, M_DEVBUF
);
1675 /* Allow jumbo frame allocation to fail */
1679 ring
->data
= kmalloc(sizeof(struct nfe_rx_data
) * sc
->sc_rx_ring_count
,
1680 M_DEVBUF
, M_WAITOK
| M_ZERO
);
1682 error
= bus_dma_tag_create(sc
->sc_dtag
, 1, 0,
1683 BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
,
1685 MCLBYTES
, 1, MCLBYTES
,
1686 BUS_DMA_ALLOCNOW
| BUS_DMA_WAITOK
,
1689 if_printf(&sc
->arpcom
.ac_if
,
1690 "could not create RX mbuf DMA tag\n");
1694 /* Create a spare RX mbuf DMA map */
1695 error
= bus_dmamap_create(ring
->data_tag
, BUS_DMA_WAITOK
,
1696 &ring
->data_tmpmap
);
1698 if_printf(&sc
->arpcom
.ac_if
,
1699 "could not create spare RX mbuf DMA map\n");
1700 bus_dma_tag_destroy(ring
->data_tag
);
1701 ring
->data_tag
= NULL
;
1705 for (i
= 0; i
< sc
->sc_rx_ring_count
; i
++) {
1706 error
= bus_dmamap_create(ring
->data_tag
, BUS_DMA_WAITOK
,
1707 &ring
->data
[i
].map
);
1709 if_printf(&sc
->arpcom
.ac_if
,
1710 "could not create %dth RX mbuf DMA mapn", i
);
1716 for (j
= 0; j
< i
; ++j
)
1717 bus_dmamap_destroy(ring
->data_tag
, ring
->data
[i
].map
);
1718 bus_dmamap_destroy(ring
->data_tag
, ring
->data_tmpmap
);
1719 bus_dma_tag_destroy(ring
->data_tag
);
1720 ring
->data_tag
= NULL
;
1725 nfe_reset_rx_ring(struct nfe_softc
*sc
, struct nfe_rx_ring
*ring
)
1729 for (i
= 0; i
< sc
->sc_rx_ring_count
; i
++) {
1730 struct nfe_rx_data
*data
= &ring
->data
[i
];
1732 if (data
->m
!= NULL
) {
1733 if ((sc
->sc_flags
& NFE_F_USE_JUMBO
) == 0)
1734 bus_dmamap_unload(ring
->data_tag
, data
->map
);
1740 ring
->cur
= ring
->next
= 0;
1744 nfe_init_rx_ring(struct nfe_softc
*sc
, struct nfe_rx_ring
*ring
)
1748 for (i
= 0; i
< sc
->sc_rx_ring_count
; ++i
) {
1751 /* XXX should use a function pointer */
1752 if (sc
->sc_flags
& NFE_F_USE_JUMBO
)
1753 error
= nfe_newbuf_jumbo(sc
, ring
, i
, 1);
1755 error
= nfe_newbuf_std(sc
, ring
, i
, 1);
1757 if_printf(&sc
->arpcom
.ac_if
,
1758 "could not allocate RX buffer\n");
1761 nfe_set_ready_rxdesc(sc
, ring
, i
);
1767 nfe_free_rx_ring(struct nfe_softc
*sc
, struct nfe_rx_ring
*ring
)
1769 if (ring
->data_tag
!= NULL
) {
1770 struct nfe_rx_data
*data
;
1773 for (i
= 0; i
< sc
->sc_rx_ring_count
; i
++) {
1774 data
= &ring
->data
[i
];
1776 if (data
->m
!= NULL
) {
1777 bus_dmamap_unload(ring
->data_tag
, data
->map
);
1780 bus_dmamap_destroy(ring
->data_tag
, data
->map
);
1782 bus_dmamap_destroy(ring
->data_tag
, ring
->data_tmpmap
);
1783 bus_dma_tag_destroy(ring
->data_tag
);
1786 nfe_jpool_free(sc
, ring
);
1788 if (ring
->jbuf
!= NULL
)
1789 kfree(ring
->jbuf
, M_DEVBUF
);
1790 if (ring
->data
!= NULL
)
1791 kfree(ring
->data
, M_DEVBUF
);
1793 if (ring
->tag
!= NULL
) {
1796 if (sc
->sc_caps
& NFE_40BIT_ADDR
)
1797 desc
= ring
->desc64
;
1799 desc
= ring
->desc32
;
1801 bus_dmamap_unload(ring
->tag
, ring
->map
);
1802 bus_dmamem_free(ring
->tag
, desc
, ring
->map
);
1803 bus_dma_tag_destroy(ring
->tag
);
1807 static struct nfe_jbuf
*
1808 nfe_jalloc(struct nfe_softc
*sc
)
1810 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
1811 struct nfe_jbuf
*jbuf
;
1813 lwkt_serialize_enter(&sc
->sc_jbuf_serializer
);
1815 jbuf
= SLIST_FIRST(&sc
->rxq
.jfreelist
);
1817 SLIST_REMOVE_HEAD(&sc
->rxq
.jfreelist
, jnext
);
1820 if_printf(ifp
, "no free jumbo buffer\n");
1823 lwkt_serialize_exit(&sc
->sc_jbuf_serializer
);
1829 nfe_jfree(void *arg
)
1831 struct nfe_jbuf
*jbuf
= arg
;
1832 struct nfe_softc
*sc
= jbuf
->sc
;
1833 struct nfe_rx_ring
*ring
= jbuf
->ring
;
1835 if (&ring
->jbuf
[jbuf
->slot
] != jbuf
)
1836 panic("%s: free wrong jumbo buffer\n", __func__
);
1837 else if (jbuf
->inuse
== 0)
1838 panic("%s: jumbo buffer already freed\n", __func__
);
1840 lwkt_serialize_enter(&sc
->sc_jbuf_serializer
);
1841 atomic_subtract_int(&jbuf
->inuse
, 1);
1842 if (jbuf
->inuse
== 0)
1843 SLIST_INSERT_HEAD(&ring
->jfreelist
, jbuf
, jnext
);
1844 lwkt_serialize_exit(&sc
->sc_jbuf_serializer
);
1850 struct nfe_jbuf
*jbuf
= arg
;
1851 struct nfe_rx_ring
*ring
= jbuf
->ring
;
1853 if (&ring
->jbuf
[jbuf
->slot
] != jbuf
)
1854 panic("%s: ref wrong jumbo buffer\n", __func__
);
1855 else if (jbuf
->inuse
== 0)
1856 panic("%s: jumbo buffer already freed\n", __func__
);
1858 atomic_add_int(&jbuf
->inuse
, 1);
1862 nfe_jpool_alloc(struct nfe_softc
*sc
, struct nfe_rx_ring
*ring
)
1864 struct nfe_jbuf
*jbuf
;
1866 bus_addr_t physaddr
;
1871 * Allocate a big chunk of DMA'able memory.
1873 error
= bus_dmamem_coherent(sc
->sc_dtag
, PAGE_SIZE
, 0,
1874 BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
,
1876 BUS_DMA_WAITOK
, &dmem
);
1878 if_printf(&sc
->arpcom
.ac_if
,
1879 "could not create jumbo buffer\n");
1882 ring
->jtag
= dmem
.dmem_tag
;
1883 ring
->jmap
= dmem
.dmem_map
;
1884 ring
->jpool
= dmem
.dmem_addr
;
1885 physaddr
= dmem
.dmem_busaddr
;
1887 /* ..and split it into 9KB chunks */
1888 SLIST_INIT(&ring
->jfreelist
);
1891 for (i
= 0; i
< NFE_JPOOL_COUNT(sc
); i
++) {
1892 jbuf
= &ring
->jbuf
[i
];
1899 jbuf
->physaddr
= physaddr
;
1901 SLIST_INSERT_HEAD(&ring
->jfreelist
, jbuf
, jnext
);
1904 physaddr
+= NFE_JBYTES
;
1911 nfe_jpool_free(struct nfe_softc
*sc
, struct nfe_rx_ring
*ring
)
1913 if (ring
->jtag
!= NULL
) {
1914 bus_dmamap_unload(ring
->jtag
, ring
->jmap
);
1915 bus_dmamem_free(ring
->jtag
, ring
->jpool
, ring
->jmap
);
1916 bus_dma_tag_destroy(ring
->jtag
);
1921 nfe_alloc_tx_ring(struct nfe_softc
*sc
, struct nfe_tx_ring
*ring
)
1923 int i
, j
, error
, descsize
;
1927 if (sc
->sc_caps
& NFE_40BIT_ADDR
) {
1928 desc
= (void **)&ring
->desc64
;
1929 descsize
= sizeof(struct nfe_desc64
);
1931 desc
= (void **)&ring
->desc32
;
1932 descsize
= sizeof(struct nfe_desc32
);
1936 ring
->cur
= ring
->next
= 0;
1938 error
= bus_dmamem_coherent(sc
->sc_dtag
, PAGE_SIZE
, 0,
1939 BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
,
1940 sc
->sc_tx_ring_count
* descsize
,
1941 BUS_DMA_WAITOK
| BUS_DMA_ZERO
, &dmem
);
1943 if_printf(&sc
->arpcom
.ac_if
,
1944 "could not create TX desc ring\n");
1947 ring
->tag
= dmem
.dmem_tag
;
1948 ring
->map
= dmem
.dmem_map
;
1949 *desc
= dmem
.dmem_addr
;
1950 ring
->physaddr
= dmem
.dmem_busaddr
;
1952 ring
->data
= kmalloc(sizeof(struct nfe_tx_data
) * sc
->sc_tx_ring_count
,
1953 M_DEVBUF
, M_WAITOK
| M_ZERO
);
1955 error
= bus_dma_tag_create(sc
->sc_dtag
, 1, 0,
1956 BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
,
1958 NFE_JBYTES
, NFE_MAX_SCATTER
, MCLBYTES
,
1959 BUS_DMA_ALLOCNOW
| BUS_DMA_WAITOK
| BUS_DMA_ONEBPAGE
,
1962 if_printf(&sc
->arpcom
.ac_if
,
1963 "could not create TX buf DMA tag\n");
1967 for (i
= 0; i
< sc
->sc_tx_ring_count
; i
++) {
1968 error
= bus_dmamap_create(ring
->data_tag
,
1969 BUS_DMA_WAITOK
| BUS_DMA_ONEBPAGE
,
1970 &ring
->data
[i
].map
);
1972 if_printf(&sc
->arpcom
.ac_if
,
1973 "could not create %dth TX buf DMA map\n", i
);
1980 for (j
= 0; j
< i
; ++j
)
1981 bus_dmamap_destroy(ring
->data_tag
, ring
->data
[i
].map
);
1982 bus_dma_tag_destroy(ring
->data_tag
);
1983 ring
->data_tag
= NULL
;
1988 nfe_reset_tx_ring(struct nfe_softc
*sc
, struct nfe_tx_ring
*ring
)
1992 for (i
= 0; i
< sc
->sc_tx_ring_count
; i
++) {
1993 struct nfe_tx_data
*data
= &ring
->data
[i
];
1995 if (sc
->sc_caps
& NFE_40BIT_ADDR
)
1996 ring
->desc64
[i
].flags
= 0;
1998 ring
->desc32
[i
].flags
= 0;
2000 if (data
->m
!= NULL
) {
2001 bus_dmamap_unload(ring
->data_tag
, data
->map
);
2008 ring
->cur
= ring
->next
= 0;
2012 nfe_init_tx_ring(struct nfe_softc
*sc __unused
,
2013 struct nfe_tx_ring
*ring __unused
)
2019 nfe_free_tx_ring(struct nfe_softc
*sc
, struct nfe_tx_ring
*ring
)
2021 if (ring
->data_tag
!= NULL
) {
2022 struct nfe_tx_data
*data
;
2025 for (i
= 0; i
< sc
->sc_tx_ring_count
; ++i
) {
2026 data
= &ring
->data
[i
];
2028 if (data
->m
!= NULL
) {
2029 bus_dmamap_unload(ring
->data_tag
, data
->map
);
2032 bus_dmamap_destroy(ring
->data_tag
, data
->map
);
2035 bus_dma_tag_destroy(ring
->data_tag
);
2038 if (ring
->data
!= NULL
)
2039 kfree(ring
->data
, M_DEVBUF
);
2041 if (ring
->tag
!= NULL
) {
2044 if (sc
->sc_caps
& NFE_40BIT_ADDR
)
2045 desc
= ring
->desc64
;
2047 desc
= ring
->desc32
;
2049 bus_dmamap_unload(ring
->tag
, ring
->map
);
2050 bus_dmamem_free(ring
->tag
, desc
, ring
->map
);
2051 bus_dma_tag_destroy(ring
->tag
);
2056 nfe_ifmedia_upd(struct ifnet
*ifp
)
2058 struct nfe_softc
*sc
= ifp
->if_softc
;
2059 struct mii_data
*mii
= device_get_softc(sc
->sc_miibus
);
2061 ASSERT_SERIALIZED(ifp
->if_serializer
);
2063 if (mii
->mii_instance
!= 0) {
2064 struct mii_softc
*miisc
;
2066 LIST_FOREACH(miisc
, &mii
->mii_phys
, mii_list
)
2067 mii_phy_reset(miisc
);
2075 nfe_ifmedia_sts(struct ifnet
*ifp
, struct ifmediareq
*ifmr
)
2077 struct nfe_softc
*sc
= ifp
->if_softc
;
2078 struct mii_data
*mii
= device_get_softc(sc
->sc_miibus
);
2080 ASSERT_SERIALIZED(ifp
->if_serializer
);
2083 ifmr
->ifm_status
= mii
->mii_media_status
;
2084 ifmr
->ifm_active
= mii
->mii_media_active
;
2088 nfe_setmulti(struct nfe_softc
*sc
)
2090 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
2091 struct ifmultiaddr
*ifma
;
2092 uint8_t addr
[ETHER_ADDR_LEN
], mask
[ETHER_ADDR_LEN
];
2093 uint32_t filter
= NFE_RXFILTER_MAGIC
;
2096 if ((ifp
->if_flags
& (IFF_ALLMULTI
| IFF_PROMISC
)) != 0) {
2097 bzero(addr
, ETHER_ADDR_LEN
);
2098 bzero(mask
, ETHER_ADDR_LEN
);
2102 bcopy(etherbroadcastaddr
, addr
, ETHER_ADDR_LEN
);
2103 bcopy(etherbroadcastaddr
, mask
, ETHER_ADDR_LEN
);
2105 LIST_FOREACH(ifma
, &ifp
->if_multiaddrs
, ifma_link
) {
2108 if (ifma
->ifma_addr
->sa_family
!= AF_LINK
)
2111 maddr
= LLADDR((struct sockaddr_dl
*)ifma
->ifma_addr
);
2112 for (i
= 0; i
< ETHER_ADDR_LEN
; i
++) {
2113 addr
[i
] &= maddr
[i
];
2114 mask
[i
] &= ~maddr
[i
];
2118 for (i
= 0; i
< ETHER_ADDR_LEN
; i
++)
2122 addr
[0] |= 0x01; /* make sure multicast bit is set */
2124 NFE_WRITE(sc
, NFE_MULTIADDR_HI
,
2125 addr
[3] << 24 | addr
[2] << 16 | addr
[1] << 8 | addr
[0]);
2126 NFE_WRITE(sc
, NFE_MULTIADDR_LO
,
2127 addr
[5] << 8 | addr
[4]);
2128 NFE_WRITE(sc
, NFE_MULTIMASK_HI
,
2129 mask
[3] << 24 | mask
[2] << 16 | mask
[1] << 8 | mask
[0]);
2130 NFE_WRITE(sc
, NFE_MULTIMASK_LO
,
2131 mask
[5] << 8 | mask
[4]);
2133 filter
|= (ifp
->if_flags
& IFF_PROMISC
) ? NFE_PROMISC
: NFE_U2M
;
2134 NFE_WRITE(sc
, NFE_RXFILTER
, filter
);
2138 nfe_get_macaddr(struct nfe_softc
*sc
, uint8_t *addr
)
2142 lo
= NFE_READ(sc
, NFE_MACADDR_LO
);
2143 hi
= NFE_READ(sc
, NFE_MACADDR_HI
);
2144 if (sc
->sc_caps
& NFE_FIX_EADDR
) {
2145 addr
[0] = (lo
>> 8) & 0xff;
2146 addr
[1] = (lo
& 0xff);
2148 addr
[2] = (hi
>> 24) & 0xff;
2149 addr
[3] = (hi
>> 16) & 0xff;
2150 addr
[4] = (hi
>> 8) & 0xff;
2151 addr
[5] = (hi
& 0xff);
2153 addr
[0] = (hi
& 0xff);
2154 addr
[1] = (hi
>> 8) & 0xff;
2155 addr
[2] = (hi
>> 16) & 0xff;
2156 addr
[3] = (hi
>> 24) & 0xff;
2158 addr
[4] = (lo
& 0xff);
2159 addr
[5] = (lo
>> 8) & 0xff;
2164 nfe_set_macaddr(struct nfe_softc
*sc
, const uint8_t *addr
)
2166 NFE_WRITE(sc
, NFE_MACADDR_LO
,
2167 addr
[5] << 8 | addr
[4]);
2168 NFE_WRITE(sc
, NFE_MACADDR_HI
,
2169 addr
[3] << 24 | addr
[2] << 16 | addr
[1] << 8 | addr
[0]);
2175 struct nfe_softc
*sc
= arg
;
2176 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
2177 struct mii_data
*mii
= device_get_softc(sc
->sc_miibus
);
2179 lwkt_serialize_enter(ifp
->if_serializer
);
2182 callout_reset(&sc
->sc_tick_ch
, hz
, nfe_tick
, sc
);
2184 lwkt_serialize_exit(ifp
->if_serializer
);
2188 nfe_newbuf_std(struct nfe_softc
*sc
, struct nfe_rx_ring
*ring
, int idx
,
2191 struct nfe_rx_data
*data
= &ring
->data
[idx
];
2192 bus_dma_segment_t seg
;
2197 m
= m_getcl(wait
? MB_WAIT
: MB_DONTWAIT
, MT_DATA
, M_PKTHDR
);
2200 m
->m_len
= m
->m_pkthdr
.len
= MCLBYTES
;
2202 error
= bus_dmamap_load_mbuf_segment(ring
->data_tag
, ring
->data_tmpmap
,
2203 m
, &seg
, 1, &nsegs
, BUS_DMA_NOWAIT
);
2207 if_printf(&sc
->arpcom
.ac_if
,
2208 "could map RX mbuf %d\n", error
);
2213 if (data
->m
!= NULL
) {
2214 /* Sync and unload originally mapped mbuf */
2215 bus_dmamap_sync(ring
->data_tag
, data
->map
,
2216 BUS_DMASYNC_POSTREAD
);
2217 bus_dmamap_unload(ring
->data_tag
, data
->map
);
2220 /* Swap this DMA map with tmp DMA map */
2222 data
->map
= ring
->data_tmpmap
;
2223 ring
->data_tmpmap
= map
;
2225 /* Caller is assumed to have collected the old mbuf */
2228 nfe_set_paddr_rxdesc(sc
, ring
, idx
, seg
.ds_addr
);
2233 nfe_newbuf_jumbo(struct nfe_softc
*sc
, struct nfe_rx_ring
*ring
, int idx
,
2236 struct nfe_rx_data
*data
= &ring
->data
[idx
];
2237 struct nfe_jbuf
*jbuf
;
2240 MGETHDR(m
, wait
? MB_WAIT
: MB_DONTWAIT
, MT_DATA
);
2244 jbuf
= nfe_jalloc(sc
);
2247 if_printf(&sc
->arpcom
.ac_if
, "jumbo allocation failed "
2248 "-- packet dropped!\n");
2252 m
->m_ext
.ext_arg
= jbuf
;
2253 m
->m_ext
.ext_buf
= jbuf
->buf
;
2254 m
->m_ext
.ext_free
= nfe_jfree
;
2255 m
->m_ext
.ext_ref
= nfe_jref
;
2256 m
->m_ext
.ext_size
= NFE_JBYTES
;
2258 m
->m_data
= m
->m_ext
.ext_buf
;
2259 m
->m_flags
|= M_EXT
;
2260 m
->m_len
= m
->m_pkthdr
.len
= m
->m_ext
.ext_size
;
2262 /* Caller is assumed to have collected the old mbuf */
2265 nfe_set_paddr_rxdesc(sc
, ring
, idx
, jbuf
->physaddr
);
2270 nfe_set_paddr_rxdesc(struct nfe_softc
*sc
, struct nfe_rx_ring
*ring
, int idx
,
2271 bus_addr_t physaddr
)
2273 if (sc
->sc_caps
& NFE_40BIT_ADDR
) {
2274 struct nfe_desc64
*desc64
= &ring
->desc64
[idx
];
2276 desc64
->physaddr
[0] = htole32(NFE_ADDR_HI(physaddr
));
2277 desc64
->physaddr
[1] = htole32(NFE_ADDR_LO(physaddr
));
2279 struct nfe_desc32
*desc32
= &ring
->desc32
[idx
];
2281 desc32
->physaddr
= htole32(physaddr
);
2286 nfe_set_ready_rxdesc(struct nfe_softc
*sc
, struct nfe_rx_ring
*ring
, int idx
)
2288 if (sc
->sc_caps
& NFE_40BIT_ADDR
) {
2289 struct nfe_desc64
*desc64
= &ring
->desc64
[idx
];
2291 desc64
->length
= htole16(ring
->bufsz
);
2292 desc64
->flags
= htole16(NFE_RX_READY
);
2294 struct nfe_desc32
*desc32
= &ring
->desc32
[idx
];
2296 desc32
->length
= htole16(ring
->bufsz
);
2297 desc32
->flags
= htole16(NFE_RX_READY
);
2302 nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS
)
2304 struct nfe_softc
*sc
= arg1
;
2305 struct ifnet
*ifp
= &sc
->arpcom
.ac_if
;
2309 lwkt_serialize_enter(ifp
->if_serializer
);
2311 flags
= sc
->sc_flags
& ~NFE_F_DYN_IM
;
2313 if (sc
->sc_flags
& NFE_F_DYN_IM
)
2316 error
= sysctl_handle_int(oidp
, &v
, 0, req
);
2317 if (error
|| req
->newptr
== NULL
)
2321 flags
|= NFE_F_DYN_IM
;
2325 if (v
!= sc
->sc_imtime
|| (flags
^ sc
->sc_flags
)) {
2326 if (NFE_IMTIME(v
) == 0)
2329 sc
->sc_flags
= flags
;
2330 sc
->sc_irq_enable
= NFE_IRQ_ENABLE(sc
);
2332 if ((ifp
->if_flags
& (IFF_POLLING
| IFF_RUNNING
))
2334 nfe_enable_intrs(sc
);
2338 lwkt_serialize_exit(ifp
->if_serializer
);
2343 nfe_powerup(device_t dev
)
2345 struct nfe_softc
*sc
= device_get_softc(dev
);
2350 * Bring MAC and PHY out of low power state
2353 pwr_state
= NFE_READ(sc
, NFE_PWR_STATE2
) & ~NFE_PWRUP_MASK
;
2355 did
= pci_get_device(dev
);
2356 if ((did
== PCI_PRODUCT_NVIDIA_MCP51_LAN1
||
2357 did
== PCI_PRODUCT_NVIDIA_MCP51_LAN2
) &&
2358 pci_get_revid(dev
) >= 0xa3)
2359 pwr_state
|= NFE_PWRUP_REV_A3
;
2361 NFE_WRITE(sc
, NFE_PWR_STATE2
, pwr_state
);
2365 nfe_mac_reset(struct nfe_softc
*sc
)
2367 uint32_t rxtxctl
= sc
->rxtxctl_desc
| NFE_RXTX_BIT2
;
2368 uint32_t macaddr_hi
, macaddr_lo
, tx_poll
;
2370 NFE_WRITE(sc
, NFE_RXTX_CTL
, NFE_RXTX_RESET
| rxtxctl
);
2372 /* Save several registers for later restoration */
2373 macaddr_hi
= NFE_READ(sc
, NFE_MACADDR_HI
);
2374 macaddr_lo
= NFE_READ(sc
, NFE_MACADDR_LO
);
2375 tx_poll
= NFE_READ(sc
, NFE_TX_POLL
);
2377 NFE_WRITE(sc
, NFE_MAC_RESET
, NFE_RESET_ASSERT
);
2380 NFE_WRITE(sc
, NFE_MAC_RESET
, 0);
2383 /* Restore saved registers */
2384 NFE_WRITE(sc
, NFE_MACADDR_HI
, macaddr_hi
);
2385 NFE_WRITE(sc
, NFE_MACADDR_LO
, macaddr_lo
);
2386 NFE_WRITE(sc
, NFE_TX_POLL
, tx_poll
);
2388 NFE_WRITE(sc
, NFE_RXTX_CTL
, rxtxctl
);
2392 nfe_enable_intrs(struct nfe_softc
*sc
)
2395 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
2396 * It is unclear how wide the timer is. Base programming does
2397 * not seem to effect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE so
2398 * we don't get any interrupt moderation. TX moderation is
2399 * possible by using the timer interrupt instead of TX_DONE.
2401 * It is unclear whether there are other bits that can be
2402 * set to make the NFE device actually do interrupt moderation
2405 * For now set a 128uS interval as a placemark, but don't use
2408 if (sc
->sc_imtime
== 0)
2409 NFE_WRITE(sc
, NFE_IMTIMER
, NFE_IMTIME_DEFAULT
);
2411 NFE_WRITE(sc
, NFE_IMTIMER
, NFE_IMTIME(sc
->sc_imtime
));
2413 /* Enable interrupts */
2414 NFE_WRITE(sc
, NFE_IRQ_MASK
, sc
->sc_irq_enable
);
2416 if (sc
->sc_irq_enable
& NFE_IRQ_TIMER
)
2417 sc
->sc_flags
|= NFE_F_IRQ_TIMER
;
2419 sc
->sc_flags
&= ~NFE_F_IRQ_TIMER
;
2423 nfe_disable_intrs(struct nfe_softc
*sc
)
2425 /* Disable interrupts */
2426 NFE_WRITE(sc
, NFE_IRQ_MASK
, 0);
2427 sc
->sc_flags
&= ~NFE_F_IRQ_TIMER
;