/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 */
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/ifq_var.h>
#include <net/if_ringmap.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/jmphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/netif/jme/if_jmereg.h>
#include <dev/netif/jme/if_jmevar.h>

#include "miibus_if.h"
#define JME_TICK_CPUID		0	/* DO NOT CHANGE THIS */

#define JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef JME_RSS_DEBUG
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if ((sc)->jme_rss_debug >= (lvl)) \
		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !JME_RSS_DEBUG */
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* JME_RSS_DEBUG */
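/*
 * Usage sketch for the debug macro above (illustrative only; "ring" is
 * a hypothetical local variable):
 *
 *	JME_RSS_DPRINTF(sc, 10, "ring%d: pkt dispatched\n", ring);
 *
 * The message is printed only when the "rss_debug" sysctl is >= 10 and
 * the driver was built with JME_RSS_DEBUG.
 */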
static int	jme_probe(device_t);
static int	jme_attach(device_t);
static int	jme_detach(device_t);
static int	jme_shutdown(device_t);
static int	jme_suspend(device_t);
static int	jme_resume(device_t);

static int	jme_miibus_readreg(device_t, int, int);
static int	jme_miibus_writereg(device_t, int, int, int);
static void	jme_miibus_statchg(device_t);

static void	jme_init(void *);
static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	jme_start(struct ifnet *, struct ifaltq_subque *);
static void	jme_watchdog(struct ifnet *);
static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int	jme_mediachange(struct ifnet *);
#ifdef IFPOLL_ENABLE
static void	jme_npoll(struct ifnet *, struct ifpoll_info *);
static void	jme_npoll_status(struct ifnet *);
static void	jme_npoll_rx(struct ifnet *, void *, int);
static void	jme_npoll_tx(struct ifnet *, void *, int);
#endif
static void	jme_serialize(struct ifnet *, enum ifnet_serialize);
static void	jme_deserialize(struct ifnet *, enum ifnet_serialize);
static int	jme_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	jme_intr(void *);
static void	jme_msix_tx(void *);
static void	jme_msix_rx(void *);
static void	jme_msix_status(void *);
static void	jme_txeof(struct jme_txdata *);
static void	jme_rxeof(struct jme_rxdata *, int, int);
static void	jme_rx_intr(struct jme_softc *, uint32_t);
static void	jme_enable_intr(struct jme_softc *);
static void	jme_disable_intr(struct jme_softc *);
static void	jme_rx_restart(struct jme_softc *, uint32_t);

static int	jme_msix_setup(device_t);
static void	jme_msix_teardown(device_t, int);
static int	jme_intr_setup(device_t);
static void	jme_intr_teardown(device_t);
static void	jme_msix_try_alloc(device_t);
static void	jme_msix_free(device_t);
static int	jme_intr_alloc(device_t);
static void	jme_intr_free(device_t);
static int	jme_dma_alloc(struct jme_softc *);
static void	jme_dma_free(struct jme_softc *);
static int	jme_init_rx_ring(struct jme_rxdata *);
static void	jme_init_tx_ring(struct jme_txdata *);
static void	jme_init_ssb(struct jme_softc *);
static int	jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
static int	jme_encap(struct jme_txdata *, struct mbuf **, int *);
static void	jme_rxpkt(struct jme_rxdata *, int);
static int	jme_rxring_dma_alloc(struct jme_rxdata *);
static int	jme_rxbuf_dma_alloc(struct jme_rxdata *);

static void	jme_tick(void *);
static void	jme_stop(struct jme_softc *);
static void	jme_reset(struct jme_softc *);
static void	jme_set_msinum(struct jme_softc *);
static void	jme_set_vlan(struct jme_softc *);
static void	jme_set_filter(struct jme_softc *);
static void	jme_stop_tx(struct jme_softc *);
static void	jme_stop_rx(struct jme_softc *);
static void	jme_mac_config(struct jme_softc *);
static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
static void	jme_setwol(struct jme_softc *);
static void	jme_setlinkspeed(struct jme_softc *);
static void	jme_set_tx_coal(struct jme_softc *);
static void	jme_set_rx_coal(struct jme_softc *);
static void	jme_enable_rss(struct jme_softc *);
static void	jme_disable_rss(struct jme_softc *);
static void	jme_serialize_skipmain(struct jme_softc *);
static void	jme_deserialize_skipmain(struct jme_softc *);
static void	jme_phy_poweron(struct jme_softc *);
static void	jme_phy_poweroff(struct jme_softc *);
static int	jme_miiext_read(struct jme_softc *, int);
static void	jme_miiext_write(struct jme_softc *, int, int);
static void	jme_phy_init(struct jme_softc *);

static void	jme_sysctl_node(struct jme_softc *);
static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	uint32_t	jme_caps;
	const char	*jme_name;
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	    JME_CAP_JUMBO,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	    JME_CAP_FASTETH,
	    "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, 0, NULL }
};
static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	DEVMETHOD_END
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
static const struct {
	uint32_t	jme_coal;
	uint32_t	jme_comp;
	uint32_t	jme_empty;
} jme_rx_status[JME_NRXRING_MAX] = {
	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
	  INTR_RXQ0_DESC_EMPTY },
	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
	  INTR_RXQ1_DESC_EMPTY },
	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
	  INTR_RXQ2_DESC_EMPTY },
	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
	  INTR_RXQ3_DESC_EMPTY }
};
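/*
 * Note on the table above: each RX ring maps to its own interrupt
 * status bits -- jme_coal carries the coalescing/coalescing-timeout
 * sources, jme_comp the completion bit to acknowledge, and jme_empty
 * the ring-empty condition.  jme_intr() indexes this table by ring.
 */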
static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int	jme_rx_ring_count = 0;
static int	jme_msi_enable = 1;
static int	jme_msix_enable = 1;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
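/*
 * Example /boot/loader.conf settings for the tunables above (the
 * values shown are illustrative, not recommendations):
 *
 *	hw.jme.rx_ring_count="2"	# cap the number of RX rings
 *	hw.jme.msix.enable="0"		# fall back to MSI/INTx interrupts
 */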
static void
jme_setup_rxdesc(struct jme_rxdesc *rxd)
{
	struct jme_desc *desc;

	desc = rxd->rx_desc;
	desc->buflen = htole32(MCLBYTES);
	desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr));
	desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
}
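/*
 * Note on jme_setup_rxdesc() above: storing JME_RD_OWN hands the
 * descriptor to the NIC, so the buffer address and length fields must
 * be fully initialized (byte-swapped via htole32()) before the flags
 * word is written.
 */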
/*
 * Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout: "
		    "phy %d, reg %d\n", phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
/*
 * Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy write timeout: "
		    "phy %d, reg %d\n", phy, reg);
	}

	return (0);
}
/*
 * Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i, r;

	if (sc->jme_in_tick)
		jme_serialize_skipmain(sc);
	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		goto done;

	mii = device_get_softc(sc->jme_miibus);

	sc->jme_has_link = FALSE;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_has_link = TRUE;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_has_link = TRUE;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has a side-effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first address of the
	 * Tx/Rx descriptor rings.  So the driver should reset its
	 * internal producer/consumer pointers and reclaim any
	 * allocated resources.  Note, just saving the value of the
	 * JME_TXNDA and JME_RXNDA registers before stopping the MAC
	 * and restoring them afterwards is not sufficient to guarantee
	 * correct MAC state, because stopping MAC operation can take a
	 * while and hardware might have updated JME_TXNDA/JME_RXNDA
	 * during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		jme_rxeof(rdata, -1, -1);
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(rdata);

		/*
		 * Reuse configured Rx descriptors and reset
		 * producer/consumer index.
		 */
		rdata->jme_rx_cons = 0;
	}
	if (JME_ENABLE_HWRSS(sc))
		jme_enable_rss(sc);
	else
		jme_disable_rss(sc);

	jme_txeof(tdata);
	if (tdata->jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
			txd = &tdata->jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(tdata->jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				IFNET_STAT_INC(ifp, oerrors, 1);
			}
		}
	}
	jme_init_tx_ring(tdata);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_has_link) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = tdata->jme_tx_ring_paddr;
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
			CSR_WRITE_4(sc, JME_RXCSR,
			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

			/* Set Rx ring address to the hardware. */
			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
		}

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc,
	    JME_TICK_CPUID);

#ifdef IFPOLL_ENABLE
	if (!(ifp->if_flags & IFF_NPOLLING))
#endif
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

done:
	if (sc->jme_in_tick)
		jme_deserialize_skipmain(sc);
}
/*
 * Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
/*
 * Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	int error;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}
static int
jme_probe(device_t dev)
{
	const struct jme_dev *sp;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
			struct jme_softc *sc = device_get_softc(dev);

			sc->jme_caps = sp->jme_caps;
			device_set_desc(dev, sp->jme_name);
			return (0);
		}
	}
	return (ENXIO);
}
static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}
static void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;

	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
		device_printf(sc->jme_dev,
		    "generating fake ethernet address.\n");
		par0 = karc4random();
		/* Set OUI to JMicron. */
		eaddr[0] = 0x00;
		eaddr[1] = 0x1b;
		eaddr[2] = 0x8c;
		eaddr[3] = (par0 >> 16) & 0xff;
		eaddr[4] = (par0 >> 8) & 0xff;
		eaddr[5] = par0 & 0xff;
	} else {
		eaddr[0] = (par0 >> 0) & 0xFF;
		eaddr[1] = (par0 >> 8) & 0xFF;
		eaddr[2] = (par0 >> 16) & 0xFF;
		eaddr[3] = (par0 >> 24) & 0xFF;
		eaddr[4] = (par1 >> 0) & 0xFF;
		eaddr[5] = (par1 >> 8) & 0xFF;
	}
}
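/*
 * Worked example of the little-endian unpacking above, with
 * hypothetical register contents: PAR0 = 0x6a3b2c1d and PAR1 = 0xf5e4
 * yield the station address 1d:2c:3b:6a:e4:f5.
 */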
static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint16_t did;
	uint8_t pcie_ptr, rev;
	int error = 0, i, j, rx_desc_cnt, coal_max, ring_cnt;
	uint8_t eaddr[ETHER_ADDR_LEN];

	/*
	 * Initialize serializers
	 */
	lwkt_serialize_init(&sc->jme_serialize);
	lwkt_serialize_init(&sc->jme_cdata.jme_tx_data.jme_tx_serialize);
	for (i = 0; i < JME_NRXRING_MAX; ++i) {
		lwkt_serialize_init(
		    &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
	}

	/*
	 * Get # of RX ring descriptors
	 */
	rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
	    jme_rx_desc_count);
	rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
	if (rx_desc_cnt > JME_NDESC_MAX)
		rx_desc_cnt = JME_NDESC_MAX;

	/*
	 * Get # of TX ring descriptors
	 */
	sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt =
	    device_getenv_int(dev, "tx_desc_count", jme_tx_desc_count);
	sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt =
	    roundup(sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt,
	    JME_NDESC_ALIGN);
	if (sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt > JME_NDESC_MAX)
		sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt = JME_NDESC_MAX;

	/*
	 * Create TX/RX ring maps.
	 */
	ring_cnt = device_getenv_int(dev, "rx_ring_count", jme_rx_ring_count);
	/* Require power-of-2 ring count. */
	sc->jme_rx_rmap = if_ringmap_alloc2(dev, ring_cnt, JME_NRXRING_MAX);
	sc->jme_cdata.jme_rx_ring_cnt = if_ringmap_count(sc->jme_rx_rmap);

	/* Only one TX ring is supported. */
	sc->jme_tx_rmap = if_ringmap_alloc(dev, 1, 1);

	/*
	 * NOTE:
	 * There is _no_ need to align or match TX/RX ring maps,
	 * since TX/RX rings are completely independent in this
	 * driver.
	 */

	/*
	 * Initialize serializer array
	 */
	i = 0;

	KKASSERT(i < JME_NSERIALIZE);
	sc->jme_serialize_arr[i++] = &sc->jme_serialize;

	KKASSERT(i < JME_NSERIALIZE);
	sc->jme_serialize_arr[i++] =
	    &sc->jme_cdata.jme_tx_data.jme_tx_serialize;

	for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
		KKASSERT(i < JME_NSERIALIZE);
		sc->jme_serialize_arr[i++] =
		    &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
	}

	KKASSERT(i <= JME_NSERIALIZE);
	sc->jme_serialize_cnt = i;

	/*
	 * Setup TX ring specific data
	 */
	sc->jme_cdata.jme_tx_data.jme_sc = sc;

	/*
	 * Setup RX rings specific data
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		rdata->jme_sc = sc;
		rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
		rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
		rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
		rdata->jme_rx_idx = i;
		rdata->jme_rx_desc_cnt = rx_desc_cnt;
	}

	sc->jme_dev = dev;
	sc->jme_lowaddr = BUS_SPACE_MAXADDR;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init_mp(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access should use different
	 * BARs to access registers it's a waste of time to use I/O
	 * register space access.  JMC250 uses 16K to map the entire
	 * memory space.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return (ENXIO);
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

	/*
	 * Allocate IRQ
	 */
	error = jme_intr_alloc(dev);
	if (error)
		goto fail;

	/*
	 * Extract revisions
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision: 0x%04x\n",
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* NOTE: FM revision is put in the upper 4 bits */
	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
	if (bootverbose)
		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);

	did = pci_get_device(dev);
	switch (did) {
	case PCI_PRODUCT_JMICRON_JMC250:
		if (rev == JME_REV1_A2)
			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
		break;

	case PCI_PRODUCT_JMICRON_JMC260:
		if (rev == JME_REV2) {
			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
			sc->jme_phycom0 = 0x608a;
		} else if (rev == JME_REV2_2) {
			sc->jme_phycom0 = 0x408a;
		}
		break;

	default:
		panic("unknown device id 0x%04x", did);
	}
	if (rev >= JME_REV2) {
		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
		    GHC_TXMAC_CLKSRC_1000;
	}
	if (rev >= JME_REV5)
		sc->jme_caps |= JME_CAP_PHYPWR;
	if (rev >= JME_REV6 || rev == JME_REV5 || rev == JME_REV5_1 ||
	    rev == JME_REV5_3) {
		sc->jme_phycom0 = 0x008a;
		sc->jme_phycom1 = 0x4109;
	} else if (rev == JME_REV3_1 || rev == JME_REV3_2) {
		sc->jme_phycom0 = 0xe088;
	}

	if (rev >= JME_REV2) {
		reg = pci_read_config(dev, JME_PCI_SSCTRL, 4);
		if ((reg & SSCTRL_PHYMASK) == SSCTRL_PHYEA) {
			sc->jme_phycom0 = 0;
			sc->jme_phycom1 = 0;
		}
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Map MSI/MSI-X vectors */
	jme_set_msinum(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (bootverbose)) {
			device_printf(dev, "ethernet hardware address "
			    "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * Save PHY address.
	 * Integrated JR0211 has fixed PHY address whereas FPGA version
	 * requires PHY probing to get correct PHY address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/*
	 * Set default coalesce values
	 */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

	/*
	 * Adjust coalesce values, in case that the number of TX/RX
	 * descs are set to small values by users.
	 *
	 * NOTE: coal_max will not be zero, since the number of descs
	 * must be aligned by JME_NDESC_ALIGN (16 currently)
	 */
	coal_max = sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt / 2;
	if (coal_max < sc->jme_tx_coal_pkt)
		sc->jme_tx_coal_pkt = coal_max;

	coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 2;
	if (coal_max < sc->jme_rx_coal_pkt)
		sc->jme_rx_coal_pkt = coal_max;
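	/*
	 * Worked example of the clamping above: if tx_desc_count is
	 * tuned down to 64, coal_max = 64 / 2 = 32, so a default TX
	 * coalesce packet count larger than 32 is clamped to 32;
	 * otherwise a full coalesce window could never accumulate.
	 */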
	sc->jme_cdata.jme_tx_data.jme_tx_wreg = JME_TXWREG_NSEGS;

	/*
	 * Create sysctl tree
	 */
	jme_sysctl_node(sc);

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = jme_npoll;
#endif
	ifp->if_watchdog = jme_watchdog;
	ifp->if_serialize = jme_serialize;
	ifp->if_deserialize = jme_deserialize;
	ifp->if_tryserialize = jme_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = jme_serialize_assert;
#endif
	ifp->if_nmbclusters = sc->jme_cdata.jme_rx_ring_cnt *
	    sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt;
	ifq_set_maxlen(&ifp->if_snd,
	    sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt - JME_TXD_RSVD);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
	    IFCAP_TSO |
	    IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTAGGING;
	if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Disable TXCSUM by default to improve bulk data
	 * transmit performance (+20Mbps improvement).
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= JME_CSUM_FEATURES;
	ifp->if_hwassist |= CSUM_TSO;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
	    jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);

				/* XXX should we clear JME_WA_EXTFIFO */
				sc->jme_workaround |= JME_WA_HDX;
			}
		}
	}

	ether_ifattach(ifp, eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Setup the TX ring's CPUID */
	ifq_set_cpuid(&ifp->if_snd, sc->jme_tx_cpuid);
	ifq_set_hw_serialize(&ifp->if_snd,
	    &sc->jme_cdata.jme_tx_data.jme_tx_serialize);

	error = jme_intr_setup(dev);
	if (error) {
		ether_ifdetach(ifp);
		goto fail;
	}

	return (0);
fail:
	jme_detach(dev);
	return (error);
}
static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);
		jme_stop(sc);
		jme_intr_teardown(dev);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	jme_intr_free(dev);

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
		    sc->jme_mem_res);
	}

	jme_dma_free(sc);

	if (sc->jme_rx_rmap != NULL)
		if_ringmap_free(sc->jme_rx_rmap);
	if (sc->jme_tx_rmap != NULL)
		if_ringmap_free(sc->jme_tx_rmap);

	return (0);
}
static void
jme_sysctl_node(struct jme_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
#ifdef JME_RSS_DEBUG
	int r;
#endif

	ctx = device_get_sysctl_ctx(sc->jme_dev);
	tree = device_get_sysctl_tree(sc->jme_dev);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "rx_desc_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
	    0, "RX desc count");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "tx_desc_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt,
	    0, "TX desc count");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "rx_ring_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_rx_ring_cnt,
	    0, "RX ring count");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "tx_wreg", CTLFLAG_RW,
	    &sc->jme_cdata.jme_tx_data.jme_tx_wreg, 0,
	    "# of segments before writing to hardware register");

	if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    "tx_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
		    sc->jme_tx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
		    "TX ring CPU map");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    "rx_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
		    sc->jme_rx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
		    "RX ring CPU map");
	}
#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "tx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
	    sc->jme_tx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
	    "TX poll CPU map");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "rx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
	    sc->jme_rx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
	    "RX poll CPU map");
#endif

#ifdef JME_RSS_DEBUG
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
	    0, "RSS debug level");
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		char rx_ring_desc[32];

		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
		    "rx_ring%d_pkt", r);
		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    rx_ring_desc, CTLFLAG_RW,
		    &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");

		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
		    "rx_ring%d_emp", r);
		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    rx_ring_desc, CTLFLAG_RW,
		    &sc->jme_cdata.jme_rx_data[r].jme_rx_emp,
		    "# of times RX ring empty");
	}
#endif
}
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i, asize;

	asize = __VM_CACHELINE_ALIGN(
	    tdata->jme_tx_desc_cnt * sizeof(struct jme_txdesc));
	tdata->jme_txdesc = kmalloc(asize, M_DEVBUF,
	    M_WAITOK | M_ZERO | M_CACHEALIGN);

	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		asize = __VM_CACHELINE_ALIGN(
		    rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc));
		rdata->jme_rxdesc = kmalloc(asize, M_DEVBUF,
		    M_WAITOK | M_ZERO | M_CACHEALIGN);
	}

	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */
	asize = roundup2(JME_TX_RING_SIZE(tdata), JME_TX_RING_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
	    JME_TX_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return error;
	}
	tdata->jme_tx_ring_tag = dmem.dmem_tag;
	tdata->jme_tx_ring_map = dmem.dmem_map;
	tdata->jme_tx_ring = dmem.dmem_addr;
	tdata->jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */
	asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
	    JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return error;
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    JME_TSO_MAXSIZE,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &tdata->jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
		txd = &tdata->jme_txdesc[i];
		error = bus_dmamap_create(tdata->jme_tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			for (j = 0; j < i; ++j) {
				txd = &tdata->jme_txdesc[j];
				bus_dmamap_destroy(tdata->jme_tx_tag,
				    txd->tx_dmamap);
			}
			bus_dma_tag_destroy(tdata->jme_tx_tag);
			tdata->jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}
	return 0;
}
static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (tdata->jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(tdata->jme_tx_ring_tag,
		    tdata->jme_tx_ring_map);
		bus_dmamem_free(tdata->jme_tx_ring_tag,
		    tdata->jme_tx_ring, tdata->jme_tx_ring_map);
		bus_dma_tag_destroy(tdata->jme_tx_ring_tag);
		tdata->jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring,
			    rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (tdata->jme_tx_tag != NULL) {
		for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
			txd = &tdata->jme_txdesc[i];
			bus_dmamap_destroy(tdata->jme_tx_tag, txd->tx_dmamap);
		}
		bus_dma_tag_destroy(tdata->jme_tx_tag);
		tdata->jme_tx_tag = NULL;
	}

	/* Rx buffers */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
				    rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
			    rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	if (tdata->jme_txdesc != NULL) {
		kfree(tdata->jme_txdesc, M_DEVBUF);
		tdata->jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
}
/*
 * Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{
	return jme_suspend(dev);
}
/*
 * Unlike other ethernet controllers, the JMC250 requires explicitly
 * resetting the link speed to 10/100Mbps, as a gigabit link will
 * consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed as we have no
 * control after powering off.  If the renegotiation fails WOL may not
 * work.  Running at 1Gbps draws more power than 375mA at 3.3V, which
 * is the limit specified in the PCI specification, and that would
 * result in power to the ethernet controller being shut down
 * completely.
 *
 * TODO
 * Save the current negotiated media speed/duplex/flow-control to the
 * softc and restore the same link again after resuming.  PHY handling
 * such as powering down/resetting to 100Mbps may be better handled in
 * the suspend method of the phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			pause("jmelnk", hz);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->jme_dev, "establishing link failed, "
			    "WOL may not work!");
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}
static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);
	jme_stop(sc);
	jme_setwol(sc);
	ifnet_deserialize_all(ifp);

	return (0);
}
static int
jme_resume(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int pmc;

	ifnet_serialize_all(ifp);

	/*
	 * NOTE: the original test here read "!= 0", which would have
	 * poked the power-status register of a device without a PM
	 * capability; the capability lookup returns 0 on success.
	 */
	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
		uint16_t pmstat;

		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME and clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	ifnet_deserialize_all(ifp);

	return (0);
}
static __inline int
jme_tso_pullup(struct mbuf **mp)
{
	int hoff, iphlen, thoff;
	struct mbuf *m;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	return 0;
}
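/*
 * Worked example for the pullup threshold above: a TSO frame with a
 * standard Ethernet header (hoff = 14), an options-free IPv4 header
 * (iphlen = 20) and an options-free TCP header (thoff = 20) must keep
 * its first 14 + 20 + 20 = 54 bytes contiguous in the leading mbuf
 * before jme_encap() hands the chain to the DMA engine.
 */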
static int
jme_encap(struct jme_txdata *tdata, struct mbuf **m_head, int *segs_used)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64, mss;

	M_ASSERTPKTHDR((*m_head));

	if ((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) {
		/* XXX Is this necessary? */
		error = jme_tso_pullup(m_head);
		if (error)
			return error;
	}

	prod = tdata->jme_tx_prod;
	txd = &tdata->jme_txdesc[prod];

	if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	maxsegs = (tdata->jme_tx_desc_cnt - tdata->jme_tx_cnt) -
	    (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (JME_TXD_SPARE - symbol_desc),
	    ("not enough segments %d", maxsegs));

	error = bus_dmamap_load_mbuf_defrag(tdata->jme_tx_tag,
	    txd->tx_dmamap, m_head,
	    txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail;
	*segs_used += nsegs;

	bus_dmamap_sync(tdata->jme_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;
	mss = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		mss = (uint32_t)m->m_pkthdr.tso_segsz << JME_TD_MSS_SHIFT;
		cflags |= JME_TD_TSO;
	} else if (m->m_pkthdr.csum_flags & JME_CSUM_FEATURES) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			cflags |= JME_TD_IPCSUM;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			cflags |= JME_TD_TCPCSUM;
		if (m->m_pkthdr.csum_flags & CSUM_UDP)
			cflags |= JME_TD_UDPCSUM;
	}

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &tdata->jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = htole32(mss);
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(mss | txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	tdata->jme_tx_cnt++;
	KKASSERT(tdata->jme_tx_cnt - i <
	    tdata->jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, tdata->jme_tx_desc_cnt);

	txd->tx_ndesc = 1 - i;
	for (; i < nsegs; i++) {
		desc = &tdata->jme_tx_ring[prod];
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
		desc->flags = htole32(JME_TD_OWN | flag64);

		tdata->jme_tx_cnt++;
		KKASSERT(tdata->jme_tx_cnt <=
		    tdata->jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, tdata->jme_tx_desc_cnt);
	}

	/* Update producer index. */
	tdata->jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += nsegs;

	return 0;
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return error;
}
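/*
 * Descriptor accounting note for jme_encap() above: in the 64-bit
 * chain format the leading symbol descriptor carries no payload, so a
 * frame of nsegs DMA segments consumes nsegs + 1 ring entries
 * (txd->tx_ndesc ends up as 1 + nsegs); in the 32-bit format the head
 * descriptor already carries segment 0 and only nsegs entries are
 * consumed.
 */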
static void
jme_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct jme_softc *sc = ifp->if_softc;
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct mbuf *m_head;
	int enq = 0;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(&tdata->jme_tx_serialize);

	if (!sc->jme_has_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	if (tdata->jme_tx_cnt >= JME_TX_DESC_HIWAT(tdata))
		jme_txeof(tdata);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (tdata->jme_tx_cnt + JME_TXD_SPARE >
		    tdata->jme_tx_desc_cnt - JME_TXD_RSVD) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(tdata, &m_head, &enq)) {
			KKASSERT(m_head == NULL);
			IFNET_STAT_INC(ifp, oerrors, 1);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		if (enq >= tdata->jme_tx_wreg) {
			CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr |
			    TXCSR_TX_ENB | TXCSR_TXQ_N_START(TXCSR_TXQ0));
			enq = 0;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy
		 * load, so cache the TXCSR value and write the ORed
		 * value with the kick command to TXCSR.  This saves
		 * one register access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
	}
}
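/*
 * Design note for jme_start() above: descriptors are posted to the
 * chip in batches of tdata->jme_tx_wreg frames (the "tx_wreg" sysctl)
 * rather than per packet, amortizing the expensive TXCSR write across
 * several enqueued frames; any remainder is kicked once after the
 * loop.
 */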
static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (!sc->jme_has_link) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		IFNET_STAT_INC(ifp, oerrors, 1);
		jme_init(sc);
		return;
	}

	jme_txeof(tdata);
	if (tdata->jme_tx_cnt == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	IFNET_STAT_INC(ifp, oerrors, 1);
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when the
			 * interface MTU is changed, but availability of
			 * Tx checksum offload should be checked against
			 * the new MTU size, as the FIFO size is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &=
				    ~(IFCAP_TXCSUM | IFCAP_TSO);
				ifp->if_hwassist &=
				    ~(JME_CSUM_FEATURES | CSUM_TSO);
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= JME_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
		}
		if (mask & IFCAP_RXCSUM) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}

		if ((mask & IFCAP_TSO) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to work around CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}
static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);

	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		if (status & jme_rx_status[r].jme_coal) {
			status |= jme_rx_status[r].jme_coal |
			    jme_rx_status[r].jme_comp;
		}
	}

	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;

		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rx_intr(sc, status);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware of the availability of new
			 * Rx buffers.  Reading RXCSR takes a very long
			 * time under heavy load, so cache the RXCSR
			 * value and write the ORed value with the kick
			 * command to RXCSR.  This saves one register
			 * access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			lwkt_serialize_enter(&tdata->jme_tx_serialize);
			jme_txeof(tdata);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			lwkt_serialize_exit(&tdata->jme_tx_serialize);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
static void
jme_txeof(struct jme_txdata *tdata)
{
	struct ifnet *ifp = &tdata->jme_sc->arpcom.ac_if;
	int cons;

	cons = tdata->jme_tx_cons;
	if (cons == tdata->jme_tx_prod)
		return;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != tdata->jme_tx_prod) {
		struct jme_txdesc *txd, *next_txd;
		uint32_t status, next_status;
		int next_cons, nsegs;

		txd = &tdata->jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
		    ("%s: freeing NULL mbuf!", __func__));

		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		/*
		 * NOTE:
		 * This chip will always update the TX descriptor's
		 * buflen field and this updating always happens
		 * after clearing the OWN bit, so even if the OWN
		 * bit is cleared by the chip, we still can't be sure
		 * whether the buflen field has been updated
		 * by the chip or not.  To avoid this race, we wait
		 * for the next TX descriptor's OWN bit to be cleared
		 * by the chip before reusing this TX descriptor.
		 */
		next_cons = cons;
		JME_DESC_ADD(next_cons, txd->tx_ndesc, tdata->jme_tx_desc_cnt);
		next_txd = &tdata->jme_txdesc[next_cons];
		if (next_txd->tx_m == NULL)
			break;
		next_status = le32toh(next_txd->tx_desc->flags);
		if ((next_status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
		} else {
			IFNET_STAT_INC(ifp, opackets, 1);
			if (status & JME_TD_COLLISION) {
				IFNET_STAT_INC(ifp, collisions,
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK);
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip
		 * the entire chain of buffers for the transmitted
		 * frame.  In other words, the JME_TD_OWN bit is valid
		 * only in the first descriptor of a multi-descriptor
		 * transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			tdata->jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, tdata->jme_tx_desc_cnt);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;

		tdata->jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(tdata->jme_tx_cnt >= 0,
		    ("%s: Active Tx desc counter was garbled", __func__));
		txd->tx_ndesc = 0;
	}
	tdata->jme_tx_cons = cons;

	/* 1 for symbol TX descriptor */
	if (tdata->jme_tx_cnt <= JME_MAXTXSEGS + 1)
		ifp->if_timer = 0;

	if (tdata->jme_tx_cnt + JME_TXD_SPARE <=
	    tdata->jme_tx_desc_cnt - JME_TXD_RSVD)
		ifq_clr_oactive(&ifp->if_snd);
}
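/*
 * Hand `count' RX descriptors starting at `cons' back to the
 * hardware without replacing their mbufs, i.e. reuse the current
 * buffers.
 */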
static __inline void
jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		jme_setup_rxdesc(&rdata->jme_rxdesc[cons]);
		JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
	}
}

static __inline struct pktinfo *
jme_pktinfo(struct pktinfo *pi, uint32_t flags)
{
	if (flags & JME_RD_IPV4)
		pi->pi_netisr = NETISR_IP;
	else if (flags & JME_RD_IPV6)
		pi->pi_netisr = NETISR_IPV6;
	else
		return NULL;

	pi->pi_flags = 0;
	pi->pi_l3proto = IPPROTO_UNKNOWN;

	if (flags & JME_RD_MORE_FRAG)
		pi->pi_flags |= PKTINFO_FLAG_FRAG;
	else if (flags & JME_RD_TCP)
		pi->pi_l3proto = IPPROTO_TCP;
	else if (flags & JME_RD_UDP)
		pi->pi_l3proto = IPPROTO_UDP;
	else
		pi = NULL;
	return pi;
}
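/*
 * Collect the segments of one received frame into an mbuf chain,
 * fill in checksum/VLAN/RSS metadata and pass the frame up the
 * stack.
 */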
/* Receive a frame. */
static void
jme_rxpkt(struct jme_rxdata *rdata, int cpuid)
{
	struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status, hash, hashinfo;
	int cons, count, nsegs;

	cons = rdata->jme_rx_cons;
	desc = &rdata->jme_rx_ring[cons];

	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	hash = le32toh(desc->addr_hi);
	hashinfo = le32toh(desc->addr_lo);
	nsegs = JME_RX_NSEGS(status);

	if (nsegs > 1) {
		/* Skip the first descriptor. */
		JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);

		/*
		 * Clear the OWN bit of the following RX descriptors;
		 * hardware will not clear the OWN bit except the first
		 * RX descriptor.
		 *
		 * Since the first RX descriptor is setup, i.e. OWN bit
		 * on, before its following RX descriptors, leaving the
		 * OWN bit on the following RX descriptors will trick
		 * the hardware into thinking that the following RX
		 * descriptors are ready to be used too.
		 */
		for (count = 1; count < nsegs; count++,
		     JME_DESC_INC(cons, rdata->jme_rx_desc_cnt))
			rdata->jme_rx_ring[cons].flags = 0;

		cons = rdata->jme_rx_cons;
	}

	JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, "
			"hash 0x%08x, hash info 0x%08x\n",
			rdata->jme_rx_idx, flags, hash, hashinfo);

	if (status & JME_RX_ERR_STAT) {
		IFNET_STAT_INC(ifp, ierrors, 1);
		jme_discard_rxbufs(rdata, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		if_printf(ifp, "%s : receive error = 0x%pb%i\n",
		    __func__, JME_RX_ERR_BITS, JME_RX_ERR(status));
#endif
		rdata->jme_rx_cons += nsegs;
		rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
		return;
	}

	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) {
		rxd = &rdata->jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(rdata, rxd, 0) != 0) {
			IFNET_STAT_INC(ifp, iqdrops, 1);
			/* Reuse buffer. */
			jme_discard_rxbufs(rdata, cons, nsegs - count);
			if (rdata->jme_rxhead != NULL) {
				m_freem(rdata->jme_rxhead);
				JME_RXCHAIN_RESET(rdata);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (rdata->jme_rxhead == NULL) {
			rdata->jme_rxhead = mp;
			rdata->jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			rdata->jme_rxtail->m_next = mp;
			rdata->jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			struct pktinfo pi0, *pi;

			/* Last desc. for this frame. */
			m = rdata->jme_rxhead;
			m->m_pkthdr.len = rdata->jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = rdata->jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				     (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = rdata->jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for 10bytes auto padding which is used
			 * to align IP header on 32bit boundary.  Also note,
			 * CRC bytes are automatically removed by the
			 * hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
			    (flags & JME_RD_IPV4)) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if (flags & JME_RD_IPCSUM)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Check for VLAN tagged packets. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
			    (flags & JME_RD_VLAN_TAG)) {
				m->m_pkthdr.ether_vlantag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			IFNET_STAT_INC(ifp, ipackets, 1);

			if (ifp->if_capenable & IFCAP_RSS)
				pi = jme_pktinfo(&pi0, flags);
			else
				pi = NULL;

			if (pi != NULL &&
			    (hashinfo & JME_RD_HASH_FN_MASK) ==
			    JME_RD_HASH_FN_TOEPLITZ) {
				m_sethash(m, toeplitz_hash(hash));
				m->m_flags |= M_CKHASH;
			}

#ifdef JME_RSS_DEBUG
			if (pi != NULL) {
				JME_RSS_DPRINTF(rdata->jme_sc, 10,
				    "isr %d flags %08x, l3 %d %s\n",
				    pi->pi_netisr, pi->pi_flags,
				    pi->pi_l3proto,
				    (m->m_flags & M_HASH) ? "hash" : "");
			}
#endif

			/* Pass it on. */
			ifp->if_input(ifp, m, pi, cpuid);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(rdata);
#ifdef JME_RSS_DEBUG
			rdata->jme_rx_pkt++;
#endif
		}
	}

	rdata->jme_rx_cons += nsegs;
	rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
}
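/*
 * RX completion loop: process received frames until the hardware
 * still owns the next descriptor, or, when polling, until `count'
 * frames have been processed (count < 0 means no limit).
 */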
static void
jme_rxeof(struct jme_rxdata *rdata, int count, int cpuid)
{
	struct jme_desc *desc;
	int nsegs, pktlen;

	for (;;) {
#ifdef IFPOLL_ENABLE
		if (count >= 0 && count-- == 0)
			break;
#endif
		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
			break;
		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
			break;

		/*
		 * Check number of segments against received bytes.
		 * Non-matching value would indicate that hardware
		 * is still trying to update Rx descriptors.  I'm not
		 * sure whether this check is needed.
		 */
		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
		if (nsegs != howmany(pktlen, MCLBYTES)) {
			if_printf(&rdata->jme_sc->arpcom.ac_if,
			    "RX fragment count(%d) and "
			    "packet size(%d) mismatch\n", nsegs, pktlen);
			break;
		}

		/*
		 * NOTE:
		 * RSS hash and hash information may _not_ be set by the
		 * hardware even if the OWN bit is cleared and VALID bit
		 * is set.
		 *
		 * If the RSS information is not delivered by the hardware
		 * yet, we MUST NOT accept this packet, let alone reusing
		 * its RX descriptor.  If this packet was accepted and its
		 * RX descriptor was reused before hardware delivering the
		 * RSS information, the RX buffer's address would be trashed
		 * by the RSS information delivered by the hardware.
		 */
		if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
			struct jme_rxdesc *rxd;
			uint32_t hashinfo;

			hashinfo = le32toh(desc->addr_lo);
			rxd = &rdata->jme_rxdesc[rdata->jme_rx_cons];

			/*
			 * This test should be enough to detect the pending
			 * RSS information delivery, given:
			 * - If RSS hash is not calculated, the hashinfo
			 *   will be 0.  However, the lower 32bits of RX
			 *   buffers' physical address will never be 0.
			 *   (see jme_rxbuf_dma_filter)
			 * - If RSS hash is calculated, the lowest 4 bits
			 *   of hashinfo will be set, while the RX buffers
			 *   are at least 2K aligned.
			 */
			if (hashinfo == JME_ADDR_LO(rxd->rx_paddr)) {
#ifdef JME_SHOW_RSSWB
				if_printf(&rdata->jme_sc->arpcom.ac_if,
				    "RSS is not written back yet\n");
#endif
				break;
			}
		}

		/* Received a frame. */
		jme_rxpkt(rdata, cpuid);
	}
}
static void
jme_tick(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	lwkt_serialize_enter(&sc->jme_serialize);

	KKASSERT(mycpuid == JME_TICK_CPUID);

	sc->jme_in_tick = TRUE;
	mii_tick(mii);
	sc->jme_in_tick = FALSE;

	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

	lwkt_serialize_exit(&sc->jme_serialize);
}
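/*
 * Chip reset.  The clock source juggling around the reset bit
 * follows the sequence the hardware appears to require; see the
 * step comments below.
 */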
static void
jme_reset(struct jme_softc *sc)
{
	uint32_t val;

	/* Make sure that TX and RX are stopped */
	jme_stop_tx(sc);
	jme_stop_rx(sc);

	/* Start reset */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(20);

	/*
	 * Hold reset bit before stop reset
	 */

	/* Disable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	/* Disable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Stop reset */
	CSR_WRITE_4(sc, JME_GHC, 0);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/*
	 * Clear reset bit after stop reset
	 */

	/* Enable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
	/* Enable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Disable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, 0);
	/* Disable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Enable TX and RX */
	val = CSR_READ_4(sc, JME_TXCSR);
	CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
	val = CSR_READ_4(sc, JME_RXCSR);
	CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
	/* Flush */
	CSR_READ_4(sc, JME_TXCSR);
	CSR_READ_4(sc, JME_RXCSR);

	/* Enable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
	/* Enable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Stop TX and RX */
	jme_stop_tx(sc);
	jme_stop_rx(sc);
}
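/*
 * Bring the interface up: stop/reset the chip, program the
 * descriptor rings, station address, TX/RX queue parameters,
 * filters and coalescing, then kick off the MII tick.  The DMA
 * engines themselves are only enabled later, in
 * jme_miibus_statchg(), once a valid link is detected.
 */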
static void
jme_init(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg;
	int error, r;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * Cancel any pending I/O.
	 */
	jme_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	jme_reset(sc);

	/*
	 * Setup MSI/MSI-X vectors to interrupts mapping
	 */
	jme_set_msinum(sc);

	if (JME_ENABLE_HWRSS(sc))
		jme_enable_rss(sc);
	else
		jme_disable_rss(sc);

	/* Init RX descriptors */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]);
		if (error) {
			if_printf(ifp, "initialization failed: "
				  "no memory for %dth RX ring.\n", r);
			jme_stop(sc);
			return;
		}
	}

	/* Init TX descriptors */
	jme_init_tx_ring(&sc->jme_cdata.jme_tx_data);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Reprogram the station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, JME_PAR0,
	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);

	/*
	 * Configure Tx queue.
	 *  Tx priority queue weight value : 0
	 *  Tx FIFO threshold for processing next packet : 16QW
	 *  Maximum Tx DMA length : 512
	 *  Allow Tx DMA burst.
	 */
	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
	sc->jme_txcsr |= sc->jme_tx_dma_size;
	sc->jme_txcsr |= TXCSR_DMA_BURST;
	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt);

	/* Set Tx ring address to the hardware. */
	paddr = sc->jme_cdata.jme_tx_data.jme_tx_ring_paddr;
	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

	/* Configure TxMAC parameters. */
	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
	reg |= TXMAC_THRESH_1_PKT;
	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
	CSR_WRITE_4(sc, JME_TXMAC, reg);

	/*
	 * Configure Rx queue.
	 *  FIFO full threshold for transmitting Tx pause packet : 128T
	 *  FIFO threshold for processing next packet : 128QW
	 *  Rx queue 0 select
	 *  Max Rx DMA length : 128
	 *  Rx descriptor retry : 32
	 *  Rx descriptor retry time gap : 256ns
	 *  Don't receive runt/bad frame.
	 */
	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
#if 0
	/*
	 * Since Rx FIFO size is 4K bytes, receiving frames larger
	 * than 4K bytes will suffer from Rx FIFO overruns.  So
	 * decrease FIFO threshold to reduce the FIFO overruns for
	 * frames larger than 4000 bytes.
	 * For best performance of standard MTU sized frames use
	 * maximum allowable FIFO threshold, 128QW.
	 */
	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
	    JME_RX_FIFO_SIZE)
		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
	else
		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
#else
	/* Improve PCI Express compatibility */
	sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
#endif
	sc->jme_rxcsr |= sc->jme_rx_dma_size;
	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
	/* XXX TODO DROP_BAD */

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

		/* Set Rx descriptor counter. */
		CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt);

		/* Set Rx ring address to the hardware. */
		paddr = rdata->jme_rx_ring_paddr;
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
	}

	/* Clear receive filter. */
	CSR_WRITE_4(sc, JME_RXMAC, 0);

	/* Set up the receive filter. */
	jme_set_filter(sc);
	jme_set_vlan(sc);

	/*
	 * Disable all WOL bits as WOL can interfere normal Rx
	 * operation.  Also clear WOL detection status bits.
	 */
	reg = CSR_READ_4(sc, JME_PMCS);
	reg &= ~PMCS_WOL_ENB_MASK;
	CSR_WRITE_4(sc, JME_PMCS, reg);

	/*
	 * Pad 10bytes right before received frame.  This will greatly
	 * help Rx performance on strict-alignment architectures as
	 * it does not need to copy the frame to align the payload.
	 */
	reg = CSR_READ_4(sc, JME_RXMAC);
	reg |= RXMAC_PAD_10BYTES;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		reg |= RXMAC_CSUM_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);

	/* Configure general purpose reg0 */
	reg = CSR_READ_4(sc, JME_GPREG0);
	reg &= ~GPREG0_PCC_UNIT_MASK;
	/* Set PCC timer resolution to micro-seconds unit. */
	reg |= GPREG0_PCC_UNIT_US;
	/*
	 * Disable all shadow register posting as we have to read
	 * JME_INTR_STATUS register in jme_intr.  Also it seems
	 * that it's hard to synchronize interrupt status between
	 * hardware and software with shadow posting due to
	 * requirements of bus_dmamap_sync(9).
	 */
	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
	/* Disable posting of DW0. */
	reg &= ~GPREG0_POST_DW0_ENB;
	/* Clear PME message. */
	reg &= ~GPREG0_PME_ENB;
	/* Set PHY address. */
	reg &= ~GPREG0_PHY_ADDR_MASK;
	reg |= sc->jme_phyaddr;
	CSR_WRITE_4(sc, JME_GPREG0, reg);

	/* Configure Tx queue 0 packet completion coalescing. */
	jme_set_tx_coal(sc);

	/* Configure Rx queues packet completion coalescing. */
	jme_set_rx_coal(sc);

	/* Configure shadow status block but don't enable posting. */
	paddr = sc->jme_cdata.jme_ssb_block_paddr;
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));

	/* Disable Timer 1 and Timer 2. */
	CSR_WRITE_4(sc, JME_TIMER1, 0);
	CSR_WRITE_4(sc, JME_TIMER2, 0);

	/* Configure retry transmit period, retry limit value. */
	CSR_WRITE_4(sc, JME_TXTRHD,
	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
	     TXTRHD_RT_PERIOD_MASK) |
	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
	     TXTRHD_RT_LIMIT_MASK));

#ifdef IFPOLL_ENABLE
	if (!(ifp->if_flags & IFF_NPOLLING))
#endif
	{
		/* Initialize the interrupt mask. */
		jme_enable_intr(sc);
		CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
	}

	/*
	 * Enabling Tx/Rx DMA engines and Rx queue processing is
	 * done after detection of valid link in jme_miibus_statchg.
	 */
	sc->jme_has_link = FALSE;

	/* Set the current media. */
	mii = device_get_softc(sc->jme_miibus);
	mii_mediachg(mii);

	callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc,
	    JME_TICK_CPUID);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}
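/*
 * Stop the interface: disable interrupts and DMA, then release
 * any mbufs still held by the RX/TX rings.
 */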
static void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	callout_stop(&sc->jme_tick_ch);
	sc->jme_has_link = FALSE;

	/*
	 * Disable interrupts.
	 */
	jme_disable_intr(sc);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/*
	 * Free partial finished RX segments
	 */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(rdata);
	}

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
			rxd = &rdata->jme_rxdesc[i];
			if (rxd->rx_m != NULL) {
				bus_dmamap_unload(rdata->jme_rx_tag,
				    rxd->rx_dmamap);
				m_freem(rxd->rx_m);
				rxd->rx_m = NULL;
			}
		}
	}
	for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
		txd = &tdata->jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}
}
static void
jme_stop_tx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_TXCSR);
	if ((reg & TXCSR_TX_ENB) == 0)
		return;
	reg &= ~TXCSR_TX_ENB;
	CSR_WRITE_4(sc, JME_TXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
}

static void
jme_stop_rx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_RXCSR);
	if ((reg & RXCSR_RX_ENB) == 0)
		return;
	reg &= ~RXCSR_RX_ENB;
	CSR_WRITE_4(sc, JME_RXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
}
static void
jme_init_tx_ring(struct jme_txdata *tdata)
{
	struct jme_txdesc *txd;
	int i;

	tdata->jme_tx_prod = 0;
	tdata->jme_tx_cons = 0;
	tdata->jme_tx_cnt = 0;

	bzero(tdata->jme_tx_ring, JME_TX_RING_SIZE(tdata));
	for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
		txd = &tdata->jme_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_desc = &tdata->jme_tx_ring[i];
		txd->tx_ndesc = 0;
	}
}

static void
jme_init_ssb(struct jme_softc *sc)
{
	struct jme_chain_data *cd;

	cd = &sc->jme_cdata;
	bzero(cd->jme_ssb_block, JME_SSB_SIZE);
}
static int
jme_init_rx_ring(struct jme_rxdata *rdata)
{
	struct jme_rxdesc *rxd;
	int i;

	KKASSERT(rdata->jme_rxhead == NULL &&
		 rdata->jme_rxtail == NULL &&
		 rdata->jme_rxlen == 0);
	rdata->jme_rx_cons = 0;

	bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata));
	for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
		int error;

		rxd = &rdata->jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rdata->jme_rx_ring[i];
		error = jme_newbuf(rdata, rxd, 1);
		if (error)
			return error;
	}
	return 0;
}
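/*
 * Attach a fresh mbuf cluster to an RX descriptor.  The spare
 * dmamap is loaded first, so the old buffer is only torn down
 * after the new one is known to map successfully.
 */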
static int
jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init)
{
	struct mbuf *m;
	bus_dma_segment_t segs;
	bus_dmamap_t map;
	int error, nsegs;

	m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	/*
	 * JMC250 has 64bit boundary alignment limitation so jme(4)
	 * takes advantage of 10 bytes padding feature of hardware
	 * in order not to copy entire frame to align IP header on
	 * 32bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
	    rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init) {
			if_printf(&rdata->jme_sc->arpcom.ac_if,
			    "can't load RX mbuf\n");
		}
		return error;
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = rdata->jme_rx_sparemap;
	rdata->jme_rx_sparemap = map;
	rxd->rx_m = m;
	rxd->rx_paddr = segs.ds_addr;

	jme_setup_rxdesc(rxd);
	return 0;
}
static void
jme_set_vlan(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	reg = CSR_READ_4(sc, JME_RXMAC);
	reg &= ~RXMAC_VLAN_ENB;
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		reg |= RXMAC_VLAN_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);
}
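/*
 * Program the RX unicast/broadcast/multicast filters.  Multicast
 * filtering uses a 64-bit hash table indexed by 6 bits of each
 * address's CRC32.
 */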
static void
jme_set_filter(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	rxcfg = CSR_READ_4(sc, JME_RXMAC);
	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
	    RXMAC_ALLMULTI);

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;

	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= RXMAC_PROMISC;
		if (ifp->if_flags & IFF_ALLMULTI)
			rxcfg |= RXMAC_ALLMULTI;
		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
		return;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table.  The
	 * high order bits select the register, while the rest of the bits
	 * select the bit within the register.
	 */
	rxcfg |= RXMAC_MULTICAST;
	bzero(mchash, sizeof(mchash));

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);

		/* Just want the 6 least significant bits. */
		crc &= 0x3f;

		/* Set the corresponding bit in the hash table. */
		mchash[crc >> 5] |= 1 << (crc & 0x1f);
	}

	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
}
static int
jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	ifnet_serialize_all(ifp);

	v = sc->jme_tx_coal_to;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
		error = EINVAL;
		goto back;
	}

	if (v != sc->jme_tx_coal_to) {
		sc->jme_tx_coal_to = v;
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_tx_coal(sc);
	}
back:
	ifnet_deserialize_all(ifp);
	return error;
}

static int
jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	ifnet_serialize_all(ifp);

	v = sc->jme_tx_coal_pkt;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
		error = EINVAL;
		goto back;
	}

	if (v != sc->jme_tx_coal_pkt) {
		sc->jme_tx_coal_pkt = v;
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_tx_coal(sc);
	}
back:
	ifnet_deserialize_all(ifp);
	return error;
}

static int
jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	ifnet_serialize_all(ifp);

	v = sc->jme_rx_coal_to;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
		error = EINVAL;
		goto back;
	}

	if (v != sc->jme_rx_coal_to) {
		sc->jme_rx_coal_to = v;
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_rx_coal(sc);
	}
back:
	ifnet_deserialize_all(ifp);
	return error;
}

static int
jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	ifnet_serialize_all(ifp);

	v = sc->jme_rx_coal_pkt;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
		error = EINVAL;
		goto back;
	}

	if (v != sc->jme_rx_coal_pkt) {
		sc->jme_rx_coal_pkt = v;
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_rx_coal(sc);
	}
back:
	ifnet_deserialize_all(ifp);
	return error;
}
static void
jme_set_tx_coal(struct jme_softc *sc)
{
	uint32_t reg;

	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
	    PCCTX_COAL_TO_MASK;
	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
	    PCCTX_COAL_PKT_MASK;
	reg |= PCCTX_COAL_TXQ0;
	CSR_WRITE_4(sc, JME_PCCTX, reg);
}

static void
jme_set_rx_coal(struct jme_softc *sc)
{
	uint32_t reg;
	int r;

	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
	    PCCRX_COAL_TO_MASK;
	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
	    PCCRX_COAL_PKT_MASK;
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r)
		CSR_WRITE_4(sc, JME_PCCRX(r), reg);
}
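/*
 * ifpoll(4) support: status/RX/TX polling callbacks, registered
 * per-CPU according to the TX/RX ring maps.
 */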
#ifdef IFPOLL_ENABLE

static void
jme_npoll_status(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	uint32_t status;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if (status & INTR_RXQ_DESC_EMPTY) {
		CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY);
		jme_rx_restart(sc, status);
	}
}

static void
jme_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
{
	struct jme_rxdata *rdata = arg;

	ASSERT_SERIALIZED(&rdata->jme_rx_serialize);

	jme_rxeof(rdata, cycle, mycpuid);
}

static void
jme_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
{
	struct jme_txdata *tdata = arg;

	ASSERT_SERIALIZED(&tdata->jme_tx_serialize);

	jme_txeof(tdata);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static void
jme_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info) {
		int i, cpu;

		info->ifpi_status.status_func = jme_npoll_status;
		info->ifpi_status.serializer = &sc->jme_serialize;

		cpu = if_ringmap_cpumap(sc->jme_tx_rmap, 0);
		KKASSERT(cpu <= netisr_ncpus);
		info->ifpi_tx[cpu].poll_func = jme_npoll_tx;
		info->ifpi_tx[cpu].arg = &sc->jme_cdata.jme_tx_data;
		info->ifpi_tx[cpu].serializer =
		    &sc->jme_cdata.jme_tx_data.jme_tx_serialize;
		ifq_set_cpuid(&ifp->if_snd, cpu);

		for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
			struct jme_rxdata *rdata =
			    &sc->jme_cdata.jme_rx_data[i];

			cpu = if_ringmap_cpumap(sc->jme_rx_rmap, i);
			KKASSERT(cpu <= netisr_ncpus);
			info->ifpi_rx[cpu].poll_func = jme_npoll_rx;
			info->ifpi_rx[cpu].arg = rdata;
			info->ifpi_rx[cpu].serializer =
			    &rdata->jme_rx_serialize;
		}

		if (ifp->if_flags & IFF_RUNNING)
			jme_disable_intr(sc);
	} else {
		ifq_set_cpuid(&ifp->if_snd, sc->jme_tx_cpuid);
		if (ifp->if_flags & IFF_RUNNING)
			jme_enable_intr(sc);
	}
}

#endif	/* IFPOLL_ENABLE */
static int
jme_rxring_dma_alloc(struct jme_rxdata *rdata)
{
	bus_dmamem_t dmem;
	int error, asize;

	asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN);
	error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag,
			JME_RX_RING_ALIGN, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx);
		return error;
	}
	rdata->jme_rx_ring_tag = dmem.dmem_tag;
	rdata->jme_rx_ring_map = dmem.dmem_map;
	rdata->jme_rx_ring = dmem.dmem_addr;
	rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;

	return 0;
}

static int
jme_rxbuf_dma_alloc(struct jme_rxdata *rdata)
{
	bus_addr_t lowaddr;
	int i, error;

	lowaddr = BUS_SPACE_MAXADDR;
	if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
	    &rdata->jme_rx_tag);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx);
		return error;
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
				  &rdata->jme_rx_sparemap);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not create %dth spare Rx dmamap.\n",
		    rdata->jme_rx_idx);
		bus_dma_tag_destroy(rdata->jme_rx_tag);
		rdata->jme_rx_tag = NULL;
		return error;
	}
	for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];

		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
					  &rxd->rx_dmamap);
		if (error) {
			int j;

			device_printf(rdata->jme_sc->jme_dev,
			    "could not create %dth Rx dmamap "
			    "for %dth RX ring.\n", i, rdata->jme_rx_idx);

			for (j = 0; j < i; ++j) {
				rxd = &rdata->jme_rxdesc[j];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
			return error;
		}
	}
	return 0;
}

static void
jme_rx_intr(struct jme_softc *sc, uint32_t status)
{
	int r, cpuid = mycpuid;

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		if (status & rdata->jme_rx_coal) {
			lwkt_serialize_enter(&rdata->jme_rx_serialize);
			jme_rxeof(rdata, -1, cpuid);
			lwkt_serialize_exit(&rdata->jme_rx_serialize);
		}
	}
}
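/*
 * Enable receive-side scaling: program the Toeplitz key (in
 * reverse register order), fill the redirect table from the RX
 * ring map one queue byte per entry, then turn on IPv4/TCP
 * hashing.
 */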
static void
jme_enable_rss(struct jme_softc *sc)
{
	uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
	uint32_t rssc;
	int j, i, r;

	KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 ||
		sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4,
		("%s: invalid # of RX rings (%d)",
		 sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt));
	jme_disable_rss(sc);

	toeplitz_get_key(key, sizeof(key));
	for (i = 0; i < RSSKEY_NREGS; ++i) {
		uint32_t keyreg;

		keyreg = RSSKEY_REGVAL(key, i);
		JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x, reg 0x%08x\n",
		    i, keyreg, RSSKEY_REG(RSSKEY_NREGS - 1 - i));

		CSR_WRITE_4(sc, RSSKEY_REG(RSSKEY_NREGS - 1 - i), keyreg);
	}

	/*
	 * Fill redirect table.
	 */
	if_ringmap_rdrtable(sc->jme_rx_rmap, sc->jme_rdrtable,
	    JME_RDRTABLE_SIZE);

	r = 0;
	for (j = 0; j < RSSTBL_NREGS; ++j) {
		uint32_t ind = 0;

		for (i = 0; i < RSSTBL_REGSIZE; ++i) {
			uint32_t q;

			q = sc->jme_rdrtable[r];
			ind |= q << (i * 8);
			++r;
		}
		JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
		CSR_WRITE_4(sc, RSSTBL_REG(j), ind);
	}

	/*
	 * Enable RSS.
	 */
	rssc = RSSC_HASH_128_ENTRY;
	rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
	rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1;
	JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
	CSR_WRITE_4(sc, JME_RSSC, rssc);
}
static void
jme_disable_rss(struct jme_softc *sc)
{
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
}

static void
jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct jme_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, slz);
}

static void
jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct jme_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, slz);
}

static int
jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct jme_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, slz);
}

#ifdef INVARIANTS

static void
jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct jme_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, slz, serialized);
}

#endif	/* INVARIANTS */
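/*
 * MSI-X layout: vector 0 is left unused; one vector is used for
 * status (RX empty), one for TX completion and one per RX ring.
 * On failure this returns without setting jme_irq_type, so the
 * caller can fall back to MSI/INTx.
 */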
static void
jme_msix_try_alloc(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct jme_msix_data *msix;
	int error, i, r, msix_enable, msix_count;

	msix_count = JME_MSIXCNT(sc->jme_cdata.jme_rx_ring_cnt);
	KKASSERT(msix_count <= JME_NMSIX);

	msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable);

	/*
	 * We leave the 1st MSI-X vector unused, so we
	 * actually need msix_count + 1 MSI-X vectors.
	 */
	if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
		return;

	for (i = 0; i < msix_count; ++i)
		sc->jme_msix[i].jme_msix_rid = -1;

	i = 0;

	/*
	 * Setup status MSI-X
	 */
	msix = &sc->jme_msix[i++];
	msix->jme_msix_cpuid = 0;
	msix->jme_msix_arg = sc;
	msix->jme_msix_func = jme_msix_status;
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		msix->jme_msix_intrs |=
		    sc->jme_cdata.jme_rx_data[r].jme_rx_empty;
	}
	msix->jme_msix_serialize = &sc->jme_serialize;
	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s sts",
	    device_get_nameunit(dev));

	/*
	 * Setup TX MSI-X
	 */
	msix = &sc->jme_msix[i++];
	msix->jme_msix_cpuid = if_ringmap_cpumap(sc->jme_tx_rmap, 0);
	sc->jme_tx_cpuid = msix->jme_msix_cpuid;
	msix->jme_msix_arg = &sc->jme_cdata.jme_tx_data;
	msix->jme_msix_func = jme_msix_tx;
	msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
	msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_data.jme_tx_serialize;
	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
	    device_get_nameunit(dev));

	/*
	 * Setup RX MSI-X
	 */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		msix = &sc->jme_msix[i++];
		msix->jme_msix_cpuid = if_ringmap_cpumap(sc->jme_rx_rmap, r);
		KKASSERT(msix->jme_msix_cpuid < netisr_ncpus);
		msix->jme_msix_arg = rdata;
		msix->jme_msix_func = jme_msix_rx;
		msix->jme_msix_intrs = rdata->jme_rx_coal;
		msix->jme_msix_serialize = &rdata->jme_rx_serialize;
		ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
		    "%s rx%d", device_get_nameunit(dev), r);
	}

	KKASSERT(i == msix_count);

	error = pci_setup_msix(dev);
	if (error)
		return;

	/* Setup jme_msix_cnt early, so we could cleanup */
	sc->jme_msix_cnt = msix_count;

	for (i = 0; i < msix_count; ++i) {
		msix = &sc->jme_msix[i];

		msix->jme_msix_vector = i + 1;
		error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
		    &msix->jme_msix_rid, msix->jme_msix_cpuid);
		if (error)
			goto back;

		msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &msix->jme_msix_rid, RF_ACTIVE);
		if (msix->jme_msix_res == NULL) {
			error = ENOMEM;
			goto back;
		}
	}

	for (i = 0; i < JME_INTR_CNT; ++i) {
		uint32_t intr_mask = (1 << i);
		int x;

		if ((JME_INTRS & intr_mask) == 0)
			continue;

		for (x = 0; x < msix_count; ++x) {
			msix = &sc->jme_msix[x];
			if (msix->jme_msix_intrs & intr_mask) {
				int reg, shift;

				reg = i / JME_MSINUM_FACTOR;
				KKASSERT(reg < JME_MSINUM_CNT);

				shift = (i % JME_MSINUM_FACTOR) * 4;

				sc->jme_msinum[reg] |=
				    (msix->jme_msix_vector << shift);

				break;
			}
		}
	}

	if (bootverbose) {
		for (i = 0; i < JME_MSINUM_CNT; ++i) {
			device_printf(dev, "MSINUM%d: %#x\n", i,
			    sc->jme_msinum[i]);
		}
	}

	pci_enable_msix(dev);
	sc->jme_irq_type = PCI_INTR_TYPE_MSIX;
	return;

back:
	jme_msix_free(dev);
}
static int
jme_intr_alloc(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	u_int irq_flags;

	jme_msix_try_alloc(dev);

	if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
		sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
		    &sc->jme_irq_rid, &irq_flags);

		sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &sc->jme_irq_rid, irq_flags);
		if (sc->jme_irq_res == NULL) {
			device_printf(dev, "can't allocate irq\n");
			return ENXIO;
		}
		sc->jme_tx_cpuid = rman_get_cpuid(sc->jme_irq_res);
	}
	return 0;
}

static void
jme_msix_free(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	KKASSERT(sc->jme_msix_cnt > 1);

	for (i = 0; i < sc->jme_msix_cnt; ++i) {
		struct jme_msix_data *msix = &sc->jme_msix[i];

		if (msix->jme_msix_res != NULL) {
			bus_release_resource(dev, SYS_RES_IRQ,
			    msix->jme_msix_rid, msix->jme_msix_res);
			msix->jme_msix_res = NULL;
		}
		if (msix->jme_msix_rid >= 0) {
			pci_release_msix_vector(dev, msix->jme_msix_rid);
			msix->jme_msix_rid = -1;
		}
	}
	pci_teardown_msix(dev);
}

static void
jme_intr_free(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
		if (sc->jme_irq_res != NULL) {
			bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
					     sc->jme_irq_res);
		}
		if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
			pci_release_msi(dev);
	} else {
		jme_msix_free(dev);
	}
}
static void
jme_msix_tx(void *xtdata)
{
	struct jme_txdata *tdata = xtdata;
	struct jme_softc *sc = tdata->jme_sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(&tdata->jme_tx_serialize);

	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);

	CSR_WRITE_4(sc, JME_INTR_STATUS,
	    INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP);

	if (ifp->if_flags & IFF_RUNNING) {
		jme_txeof(tdata);
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
	}

	CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
}

static void
jme_msix_rx(void *xrdata)
{
	struct jme_rxdata *rdata = xrdata;
	struct jme_softc *sc = rdata->jme_sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(&rdata->jme_rx_serialize);

	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, rdata->jme_rx_coal);

	CSR_WRITE_4(sc, JME_INTR_STATUS,
	    rdata->jme_rx_coal | rdata->jme_rx_comp);

	if (ifp->if_flags & IFF_RUNNING)
		jme_rxeof(rdata, -1, mycpuid);

	CSR_WRITE_4(sc, JME_INTR_MASK_SET, rdata->jme_rx_coal);
}

static void
jme_msix_status(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_RXQ_DESC_EMPTY);

	status = CSR_READ_4(sc, JME_INTR_STATUS);

	if (status & INTR_RXQ_DESC_EMPTY) {
		CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY);
		if (ifp->if_flags & IFF_RUNNING)
			jme_rx_restart(sc, status);
	}

	CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_RXQ_DESC_EMPTY);
}
static void
jme_rx_restart(struct jme_softc *sc, uint32_t status)
{
	int i, cpuid = mycpuid;

	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		if (status & rdata->jme_rx_empty) {
			lwkt_serialize_enter(&rdata->jme_rx_serialize);
			jme_rxeof(rdata, -1, cpuid);
#ifdef JME_RSS_DEBUG
			rdata->jme_rx_emp++;
#endif
			lwkt_serialize_exit(&rdata->jme_rx_serialize);
		}
	}
	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
	    RXCSR_RXQ_START);
}

static void
jme_set_msinum(struct jme_softc *sc)
{
	int i;

	for (i = 0; i < JME_MSINUM_CNT; ++i)
		CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]);
}
static int
jme_intr_setup(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	int error;

	if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
		return jme_msix_setup(dev);

	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE,
	    jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		return error;
	}

	return 0;
}

static void
jme_intr_teardown(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
		jme_msix_teardown(dev, sc->jme_msix_cnt);
	else
		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
}

static int
jme_msix_setup(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	int error, x;

	for (x = 0; x < sc->jme_msix_cnt; ++x) {
		struct jme_msix_data *msix = &sc->jme_msix[x];

		error = bus_setup_intr_descr(dev, msix->jme_msix_res,
		    INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg,
		    &msix->jme_msix_handle, msix->jme_msix_serialize,
		    msix->jme_msix_desc);
		if (error) {
			device_printf(dev, "could not set up %s "
			    "interrupt handler.\n", msix->jme_msix_desc);
			jme_msix_teardown(dev, x);
			return error;
		}
	}
	return 0;
}

static void
jme_msix_teardown(device_t dev, int msix_count)
{
	struct jme_softc *sc = device_get_softc(dev);
	int x;

	for (x = 0; x < msix_count; ++x) {
		struct jme_msix_data *msix = &sc->jme_msix[x];

		bus_teardown_intr(dev, msix->jme_msix_res,
		    msix->jme_msix_handle);
	}
}
static void
jme_serialize_skipmain(struct jme_softc *sc)
{
	lwkt_serialize_array_enter(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, 1);
}

static void
jme_deserialize_skipmain(struct jme_softc *sc)
{
	lwkt_serialize_array_exit(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, 1);
}

static void
jme_enable_intr(struct jme_softc *sc)
{
	int i;

	for (i = 0; i < sc->jme_serialize_cnt; ++i)
		lwkt_serialize_handler_enable(sc->jme_serialize_arr[i]);

	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

static void
jme_disable_intr(struct jme_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	for (i = 0; i < sc->jme_serialize_cnt; ++i)
		lwkt_serialize_handler_disable(sc->jme_serialize_arr[i]);
}
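/*
 * PHY power control helpers.  Besides the standard BMCR power-down
 * bit, chips with JME_CAP_PHYPWR also need the PHY power register
 * and a PCI config register (JME_PCI_PE1) toggled.
 */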
static void
jme_phy_poweron(struct jme_softc *sc)
{
	int bmcr;

	bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR);
	bmcr &= ~BMCR_PDOWN;
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr);

	if (sc->jme_caps & JME_CAP_PHYPWR) {
		uint32_t val;

		val = CSR_READ_4(sc, JME_PHYPWR);
		val &= ~(PHYPWR_DOWN1SEL | PHYPWR_DOWN1SW |
		    PHYPWR_DOWN2 | PHYPWR_CLKSEL);
		CSR_WRITE_4(sc, JME_PHYPWR, val);

		val = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
		val &= ~PE1_GPREG0_PHYBG;
		val |= PE1_GPREG0_ENBG;
		pci_write_config(sc->jme_dev, JME_PCI_PE1, val, 4);
	}
}

static void
jme_phy_poweroff(struct jme_softc *sc)
{
	int bmcr;

	bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR);
	bmcr |= BMCR_PDOWN;
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr);

	if (sc->jme_caps & JME_CAP_PHYPWR) {
		uint32_t val;

		val = CSR_READ_4(sc, JME_PHYPWR);
		val |= PHYPWR_DOWN1SEL | PHYPWR_DOWN1SW |
		    PHYPWR_DOWN2 | PHYPWR_CLKSEL;
		CSR_WRITE_4(sc, JME_PHYPWR, val);

		val = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
		val &= ~PE1_GPREG0_PHYBG;
		val |= PE1_GPREG0_PDD3COLD;
		pci_write_config(sc->jme_dev, JME_PCI_PE1, val, 4);
	}
}

static int
jme_miiext_read(struct jme_softc *sc, int reg)
{
	int addr;

	addr = JME_MII_EXT_ADDR_RD | reg;
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
	    JME_MII_EXT_ADDR, addr);
	return jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr,
	    JME_MII_EXT_DATA);
}

static void
jme_miiext_write(struct jme_softc *sc, int reg, int val)
{
	int addr;

	addr = JME_MII_EXT_ADDR_WR | reg;
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
	    JME_MII_EXT_DATA, val);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
	    JME_MII_EXT_ADDR, addr);
}
static void
jme_phy_init(struct jme_softc *sc)
{
	int gtcr, val;

	jme_phy_poweroff(sc);
	jme_phy_poweron(sc);

	/* Enable PHY test 1 */
	gtcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR);
	gtcr &= ~GTCR_TEST_MASK;
	gtcr |= GTCR_TEST_1;
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, gtcr);

	val = jme_miiext_read(sc, JME_MII_EXT_COM2);
	val &= ~JME_MII_EXT_COM2_CALIB_MODE0;
	val |= JME_MII_EXT_COM2_CALIB_LATCH | JME_MII_EXT_COM2_CALIB_EN;
	jme_miiext_write(sc, JME_MII_EXT_COM2, val);

	DELAY(20000);

	val = jme_miiext_read(sc, JME_MII_EXT_COM2);
	val &= ~(JME_MII_EXT_COM2_CALIB_MODE0 |
	    JME_MII_EXT_COM2_CALIB_LATCH | JME_MII_EXT_COM2_CALIB_EN);
	jme_miiext_write(sc, JME_MII_EXT_COM2, val);

	/* Disable PHY test */
	gtcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR);
	gtcr &= ~GTCR_TEST_MASK;
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, gtcr);

	if (sc->jme_phycom0 != 0)
		jme_miiext_write(sc, JME_MII_EXT_COM0, sc->jme_phycom0);
	if (sc->jme_phycom1 != 0)
		jme_miiext_write(sc, JME_MII_EXT_COM1, sc->jme_phycom1);
}