1 /******************************************************************************
4 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
5 * Version: $Revision: 1.23 $
6 * Date : $Date: 2005/12/22 09:04:11 $
7 * Purpose: Main driver source file
9 *****************************************************************************/
11 /******************************************************************************
14 * Copyright (C) Marvell International Ltd. and/or its affiliates
16 * The computer program files contained in this folder ("Files")
17 * are provided to you under the BSD-type license terms provided
18 * below, and any use of such Files and any derivative works
19 * thereof created by you shall be governed by the following terms
22 * - Redistributions of source code must retain the above copyright
23 * notice, this list of conditions and the following disclaimer.
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials provided
27 * with the distribution.
28 * - Neither the name of Marvell nor the names of its contributors
29 * may be used to endorse or promote products derived from this
30 * software without specific prior written permission.
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
36 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
38 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
39 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43 * OF THE POSSIBILITY OF SUCH DAMAGE.
46 *****************************************************************************/
49 * Copyright (c) 1997, 1998, 1999, 2000
50 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
55 * 1. Redistributions of source code must retain the above copyright
56 * notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 * notice, this list of conditions and the following disclaimer in the
59 * documentation and/or other materials provided with the distribution.
60 * 3. All advertising materials mentioning features or use of this software
61 * must display the following acknowledgement:
62 * This product includes software developed by Bill Paul.
63 * 4. Neither the name of the author nor the names of any co-contributors
64 * may be used to endorse or promote products derived from this software
65 * without specific prior written permission.
67 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
70 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
71 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
72 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
73 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
74 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
75 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
76 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
77 * THE POSSIBILITY OF SUCH DAMAGE.
80 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
82 * Permission to use, copy, modify, and distribute this software for any
83 * purpose with or without fee is hereby granted, provided that the above
84 * copyright notice and this permission notice appear in all copies.
86 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
87 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
88 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
89 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
90 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
91 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
92 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
95 /* $FreeBSD: src/sys/dev/msk/if_msk.c,v 1.26 2007/12/05 09:41:58 remko Exp $ */
98 * Device driver for the Marvell Yukon II Ethernet controller.
99 * Due to lack of documentation, this driver is based on the code from
100 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
103 #include <sys/param.h>
104 #include <sys/endian.h>
105 #include <sys/kernel.h>
107 #include <sys/in_cksum.h>
108 #include <sys/interrupt.h>
109 #include <sys/malloc.h>
110 #include <sys/proc.h>
111 #include <sys/rman.h>
112 #include <sys/serialize.h>
113 #include <sys/socket.h>
114 #include <sys/sockio.h>
115 #include <sys/sysctl.h>
117 #include <net/ethernet.h>
120 #include <net/if_arp.h>
121 #include <net/if_dl.h>
122 #include <net/if_media.h>
123 #include <net/ifq_var.h>
124 #include <net/vlan/if_vlan_var.h>
126 #include <netinet/ip.h>
127 #include <netinet/ip_var.h>
129 #include <dev/netif/mii_layer/miivar.h>
131 #include <bus/pci/pcireg.h>
132 #include <bus/pci/pcivar.h>
134 #include "if_mskreg.h"
136 /* "device miibus" required. See GENERIC if you get errors here. */
137 #include "miibus_if.h"
139 #define MSK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
142 * Devices supported by this driver.
144 static const struct msk_product
{
145 uint16_t msk_vendorid
;
146 uint16_t msk_deviceid
;
147 const char *msk_name
;
149 { VENDORID_SK
, DEVICEID_SK_YUKON2
,
150 "SK-9Sxx Gigabit Ethernet" },
151 { VENDORID_SK
, DEVICEID_SK_YUKON2_EXPR
,
152 "SK-9Exx Gigabit Ethernet"},
153 { VENDORID_MARVELL
, DEVICEID_MRVL_8021CU
,
154 "Marvell Yukon 88E8021CU Gigabit Ethernet" },
155 { VENDORID_MARVELL
, DEVICEID_MRVL_8021X
,
156 "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
157 { VENDORID_MARVELL
, DEVICEID_MRVL_8022CU
,
158 "Marvell Yukon 88E8022CU Gigabit Ethernet" },
159 { VENDORID_MARVELL
, DEVICEID_MRVL_8022X
,
160 "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
161 { VENDORID_MARVELL
, DEVICEID_MRVL_8061CU
,
162 "Marvell Yukon 88E8061CU Gigabit Ethernet" },
163 { VENDORID_MARVELL
, DEVICEID_MRVL_8061X
,
164 "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
165 { VENDORID_MARVELL
, DEVICEID_MRVL_8062CU
,
166 "Marvell Yukon 88E8062CU Gigabit Ethernet" },
167 { VENDORID_MARVELL
, DEVICEID_MRVL_8062X
,
168 "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
169 { VENDORID_MARVELL
, DEVICEID_MRVL_8035
,
170 "Marvell Yukon 88E8035 Fast Ethernet" },
171 { VENDORID_MARVELL
, DEVICEID_MRVL_8036
,
172 "Marvell Yukon 88E8036 Fast Ethernet" },
173 { VENDORID_MARVELL
, DEVICEID_MRVL_8038
,
174 "Marvell Yukon 88E8038 Fast Ethernet" },
175 { VENDORID_MARVELL
, DEVICEID_MRVL_8039
,
176 "Marvell Yukon 88E8039 Fast Ethernet" },
177 { VENDORID_MARVELL
, DEVICEID_MRVL_8040
,
178 "Marvell Yukon 88E8040 Fast Ethernet" },
179 { VENDORID_MARVELL
, DEVICEID_MRVL_8040T
,
180 "Marvell Yukon 88E8040T Fast Ethernet" },
181 { VENDORID_MARVELL
, DEVICEID_MRVL_8042
,
182 "Marvell Yukon 88E8042 Fast Ethernet" },
183 { VENDORID_MARVELL
, DEVICEID_MRVL_8048
,
184 "Marvell Yukon 88E8048 Fast Ethernet" },
185 { VENDORID_MARVELL
, DEVICEID_MRVL_4361
,
186 "Marvell Yukon 88E8050 Gigabit Ethernet" },
187 { VENDORID_MARVELL
, DEVICEID_MRVL_4360
,
188 "Marvell Yukon 88E8052 Gigabit Ethernet" },
189 { VENDORID_MARVELL
, DEVICEID_MRVL_4362
,
190 "Marvell Yukon 88E8053 Gigabit Ethernet" },
191 { VENDORID_MARVELL
, DEVICEID_MRVL_4363
,
192 "Marvell Yukon 88E8055 Gigabit Ethernet" },
193 { VENDORID_MARVELL
, DEVICEID_MRVL_4364
,
194 "Marvell Yukon 88E8056 Gigabit Ethernet" },
195 { VENDORID_MARVELL
, DEVICEID_MRVL_4365
,
196 "Marvell Yukon 88E8070 Gigabit Ethernet" },
197 { VENDORID_MARVELL
, DEVICEID_MRVL_436A
,
198 "Marvell Yukon 88E8058 Gigabit Ethernet" },
199 { VENDORID_MARVELL
, DEVICEID_MRVL_436B
,
200 "Marvell Yukon 88E8071 Gigabit Ethernet" },
201 { VENDORID_MARVELL
, DEVICEID_MRVL_436C
,
202 "Marvell Yukon 88E8072 Gigabit Ethernet" },
203 { VENDORID_DLINK
, DEVICEID_DLINK_DGE550SX
,
204 "D-Link 550SX Gigabit Ethernet" },
205 { VENDORID_DLINK
, DEVICEID_DLINK_DGE560T
,
206 "D-Link 560T Gigabit Ethernet" },
210 static const char *model_name
[] = {
219 static int mskc_probe(device_t
);
220 static int mskc_attach(device_t
);
221 static int mskc_detach(device_t
);
222 static int mskc_shutdown(device_t
);
223 static int mskc_suspend(device_t
);
224 static int mskc_resume(device_t
);
225 static void mskc_intr(void *);
227 static void mskc_reset(struct msk_softc
*);
228 static void mskc_set_imtimer(struct msk_softc
*);
229 static void mskc_intr_hwerr(struct msk_softc
*);
230 static int mskc_handle_events(struct msk_softc
*);
231 static void mskc_phy_power(struct msk_softc
*, int);
232 static int mskc_setup_rambuffer(struct msk_softc
*);
233 static int mskc_status_dma_alloc(struct msk_softc
*);
234 static void mskc_status_dma_free(struct msk_softc
*);
235 static int mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS
);
236 static int mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS
);
238 static int msk_probe(device_t
);
239 static int msk_attach(device_t
);
240 static int msk_detach(device_t
);
241 static int msk_miibus_readreg(device_t
, int, int);
242 static int msk_miibus_writereg(device_t
, int, int, int);
243 static void msk_miibus_statchg(device_t
);
245 static void msk_init(void *);
246 static int msk_ioctl(struct ifnet
*, u_long
, caddr_t
, struct ucred
*);
247 static void msk_start(struct ifnet
*);
248 static void msk_watchdog(struct ifnet
*);
249 static int msk_mediachange(struct ifnet
*);
250 static void msk_mediastatus(struct ifnet
*, struct ifmediareq
*);
252 static void msk_tick(void *);
253 static void msk_intr_phy(struct msk_if_softc
*);
254 static void msk_intr_gmac(struct msk_if_softc
*);
256 msk_rxput(struct msk_if_softc
*);
257 static void msk_handle_hwerr(struct msk_if_softc
*, uint32_t);
258 static void msk_rxeof(struct msk_if_softc
*, uint32_t, int,
259 struct mbuf_chain
*);
260 static void msk_txeof(struct msk_if_softc
*, int);
261 static void msk_set_prefetch(struct msk_softc
*, int, bus_addr_t
, uint32_t);
262 static void msk_set_rambuffer(struct msk_if_softc
*);
263 static void msk_stop(struct msk_if_softc
*);
265 static int msk_txrx_dma_alloc(struct msk_if_softc
*);
266 static void msk_txrx_dma_free(struct msk_if_softc
*);
267 static int msk_init_rx_ring(struct msk_if_softc
*);
268 static void msk_init_tx_ring(struct msk_if_softc
*);
270 msk_discard_rxbuf(struct msk_if_softc
*, int);
271 static int msk_newbuf(struct msk_if_softc
*, int, int);
272 static int msk_encap(struct msk_if_softc
*, struct mbuf
**);
275 static int msk_init_jumbo_rx_ring(struct msk_if_softc
*);
276 static __inline
void msk_discard_jumbo_rxbuf(struct msk_if_softc
*, int);
277 static int msk_jumbo_newbuf(struct msk_if_softc
*, int);
278 static void msk_jumbo_rxeof(struct msk_if_softc
*, uint32_t, int);
279 static void *msk_jalloc(struct msk_if_softc
*);
280 static void msk_jfree(void *, void *);
283 static int msk_phy_readreg(struct msk_if_softc
*, int, int);
284 static int msk_phy_writereg(struct msk_if_softc
*, int, int, int);
286 static void msk_rxfilter(struct msk_if_softc
*);
287 static void msk_setvlan(struct msk_if_softc
*, struct ifnet
*);
288 static void msk_set_tx_stfwd(struct msk_if_softc
*);
290 static int msk_dmamem_create(device_t
, bus_size_t
, bus_dma_tag_t
*,
291 void **, bus_addr_t
*, bus_dmamap_t
*);
292 static void msk_dmamem_destroy(bus_dma_tag_t
, void *, bus_dmamap_t
);
294 static device_method_t mskc_methods
[] = {
295 /* Device interface */
296 DEVMETHOD(device_probe
, mskc_probe
),
297 DEVMETHOD(device_attach
, mskc_attach
),
298 DEVMETHOD(device_detach
, mskc_detach
),
299 DEVMETHOD(device_suspend
, mskc_suspend
),
300 DEVMETHOD(device_resume
, mskc_resume
),
301 DEVMETHOD(device_shutdown
, mskc_shutdown
),
304 DEVMETHOD(bus_print_child
, bus_generic_print_child
),
305 DEVMETHOD(bus_driver_added
, bus_generic_driver_added
),
310 static DEFINE_CLASS_0(mskc
, mskc_driver
, mskc_methods
, sizeof(struct msk_softc
));
311 static devclass_t mskc_devclass
;
313 static device_method_t msk_methods
[] = {
314 /* Device interface */
315 DEVMETHOD(device_probe
, msk_probe
),
316 DEVMETHOD(device_attach
, msk_attach
),
317 DEVMETHOD(device_detach
, msk_detach
),
318 DEVMETHOD(device_shutdown
, bus_generic_shutdown
),
321 DEVMETHOD(bus_print_child
, bus_generic_print_child
),
322 DEVMETHOD(bus_driver_added
, bus_generic_driver_added
),
325 DEVMETHOD(miibus_readreg
, msk_miibus_readreg
),
326 DEVMETHOD(miibus_writereg
, msk_miibus_writereg
),
327 DEVMETHOD(miibus_statchg
, msk_miibus_statchg
),
332 static DEFINE_CLASS_0(msk
, msk_driver
, msk_methods
, sizeof(struct msk_if_softc
));
333 static devclass_t msk_devclass
;
335 DECLARE_DUMMY_MODULE(if_msk
);
336 DRIVER_MODULE(if_msk
, pci
, mskc_driver
, mskc_devclass
, NULL
, NULL
);
337 DRIVER_MODULE(if_msk
, mskc
, msk_driver
, msk_devclass
, NULL
, NULL
);
338 DRIVER_MODULE(miibus
, msk
, miibus_driver
, miibus_devclass
, NULL
, NULL
);
340 static int mskc_intr_rate
= 0;
341 static int mskc_process_limit
= MSK_PROC_DEFAULT
;
343 TUNABLE_INT("hw.mskc.intr_rate", &mskc_intr_rate
);
344 TUNABLE_INT("hw.mskc.process_limit", &mskc_process_limit
);
347 msk_miibus_readreg(device_t dev
, int phy
, int reg
)
349 struct msk_if_softc
*sc_if
;
351 if (phy
!= PHY_ADDR_MARV
)
354 sc_if
= device_get_softc(dev
);
356 return (msk_phy_readreg(sc_if
, phy
, reg
));
360 msk_phy_readreg(struct msk_if_softc
*sc_if
, int phy
, int reg
)
362 struct msk_softc
*sc
;
365 sc
= sc_if
->msk_softc
;
367 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_SMI_CTRL
,
368 GM_SMI_CT_PHY_AD(phy
) | GM_SMI_CT_REG_AD(reg
) | GM_SMI_CT_OP_RD
);
370 for (i
= 0; i
< MSK_TIMEOUT
; i
++) {
372 val
= GMAC_READ_2(sc
, sc_if
->msk_port
, GM_SMI_CTRL
);
373 if ((val
& GM_SMI_CT_RD_VAL
) != 0) {
374 val
= GMAC_READ_2(sc
, sc_if
->msk_port
, GM_SMI_DATA
);
379 if (i
== MSK_TIMEOUT
) {
380 if_printf(sc_if
->msk_ifp
, "phy failed to come ready\n");
388 msk_miibus_writereg(device_t dev
, int phy
, int reg
, int val
)
390 struct msk_if_softc
*sc_if
;
392 if (phy
!= PHY_ADDR_MARV
)
395 sc_if
= device_get_softc(dev
);
397 return (msk_phy_writereg(sc_if
, phy
, reg
, val
));
401 msk_phy_writereg(struct msk_if_softc
*sc_if
, int phy
, int reg
, int val
)
403 struct msk_softc
*sc
;
406 sc
= sc_if
->msk_softc
;
408 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_SMI_DATA
, val
);
409 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_SMI_CTRL
,
410 GM_SMI_CT_PHY_AD(phy
) | GM_SMI_CT_REG_AD(reg
));
411 for (i
= 0; i
< MSK_TIMEOUT
; i
++) {
413 if ((GMAC_READ_2(sc
, sc_if
->msk_port
, GM_SMI_CTRL
) &
414 GM_SMI_CT_BUSY
) == 0)
417 if (i
== MSK_TIMEOUT
)
418 if_printf(sc_if
->msk_ifp
, "phy write timeout\n");
424 msk_miibus_statchg(device_t dev
)
426 struct msk_if_softc
*sc_if
;
427 struct msk_softc
*sc
;
428 struct mii_data
*mii
;
432 sc_if
= device_get_softc(dev
);
433 sc
= sc_if
->msk_softc
;
435 mii
= device_get_softc(sc_if
->msk_miibus
);
436 ifp
= sc_if
->msk_ifp
;
439 if ((mii
->mii_media_status
& (IFM_AVALID
| IFM_ACTIVE
)) ==
440 (IFM_AVALID
| IFM_ACTIVE
)) {
441 switch (IFM_SUBTYPE(mii
->mii_media_active
)) {
450 if ((sc_if
->msk_flags
& MSK_FLAG_FASTETHER
) == 0)
456 if (sc_if
->msk_link
!= 0) {
457 /* Enable Tx FIFO Underrun. */
458 CSR_WRITE_1(sc
, MR_ADDR(sc_if
->msk_port
, GMAC_IRQ_MSK
),
459 GM_IS_TX_FF_UR
| GM_IS_RX_FF_OR
);
461 * Because mii(4) notify msk(4) that it detected link status
462 * change, there is no need to enable automatic
463 * speed/flow-control/duplex updates.
465 gmac
= GM_GPCR_AU_ALL_DIS
;
466 switch (IFM_SUBTYPE(mii
->mii_media_active
)) {
469 gmac
|= GM_GPCR_SPEED_1000
;
472 gmac
|= GM_GPCR_SPEED_100
;
478 if (((mii
->mii_media_active
& IFM_GMASK
) & IFM_FDX
) != 0)
479 gmac
|= GM_GPCR_DUP_FULL
;
480 /* Disable Rx flow control. */
481 if (((mii
->mii_media_active
& IFM_GMASK
) & IFM_FLAG0
) == 0)
482 gmac
|= GM_GPCR_FC_RX_DIS
;
483 /* Disable Tx flow control. */
484 if (((mii
->mii_media_active
& IFM_GMASK
) & IFM_FLAG1
) == 0)
485 gmac
|= GM_GPCR_FC_TX_DIS
;
486 gmac
|= GM_GPCR_RX_ENA
| GM_GPCR_TX_ENA
;
487 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_GP_CTRL
, gmac
);
488 /* Read again to ensure writing. */
489 GMAC_READ_2(sc
, sc_if
->msk_port
, GM_GP_CTRL
);
492 if (((mii
->mii_media_active
& IFM_GMASK
) &
493 (IFM_FLAG0
| IFM_FLAG1
)) == 0)
494 gmac
= GMC_PAUSE_OFF
;
495 /* Disable pause for 10/100 Mbps in half-duplex mode. */
496 if ((((mii
->mii_media_active
& IFM_GMASK
) & IFM_FDX
) == 0) &&
497 (IFM_SUBTYPE(mii
->mii_media_active
) == IFM_100_TX
||
498 IFM_SUBTYPE(mii
->mii_media_active
) == IFM_10_T
))
499 gmac
= GMC_PAUSE_OFF
;
500 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, GMAC_CTRL
), gmac
);
502 /* Enable PHY interrupt for FIFO underrun/overflow. */
503 msk_phy_writereg(sc_if
, PHY_ADDR_MARV
,
504 PHY_MARV_INT_MASK
, PHY_M_IS_FIFO_ERROR
);
507 * Link state changed to down.
508 * Disable PHY interrupts.
510 msk_phy_writereg(sc_if
, PHY_ADDR_MARV
, PHY_MARV_INT_MASK
, 0);
511 /* Disable Rx/Tx MAC. */
512 gmac
= GMAC_READ_2(sc
, sc_if
->msk_port
, GM_GP_CTRL
);
513 if (gmac
& (GM_GPCR_RX_ENA
| GM_GPCR_TX_ENA
)) {
514 gmac
&= ~(GM_GPCR_RX_ENA
| GM_GPCR_TX_ENA
);
515 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_GP_CTRL
, gmac
);
516 /* Read again to ensure writing. */
517 GMAC_READ_2(sc
, sc_if
->msk_port
, GM_GP_CTRL
);
523 msk_rxfilter(struct msk_if_softc
*sc_if
)
525 struct msk_softc
*sc
;
527 struct ifmultiaddr
*ifma
;
532 sc
= sc_if
->msk_softc
;
533 ifp
= sc_if
->msk_ifp
;
535 bzero(mchash
, sizeof(mchash
));
536 mode
= GMAC_READ_2(sc
, sc_if
->msk_port
, GM_RX_CTRL
);
537 if ((ifp
->if_flags
& IFF_PROMISC
) != 0) {
538 mode
&= ~(GM_RXCR_UCF_ENA
| GM_RXCR_MCF_ENA
);
539 } else if ((ifp
->if_flags
& IFF_ALLMULTI
) != 0) {
540 mode
|= (GM_RXCR_UCF_ENA
| GM_RXCR_MCF_ENA
);
544 mode
|= GM_RXCR_UCF_ENA
;
545 TAILQ_FOREACH(ifma
, &ifp
->if_multiaddrs
, ifma_link
) {
546 if (ifma
->ifma_addr
->sa_family
!= AF_LINK
)
548 crc
= ether_crc32_be(LLADDR((struct sockaddr_dl
*)
549 ifma
->ifma_addr
), ETHER_ADDR_LEN
);
550 /* Just want the 6 least significant bits. */
552 /* Set the corresponding bit in the hash table. */
553 mchash
[crc
>> 5] |= 1 << (crc
& 0x1f);
555 if (mchash
[0] != 0 || mchash
[1] != 0)
556 mode
|= GM_RXCR_MCF_ENA
;
559 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_MC_ADDR_H1
,
561 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_MC_ADDR_H2
,
562 (mchash
[0] >> 16) & 0xffff);
563 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_MC_ADDR_H3
,
565 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_MC_ADDR_H4
,
566 (mchash
[1] >> 16) & 0xffff);
567 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_RX_CTRL
, mode
);
571 msk_setvlan(struct msk_if_softc
*sc_if
, struct ifnet
*ifp
)
573 struct msk_softc
*sc
;
575 sc
= sc_if
->msk_softc
;
576 if ((ifp
->if_capenable
& IFCAP_VLAN_HWTAGGING
) != 0) {
577 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, RX_GMF_CTRL_T
),
579 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, TX_GMF_CTRL_T
),
582 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, RX_GMF_CTRL_T
),
584 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, TX_GMF_CTRL_T
),
590 msk_init_rx_ring(struct msk_if_softc
*sc_if
)
592 struct msk_ring_data
*rd
;
593 struct msk_rxdesc
*rxd
;
596 sc_if
->msk_cdata
.msk_rx_cons
= 0;
597 sc_if
->msk_cdata
.msk_rx_prod
= 0;
598 sc_if
->msk_cdata
.msk_rx_putwm
= MSK_PUT_WM
;
600 rd
= &sc_if
->msk_rdata
;
601 bzero(rd
->msk_rx_ring
, sizeof(struct msk_rx_desc
) * MSK_RX_RING_CNT
);
602 prod
= sc_if
->msk_cdata
.msk_rx_prod
;
603 for (i
= 0; i
< MSK_RX_RING_CNT
; i
++) {
604 rxd
= &sc_if
->msk_cdata
.msk_rxdesc
[prod
];
606 rxd
->rx_le
= &rd
->msk_rx_ring
[prod
];
607 if (msk_newbuf(sc_if
, prod
, 1) != 0)
609 MSK_INC(prod
, MSK_RX_RING_CNT
);
612 /* Update prefetch unit. */
613 sc_if
->msk_cdata
.msk_rx_prod
= MSK_RX_RING_CNT
- 1;
614 CSR_WRITE_2(sc_if
->msk_softc
,
615 Y2_PREF_Q_ADDR(sc_if
->msk_rxq
, PREF_UNIT_PUT_IDX_REG
),
616 sc_if
->msk_cdata
.msk_rx_prod
);
623 msk_init_jumbo_rx_ring(struct msk_if_softc
*sc_if
)
625 struct msk_ring_data
*rd
;
626 struct msk_rxdesc
*rxd
;
629 MSK_IF_LOCK_ASSERT(sc_if
);
631 sc_if
->msk_cdata
.msk_rx_cons
= 0;
632 sc_if
->msk_cdata
.msk_rx_prod
= 0;
633 sc_if
->msk_cdata
.msk_rx_putwm
= MSK_PUT_WM
;
635 rd
= &sc_if
->msk_rdata
;
636 bzero(rd
->msk_jumbo_rx_ring
,
637 sizeof(struct msk_rx_desc
) * MSK_JUMBO_RX_RING_CNT
);
638 prod
= sc_if
->msk_cdata
.msk_rx_prod
;
639 for (i
= 0; i
< MSK_JUMBO_RX_RING_CNT
; i
++) {
640 rxd
= &sc_if
->msk_cdata
.msk_jumbo_rxdesc
[prod
];
642 rxd
->rx_le
= &rd
->msk_jumbo_rx_ring
[prod
];
643 if (msk_jumbo_newbuf(sc_if
, prod
) != 0)
645 MSK_INC(prod
, MSK_JUMBO_RX_RING_CNT
);
648 bus_dmamap_sync(sc_if
->msk_cdata
.msk_jumbo_rx_ring_tag
,
649 sc_if
->msk_cdata
.msk_jumbo_rx_ring_map
,
650 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
652 sc_if
->msk_cdata
.msk_rx_prod
= MSK_JUMBO_RX_RING_CNT
- 1;
653 CSR_WRITE_2(sc_if
->msk_softc
,
654 Y2_PREF_Q_ADDR(sc_if
->msk_rxq
, PREF_UNIT_PUT_IDX_REG
),
655 sc_if
->msk_cdata
.msk_rx_prod
);
662 msk_init_tx_ring(struct msk_if_softc
*sc_if
)
664 struct msk_ring_data
*rd
;
665 struct msk_txdesc
*txd
;
668 sc_if
->msk_cdata
.msk_tx_prod
= 0;
669 sc_if
->msk_cdata
.msk_tx_cons
= 0;
670 sc_if
->msk_cdata
.msk_tx_cnt
= 0;
672 rd
= &sc_if
->msk_rdata
;
673 bzero(rd
->msk_tx_ring
, sizeof(struct msk_tx_desc
) * MSK_TX_RING_CNT
);
674 for (i
= 0; i
< MSK_TX_RING_CNT
; i
++) {
675 txd
= &sc_if
->msk_cdata
.msk_txdesc
[i
];
677 txd
->tx_le
= &rd
->msk_tx_ring
[i
];
682 msk_discard_rxbuf(struct msk_if_softc
*sc_if
, int idx
)
684 struct msk_rx_desc
*rx_le
;
685 struct msk_rxdesc
*rxd
;
688 rxd
= &sc_if
->msk_cdata
.msk_rxdesc
[idx
];
691 rx_le
->msk_control
= htole32(m
->m_len
| OP_PACKET
| HW_OWNER
);
696 msk_discard_jumbo_rxbuf(struct msk_if_softc
*sc_if
, int idx
)
698 struct msk_rx_desc
*rx_le
;
699 struct msk_rxdesc
*rxd
;
702 rxd
= &sc_if
->msk_cdata
.msk_jumbo_rxdesc
[idx
];
705 rx_le
->msk_control
= htole32(m
->m_len
| OP_PACKET
| HW_OWNER
);
710 msk_newbuf(struct msk_if_softc
*sc_if
, int idx
, int init
)
712 struct msk_rx_desc
*rx_le
;
713 struct msk_rxdesc
*rxd
;
715 bus_dma_segment_t seg
;
719 m
= m_getcl(init
? MB_WAIT
: MB_DONTWAIT
, MT_DATA
, M_PKTHDR
);
723 m
->m_len
= m
->m_pkthdr
.len
= MCLBYTES
;
724 if ((sc_if
->msk_flags
& MSK_FLAG_RAMBUF
) == 0)
725 m_adj(m
, ETHER_ALIGN
);
727 error
= bus_dmamap_load_mbuf_segment(sc_if
->msk_cdata
.msk_rx_tag
,
728 sc_if
->msk_cdata
.msk_rx_sparemap
,
729 m
, &seg
, 1, &nseg
, BUS_DMA_NOWAIT
);
733 if_printf(&sc_if
->arpcom
.ac_if
, "can't load RX mbuf\n");
737 rxd
= &sc_if
->msk_cdata
.msk_rxdesc
[idx
];
738 if (rxd
->rx_m
!= NULL
) {
739 bus_dmamap_sync(sc_if
->msk_cdata
.msk_rx_tag
, rxd
->rx_dmamap
,
740 BUS_DMASYNC_POSTREAD
);
741 bus_dmamap_unload(sc_if
->msk_cdata
.msk_rx_tag
, rxd
->rx_dmamap
);
744 map
= rxd
->rx_dmamap
;
745 rxd
->rx_dmamap
= sc_if
->msk_cdata
.msk_rx_sparemap
;
746 sc_if
->msk_cdata
.msk_rx_sparemap
= map
;
750 rx_le
->msk_addr
= htole32(MSK_ADDR_LO(seg
.ds_addr
));
751 rx_le
->msk_control
= htole32(seg
.ds_len
| OP_PACKET
| HW_OWNER
);
758 msk_jumbo_newbuf(struct msk_if_softc
*sc_if
, int idx
)
760 struct msk_rx_desc
*rx_le
;
761 struct msk_rxdesc
*rxd
;
763 bus_dma_segment_t segs
[1];
768 MGETHDR(m
, M_DONTWAIT
, MT_DATA
);
771 buf
= msk_jalloc(sc_if
);
776 /* Attach the buffer to the mbuf. */
777 MEXTADD(m
, buf
, MSK_JLEN
, msk_jfree
, (struct msk_if_softc
*)sc_if
, 0,
779 if ((m
->m_flags
& M_EXT
) == 0) {
783 m
->m_pkthdr
.len
= m
->m_len
= MSK_JLEN
;
784 m_adj(m
, ETHER_ALIGN
);
786 if (bus_dmamap_load_mbuf_sg(sc_if
->msk_cdata
.msk_jumbo_rx_tag
,
787 sc_if
->msk_cdata
.msk_jumbo_rx_sparemap
, m
, segs
, &nsegs
,
788 BUS_DMA_NOWAIT
) != 0) {
792 KASSERT(nsegs
== 1, ("%s: %d segments returned!", __func__
, nsegs
));
794 rxd
= &sc_if
->msk_cdata
.msk_jumbo_rxdesc
[idx
];
795 if (rxd
->rx_m
!= NULL
) {
796 bus_dmamap_sync(sc_if
->msk_cdata
.msk_jumbo_rx_tag
,
797 rxd
->rx_dmamap
, BUS_DMASYNC_POSTREAD
);
798 bus_dmamap_unload(sc_if
->msk_cdata
.msk_jumbo_rx_tag
,
801 map
= rxd
->rx_dmamap
;
802 rxd
->rx_dmamap
= sc_if
->msk_cdata
.msk_jumbo_rx_sparemap
;
803 sc_if
->msk_cdata
.msk_jumbo_rx_sparemap
= map
;
804 bus_dmamap_sync(sc_if
->msk_cdata
.msk_jumbo_rx_tag
, rxd
->rx_dmamap
,
805 BUS_DMASYNC_PREREAD
);
808 rx_le
->msk_addr
= htole32(MSK_ADDR_LO(segs
[0].ds_addr
));
810 htole32(segs
[0].ds_len
| OP_PACKET
| HW_OWNER
);
820 msk_mediachange(struct ifnet
*ifp
)
822 struct msk_if_softc
*sc_if
= ifp
->if_softc
;
823 struct mii_data
*mii
;
826 mii
= device_get_softc(sc_if
->msk_miibus
);
827 error
= mii_mediachg(mii
);
833 * Report current media status.
836 msk_mediastatus(struct ifnet
*ifp
, struct ifmediareq
*ifmr
)
838 struct msk_if_softc
*sc_if
= ifp
->if_softc
;
839 struct mii_data
*mii
;
841 mii
= device_get_softc(sc_if
->msk_miibus
);
844 ifmr
->ifm_active
= mii
->mii_media_active
;
845 ifmr
->ifm_status
= mii
->mii_media_status
;
849 msk_ioctl(struct ifnet
*ifp
, u_long command
, caddr_t data
, struct ucred
*cr
)
851 struct msk_if_softc
*sc_if
;
853 struct mii_data
*mii
;
856 sc_if
= ifp
->if_softc
;
857 ifr
= (struct ifreq
*)data
;
863 if (ifr
->ifr_mtu
> MSK_JUMBO_MTU
|| ifr
->ifr_mtu
< ETHERMIN
) {
867 if (sc_if
->msk_softc
->msk_hw_id
== CHIP_ID_YUKON_FE
&&
868 ifr
->ifr_mtu
> MSK_MAX_FRAMELEN
) {
872 ifp
->if_mtu
= ifr
->ifr_mtu
;
873 if ((ifp
->if_flags
& IFF_RUNNING
) != 0)
881 if (ifp
->if_flags
& IFF_UP
) {
882 if (ifp
->if_flags
& IFF_RUNNING
) {
883 if (((ifp
->if_flags
^ sc_if
->msk_if_flags
)
884 & (IFF_PROMISC
| IFF_ALLMULTI
)) != 0)
887 if (sc_if
->msk_detach
== 0)
891 if (ifp
->if_flags
& IFF_RUNNING
)
894 sc_if
->msk_if_flags
= ifp
->if_flags
;
899 if (ifp
->if_flags
& IFF_RUNNING
)
905 mii
= device_get_softc(sc_if
->msk_miibus
);
906 error
= ifmedia_ioctl(ifp
, ifr
, &mii
->mii_media
, command
);
910 mask
= ifr
->ifr_reqcap
^ ifp
->if_capenable
;
911 if ((mask
& IFCAP_TXCSUM
) != 0) {
912 ifp
->if_capenable
^= IFCAP_TXCSUM
;
913 if ((IFCAP_TXCSUM
& ifp
->if_capenable
) != 0 &&
914 (IFCAP_TXCSUM
& ifp
->if_capabilities
) != 0)
915 ifp
->if_hwassist
|= MSK_CSUM_FEATURES
;
917 ifp
->if_hwassist
&= ~MSK_CSUM_FEATURES
;
920 if ((mask
& IFCAP_VLAN_HWTAGGING
) != 0) {
921 ifp
->if_capenable
^= IFCAP_VLAN_HWTAGGING
;
922 msk_setvlan(sc_if
, ifp
);
926 if (sc_if
->msk_framesize
> MSK_MAX_FRAMELEN
&&
927 sc_if
->msk_softc
->msk_hw_id
== CHIP_ID_YUKON_EC_U
) {
929 * In Yukon EC Ultra, TSO & checksum offload is not
930 * supported for jumbo frame.
932 ifp
->if_hwassist
&= ~MSK_CSUM_FEATURES
;
933 ifp
->if_capenable
&= ~IFCAP_TXCSUM
;
938 error
= ether_ioctl(ifp
, command
, data
);
946 mskc_probe(device_t dev
)
948 const struct msk_product
*mp
;
949 uint16_t vendor
, devid
;
951 vendor
= pci_get_vendor(dev
);
952 devid
= pci_get_device(dev
);
953 for (mp
= msk_products
; mp
->msk_name
!= NULL
; ++mp
) {
954 if (vendor
== mp
->msk_vendorid
&& devid
== mp
->msk_deviceid
) {
955 device_set_desc(dev
, mp
->msk_name
);
963 mskc_setup_rambuffer(struct msk_softc
*sc
)
968 /* Get adapter SRAM size. */
969 sc
->msk_ramsize
= CSR_READ_1(sc
, B2_E_0
) * 4;
971 device_printf(sc
->msk_dev
,
972 "RAM buffer size : %dKB\n", sc
->msk_ramsize
);
974 if (sc
->msk_ramsize
== 0)
976 sc
->msk_pflags
|= MSK_FLAG_RAMBUF
;
979 * Give receiver 2/3 of memory and round down to the multiple
980 * of 1024. Tx/Rx RAM buffer size of Yukon II shoud be multiple
983 sc
->msk_rxqsize
= rounddown((sc
->msk_ramsize
* 1024 * 2) / 3, 1024);
984 sc
->msk_txqsize
= (sc
->msk_ramsize
* 1024) - sc
->msk_rxqsize
;
985 for (i
= 0, next
= 0; i
< sc
->msk_num_port
; i
++) {
986 sc
->msk_rxqstart
[i
] = next
;
987 sc
->msk_rxqend
[i
] = next
+ sc
->msk_rxqsize
- 1;
988 next
= sc
->msk_rxqend
[i
] + 1;
989 sc
->msk_txqstart
[i
] = next
;
990 sc
->msk_txqend
[i
] = next
+ sc
->msk_txqsize
- 1;
991 next
= sc
->msk_txqend
[i
] + 1;
993 device_printf(sc
->msk_dev
,
994 "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i
,
995 sc
->msk_rxqsize
/ 1024, sc
->msk_rxqstart
[i
],
997 device_printf(sc
->msk_dev
,
998 "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i
,
999 sc
->msk_txqsize
/ 1024, sc
->msk_txqstart
[i
],
1008 mskc_phy_power(struct msk_softc
*sc
, int mode
)
1014 case MSK_PHY_POWERUP
:
1015 /* Switch power to VCC (WA for VAUX problem). */
1016 CSR_WRITE_1(sc
, B0_POWER_CTRL
,
1017 PC_VAUX_ENA
| PC_VCC_ENA
| PC_VAUX_OFF
| PC_VCC_ON
);
1018 /* Disable Core Clock Division, set Clock Select to 0. */
1019 CSR_WRITE_4(sc
, B2_Y2_CLK_CTRL
, Y2_CLK_DIV_DIS
);
1022 if (sc
->msk_hw_id
== CHIP_ID_YUKON_XL
&&
1023 sc
->msk_hw_rev
> CHIP_REV_YU_XL_A1
) {
1024 /* Enable bits are inverted. */
1025 val
= Y2_PCI_CLK_LNK1_DIS
| Y2_COR_CLK_LNK1_DIS
|
1026 Y2_CLK_GAT_LNK1_DIS
| Y2_PCI_CLK_LNK2_DIS
|
1027 Y2_COR_CLK_LNK2_DIS
| Y2_CLK_GAT_LNK2_DIS
;
1030 * Enable PCI & Core Clock, enable clock gating for both Links.
1032 CSR_WRITE_1(sc
, B2_Y2_CLK_GATE
, val
);
1034 val
= pci_read_config(sc
->msk_dev
, PCI_OUR_REG_1
, 4);
1035 val
&= ~(PCI_Y2_PHY1_POWD
| PCI_Y2_PHY2_POWD
);
1036 if (sc
->msk_hw_id
== CHIP_ID_YUKON_XL
) {
1037 if (sc
->msk_hw_rev
> CHIP_REV_YU_XL_A1
) {
1038 /* Deassert Low Power for 1st PHY. */
1039 val
|= PCI_Y2_PHY1_COMA
;
1040 if (sc
->msk_num_port
> 1)
1041 val
|= PCI_Y2_PHY2_COMA
;
1044 /* Release PHY from PowerDown/COMA mode. */
1045 pci_write_config(sc
->msk_dev
, PCI_OUR_REG_1
, val
, 4);
1046 switch (sc
->msk_hw_id
) {
1047 case CHIP_ID_YUKON_EC_U
:
1048 case CHIP_ID_YUKON_EX
:
1049 case CHIP_ID_YUKON_FE_P
:
1050 CSR_WRITE_2(sc
, B0_CTST
, Y2_HW_WOL_OFF
);
1052 /* Enable all clocks. */
1053 pci_write_config(sc
->msk_dev
, PCI_OUR_REG_3
, 0, 4);
1054 our
= pci_read_config(sc
->msk_dev
, PCI_OUR_REG_4
, 4);
1055 our
&= (PCI_FORCE_ASPM_REQUEST
|PCI_ASPM_GPHY_LINK_DOWN
|
1056 PCI_ASPM_INT_FIFO_EMPTY
|PCI_ASPM_CLKRUN_REQUEST
);
1057 /* Set all bits to 0 except bits 15..12. */
1058 pci_write_config(sc
->msk_dev
, PCI_OUR_REG_4
, our
, 4);
1059 our
= pci_read_config(sc
->msk_dev
, PCI_OUR_REG_5
, 4);
1060 our
&= PCI_CTL_TIM_VMAIN_AV_MSK
;
1061 pci_write_config(sc
->msk_dev
, PCI_OUR_REG_5
, our
, 4);
1062 pci_write_config(sc
->msk_dev
, PCI_CFG_REG_1
, 0, 4);
1064 * Disable status race, workaround for
1065 * Yukon EC Ultra & Yukon EX.
1067 val
= CSR_READ_4(sc
, B2_GP_IO
);
1068 val
|= GLB_GPIO_STAT_RACE_DIS
;
1069 CSR_WRITE_4(sc
, B2_GP_IO
, val
);
1070 CSR_READ_4(sc
, B2_GP_IO
);
1073 for (i
= 0; i
< sc
->msk_num_port
; i
++) {
1074 CSR_WRITE_2(sc
, MR_ADDR(i
, GMAC_LINK_CTRL
),
1076 CSR_WRITE_2(sc
, MR_ADDR(i
, GMAC_LINK_CTRL
),
1080 case MSK_PHY_POWERDOWN
:
1081 val
= pci_read_config(sc
->msk_dev
, PCI_OUR_REG_1
, 4);
1082 val
|= PCI_Y2_PHY1_POWD
| PCI_Y2_PHY2_POWD
;
1083 if (sc
->msk_hw_id
== CHIP_ID_YUKON_XL
&&
1084 sc
->msk_hw_rev
> CHIP_REV_YU_XL_A1
) {
1085 val
&= ~PCI_Y2_PHY1_COMA
;
1086 if (sc
->msk_num_port
> 1)
1087 val
&= ~PCI_Y2_PHY2_COMA
;
1089 pci_write_config(sc
->msk_dev
, PCI_OUR_REG_1
, val
, 4);
1091 val
= Y2_PCI_CLK_LNK1_DIS
| Y2_COR_CLK_LNK1_DIS
|
1092 Y2_CLK_GAT_LNK1_DIS
| Y2_PCI_CLK_LNK2_DIS
|
1093 Y2_COR_CLK_LNK2_DIS
| Y2_CLK_GAT_LNK2_DIS
;
1094 if (sc
->msk_hw_id
== CHIP_ID_YUKON_XL
&&
1095 sc
->msk_hw_rev
> CHIP_REV_YU_XL_A1
) {
1096 /* Enable bits are inverted. */
1100 * Disable PCI & Core Clock, disable clock gating for
1103 CSR_WRITE_1(sc
, B2_Y2_CLK_GATE
, val
);
1104 CSR_WRITE_1(sc
, B0_POWER_CTRL
,
1105 PC_VAUX_ENA
| PC_VCC_ENA
| PC_VAUX_ON
| PC_VCC_OFF
);
1113 mskc_reset(struct msk_softc
*sc
)
1120 CSR_WRITE_2(sc
, B0_CTST
, CS_RST_CLR
);
1123 if (sc
->msk_hw_id
== CHIP_ID_YUKON_EX
) {
1124 status
= CSR_READ_2(sc
, B28_Y2_ASF_HCU_CCSR
);
1125 /* Clear AHB bridge & microcontroller reset. */
1126 status
&= ~(Y2_ASF_HCU_CCSR_AHB_RST
|
1127 Y2_ASF_HCU_CCSR_CPU_RST_MODE
);
1128 /* Clear ASF microcontroller state. */
1129 status
&= ~ Y2_ASF_HCU_CCSR_UC_STATE_MSK
;
1130 CSR_WRITE_2(sc
, B28_Y2_ASF_HCU_CCSR
, status
);
1132 CSR_WRITE_1(sc
, B28_Y2_ASF_STAT_CMD
, Y2_ASF_RESET
);
1134 CSR_WRITE_2(sc
, B0_CTST
, Y2_ASF_DISABLE
);
1137 * Since we disabled ASF, S/W reset is required for Power Management.
1139 CSR_WRITE_2(sc
, B0_CTST
, CS_RST_SET
);
1140 CSR_WRITE_2(sc
, B0_CTST
, CS_RST_CLR
);
1142 /* Clear all error bits in the PCI status register. */
1143 status
= pci_read_config(sc
->msk_dev
, PCIR_STATUS
, 2);
1144 CSR_WRITE_1(sc
, B2_TST_CTRL1
, TST_CFG_WRITE_ON
);
1146 pci_write_config(sc
->msk_dev
, PCIR_STATUS
, status
|
1147 PCIM_STATUS_PERR
| PCIM_STATUS_SERR
| PCIM_STATUS_RMABORT
|
1148 PCIM_STATUS_RTABORT
| PCIM_STATUS_PERRREPORT
, 2);
1149 CSR_WRITE_2(sc
, B0_CTST
, CS_MRST_CLR
);
1151 switch (sc
->msk_bustype
) {
1153 /* Clear all PEX errors. */
1154 CSR_PCI_WRITE_4(sc
, PEX_UNC_ERR_STAT
, 0xffffffff);
1155 val
= CSR_PCI_READ_4(sc
, PEX_UNC_ERR_STAT
);
1156 if ((val
& PEX_RX_OV
) != 0) {
1157 sc
->msk_intrmask
&= ~Y2_IS_HW_ERR
;
1158 sc
->msk_intrhwemask
&= ~Y2_IS_PCI_EXP
;
1163 /* Set Cache Line Size to 2(8bytes) if configured to 0. */
1164 val
= pci_read_config(sc
->msk_dev
, PCIR_CACHELNSZ
, 1);
1166 pci_write_config(sc
->msk_dev
, PCIR_CACHELNSZ
, 2, 1);
1167 if (sc
->msk_bustype
== MSK_PCIX_BUS
) {
1168 /* Set Cache Line Size opt. */
1169 val
= pci_read_config(sc
->msk_dev
, PCI_OUR_REG_1
, 4);
1171 pci_write_config(sc
->msk_dev
, PCI_OUR_REG_1
, val
, 4);
1175 /* Set PHY power state. */
1176 mskc_phy_power(sc
, MSK_PHY_POWERUP
);
1178 /* Reset GPHY/GMAC Control */
1179 for (i
= 0; i
< sc
->msk_num_port
; i
++) {
1180 /* GPHY Control reset. */
1181 CSR_WRITE_4(sc
, MR_ADDR(i
, GPHY_CTRL
), GPC_RST_SET
);
1182 CSR_WRITE_4(sc
, MR_ADDR(i
, GPHY_CTRL
), GPC_RST_CLR
);
1183 /* GMAC Control reset. */
1184 CSR_WRITE_4(sc
, MR_ADDR(i
, GMAC_CTRL
), GMC_RST_SET
);
1185 CSR_WRITE_4(sc
, MR_ADDR(i
, GMAC_CTRL
), GMC_RST_CLR
);
1186 CSR_WRITE_4(sc
, MR_ADDR(i
, GMAC_CTRL
), GMC_F_LOOPB_OFF
);
1187 if (sc
->msk_hw_id
== CHIP_ID_YUKON_EX
) {
1188 CSR_WRITE_4(sc
, MR_ADDR(i
, GMAC_CTRL
),
1189 GMC_BYP_MACSECRX_ON
| GMC_BYP_MACSECTX_ON
|
1193 CSR_WRITE_1(sc
, B2_TST_CTRL1
, TST_CFG_WRITE_OFF
);
1196 CSR_WRITE_2(sc
, B0_CTST
, Y2_LED_STAT_ON
);
1198 /* Clear TWSI IRQ. */
1199 CSR_WRITE_4(sc
, B2_I2C_IRQ
, I2C_CLR_IRQ
);
1201 /* Turn off hardware timer. */
1202 CSR_WRITE_1(sc
, B2_TI_CTRL
, TIM_STOP
);
1203 CSR_WRITE_1(sc
, B2_TI_CTRL
, TIM_CLR_IRQ
);
1205 /* Turn off descriptor polling. */
1206 CSR_WRITE_1(sc
, B28_DPT_CTRL
, DPT_STOP
);
1208 /* Turn off time stamps. */
1209 CSR_WRITE_1(sc
, GMAC_TI_ST_CTRL
, GMT_ST_STOP
);
1210 CSR_WRITE_1(sc
, GMAC_TI_ST_CTRL
, GMT_ST_CLR_IRQ
);
1212 /* Configure timeout values. */
1213 for (i
= 0; i
< sc
->msk_num_port
; i
++) {
1214 CSR_WRITE_2(sc
, SELECT_RAM_BUFFER(i
, B3_RI_CTRL
), RI_RST_SET
);
1215 CSR_WRITE_2(sc
, SELECT_RAM_BUFFER(i
, B3_RI_CTRL
), RI_RST_CLR
);
1216 CSR_WRITE_1(sc
, SELECT_RAM_BUFFER(i
, B3_RI_WTO_R1
),
1218 CSR_WRITE_1(sc
, SELECT_RAM_BUFFER(i
, B3_RI_WTO_XA1
),
1220 CSR_WRITE_1(sc
, SELECT_RAM_BUFFER(i
, B3_RI_WTO_XS1
),
1222 CSR_WRITE_1(sc
, SELECT_RAM_BUFFER(i
, B3_RI_RTO_R1
),
1224 CSR_WRITE_1(sc
, SELECT_RAM_BUFFER(i
, B3_RI_RTO_XA1
),
1226 CSR_WRITE_1(sc
, SELECT_RAM_BUFFER(i
, B3_RI_RTO_XS1
),
1228 CSR_WRITE_1(sc
, SELECT_RAM_BUFFER(i
, B3_RI_WTO_R2
),
1230 CSR_WRITE_1(sc
, SELECT_RAM_BUFFER(i
, B3_RI_WTO_XA2
),
1232 CSR_WRITE_1(sc
, SELECT_RAM_BUFFER(i
, B3_RI_WTO_XS2
),
1234 CSR_WRITE_1(sc
, SELECT_RAM_BUFFER(i
, B3_RI_RTO_R2
),
1236 CSR_WRITE_1(sc
, SELECT_RAM_BUFFER(i
, B3_RI_RTO_XA2
),
1238 CSR_WRITE_1(sc
, SELECT_RAM_BUFFER(i
, B3_RI_RTO_XS2
),
1242 /* Disable all interrupts. */
1243 CSR_WRITE_4(sc
, B0_HWE_IMSK
, 0);
1244 CSR_READ_4(sc
, B0_HWE_IMSK
);
1245 CSR_WRITE_4(sc
, B0_IMSK
, 0);
1246 CSR_READ_4(sc
, B0_IMSK
);
1249 * On dual port PCI-X card, there is an problem where status
1250 * can be received out of order due to split transactions.
1252 if (sc
->msk_bustype
== MSK_PCIX_BUS
&& sc
->msk_num_port
> 1) {
1256 pcix
= pci_get_pcixcap_ptr(sc
->msk_dev
);
1258 pcix_cmd
= pci_read_config(sc
->msk_dev
, pcix
+ 2, 2);
1259 /* Clear Max Outstanding Split Transactions. */
1261 CSR_WRITE_1(sc
, B2_TST_CTRL1
, TST_CFG_WRITE_ON
);
1262 pci_write_config(sc
->msk_dev
, pcix
+ 2, pcix_cmd
, 2);
1263 CSR_WRITE_1(sc
, B2_TST_CTRL1
, TST_CFG_WRITE_OFF
);
1265 if (sc
->msk_bustype
== MSK_PEX_BUS
) {
1268 v
= pci_read_config(sc
->msk_dev
, PEX_DEV_CTRL
, 2);
1269 /* Change Max. Read Request Size to 4096 bytes. */
1270 v
&= ~PEX_DC_MAX_RRS_MSK
;
1271 v
|= PEX_DC_MAX_RD_RQ_SIZE(5);
1272 pci_write_config(sc
->msk_dev
, PEX_DEV_CTRL
, v
, 2);
1273 width
= pci_read_config(sc
->msk_dev
, PEX_LNK_STAT
, 2);
1274 width
= (width
& PEX_LS_LINK_WI_MSK
) >> 4;
1275 v
= pci_read_config(sc
->msk_dev
, PEX_LNK_CAP
, 2);
1276 v
= (v
& PEX_LS_LINK_WI_MSK
) >> 4;
1278 device_printf(sc
->msk_dev
,
1279 "negotiated width of link(x%d) != "
1280 "max. width of link(x%d)\n", width
, v
);
1284 /* Clear status list. */
1285 bzero(sc
->msk_stat_ring
,
1286 sizeof(struct msk_stat_desc
) * MSK_STAT_RING_CNT
);
1287 sc
->msk_stat_cons
= 0;
1288 CSR_WRITE_4(sc
, STAT_CTRL
, SC_STAT_RST_SET
);
1289 CSR_WRITE_4(sc
, STAT_CTRL
, SC_STAT_RST_CLR
);
1290 /* Set the status list base address. */
1291 addr
= sc
->msk_stat_ring_paddr
;
1292 CSR_WRITE_4(sc
, STAT_LIST_ADDR_LO
, MSK_ADDR_LO(addr
));
1293 CSR_WRITE_4(sc
, STAT_LIST_ADDR_HI
, MSK_ADDR_HI(addr
));
1294 /* Set the status list last index. */
1295 CSR_WRITE_2(sc
, STAT_LAST_IDX
, MSK_STAT_RING_CNT
- 1);
1296 if (sc
->msk_hw_id
== CHIP_ID_YUKON_EC
&&
1297 sc
->msk_hw_rev
== CHIP_REV_YU_EC_A1
) {
1298 /* WA for dev. #4.3 */
1299 CSR_WRITE_2(sc
, STAT_TX_IDX_TH
, ST_TXTH_IDX_MASK
);
1300 /* WA for dev. #4.18 */
1301 CSR_WRITE_1(sc
, STAT_FIFO_WM
, 0x21);
1302 CSR_WRITE_1(sc
, STAT_FIFO_ISR_WM
, 0x07);
1304 CSR_WRITE_2(sc
, STAT_TX_IDX_TH
, 0x0a);
1305 CSR_WRITE_1(sc
, STAT_FIFO_WM
, 0x10);
1306 if (sc
->msk_hw_id
== CHIP_ID_YUKON_XL
&&
1307 sc
->msk_hw_rev
== CHIP_REV_YU_XL_A0
)
1308 CSR_WRITE_1(sc
, STAT_FIFO_ISR_WM
, 0x04);
1310 CSR_WRITE_1(sc
, STAT_FIFO_ISR_WM
, 0x10);
1311 CSR_WRITE_4(sc
, STAT_ISR_TIMER_INI
, 0x0190);
1314 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
1316 CSR_WRITE_4(sc
, STAT_TX_TIMER_INI
, MSK_USECS(sc
, 1000));
1318 /* Enable status unit. */
1319 CSR_WRITE_4(sc
, STAT_CTRL
, SC_STAT_OP_ON
);
1321 CSR_WRITE_1(sc
, STAT_TX_TIMER_CTRL
, TIM_START
);
1322 CSR_WRITE_1(sc
, STAT_LEV_TIMER_CTRL
, TIM_START
);
1323 CSR_WRITE_1(sc
, STAT_ISR_TIMER_CTRL
, TIM_START
);
1327 msk_probe(device_t dev
)
1329 struct msk_softc
*sc
= device_get_softc(device_get_parent(dev
));
1333 * Not much to do here. We always know there will be
1334 * at least one GMAC present, and if there are two,
1335 * mskc_attach() will create a second device instance
1338 ksnprintf(desc
, sizeof(desc
),
1339 "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
1340 model_name
[sc
->msk_hw_id
- CHIP_ID_YUKON_XL
], sc
->msk_hw_id
,
1342 device_set_desc_copy(dev
, desc
);
1348 msk_attach(device_t dev
)
1350 struct msk_softc
*sc
= device_get_softc(device_get_parent(dev
));
1351 struct msk_if_softc
*sc_if
= device_get_softc(dev
);
1352 struct ifnet
*ifp
= &sc_if
->arpcom
.ac_if
;
1354 uint8_t eaddr
[ETHER_ADDR_LEN
];
1356 port
= *(int *)device_get_ivars(dev
);
1357 KKASSERT(port
== MSK_PORT_A
|| port
== MSK_PORT_B
);
1359 kfree(device_get_ivars(dev
), M_DEVBUF
);
1360 device_set_ivars(dev
, NULL
);
1362 callout_init(&sc_if
->msk_tick_ch
);
1363 if_initname(ifp
, device_get_name(dev
), device_get_unit(dev
));
1365 sc_if
->msk_if_dev
= dev
;
1366 sc_if
->msk_port
= port
;
1367 sc_if
->msk_softc
= sc
;
1368 sc_if
->msk_ifp
= ifp
;
1369 sc_if
->msk_flags
= sc
->msk_pflags
;
1370 sc
->msk_if
[port
] = sc_if
;
1372 /* Setup Tx/Rx queue register offsets. */
1373 if (port
== MSK_PORT_A
) {
1374 sc_if
->msk_txq
= Q_XA1
;
1375 sc_if
->msk_txsq
= Q_XS1
;
1376 sc_if
->msk_rxq
= Q_R1
;
1378 sc_if
->msk_txq
= Q_XA2
;
1379 sc_if
->msk_txsq
= Q_XS2
;
1380 sc_if
->msk_rxq
= Q_R2
;
1383 error
= msk_txrx_dma_alloc(sc_if
);
1387 ifp
->if_softc
= sc_if
;
1388 ifp
->if_mtu
= ETHERMTU
;
1389 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
1390 ifp
->if_init
= msk_init
;
1391 ifp
->if_ioctl
= msk_ioctl
;
1392 ifp
->if_start
= msk_start
;
1393 ifp
->if_watchdog
= msk_watchdog
;
1394 ifq_set_maxlen(&ifp
->if_snd
, MSK_TX_RING_CNT
- 1);
1395 ifq_set_ready(&ifp
->if_snd
);
1399 * IFCAP_RXCSUM capability is intentionally disabled as the hardware
1400 * has serious bug in Rx checksum offload for all Yukon II family
1401 * hardware. It seems there is a workaround to make it work somtimes.
1402 * However, the workaround also have to check OP code sequences to
1403 * verify whether the OP code is correct. Sometimes it should compute
1404 * IP/TCP/UDP checksum in driver in order to verify correctness of
1405 * checksum computed by hardware. If you have to compute checksum
1406 * with software to verify the hardware's checksum why have hardware
1407 * compute the checksum? I think there is no reason to spend time to
1408 * make Rx checksum offload work on Yukon II hardware.
1410 ifp
->if_capabilities
= IFCAP_TXCSUM
| IFCAP_VLAN_MTU
|
1411 IFCAP_VLAN_HWTAGGING
| IFCAP_VLAN_HWCSUM
;
1412 ifp
->if_hwassist
= MSK_CSUM_FEATURES
;
1413 ifp
->if_capenable
= ifp
->if_capabilities
;
1417 * Get station address for this interface. Note that
1418 * dual port cards actually come with three station
1419 * addresses: one for each port, plus an extra. The
1420 * extra one is used by the SysKonnect driver software
1421 * as a 'virtual' station address for when both ports
1422 * are operating in failover mode. Currently we don't
1423 * use this extra address.
1425 for (i
= 0; i
< ETHER_ADDR_LEN
; i
++)
1426 eaddr
[i
] = CSR_READ_1(sc
, B2_MAC_1
+ (port
* 8) + i
);
1428 sc_if
->msk_framesize
= ifp
->if_mtu
+ ETHER_HDR_LEN
+ EVL_ENCAPLEN
;
1433 error
= mii_phy_probe(dev
, &sc_if
->msk_miibus
,
1434 msk_mediachange
, msk_mediastatus
);
1436 device_printf(sc_if
->msk_if_dev
, "no PHY found!\n");
1441 * Call MI attach routine. Can't hold locks when calling into ether_*.
1443 ether_ifattach(ifp
, eaddr
, &sc
->msk_serializer
);
1446 * Tell the upper layer(s) we support long frames.
1447 * Must appear after the call to ether_ifattach() because
1448 * ether_ifattach() sets ifi_hdrlen to the default value.
1450 ifp
->if_data
.ifi_hdrlen
= sizeof(struct ether_vlan_header
);
1456 sc
->msk_if
[port
] = NULL
;
1461 * Attach the interface. Allocate softc structures, do ifmedia
1462 * setup and ethernet/BPF attach.
1465 mskc_attach(device_t dev
)
1467 struct msk_softc
*sc
;
1468 int error
, *port
, cpuid
;
1470 sc
= device_get_softc(dev
);
1472 lwkt_serialize_init(&sc
->msk_serializer
);
1475 * Initailize sysctl variables
1477 sc
->msk_process_limit
= mskc_process_limit
;
1478 sc
->msk_intr_rate
= mskc_intr_rate
;
1480 #ifndef BURN_BRIDGES
1482 * Handle power management nonsense.
1484 if (pci_get_powerstate(dev
) != PCI_POWERSTATE_D0
) {
1485 uint32_t irq
, bar0
, bar1
;
1487 /* Save important PCI config data. */
1488 bar0
= pci_read_config(dev
, PCIR_BAR(0), 4);
1489 bar1
= pci_read_config(dev
, PCIR_BAR(1), 4);
1490 irq
= pci_read_config(dev
, PCIR_INTLINE
, 4);
1492 /* Reset the power state. */
1493 device_printf(dev
, "chip is in D%d power mode "
1494 "-- setting to D0\n", pci_get_powerstate(dev
));
1496 pci_set_powerstate(dev
, PCI_POWERSTATE_D0
);
1498 /* Restore PCI config data. */
1499 pci_write_config(dev
, PCIR_BAR(0), bar0
, 4);
1500 pci_write_config(dev
, PCIR_BAR(1), bar1
, 4);
1501 pci_write_config(dev
, PCIR_INTLINE
, irq
, 4);
1503 #endif /* BURN_BRIDGES */
1506 * Map control/status registers.
1508 pci_enable_busmaster(dev
);
1511 * Allocate I/O resource
1513 #ifdef MSK_USEIOSPACE
1514 sc
->msk_res_type
= SYS_RES_IOPORT
;
1515 sc
->msk_res_rid
= PCIR_BAR(1);
1517 sc
->msk_res_type
= SYS_RES_MEMORY
;
1518 sc
->msk_res_rid
= PCIR_BAR(0);
1520 sc
->msk_res
= bus_alloc_resource_any(dev
, sc
->msk_res_type
,
1521 &sc
->msk_res_rid
, RF_ACTIVE
);
1522 if (sc
->msk_res
== NULL
) {
1523 if (sc
->msk_res_type
== SYS_RES_MEMORY
) {
1524 sc
->msk_res_type
= SYS_RES_IOPORT
;
1525 sc
->msk_res_rid
= PCIR_BAR(1);
1527 sc
->msk_res_type
= SYS_RES_MEMORY
;
1528 sc
->msk_res_rid
= PCIR_BAR(0);
1530 sc
->msk_res
= bus_alloc_resource_any(dev
, sc
->msk_res_type
,
1533 if (sc
->msk_res
== NULL
) {
1534 device_printf(dev
, "couldn't allocate %s resources\n",
1535 sc
->msk_res_type
== SYS_RES_MEMORY
? "memory" : "I/O");
1539 sc
->msk_res_bt
= rman_get_bustag(sc
->msk_res
);
1540 sc
->msk_res_bh
= rman_get_bushandle(sc
->msk_res
);
1545 sc
->msk_irq_rid
= 0;
1546 sc
->msk_irq
= bus_alloc_resource_any(dev
, SYS_RES_IRQ
,
1548 RF_SHAREABLE
| RF_ACTIVE
);
1549 if (sc
->msk_irq
== NULL
) {
1550 device_printf(dev
, "couldn't allocate IRQ resources\n");
1555 CSR_WRITE_2(sc
, B0_CTST
, CS_RST_CLR
);
1556 sc
->msk_hw_id
= CSR_READ_1(sc
, B2_CHIP_ID
);
1557 sc
->msk_hw_rev
= (CSR_READ_1(sc
, B2_MAC_CFG
) >> 4) & 0x0f;
1558 /* Bail out if chip is not recognized. */
1559 if (sc
->msk_hw_id
< CHIP_ID_YUKON_XL
||
1560 sc
->msk_hw_id
> CHIP_ID_YUKON_FE_P
) {
1561 device_printf(dev
, "unknown device: id=0x%02x, rev=0x%02x\n",
1562 sc
->msk_hw_id
, sc
->msk_hw_rev
);
1568 * Create sysctl tree
1570 sysctl_ctx_init(&sc
->msk_sysctl_ctx
);
1571 sc
->msk_sysctl_tree
= SYSCTL_ADD_NODE(&sc
->msk_sysctl_ctx
,
1572 SYSCTL_STATIC_CHILDREN(_hw
),
1574 device_get_nameunit(dev
),
1576 if (sc
->msk_sysctl_tree
== NULL
) {
1577 device_printf(dev
, "can't add sysctl node\n");
1582 SYSCTL_ADD_PROC(&sc
->msk_sysctl_ctx
,
1583 SYSCTL_CHILDREN(sc
->msk_sysctl_tree
),
1584 OID_AUTO
, "process_limit", CTLTYPE_INT
| CTLFLAG_RW
,
1585 &sc
->msk_process_limit
, 0, mskc_sysctl_proc_limit
,
1586 "I", "max number of Rx events to process");
1587 SYSCTL_ADD_PROC(&sc
->msk_sysctl_ctx
,
1588 SYSCTL_CHILDREN(sc
->msk_sysctl_tree
),
1589 OID_AUTO
, "intr_rate", CTLTYPE_INT
| CTLFLAG_RW
,
1590 sc
, 0, mskc_sysctl_intr_rate
,
1591 "I", "max number of interrupt per second");
1592 SYSCTL_ADD_INT(&sc
->msk_sysctl_ctx
,
1593 SYSCTL_CHILDREN(sc
->msk_sysctl_tree
), OID_AUTO
,
1594 "defrag_avoided", CTLFLAG_RW
, &sc
->msk_defrag_avoided
,
1595 0, "# of avoided m_defrag on TX path");
1596 SYSCTL_ADD_INT(&sc
->msk_sysctl_ctx
,
1597 SYSCTL_CHILDREN(sc
->msk_sysctl_tree
), OID_AUTO
,
1598 "leading_copied", CTLFLAG_RW
, &sc
->msk_leading_copied
,
1599 0, "# of leading copies on TX path");
1600 SYSCTL_ADD_INT(&sc
->msk_sysctl_ctx
,
1601 SYSCTL_CHILDREN(sc
->msk_sysctl_tree
), OID_AUTO
,
1602 "trailing_copied", CTLFLAG_RW
, &sc
->msk_trailing_copied
,
1603 0, "# of trailing copies on TX path");
1606 CSR_WRITE_2(sc
, B0_CTST
, CS_RST_SET
);
1607 CSR_WRITE_2(sc
, B0_CTST
, CS_RST_CLR
);
1608 sc
->msk_pmd
= CSR_READ_1(sc
, B2_PMD_TYP
);
1609 if (sc
->msk_pmd
== 'L' || sc
->msk_pmd
== 'S')
1610 sc
->msk_coppertype
= 0;
1612 sc
->msk_coppertype
= 1;
1613 /* Check number of MACs. */
1614 sc
->msk_num_port
= 1;
1615 if ((CSR_READ_1(sc
, B2_Y2_HW_RES
) & CFG_DUAL_MAC_MSK
) ==
1617 if (!(CSR_READ_1(sc
, B2_Y2_CLK_GATE
) & Y2_STATUS_LNK2_INAC
))
1621 /* Check bus type. */
1622 if (pci_is_pcie(sc
->msk_dev
) == 0)
1623 sc
->msk_bustype
= MSK_PEX_BUS
;
1624 else if (pci_is_pcix(sc
->msk_dev
) == 0)
1625 sc
->msk_bustype
= MSK_PCIX_BUS
;
1627 sc
->msk_bustype
= MSK_PCI_BUS
;
1629 switch (sc
->msk_hw_id
) {
1630 case CHIP_ID_YUKON_EC
:
1631 case CHIP_ID_YUKON_EC_U
:
1632 sc
->msk_clock
= 125; /* 125 Mhz */
1634 case CHIP_ID_YUKON_EX
:
1635 sc
->msk_clock
= 125; /* 125 Mhz */
1637 case CHIP_ID_YUKON_FE
:
1638 sc
->msk_clock
= 100; /* 100 Mhz */
1639 sc
->msk_pflags
|= MSK_FLAG_FASTETHER
;
1641 case CHIP_ID_YUKON_FE_P
:
1642 sc
->msk_clock
= 50; /* 50 Mhz */
1644 sc
->msk_pflags
|= MSK_FLAG_FASTETHER
;
1645 if (sc
->msk_hw_rev
== CHIP_REV_YU_FE_P_A0
) {
1648 * FE+ A0 has status LE writeback bug so msk(4)
1649 * does not rely on status word of received frame
1650 * in msk_rxeof() which in turn disables all
1651 * hardware assistance bits reported by the status
1652 * word as well as validity of the recevied frame.
1653 * Just pass received frames to upper stack with
1654 * minimal test and let upper stack handle them.
1656 sc
->msk_pflags
|= MSK_FLAG_NORXCHK
;
1659 case CHIP_ID_YUKON_XL
:
1660 sc
->msk_clock
= 156; /* 156 Mhz */
1663 sc
->msk_clock
= 156; /* 156 Mhz */
1667 error
= mskc_status_dma_alloc(sc
);
1671 /* Set base interrupt mask. */
1672 sc
->msk_intrmask
= Y2_IS_HW_ERR
| Y2_IS_STAT_BMU
;
1673 sc
->msk_intrhwemask
= Y2_IS_TIST_OV
| Y2_IS_MST_ERR
|
1674 Y2_IS_IRQ_STAT
| Y2_IS_PCI_EXP
| Y2_IS_PCI_NEXP
;
1676 /* Reset the adapter. */
1679 error
= mskc_setup_rambuffer(sc
);
1683 sc
->msk_devs
[MSK_PORT_A
] = device_add_child(dev
, "msk", -1);
1684 if (sc
->msk_devs
[MSK_PORT_A
] == NULL
) {
1685 device_printf(dev
, "failed to add child for PORT_A\n");
1689 port
= kmalloc(sizeof(*port
), M_DEVBUF
, M_WAITOK
);
1691 device_set_ivars(sc
->msk_devs
[MSK_PORT_A
], port
);
1693 if (sc
->msk_num_port
> 1) {
1694 sc
->msk_devs
[MSK_PORT_B
] = device_add_child(dev
, "msk", -1);
1695 if (sc
->msk_devs
[MSK_PORT_B
] == NULL
) {
1696 device_printf(dev
, "failed to add child for PORT_B\n");
1700 port
= kmalloc(sizeof(*port
), M_DEVBUF
, M_WAITOK
);
1702 device_set_ivars(sc
->msk_devs
[MSK_PORT_B
], port
);
1705 bus_generic_attach(dev
);
1707 error
= bus_setup_intr(dev
, sc
->msk_irq
, INTR_MPSAFE
,
1708 mskc_intr
, sc
, &sc
->msk_intrhand
,
1709 &sc
->msk_serializer
);
1711 device_printf(dev
, "couldn't set up interrupt handler\n");
1715 cpuid
= ithread_cpuid(rman_get_start(sc
->msk_irq
));
1716 KKASSERT(cpuid
>= 0 && cpuid
< ncpus
);
1718 if (sc
->msk_if
[0] != NULL
)
1719 sc
->msk_if
[0]->msk_ifp
->if_cpuid
= cpuid
;
1720 if (sc
->msk_if
[1] != NULL
)
1721 sc
->msk_if
[1]->msk_ifp
->if_cpuid
= cpuid
;
1729 * Shutdown hardware and free up resources. This can be called any
1730 * time after the mutex has been initialized. It is called in both
1731 * the error case in attach and the normal detach case so it needs
1732 * to be careful about only freeing resources that have actually been
1736 msk_detach(device_t dev
)
1738 struct msk_if_softc
*sc_if
= device_get_softc(dev
);
1740 if (device_is_attached(dev
)) {
1741 struct msk_softc
*sc
= sc_if
->msk_softc
;
1742 struct ifnet
*ifp
= &sc_if
->arpcom
.ac_if
;
1744 lwkt_serialize_enter(ifp
->if_serializer
);
1746 if (sc
->msk_intrhand
!= NULL
) {
1747 if (sc
->msk_if
[MSK_PORT_A
] != NULL
)
1748 msk_stop(sc
->msk_if
[MSK_PORT_A
]);
1749 if (sc
->msk_if
[MSK_PORT_B
] != NULL
)
1750 msk_stop(sc
->msk_if
[MSK_PORT_B
]);
1752 bus_teardown_intr(sc
->msk_dev
, sc
->msk_irq
,
1754 sc
->msk_intrhand
= NULL
;
1757 lwkt_serialize_exit(ifp
->if_serializer
);
1759 ether_ifdetach(ifp
);
1762 if (sc_if
->msk_miibus
!= NULL
)
1763 device_delete_child(dev
, sc_if
->msk_miibus
);
1765 msk_txrx_dma_free(sc_if
);
1770 mskc_detach(device_t dev
)
1772 struct msk_softc
*sc
= device_get_softc(dev
);
1776 if (device_is_attached(dev
)) {
1777 KASSERT(sc
->msk_intrhand
== NULL
,
1778 ("intr is not torn down yet\n"));
1782 for (i
= 0; i
< sc
->msk_num_port
; ++i
) {
1783 if (sc
->msk_devs
[i
] != NULL
) {
1784 port
= device_get_ivars(sc
->msk_devs
[i
]);
1786 kfree(port
, M_DEVBUF
);
1787 device_set_ivars(sc
->msk_devs
[i
], NULL
);
1789 device_delete_child(dev
, sc
->msk_devs
[i
]);
1793 /* Disable all interrupts. */
1794 CSR_WRITE_4(sc
, B0_IMSK
, 0);
1795 CSR_READ_4(sc
, B0_IMSK
);
1796 CSR_WRITE_4(sc
, B0_HWE_IMSK
, 0);
1797 CSR_READ_4(sc
, B0_HWE_IMSK
);
1800 CSR_WRITE_2(sc
, B0_CTST
, Y2_LED_STAT_OFF
);
1802 /* Put hardware reset. */
1803 CSR_WRITE_2(sc
, B0_CTST
, CS_RST_SET
);
1805 mskc_status_dma_free(sc
);
1807 if (sc
->msk_irq
!= NULL
) {
1808 bus_release_resource(dev
, SYS_RES_IRQ
, sc
->msk_irq_rid
,
1811 if (sc
->msk_res
!= NULL
) {
1812 bus_release_resource(dev
, sc
->msk_res_type
, sc
->msk_res_rid
,
1816 if (sc
->msk_sysctl_tree
!= NULL
)
1817 sysctl_ctx_free(&sc
->msk_sysctl_ctx
);
1822 /* Create status DMA region. */
1824 mskc_status_dma_alloc(struct msk_softc
*sc
)
1829 error
= bus_dmamem_coherent(NULL
/* XXX parent */, MSK_STAT_ALIGN
, 0,
1830 BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
,
1831 MSK_STAT_RING_SZ
, BUS_DMA_WAITOK
| BUS_DMA_ZERO
, &dmem
);
1833 device_printf(sc
->msk_dev
,
1834 "failed to create status coherent DMA memory\n");
1837 sc
->msk_stat_tag
= dmem
.dmem_tag
;
1838 sc
->msk_stat_map
= dmem
.dmem_map
;
1839 sc
->msk_stat_ring
= dmem
.dmem_addr
;
1840 sc
->msk_stat_ring_paddr
= dmem
.dmem_busaddr
;
1846 mskc_status_dma_free(struct msk_softc
*sc
)
1848 /* Destroy status block. */
1849 if (sc
->msk_stat_tag
) {
1850 bus_dmamap_unload(sc
->msk_stat_tag
, sc
->msk_stat_map
);
1851 bus_dmamem_free(sc
->msk_stat_tag
, sc
->msk_stat_ring
,
1853 bus_dma_tag_destroy(sc
->msk_stat_tag
);
1854 sc
->msk_stat_tag
= NULL
;
1859 msk_txrx_dma_alloc(struct msk_if_softc
*sc_if
)
1863 struct msk_rxdesc
*jrxd
;
1864 struct msk_jpool_entry
*entry
;
1869 /* Create parent DMA tag. */
1872 * It seems that Yukon II supports full 64bits DMA operations. But
1873 * it needs two descriptors(list elements) for 64bits DMA operations.
1874 * Since we don't know what DMA address mappings(32bits or 64bits)
1875 * would be used in advance for each mbufs, we limits its DMA space
1876 * to be in range of 32bits address space. Otherwise, we should check
1877 * what DMA address is used and chain another descriptor for the
1878 * 64bits DMA operation. This also means descriptor ring size is
1879 * variable. Limiting DMA address to be in 32bit address space greatly
1880 * simplyfies descriptor handling and possibly would increase
1881 * performance a bit due to efficient handling of descriptors.
1882 * Apart from harassing checksum offloading mechanisms, it seems
1883 * it's really bad idea to use a seperate descriptor for 64bit
1884 * DMA operation to save small descriptor memory. Anyway, I've
1885 * never seen these exotic scheme on ethernet interface hardware.
1887 error
= bus_dma_tag_create(
1889 1, 0, /* alignment, boundary */
1890 BUS_SPACE_MAXADDR_32BIT
, /* lowaddr */
1891 BUS_SPACE_MAXADDR
, /* highaddr */
1892 NULL
, NULL
, /* filter, filterarg */
1893 BUS_SPACE_MAXSIZE_32BIT
, /* maxsize */
1895 BUS_SPACE_MAXSIZE_32BIT
, /* maxsegsize */
1897 &sc_if
->msk_cdata
.msk_parent_tag
);
1899 device_printf(sc_if
->msk_if_dev
,
1900 "failed to create parent DMA tag\n");
1904 /* Create DMA stuffs for Tx ring. */
1905 error
= msk_dmamem_create(sc_if
->msk_if_dev
, MSK_TX_RING_SZ
,
1906 &sc_if
->msk_cdata
.msk_tx_ring_tag
,
1907 (void *)&sc_if
->msk_rdata
.msk_tx_ring
,
1908 &sc_if
->msk_rdata
.msk_tx_ring_paddr
,
1909 &sc_if
->msk_cdata
.msk_tx_ring_map
);
1911 device_printf(sc_if
->msk_if_dev
,
1912 "failed to create TX ring DMA stuffs\n");
1916 /* Create DMA stuffs for Rx ring. */
1917 error
= msk_dmamem_create(sc_if
->msk_if_dev
, MSK_RX_RING_SZ
,
1918 &sc_if
->msk_cdata
.msk_rx_ring_tag
,
1919 (void *)&sc_if
->msk_rdata
.msk_rx_ring
,
1920 &sc_if
->msk_rdata
.msk_rx_ring_paddr
,
1921 &sc_if
->msk_cdata
.msk_rx_ring_map
);
1923 device_printf(sc_if
->msk_if_dev
,
1924 "failed to create RX ring DMA stuffs\n");
1928 /* Create tag for Tx buffers. */
1929 error
= bus_dma_tag_create(sc_if
->msk_cdata
.msk_parent_tag
,/* parent */
1930 1, 0, /* alignment, boundary */
1931 BUS_SPACE_MAXADDR
, /* lowaddr */
1932 BUS_SPACE_MAXADDR
, /* highaddr */
1933 NULL
, NULL
, /* filter, filterarg */
1934 MSK_JUMBO_FRAMELEN
, /* maxsize */
1935 MSK_MAXTXSEGS
, /* nsegments */
1936 MSK_MAXSGSIZE
, /* maxsegsize */
1937 BUS_DMA_ALLOCNOW
| BUS_DMA_WAITOK
|
1938 BUS_DMA_ONEBPAGE
, /* flags */
1939 &sc_if
->msk_cdata
.msk_tx_tag
);
1941 device_printf(sc_if
->msk_if_dev
,
1942 "failed to create Tx DMA tag\n");
1946 /* Create DMA maps for Tx buffers. */
1947 for (i
= 0; i
< MSK_TX_RING_CNT
; i
++) {
1948 struct msk_txdesc
*txd
= &sc_if
->msk_cdata
.msk_txdesc
[i
];
1950 error
= bus_dmamap_create(sc_if
->msk_cdata
.msk_tx_tag
,
1951 BUS_DMA_WAITOK
| BUS_DMA_ONEBPAGE
,
1954 device_printf(sc_if
->msk_if_dev
,
1955 "failed to create %dth Tx dmamap\n", i
);
1957 for (j
= 0; j
< i
; ++j
) {
1958 txd
= &sc_if
->msk_cdata
.msk_txdesc
[j
];
1959 bus_dmamap_destroy(sc_if
->msk_cdata
.msk_tx_tag
,
1962 bus_dma_tag_destroy(sc_if
->msk_cdata
.msk_tx_tag
);
1963 sc_if
->msk_cdata
.msk_tx_tag
= NULL
;
1970 * Workaround hardware hang which seems to happen when Rx buffer
1971 * is not aligned on multiple of FIFO word(8 bytes).
1973 if (sc_if
->msk_flags
& MSK_FLAG_RAMBUF
)
1974 rxalign
= MSK_RX_BUF_ALIGN
;
1978 /* Create tag for Rx buffers. */
1979 error
= bus_dma_tag_create(sc_if
->msk_cdata
.msk_parent_tag
,/* parent */
1980 rxalign
, 0, /* alignment, boundary */
1981 BUS_SPACE_MAXADDR
, /* lowaddr */
1982 BUS_SPACE_MAXADDR
, /* highaddr */
1983 NULL
, NULL
, /* filter, filterarg */
1984 MCLBYTES
, /* maxsize */
1986 MCLBYTES
, /* maxsegsize */
1987 BUS_DMA_ALLOCNOW
| BUS_DMA_ALIGNED
|
1988 BUS_DMA_WAITOK
, /* flags */
1989 &sc_if
->msk_cdata
.msk_rx_tag
);
1991 device_printf(sc_if
->msk_if_dev
,
1992 "failed to create Rx DMA tag\n");
1996 /* Create DMA maps for Rx buffers. */
1997 error
= bus_dmamap_create(sc_if
->msk_cdata
.msk_rx_tag
, BUS_DMA_WAITOK
,
1998 &sc_if
->msk_cdata
.msk_rx_sparemap
);
2000 device_printf(sc_if
->msk_if_dev
,
2001 "failed to create spare Rx dmamap\n");
2002 bus_dma_tag_destroy(sc_if
->msk_cdata
.msk_rx_tag
);
2003 sc_if
->msk_cdata
.msk_rx_tag
= NULL
;
2006 for (i
= 0; i
< MSK_RX_RING_CNT
; i
++) {
2007 struct msk_rxdesc
*rxd
= &sc_if
->msk_cdata
.msk_rxdesc
[i
];
2009 error
= bus_dmamap_create(sc_if
->msk_cdata
.msk_rx_tag
,
2010 BUS_DMA_WAITOK
, &rxd
->rx_dmamap
);
2012 device_printf(sc_if
->msk_if_dev
,
2013 "failed to create %dth Rx dmamap\n", i
);
2015 for (j
= 0; j
< i
; ++j
) {
2016 rxd
= &sc_if
->msk_cdata
.msk_rxdesc
[j
];
2017 bus_dmamap_destroy(sc_if
->msk_cdata
.msk_rx_tag
,
2020 bus_dmamap_destroy(sc_if
->msk_cdata
.msk_rx_tag
,
2021 sc_if
->msk_cdata
.msk_rx_sparemap
);
2022 bus_dma_tag_destroy(sc_if
->msk_cdata
.msk_rx_tag
);
2023 sc_if
->msk_cdata
.msk_rx_tag
= NULL
;
2030 SLIST_INIT(&sc_if
->msk_jfree_listhead
);
2031 SLIST_INIT(&sc_if
->msk_jinuse_listhead
);
2033 /* Create tag for jumbo Rx ring. */
2034 error
= bus_dma_tag_create(sc_if
->msk_cdata
.msk_parent_tag
,/* parent */
2035 MSK_RING_ALIGN
, 0, /* alignment, boundary */
2036 BUS_SPACE_MAXADDR
, /* lowaddr */
2037 BUS_SPACE_MAXADDR
, /* highaddr */
2038 NULL
, NULL
, /* filter, filterarg */
2039 MSK_JUMBO_RX_RING_SZ
, /* maxsize */
2041 MSK_JUMBO_RX_RING_SZ
, /* maxsegsize */
2043 NULL
, NULL
, /* lockfunc, lockarg */
2044 &sc_if
->msk_cdata
.msk_jumbo_rx_ring_tag
);
2046 device_printf(sc_if
->msk_if_dev
,
2047 "failed to create jumbo Rx ring DMA tag\n");
2051 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
2052 error
= bus_dmamem_alloc(sc_if
->msk_cdata
.msk_jumbo_rx_ring_tag
,
2053 (void **)&sc_if
->msk_rdata
.msk_jumbo_rx_ring
,
2054 BUS_DMA_WAITOK
| BUS_DMA_COHERENT
| BUS_DMA_ZERO
,
2055 &sc_if
->msk_cdata
.msk_jumbo_rx_ring_map
);
2057 device_printf(sc_if
->msk_if_dev
,
2058 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2062 ctx
.msk_busaddr
= 0;
2063 error
= bus_dmamap_load(sc_if
->msk_cdata
.msk_jumbo_rx_ring_tag
,
2064 sc_if
->msk_cdata
.msk_jumbo_rx_ring_map
,
2065 sc_if
->msk_rdata
.msk_jumbo_rx_ring
, MSK_JUMBO_RX_RING_SZ
,
2066 msk_dmamap_cb
, &ctx
, 0);
2068 device_printf(sc_if
->msk_if_dev
,
2069 "failed to load DMA'able memory for jumbo Rx ring\n");
2072 sc_if
->msk_rdata
.msk_jumbo_rx_ring_paddr
= ctx
.msk_busaddr
;
2074 /* Create tag for jumbo buffer blocks. */
2075 error
= bus_dma_tag_create(sc_if
->msk_cdata
.msk_parent_tag
,/* parent */
2076 PAGE_SIZE
, 0, /* alignment, boundary */
2077 BUS_SPACE_MAXADDR
, /* lowaddr */
2078 BUS_SPACE_MAXADDR
, /* highaddr */
2079 NULL
, NULL
, /* filter, filterarg */
2080 MSK_JMEM
, /* maxsize */
2082 MSK_JMEM
, /* maxsegsize */
2084 NULL
, NULL
, /* lockfunc, lockarg */
2085 &sc_if
->msk_cdata
.msk_jumbo_tag
);
2087 device_printf(sc_if
->msk_if_dev
,
2088 "failed to create jumbo Rx buffer block DMA tag\n");
2092 /* Create tag for jumbo Rx buffers. */
2093 error
= bus_dma_tag_create(sc_if
->msk_cdata
.msk_parent_tag
,/* parent */
2094 PAGE_SIZE
, 0, /* alignment, boundary */
2095 BUS_SPACE_MAXADDR
, /* lowaddr */
2096 BUS_SPACE_MAXADDR
, /* highaddr */
2097 NULL
, NULL
, /* filter, filterarg */
2098 MCLBYTES
* MSK_MAXRXSEGS
, /* maxsize */
2099 MSK_MAXRXSEGS
, /* nsegments */
2100 MSK_JLEN
, /* maxsegsize */
2102 NULL
, NULL
, /* lockfunc, lockarg */
2103 &sc_if
->msk_cdata
.msk_jumbo_rx_tag
);
2105 device_printf(sc_if
->msk_if_dev
,
2106 "failed to create jumbo Rx DMA tag\n");
2110 /* Create DMA maps for jumbo Rx buffers. */
2111 if ((error
= bus_dmamap_create(sc_if
->msk_cdata
.msk_jumbo_rx_tag
, 0,
2112 &sc_if
->msk_cdata
.msk_jumbo_rx_sparemap
)) != 0) {
2113 device_printf(sc_if
->msk_if_dev
,
2114 "failed to create spare jumbo Rx dmamap\n");
2117 for (i
= 0; i
< MSK_JUMBO_RX_RING_CNT
; i
++) {
2118 jrxd
= &sc_if
->msk_cdata
.msk_jumbo_rxdesc
[i
];
2120 jrxd
->rx_dmamap
= NULL
;
2121 error
= bus_dmamap_create(sc_if
->msk_cdata
.msk_jumbo_rx_tag
, 0,
2124 device_printf(sc_if
->msk_if_dev
,
2125 "failed to create jumbo Rx dmamap\n");
2130 /* Allocate DMA'able memory and load the DMA map for jumbo buf. */
2131 error
= bus_dmamem_alloc(sc_if
->msk_cdata
.msk_jumbo_tag
,
2132 (void **)&sc_if
->msk_rdata
.msk_jumbo_buf
,
2133 BUS_DMA_WAITOK
| BUS_DMA_COHERENT
| BUS_DMA_ZERO
,
2134 &sc_if
->msk_cdata
.msk_jumbo_map
);
2136 device_printf(sc_if
->msk_if_dev
,
2137 "failed to allocate DMA'able memory for jumbo buf\n");
2141 ctx
.msk_busaddr
= 0;
2142 error
= bus_dmamap_load(sc_if
->msk_cdata
.msk_jumbo_tag
,
2143 sc_if
->msk_cdata
.msk_jumbo_map
, sc_if
->msk_rdata
.msk_jumbo_buf
,
2144 MSK_JMEM
, msk_dmamap_cb
, &ctx
, 0);
2146 device_printf(sc_if
->msk_if_dev
,
2147 "failed to load DMA'able memory for jumbobuf\n");
2150 sc_if
->msk_rdata
.msk_jumbo_buf_paddr
= ctx
.msk_busaddr
;
2153 * Now divide it up into 9K pieces and save the addresses
2156 ptr
= sc_if
->msk_rdata
.msk_jumbo_buf
;
2157 for (i
= 0; i
< MSK_JSLOTS
; i
++) {
2158 sc_if
->msk_cdata
.msk_jslots
[i
] = ptr
;
2160 entry
= malloc(sizeof(struct msk_jpool_entry
),
2161 M_DEVBUF
, M_WAITOK
);
2162 if (entry
== NULL
) {
2163 device_printf(sc_if
->msk_if_dev
,
2164 "no memory for jumbo buffers!\n");
2169 SLIST_INSERT_HEAD(&sc_if
->msk_jfree_listhead
, entry
,
2177 msk_txrx_dma_free(struct msk_if_softc
*sc_if
)
2179 struct msk_txdesc
*txd
;
2180 struct msk_rxdesc
*rxd
;
2182 struct msk_rxdesc
*jrxd
;
2183 struct msk_jpool_entry
*entry
;
2188 MSK_JLIST_LOCK(sc_if
);
2189 while ((entry
= SLIST_FIRST(&sc_if
->msk_jinuse_listhead
))) {
2190 device_printf(sc_if
->msk_if_dev
,
2191 "asked to free buffer that is in use!\n");
2192 SLIST_REMOVE_HEAD(&sc_if
->msk_jinuse_listhead
, jpool_entries
);
2193 SLIST_INSERT_HEAD(&sc_if
->msk_jfree_listhead
, entry
,
2197 while (!SLIST_EMPTY(&sc_if
->msk_jfree_listhead
)) {
2198 entry
= SLIST_FIRST(&sc_if
->msk_jfree_listhead
);
2199 SLIST_REMOVE_HEAD(&sc_if
->msk_jfree_listhead
, jpool_entries
);
2200 free(entry
, M_DEVBUF
);
2202 MSK_JLIST_UNLOCK(sc_if
);
2204 /* Destroy jumbo buffer block. */
2205 if (sc_if
->msk_cdata
.msk_jumbo_map
)
2206 bus_dmamap_unload(sc_if
->msk_cdata
.msk_jumbo_tag
,
2207 sc_if
->msk_cdata
.msk_jumbo_map
);
2209 if (sc_if
->msk_rdata
.msk_jumbo_buf
) {
2210 bus_dmamem_free(sc_if
->msk_cdata
.msk_jumbo_tag
,
2211 sc_if
->msk_rdata
.msk_jumbo_buf
,
2212 sc_if
->msk_cdata
.msk_jumbo_map
);
2213 sc_if
->msk_rdata
.msk_jumbo_buf
= NULL
;
2214 sc_if
->msk_cdata
.msk_jumbo_map
= NULL
;
2217 /* Jumbo Rx ring. */
2218 if (sc_if
->msk_cdata
.msk_jumbo_rx_ring_tag
) {
2219 if (sc_if
->msk_cdata
.msk_jumbo_rx_ring_map
)
2220 bus_dmamap_unload(sc_if
->msk_cdata
.msk_jumbo_rx_ring_tag
,
2221 sc_if
->msk_cdata
.msk_jumbo_rx_ring_map
);
2222 if (sc_if
->msk_cdata
.msk_jumbo_rx_ring_map
&&
2223 sc_if
->msk_rdata
.msk_jumbo_rx_ring
)
2224 bus_dmamem_free(sc_if
->msk_cdata
.msk_jumbo_rx_ring_tag
,
2225 sc_if
->msk_rdata
.msk_jumbo_rx_ring
,
2226 sc_if
->msk_cdata
.msk_jumbo_rx_ring_map
);
2227 sc_if
->msk_rdata
.msk_jumbo_rx_ring
= NULL
;
2228 sc_if
->msk_cdata
.msk_jumbo_rx_ring_map
= NULL
;
2229 bus_dma_tag_destroy(sc_if
->msk_cdata
.msk_jumbo_rx_ring_tag
);
2230 sc_if
->msk_cdata
.msk_jumbo_rx_ring_tag
= NULL
;
2233 /* Jumbo Rx buffers. */
2234 if (sc_if
->msk_cdata
.msk_jumbo_rx_tag
) {
2235 for (i
= 0; i
< MSK_JUMBO_RX_RING_CNT
; i
++) {
2236 jrxd
= &sc_if
->msk_cdata
.msk_jumbo_rxdesc
[i
];
2237 if (jrxd
->rx_dmamap
) {
2239 sc_if
->msk_cdata
.msk_jumbo_rx_tag
,
2241 jrxd
->rx_dmamap
= NULL
;
2244 if (sc_if
->msk_cdata
.msk_jumbo_rx_sparemap
) {
2245 bus_dmamap_destroy(sc_if
->msk_cdata
.msk_jumbo_rx_tag
,
2246 sc_if
->msk_cdata
.msk_jumbo_rx_sparemap
);
2247 sc_if
->msk_cdata
.msk_jumbo_rx_sparemap
= 0;
2249 bus_dma_tag_destroy(sc_if
->msk_cdata
.msk_jumbo_rx_tag
);
2250 sc_if
->msk_cdata
.msk_jumbo_rx_tag
= NULL
;
2255 msk_dmamem_destroy(sc_if
->msk_cdata
.msk_tx_ring_tag
,
2256 sc_if
->msk_rdata
.msk_tx_ring
,
2257 sc_if
->msk_cdata
.msk_tx_ring_map
);
2260 msk_dmamem_destroy(sc_if
->msk_cdata
.msk_rx_ring_tag
,
2261 sc_if
->msk_rdata
.msk_rx_ring
,
2262 sc_if
->msk_cdata
.msk_rx_ring_map
);
2265 if (sc_if
->msk_cdata
.msk_tx_tag
) {
2266 for (i
= 0; i
< MSK_TX_RING_CNT
; i
++) {
2267 txd
= &sc_if
->msk_cdata
.msk_txdesc
[i
];
2268 bus_dmamap_destroy(sc_if
->msk_cdata
.msk_tx_tag
,
2271 bus_dma_tag_destroy(sc_if
->msk_cdata
.msk_tx_tag
);
2272 sc_if
->msk_cdata
.msk_tx_tag
= NULL
;
2276 if (sc_if
->msk_cdata
.msk_rx_tag
) {
2277 for (i
= 0; i
< MSK_RX_RING_CNT
; i
++) {
2278 rxd
= &sc_if
->msk_cdata
.msk_rxdesc
[i
];
2279 bus_dmamap_destroy(sc_if
->msk_cdata
.msk_rx_tag
,
2282 bus_dmamap_destroy(sc_if
->msk_cdata
.msk_rx_tag
,
2283 sc_if
->msk_cdata
.msk_rx_sparemap
);
2284 bus_dma_tag_destroy(sc_if
->msk_cdata
.msk_rx_tag
);
2285 sc_if
->msk_cdata
.msk_rx_tag
= NULL
;
2288 if (sc_if
->msk_cdata
.msk_parent_tag
) {
2289 bus_dma_tag_destroy(sc_if
->msk_cdata
.msk_parent_tag
);
2290 sc_if
->msk_cdata
.msk_parent_tag
= NULL
;
2296 * Allocate a jumbo buffer.
2299 msk_jalloc(struct msk_if_softc
*sc_if
)
2301 struct msk_jpool_entry
*entry
;
2303 MSK_JLIST_LOCK(sc_if
);
2305 entry
= SLIST_FIRST(&sc_if
->msk_jfree_listhead
);
2307 if (entry
== NULL
) {
2308 MSK_JLIST_UNLOCK(sc_if
);
2312 SLIST_REMOVE_HEAD(&sc_if
->msk_jfree_listhead
, jpool_entries
);
2313 SLIST_INSERT_HEAD(&sc_if
->msk_jinuse_listhead
, entry
, jpool_entries
);
2315 MSK_JLIST_UNLOCK(sc_if
);
2317 return (sc_if
->msk_cdata
.msk_jslots
[entry
->slot
]);
2321 * Release a jumbo buffer.
2324 msk_jfree(void *buf
, void *args
)
2326 struct msk_if_softc
*sc_if
;
2327 struct msk_jpool_entry
*entry
;
2330 /* Extract the softc struct pointer. */
2331 sc_if
= (struct msk_if_softc
*)args
;
2332 KASSERT(sc_if
!= NULL
, ("%s: can't find softc pointer!", __func__
));
2334 MSK_JLIST_LOCK(sc_if
);
2335 /* Calculate the slot this buffer belongs to. */
2336 i
= ((vm_offset_t
)buf
2337 - (vm_offset_t
)sc_if
->msk_rdata
.msk_jumbo_buf
) / MSK_JLEN
;
2338 KASSERT(i
>= 0 && i
< MSK_JSLOTS
,
2339 ("%s: asked to free buffer that we don't manage!", __func__
));
2341 entry
= SLIST_FIRST(&sc_if
->msk_jinuse_listhead
);
2342 KASSERT(entry
!= NULL
, ("%s: buffer not in use!", __func__
));
2344 SLIST_REMOVE_HEAD(&sc_if
->msk_jinuse_listhead
, jpool_entries
);
2345 SLIST_INSERT_HEAD(&sc_if
->msk_jfree_listhead
, entry
, jpool_entries
);
2346 if (SLIST_EMPTY(&sc_if
->msk_jinuse_listhead
))
2349 MSK_JLIST_UNLOCK(sc_if
);
2354 msk_encap(struct msk_if_softc
*sc_if
, struct mbuf
**m_head
)
2356 struct msk_txdesc
*txd
, *txd_last
;
2357 struct msk_tx_desc
*tx_le
;
2360 bus_dma_segment_t txsegs
[MSK_MAXTXSEGS
];
2361 uint32_t control
, prod
, si
;
2362 uint16_t offset
, tcp_offset
;
2363 int error
, i
, nsegs
, maxsegs
, defrag
;
2365 maxsegs
= MSK_TX_RING_CNT
- sc_if
->msk_cdata
.msk_tx_cnt
-
2366 MSK_RESERVED_TX_DESC_CNT
;
2367 KASSERT(maxsegs
>= MSK_SPARE_TX_DESC_CNT
,
2368 ("not enough spare TX desc\n"));
2369 if (maxsegs
> MSK_MAXTXSEGS
)
2370 maxsegs
= MSK_MAXTXSEGS
;
2373 * Align TX buffer to 64bytes boundary. This greately improves
2374 * bulk data TX performance on my 88E8053 (+100Mbps) at least.
2375 * Try avoiding m_defrag(), if the mbufs are not chained together
2376 * by m_next (i.e. m->m_len == m->m_pkthdr.len).
2379 #define MSK_TXBUF_ALIGN 64
2380 #define MSK_TXBUF_MASK (MSK_TXBUF_ALIGN - 1)
2384 if (m
->m_len
== m
->m_pkthdr
.len
) {
2387 space
= ((uintptr_t)m
->m_data
& MSK_TXBUF_MASK
);
2389 if (M_WRITABLE(m
)) {
2390 if (M_TRAILINGSPACE(m
) >= space
) {
2392 bcopy(m
->m_data
, m
->m_data
+ space
,
2396 sc_if
->msk_softc
->msk_trailing_copied
++;
2398 space
= MSK_TXBUF_ALIGN
- space
;
2399 if (M_LEADINGSPACE(m
) >= space
) {
2400 /* e.g. Small UDP datagrams */
2407 msk_leading_copied
++;
2412 /* e.g. on forwarding path */
2417 m
= m_defrag(*m_head
, MB_DONTWAIT
);
2425 sc_if
->msk_softc
->msk_defrag_avoided
++;
2428 #undef MSK_TXBUF_MASK
2429 #undef MSK_TXBUF_ALIGN
2431 tcp_offset
= offset
= 0;
2432 if (m
->m_pkthdr
.csum_flags
& MSK_CSUM_FEATURES
) {
2434 * Since mbuf has no protocol specific structure information
2435 * in it we have to inspect protocol information here to
2436 * setup TSO and checksum offload. I don't know why Marvell
2437 * made a such decision in chip design because other GigE
2438 * hardwares normally takes care of all these chores in
2439 * hardware. However, TSO performance of Yukon II is very
2440 * good such that it's worth to implement it.
2442 struct ether_header
*eh
;
2445 /* TODO check for M_WRITABLE(m) */
2447 offset
= sizeof(struct ether_header
);
2448 m
= m_pullup(m
, offset
);
2453 eh
= mtod(m
, struct ether_header
*);
2454 /* Check if hardware VLAN insertion is off. */
2455 if (eh
->ether_type
== htons(ETHERTYPE_VLAN
)) {
2456 offset
= sizeof(struct ether_vlan_header
);
2457 m
= m_pullup(m
, offset
);
2463 m
= m_pullup(m
, offset
+ sizeof(struct ip
));
2468 ip
= (struct ip
*)(mtod(m
, char *) + offset
);
2469 offset
+= (ip
->ip_hl
<< 2);
2470 tcp_offset
= offset
;
2472 * It seems that Yukon II has Tx checksum offload bug for
2473 * small TCP packets that's less than 60 bytes in size
2474 * (e.g. TCP window probe packet, pure ACK packet).
2475 * Common work around like padding with zeros to make the
2476 * frame minimum ethernet frame size didn't work at all.
2477 * Instead of disabling checksum offload completely we
2478 * resort to S/W checksum routine when we encounter short
2480 * Short UDP packets appear to be handled correctly by
2483 if (m
->m_pkthdr
.len
< MSK_MIN_FRAMELEN
&&
2484 (m
->m_pkthdr
.csum_flags
& CSUM_TCP
) != 0) {
2487 csum
= in_cksum_skip(m
, ntohs(ip
->ip_len
) + offset
-
2488 (ip
->ip_hl
<< 2), offset
);
2489 *(uint16_t *)(m
->m_data
+ offset
+
2490 m
->m_pkthdr
.csum_data
) = csum
;
2491 m
->m_pkthdr
.csum_flags
&= ~CSUM_TCP
;
2496 prod
= sc_if
->msk_cdata
.msk_tx_prod
;
2497 txd
= &sc_if
->msk_cdata
.msk_txdesc
[prod
];
2499 map
= txd
->tx_dmamap
;
2501 error
= bus_dmamap_load_mbuf_defrag(sc_if
->msk_cdata
.msk_tx_tag
, map
,
2502 m_head
, txsegs
, maxsegs
, &nsegs
, BUS_DMA_NOWAIT
);
2508 bus_dmamap_sync(sc_if
->msk_cdata
.msk_tx_tag
, map
, BUS_DMASYNC_PREWRITE
);
2515 /* Check if we have a VLAN tag to insert. */
2516 if ((m
->m_flags
& M_VLANTAG
) != 0) {
2517 tx_le
= &sc_if
->msk_rdata
.msk_tx_ring
[prod
];
2518 tx_le
->msk_addr
= htole32(0);
2519 tx_le
->msk_control
= htole32(OP_VLAN
| HW_OWNER
|
2520 htons(m
->m_pkthdr
.ether_vtag
));
2521 sc_if
->msk_cdata
.msk_tx_cnt
++;
2522 MSK_INC(prod
, MSK_TX_RING_CNT
);
2523 control
|= INS_VLAN
;
2526 /* Check if we have to handle checksum offload. */
2527 if (m
->m_pkthdr
.csum_flags
& MSK_CSUM_FEATURES
) {
2528 tx_le
= &sc_if
->msk_rdata
.msk_tx_ring
[prod
];
2529 tx_le
->msk_addr
= htole32(((tcp_offset
+ m
->m_pkthdr
.csum_data
)
2530 & 0xffff) | ((uint32_t)tcp_offset
<< 16));
2531 tx_le
->msk_control
= htole32(1 << 16 | (OP_TCPLISW
| HW_OWNER
));
2532 control
= CALSUM
| WR_SUM
| INIT_SUM
| LOCK_SUM
;
2533 if ((m
->m_pkthdr
.csum_flags
& CSUM_UDP
) != 0)
2535 sc_if
->msk_cdata
.msk_tx_cnt
++;
2536 MSK_INC(prod
, MSK_TX_RING_CNT
);
2540 tx_le
= &sc_if
->msk_rdata
.msk_tx_ring
[prod
];
2541 tx_le
->msk_addr
= htole32(MSK_ADDR_LO(txsegs
[0].ds_addr
));
2542 tx_le
->msk_control
= htole32(txsegs
[0].ds_len
| control
|
2544 sc_if
->msk_cdata
.msk_tx_cnt
++;
2545 MSK_INC(prod
, MSK_TX_RING_CNT
);
2547 for (i
= 1; i
< nsegs
; i
++) {
2548 tx_le
= &sc_if
->msk_rdata
.msk_tx_ring
[prod
];
2549 tx_le
->msk_addr
= htole32(MSK_ADDR_LO(txsegs
[i
].ds_addr
));
2550 tx_le
->msk_control
= htole32(txsegs
[i
].ds_len
| control
|
2551 OP_BUFFER
| HW_OWNER
);
2552 sc_if
->msk_cdata
.msk_tx_cnt
++;
2553 MSK_INC(prod
, MSK_TX_RING_CNT
);
2555 /* Update producer index. */
2556 sc_if
->msk_cdata
.msk_tx_prod
= prod
;
2558 /* Set EOP on the last desciptor. */
2559 prod
= (prod
+ MSK_TX_RING_CNT
- 1) % MSK_TX_RING_CNT
;
2560 tx_le
= &sc_if
->msk_rdata
.msk_tx_ring
[prod
];
2561 tx_le
->msk_control
|= htole32(EOP
);
2563 /* Turn the first descriptor ownership to hardware. */
2564 tx_le
= &sc_if
->msk_rdata
.msk_tx_ring
[si
];
2565 tx_le
->msk_control
|= htole32(HW_OWNER
);
2567 txd
= &sc_if
->msk_cdata
.msk_txdesc
[prod
];
2568 map
= txd_last
->tx_dmamap
;
2569 txd_last
->tx_dmamap
= txd
->tx_dmamap
;
2570 txd
->tx_dmamap
= map
;
2577 msk_start(struct ifnet
*ifp
)
2579 struct msk_if_softc
*sc_if
;
2580 struct mbuf
*m_head
;
2583 sc_if
= ifp
->if_softc
;
2585 ASSERT_SERIALIZED(ifp
->if_serializer
);
2587 if (!sc_if
->msk_link
) {
2588 ifq_purge(&ifp
->if_snd
);
2592 if ((ifp
->if_flags
& (IFF_RUNNING
| IFF_OACTIVE
)) != IFF_RUNNING
)
2596 while (!ifq_is_empty(&ifp
->if_snd
)) {
2597 if (MSK_IS_OACTIVE(sc_if
)) {
2598 ifp
->if_flags
|= IFF_OACTIVE
;
2602 m_head
= ifq_dequeue(&ifp
->if_snd
, NULL
);
2607 * Pack the data into the transmit ring. If we
2608 * don't have room, set the OACTIVE flag and wait
2609 * for the NIC to drain the ring.
2611 if (msk_encap(sc_if
, &m_head
) != 0) {
2613 if (sc_if
->msk_cdata
.msk_tx_cnt
== 0) {
2616 ifp
->if_flags
|= IFF_OACTIVE
;
2623 * If there's a BPF listener, bounce a copy of this frame
2626 BPF_MTAP(ifp
, m_head
);
2631 CSR_WRITE_2(sc_if
->msk_softc
,
2632 Y2_PREF_Q_ADDR(sc_if
->msk_txq
, PREF_UNIT_PUT_IDX_REG
),
2633 sc_if
->msk_cdata
.msk_tx_prod
);
2635 /* Set a timeout in case the chip goes out to lunch. */
2636 ifp
->if_timer
= MSK_TX_TIMEOUT
;
2641 msk_watchdog(struct ifnet
*ifp
)
2643 struct msk_if_softc
*sc_if
= ifp
->if_softc
;
2647 ASSERT_SERIALIZED(ifp
->if_serializer
);
2649 if (sc_if
->msk_link
== 0) {
2651 if_printf(sc_if
->msk_ifp
, "watchdog timeout "
2659 * Reclaim first as there is a possibility of losing Tx completion
2662 ridx
= sc_if
->msk_port
== MSK_PORT_A
? STAT_TXA1_RIDX
: STAT_TXA2_RIDX
;
2663 idx
= CSR_READ_2(sc_if
->msk_softc
, ridx
);
2664 if (sc_if
->msk_cdata
.msk_tx_cons
!= idx
) {
2665 msk_txeof(sc_if
, idx
);
2666 if (sc_if
->msk_cdata
.msk_tx_cnt
== 0) {
2667 if_printf(ifp
, "watchdog timeout (missed Tx interrupts) "
2669 if (!ifq_is_empty(&ifp
->if_snd
))
2675 if_printf(ifp
, "watchdog timeout\n");
2678 if (!ifq_is_empty(&ifp
->if_snd
))
2683 mskc_shutdown(device_t dev
)
2685 struct msk_softc
*sc
= device_get_softc(dev
);
2688 lwkt_serialize_enter(&sc
->msk_serializer
);
2690 for (i
= 0; i
< sc
->msk_num_port
; i
++) {
2691 if (sc
->msk_if
[i
] != NULL
)
2692 msk_stop(sc
->msk_if
[i
]);
2695 /* Disable all interrupts. */
2696 CSR_WRITE_4(sc
, B0_IMSK
, 0);
2697 CSR_READ_4(sc
, B0_IMSK
);
2698 CSR_WRITE_4(sc
, B0_HWE_IMSK
, 0);
2699 CSR_READ_4(sc
, B0_HWE_IMSK
);
2701 /* Put hardware reset. */
2702 CSR_WRITE_2(sc
, B0_CTST
, CS_RST_SET
);
2704 lwkt_serialize_exit(&sc
->msk_serializer
);
2709 mskc_suspend(device_t dev
)
2711 struct msk_softc
*sc
= device_get_softc(dev
);
2714 lwkt_serialize_enter(&sc
->msk_serializer
);
2716 for (i
= 0; i
< sc
->msk_num_port
; i
++) {
2717 if (sc
->msk_if
[i
] != NULL
&& sc
->msk_if
[i
]->msk_ifp
!= NULL
&&
2718 ((sc
->msk_if
[i
]->msk_ifp
->if_flags
& IFF_RUNNING
) != 0))
2719 msk_stop(sc
->msk_if
[i
]);
2722 /* Disable all interrupts. */
2723 CSR_WRITE_4(sc
, B0_IMSK
, 0);
2724 CSR_READ_4(sc
, B0_IMSK
);
2725 CSR_WRITE_4(sc
, B0_HWE_IMSK
, 0);
2726 CSR_READ_4(sc
, B0_HWE_IMSK
);
2728 mskc_phy_power(sc
, MSK_PHY_POWERDOWN
);
2730 /* Put hardware reset. */
2731 CSR_WRITE_2(sc
, B0_CTST
, CS_RST_SET
);
2732 sc
->msk_suspended
= 1;
2734 lwkt_serialize_exit(&sc
->msk_serializer
);
2740 mskc_resume(device_t dev
)
2742 struct msk_softc
*sc
= device_get_softc(dev
);
2745 lwkt_serialize_enter(&sc
->msk_serializer
);
2748 for (i
= 0; i
< sc
->msk_num_port
; i
++) {
2749 if (sc
->msk_if
[i
] != NULL
&& sc
->msk_if
[i
]->msk_ifp
!= NULL
&&
2750 ((sc
->msk_if
[i
]->msk_ifp
->if_flags
& IFF_UP
) != 0))
2751 msk_init(sc
->msk_if
[i
]);
2753 sc
->msk_suspended
= 0;
2755 lwkt_serialize_exit(&sc
->msk_serializer
);
2761 msk_rxeof(struct msk_if_softc
*sc_if
, uint32_t status
, int len
,
2762 struct mbuf_chain
*chain
)
2766 struct msk_rxdesc
*rxd
;
2769 ifp
= sc_if
->msk_ifp
;
2771 cons
= sc_if
->msk_cdata
.msk_rx_cons
;
2773 rxlen
= status
>> 16;
2774 if ((status
& GMR_FS_VLAN
) != 0 &&
2775 (ifp
->if_capenable
& IFCAP_VLAN_HWTAGGING
) != 0)
2776 rxlen
-= EVL_ENCAPLEN
;
2777 if (sc_if
->msk_flags
& MSK_FLAG_NORXCHK
) {
2779 * For controllers that returns bogus status code
2780 * just do minimal check and let upper stack
2781 * handle this frame.
2783 if (len
> MSK_MAX_FRAMELEN
|| len
< ETHER_HDR_LEN
) {
2785 msk_discard_rxbuf(sc_if
, cons
);
2788 } else if (len
> sc_if
->msk_framesize
||
2789 ((status
& GMR_FS_ANY_ERR
) != 0) ||
2790 ((status
& GMR_FS_RX_OK
) == 0) || (rxlen
!= len
)) {
2791 /* Don't count flow-control packet as errors. */
2792 if ((status
& GMR_FS_GOOD_FC
) == 0)
2794 msk_discard_rxbuf(sc_if
, cons
);
2797 rxd
= &sc_if
->msk_cdata
.msk_rxdesc
[cons
];
2799 if (msk_newbuf(sc_if
, cons
, 0) != 0) {
2801 /* Reuse old buffer. */
2802 msk_discard_rxbuf(sc_if
, cons
);
2805 m
->m_pkthdr
.rcvif
= ifp
;
2806 m
->m_pkthdr
.len
= m
->m_len
= len
;
2809 /* Check for VLAN tagged packets. */
2810 if ((status
& GMR_FS_VLAN
) != 0 &&
2811 (ifp
->if_capenable
& IFCAP_VLAN_HWTAGGING
) != 0) {
2812 m
->m_pkthdr
.ether_vtag
= sc_if
->msk_vtag
;
2813 m
->m_flags
|= M_VLANTAG
;
2817 ether_input_chain(ifp
, m
, NULL
, chain
);
2820 MSK_INC(sc_if
->msk_cdata
.msk_rx_cons
, MSK_RX_RING_CNT
);
2821 MSK_INC(sc_if
->msk_cdata
.msk_rx_prod
, MSK_RX_RING_CNT
);
2826 msk_jumbo_rxeof(struct msk_if_softc
*sc_if
, uint32_t status
, int len
)
2830 struct msk_rxdesc
*jrxd
;
2833 ifp
= sc_if
->msk_ifp
;
2835 MSK_IF_LOCK_ASSERT(sc_if
);
2837 cons
= sc_if
->msk_cdata
.msk_rx_cons
;
2839 rxlen
= status
>> 16;
2840 if ((status
& GMR_FS_VLAN
) != 0 &&
2841 (ifp
->if_capenable
& IFCAP_VLAN_HWTAGGING
) != 0)
2842 rxlen
-= ETHER_VLAN_ENCAP_LEN
;
2843 if (len
> sc_if
->msk_framesize
||
2844 ((status
& GMR_FS_ANY_ERR
) != 0) ||
2845 ((status
& GMR_FS_RX_OK
) == 0) || (rxlen
!= len
)) {
2846 /* Don't count flow-control packet as errors. */
2847 if ((status
& GMR_FS_GOOD_FC
) == 0)
2849 msk_discard_jumbo_rxbuf(sc_if
, cons
);
2852 jrxd
= &sc_if
->msk_cdata
.msk_jumbo_rxdesc
[cons
];
2854 if (msk_jumbo_newbuf(sc_if
, cons
) != 0) {
2856 /* Reuse old buffer. */
2857 msk_discard_jumbo_rxbuf(sc_if
, cons
);
2860 m
->m_pkthdr
.rcvif
= ifp
;
2861 m
->m_pkthdr
.len
= m
->m_len
= len
;
2863 /* Check for VLAN tagged packets. */
2864 if ((status
& GMR_FS_VLAN
) != 0 &&
2865 (ifp
->if_capenable
& IFCAP_VLAN_HWTAGGING
) != 0) {
2866 m
->m_pkthdr
.ether_vtag
= sc_if
->msk_vtag
;
2867 m
->m_flags
|= M_VLANTAG
;
2869 MSK_IF_UNLOCK(sc_if
);
2870 (*ifp
->if_input
)(ifp
, m
);
2874 MSK_INC(sc_if
->msk_cdata
.msk_rx_cons
, MSK_JUMBO_RX_RING_CNT
);
2875 MSK_INC(sc_if
->msk_cdata
.msk_rx_prod
, MSK_JUMBO_RX_RING_CNT
);
2880 msk_txeof(struct msk_if_softc
*sc_if
, int idx
)
2882 struct msk_txdesc
*txd
;
2883 struct msk_tx_desc
*cur_tx
;
2888 ifp
= sc_if
->msk_ifp
;
2891 * Go through our tx ring and free mbufs for those
2892 * frames that have been sent.
2894 cons
= sc_if
->msk_cdata
.msk_tx_cons
;
2896 for (; cons
!= idx
; MSK_INC(cons
, MSK_TX_RING_CNT
)) {
2897 if (sc_if
->msk_cdata
.msk_tx_cnt
<= 0)
2900 cur_tx
= &sc_if
->msk_rdata
.msk_tx_ring
[cons
];
2901 control
= le32toh(cur_tx
->msk_control
);
2902 sc_if
->msk_cdata
.msk_tx_cnt
--;
2903 if ((control
& EOP
) == 0)
2905 txd
= &sc_if
->msk_cdata
.msk_txdesc
[cons
];
2906 bus_dmamap_unload(sc_if
->msk_cdata
.msk_tx_tag
, txd
->tx_dmamap
);
2909 KASSERT(txd
->tx_m
!= NULL
, ("%s: freeing NULL mbuf!",
2916 sc_if
->msk_cdata
.msk_tx_cons
= cons
;
2917 if (!MSK_IS_OACTIVE(sc_if
))
2918 ifp
->if_flags
&= ~IFF_OACTIVE
;
2919 if (sc_if
->msk_cdata
.msk_tx_cnt
== 0)
2921 /* No need to sync LEs as we didn't update LEs. */
2926 msk_tick(void *xsc_if
)
2928 struct msk_if_softc
*sc_if
= xsc_if
;
2929 struct ifnet
*ifp
= &sc_if
->arpcom
.ac_if
;
2930 struct mii_data
*mii
;
2932 lwkt_serialize_enter(ifp
->if_serializer
);
2934 mii
= device_get_softc(sc_if
->msk_miibus
);
2937 callout_reset(&sc_if
->msk_tick_ch
, hz
, msk_tick
, sc_if
);
2939 lwkt_serialize_exit(ifp
->if_serializer
);
2943 msk_intr_phy(struct msk_if_softc
*sc_if
)
2947 msk_phy_readreg(sc_if
, PHY_ADDR_MARV
, PHY_MARV_INT_STAT
);
2948 status
= msk_phy_readreg(sc_if
, PHY_ADDR_MARV
, PHY_MARV_INT_STAT
);
2949 /* Handle FIFO Underrun/Overflow? */
2950 if (status
& PHY_M_IS_FIFO_ERROR
) {
2951 device_printf(sc_if
->msk_if_dev
,
2952 "PHY FIFO underrun/overflow.\n");
2957 msk_intr_gmac(struct msk_if_softc
*sc_if
)
2959 struct msk_softc
*sc
;
2962 sc
= sc_if
->msk_softc
;
2963 status
= CSR_READ_1(sc
, MR_ADDR(sc_if
->msk_port
, GMAC_IRQ_SRC
));
2965 /* GMAC Rx FIFO overrun. */
2966 if ((status
& GM_IS_RX_FF_OR
) != 0) {
2967 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, RX_GMF_CTRL_T
),
2970 /* GMAC Tx FIFO underrun. */
2971 if ((status
& GM_IS_TX_FF_UR
) != 0) {
2972 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, TX_GMF_CTRL_T
),
2974 device_printf(sc_if
->msk_if_dev
, "Tx FIFO underrun!\n");
2977 * In case of Tx underrun, we may need to flush/reset
2978 * Tx MAC but that would also require resynchronization
2979 * with status LEs. Reintializing status LEs would
2980 * affect other port in dual MAC configuration so it
2981 * should be avoided as possible as we can.
2982 * Due to lack of documentation it's all vague guess but
2983 * it needs more investigation.
2989 msk_handle_hwerr(struct msk_if_softc
*sc_if
, uint32_t status
)
2991 struct msk_softc
*sc
;
2993 sc
= sc_if
->msk_softc
;
2994 if ((status
& Y2_IS_PAR_RD1
) != 0) {
2995 device_printf(sc_if
->msk_if_dev
,
2996 "RAM buffer read parity error\n");
2998 CSR_WRITE_2(sc
, SELECT_RAM_BUFFER(sc_if
->msk_port
, B3_RI_CTRL
),
3001 if ((status
& Y2_IS_PAR_WR1
) != 0) {
3002 device_printf(sc_if
->msk_if_dev
,
3003 "RAM buffer write parity error\n");
3005 CSR_WRITE_2(sc
, SELECT_RAM_BUFFER(sc_if
->msk_port
, B3_RI_CTRL
),
3008 if ((status
& Y2_IS_PAR_MAC1
) != 0) {
3009 device_printf(sc_if
->msk_if_dev
, "Tx MAC parity error\n");
3011 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, TX_GMF_CTRL_T
),
3014 if ((status
& Y2_IS_PAR_RX1
) != 0) {
3015 device_printf(sc_if
->msk_if_dev
, "Rx parity error\n");
3017 CSR_WRITE_4(sc
, Q_ADDR(sc_if
->msk_rxq
, Q_CSR
), BMU_CLR_IRQ_PAR
);
3019 if ((status
& (Y2_IS_TCP_TXS1
| Y2_IS_TCP_TXA1
)) != 0) {
3020 device_printf(sc_if
->msk_if_dev
, "TCP segmentation error\n");
3022 CSR_WRITE_4(sc
, Q_ADDR(sc_if
->msk_txq
, Q_CSR
), BMU_CLR_IRQ_TCP
);
3027 mskc_intr_hwerr(struct msk_softc
*sc
)
3030 uint32_t tlphead
[4];
3032 status
= CSR_READ_4(sc
, B0_HWE_ISRC
);
3033 /* Time Stamp timer overflow. */
3034 if ((status
& Y2_IS_TIST_OV
) != 0)
3035 CSR_WRITE_1(sc
, GMAC_TI_ST_CTRL
, GMT_ST_CLR_IRQ
);
3036 if ((status
& Y2_IS_PCI_NEXP
) != 0) {
3038 * PCI Express Error occured which is not described in PEX
3040 * This error is also mapped either to Master Abort(
3041 * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
3042 * can only be cleared there.
3044 device_printf(sc
->msk_dev
,
3045 "PCI Express protocol violation error\n");
3048 if ((status
& (Y2_IS_MST_ERR
| Y2_IS_IRQ_STAT
)) != 0) {
3051 if ((status
& Y2_IS_MST_ERR
) != 0)
3052 device_printf(sc
->msk_dev
,
3053 "unexpected IRQ Status error\n");
3055 device_printf(sc
->msk_dev
,
3056 "unexpected IRQ Master error\n");
3057 /* Reset all bits in the PCI status register. */
3058 v16
= pci_read_config(sc
->msk_dev
, PCIR_STATUS
, 2);
3059 CSR_WRITE_1(sc
, B2_TST_CTRL1
, TST_CFG_WRITE_ON
);
3060 pci_write_config(sc
->msk_dev
, PCIR_STATUS
, v16
|
3061 PCIM_STATUS_PERR
| PCIM_STATUS_SERR
| PCIM_STATUS_RMABORT
|
3062 PCIM_STATUS_RTABORT
| PCIM_STATUS_PERRREPORT
, 2);
3063 CSR_WRITE_1(sc
, B2_TST_CTRL1
, TST_CFG_WRITE_OFF
);
3066 /* Check for PCI Express Uncorrectable Error. */
3067 if ((status
& Y2_IS_PCI_EXP
) != 0) {
3071 * On PCI Express bus bridges are called root complexes (RC).
3072 * PCI Express errors are recognized by the root complex too,
3073 * which requests the system to handle the problem. After
3074 * error occurence it may be that no access to the adapter
3075 * may be performed any longer.
3078 v32
= CSR_PCI_READ_4(sc
, PEX_UNC_ERR_STAT
);
3079 if ((v32
& PEX_UNSUP_REQ
) != 0) {
3080 /* Ignore unsupported request error. */
3082 device_printf(sc
->msk_dev
,
3083 "Uncorrectable PCI Express error\n");
3086 if ((v32
& (PEX_FATAL_ERRORS
| PEX_POIS_TLP
)) != 0) {
3089 /* Get TLP header form Log Registers. */
3090 for (i
= 0; i
< 4; i
++)
3091 tlphead
[i
] = CSR_PCI_READ_4(sc
,
3092 PEX_HEADER_LOG
+ i
* 4);
3093 /* Check for vendor defined broadcast message. */
3094 if (!(tlphead
[0] == 0x73004001 && tlphead
[1] == 0x7f)) {
3095 sc
->msk_intrhwemask
&= ~Y2_IS_PCI_EXP
;
3096 CSR_WRITE_4(sc
, B0_HWE_IMSK
,
3097 sc
->msk_intrhwemask
);
3098 CSR_READ_4(sc
, B0_HWE_IMSK
);
3101 /* Clear the interrupt. */
3102 CSR_WRITE_1(sc
, B2_TST_CTRL1
, TST_CFG_WRITE_ON
);
3103 CSR_PCI_WRITE_4(sc
, PEX_UNC_ERR_STAT
, 0xffffffff);
3104 CSR_WRITE_1(sc
, B2_TST_CTRL1
, TST_CFG_WRITE_OFF
);
3107 if ((status
& Y2_HWE_L1_MASK
) != 0 && sc
->msk_if
[MSK_PORT_A
] != NULL
)
3108 msk_handle_hwerr(sc
->msk_if
[MSK_PORT_A
], status
);
3109 if ((status
& Y2_HWE_L2_MASK
) != 0 && sc
->msk_if
[MSK_PORT_B
] != NULL
)
3110 msk_handle_hwerr(sc
->msk_if
[MSK_PORT_B
], status
>> 8);
3113 static __inline
void
3114 msk_rxput(struct msk_if_softc
*sc_if
)
3116 struct msk_softc
*sc
;
3118 sc
= sc_if
->msk_softc
;
3120 if (sc_if
->msk_framesize
> (MCLBYTES
- ETHER_HDR_LEN
)) {
3122 sc_if
->msk_cdata
.msk_jumbo_rx_ring_tag
,
3123 sc_if
->msk_cdata
.msk_jumbo_rx_ring_map
,
3124 BUS_DMASYNC_PREWRITE
);
3127 CSR_WRITE_2(sc
, Y2_PREF_Q_ADDR(sc_if
->msk_rxq
,
3128 PREF_UNIT_PUT_IDX_REG
), sc_if
->msk_cdata
.msk_rx_prod
);
3132 mskc_handle_events(struct msk_softc
*sc
)
3134 struct msk_if_softc
*sc_if
;
3136 struct msk_stat_desc
*sd
;
3137 uint32_t control
, status
;
3138 int cons
, idx
, len
, port
, rxprog
;
3139 struct mbuf_chain chain
[MAXCPU
];
3141 idx
= CSR_READ_2(sc
, STAT_PUT_IDX
);
3142 if (idx
== sc
->msk_stat_cons
)
3145 ether_input_chain_init(chain
);
3147 rxput
[MSK_PORT_A
] = rxput
[MSK_PORT_B
] = 0;
3150 for (cons
= sc
->msk_stat_cons
; cons
!= idx
;) {
3151 sd
= &sc
->msk_stat_ring
[cons
];
3152 control
= le32toh(sd
->msk_control
);
3153 if ((control
& HW_OWNER
) == 0)
3156 * Marvell's FreeBSD driver updates status LE after clearing
3157 * HW_OWNER. However we don't have a way to sync single LE
3158 * with bus_dma(9) API. bus_dma(9) provides a way to sync
3159 * an entire DMA map. So don't sync LE until we have a better
3162 control
&= ~HW_OWNER
;
3163 sd
->msk_control
= htole32(control
);
3164 status
= le32toh(sd
->msk_status
);
3165 len
= control
& STLE_LEN_MASK
;
3166 port
= (control
>> 16) & 0x01;
3167 sc_if
= sc
->msk_if
[port
];
3168 if (sc_if
== NULL
) {
3169 device_printf(sc
->msk_dev
, "invalid port opcode "
3170 "0x%08x\n", control
& STLE_OP_MASK
);
3174 switch (control
& STLE_OP_MASK
) {
3176 sc_if
->msk_vtag
= ntohs(len
);
3179 sc_if
->msk_vtag
= ntohs(len
);
3183 if (sc_if
->msk_framesize
> (MCLBYTES
- ETHER_HDR_LEN
))
3184 msk_jumbo_rxeof(sc_if
, status
, len
);
3187 msk_rxeof(sc_if
, status
, len
, chain
);
3190 * Because there is no way to sync single Rx LE
3191 * put the DMA sync operation off until the end of
3195 /* Update prefetch unit if we've passed water mark. */
3196 if (rxput
[port
] >= sc_if
->msk_cdata
.msk_rx_putwm
) {
3202 if (sc
->msk_if
[MSK_PORT_A
] != NULL
) {
3203 msk_txeof(sc
->msk_if
[MSK_PORT_A
],
3204 status
& STLE_TXA1_MSKL
);
3206 if (sc
->msk_if
[MSK_PORT_B
] != NULL
) {
3207 msk_txeof(sc
->msk_if
[MSK_PORT_B
],
3208 ((status
& STLE_TXA2_MSKL
) >>
3210 ((len
& STLE_TXA2_MSKH
) <<
3215 device_printf(sc
->msk_dev
, "unhandled opcode 0x%08x\n",
3216 control
& STLE_OP_MASK
);
3219 MSK_INC(cons
, MSK_STAT_RING_CNT
);
3220 if (rxprog
> sc
->msk_process_limit
)
3225 ether_input_dispatch(chain
);
3227 sc
->msk_stat_cons
= cons
;
3228 /* XXX We should sync status LEs here. See above notes. */
3230 if (rxput
[MSK_PORT_A
] > 0)
3231 msk_rxput(sc
->msk_if
[MSK_PORT_A
]);
3232 if (rxput
[MSK_PORT_B
] > 0)
3233 msk_rxput(sc
->msk_if
[MSK_PORT_B
]);
3235 return (sc
->msk_stat_cons
!= CSR_READ_2(sc
, STAT_PUT_IDX
));
3238 /* Legacy interrupt handler for shared interrupt. */
3240 mskc_intr(void *xsc
)
3242 struct msk_softc
*sc
;
3243 struct msk_if_softc
*sc_if0
, *sc_if1
;
3244 struct ifnet
*ifp0
, *ifp1
;
3248 ASSERT_SERIALIZED(&sc
->msk_serializer
);
3250 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3251 status
= CSR_READ_4(sc
, B0_Y2_SP_ISRC2
);
3252 if (status
== 0 || status
== 0xffffffff || sc
->msk_suspended
!= 0 ||
3253 (status
& sc
->msk_intrmask
) == 0) {
3254 CSR_WRITE_4(sc
, B0_Y2_SP_ICR
, 2);
3258 sc_if0
= sc
->msk_if
[MSK_PORT_A
];
3259 sc_if1
= sc
->msk_if
[MSK_PORT_B
];
3262 ifp0
= sc_if0
->msk_ifp
;
3264 ifp1
= sc_if1
->msk_ifp
;
3266 if ((status
& Y2_IS_IRQ_PHY1
) != 0 && sc_if0
!= NULL
)
3267 msk_intr_phy(sc_if0
);
3268 if ((status
& Y2_IS_IRQ_PHY2
) != 0 && sc_if1
!= NULL
)
3269 msk_intr_phy(sc_if1
);
3270 if ((status
& Y2_IS_IRQ_MAC1
) != 0 && sc_if0
!= NULL
)
3271 msk_intr_gmac(sc_if0
);
3272 if ((status
& Y2_IS_IRQ_MAC2
) != 0 && sc_if1
!= NULL
)
3273 msk_intr_gmac(sc_if1
);
3274 if ((status
& (Y2_IS_CHK_RX1
| Y2_IS_CHK_RX2
)) != 0) {
3275 device_printf(sc
->msk_dev
, "Rx descriptor error\n");
3276 sc
->msk_intrmask
&= ~(Y2_IS_CHK_RX1
| Y2_IS_CHK_RX2
);
3277 CSR_WRITE_4(sc
, B0_IMSK
, sc
->msk_intrmask
);
3278 CSR_READ_4(sc
, B0_IMSK
);
3280 if ((status
& (Y2_IS_CHK_TXA1
| Y2_IS_CHK_TXA2
)) != 0) {
3281 device_printf(sc
->msk_dev
, "Tx descriptor error\n");
3282 sc
->msk_intrmask
&= ~(Y2_IS_CHK_TXA1
| Y2_IS_CHK_TXA2
);
3283 CSR_WRITE_4(sc
, B0_IMSK
, sc
->msk_intrmask
);
3284 CSR_READ_4(sc
, B0_IMSK
);
3286 if ((status
& Y2_IS_HW_ERR
) != 0)
3287 mskc_intr_hwerr(sc
);
3289 while (mskc_handle_events(sc
) != 0)
3291 if ((status
& Y2_IS_STAT_BMU
) != 0)
3292 CSR_WRITE_4(sc
, STAT_CTRL
, SC_STAT_CLR_IRQ
);
3294 /* Reenable interrupts. */
3295 CSR_WRITE_4(sc
, B0_Y2_SP_ICR
, 2);
3297 if (ifp0
!= NULL
&& (ifp0
->if_flags
& IFF_RUNNING
) != 0 &&
3298 !ifq_is_empty(&ifp0
->if_snd
))
3300 if (ifp1
!= NULL
&& (ifp1
->if_flags
& IFF_RUNNING
) != 0 &&
3301 !ifq_is_empty(&ifp1
->if_snd
))
3306 msk_set_tx_stfwd(struct msk_if_softc
*sc_if
)
3308 struct msk_softc
*sc
= sc_if
->msk_softc
;
3309 struct ifnet
*ifp
= sc_if
->msk_ifp
;
3311 switch (sc
->msk_hw_id
) {
3312 case CHIP_ID_YUKON_EX
:
3313 if (sc
->msk_hw_rev
== CHIP_REV_YU_EX_A0
)
3314 goto yukon_ex_workaround
;
3315 if (ifp
->if_mtu
> ETHERMTU
) {
3317 MR_ADDR(sc_if
->msk_port
, TX_GMF_CTRL_T
),
3318 TX_JUMBO_ENA
| TX_STFW_ENA
);
3321 MR_ADDR(sc_if
->msk_port
, TX_GMF_CTRL_T
),
3322 TX_JUMBO_DIS
| TX_STFW_ENA
);
3326 yukon_ex_workaround
:
3327 if (ifp
->if_mtu
> ETHERMTU
) {
3328 /* Set Tx GMAC FIFO Almost Empty Threshold. */
3330 MR_ADDR(sc_if
->msk_port
, TX_GMF_AE_THR
),
3331 MSK_ECU_JUMBO_WM
<< 16 | MSK_ECU_AE_THR
);
3332 /* Disable Store & Forward mode for Tx. */
3334 MR_ADDR(sc_if
->msk_port
, TX_GMF_CTRL_T
),
3335 TX_JUMBO_ENA
| TX_STFW_DIS
);
3337 /* Enable Store & Forward mode for Tx. */
3339 MR_ADDR(sc_if
->msk_port
, TX_GMF_CTRL_T
),
3340 TX_JUMBO_DIS
| TX_STFW_ENA
);
3349 struct msk_if_softc
*sc_if
= xsc
;
3350 struct msk_softc
*sc
= sc_if
->msk_softc
;
3351 struct ifnet
*ifp
= sc_if
->msk_ifp
;
3352 struct mii_data
*mii
;
3353 uint16_t eaddr
[ETHER_ADDR_LEN
/ 2];
3358 ASSERT_SERIALIZED(ifp
->if_serializer
);
3360 mii
= device_get_softc(sc_if
->msk_miibus
);
3363 /* Cancel pending I/O and free all Rx/Tx buffers. */
3366 sc_if
->msk_framesize
= ifp
->if_mtu
+ ETHER_HDR_LEN
+ EVL_ENCAPLEN
;
3367 if (sc_if
->msk_framesize
> MSK_MAX_FRAMELEN
&&
3368 sc_if
->msk_softc
->msk_hw_id
== CHIP_ID_YUKON_EC_U
) {
3370 * In Yukon EC Ultra, TSO & checksum offload is not
3371 * supported for jumbo frame.
3373 ifp
->if_hwassist
&= ~MSK_CSUM_FEATURES
;
3374 ifp
->if_capenable
&= ~IFCAP_TXCSUM
;
3377 /* GMAC Control reset. */
3378 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, GMAC_CTRL
), GMC_RST_SET
);
3379 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, GMAC_CTRL
), GMC_RST_CLR
);
3380 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, GMAC_CTRL
), GMC_F_LOOPB_OFF
);
3381 if (sc
->msk_hw_id
== CHIP_ID_YUKON_EX
) {
3382 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, GMAC_CTRL
),
3383 GMC_BYP_MACSECRX_ON
| GMC_BYP_MACSECTX_ON
|
3388 * Initialize GMAC first such that speed/duplex/flow-control
3389 * parameters are renegotiated when interface is brought up.
3391 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_GP_CTRL
, 0);
3393 /* Dummy read the Interrupt Source Register. */
3394 CSR_READ_1(sc
, MR_ADDR(sc_if
->msk_port
, GMAC_IRQ_SRC
));
3396 /* Set MIB Clear Counter Mode. */
3397 gmac
= GMAC_READ_2(sc
, sc_if
->msk_port
, GM_PHY_ADDR
);
3398 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_PHY_ADDR
, gmac
| GM_PAR_MIB_CLR
);
3399 /* Read all MIB Counters with Clear Mode set. */
3400 for (i
= 0; i
< GM_MIB_CNT_SIZE
; i
++)
3401 GMAC_READ_2(sc
, sc_if
->msk_port
, GM_MIB_CNT_BASE
+ 8 * i
);
3402 /* Clear MIB Clear Counter Mode. */
3403 gmac
&= ~GM_PAR_MIB_CLR
;
3404 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_PHY_ADDR
, gmac
);
3407 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_RX_CTRL
, GM_RXCR_CRC_DIS
);
3409 /* Setup Transmit Control Register. */
3410 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_TX_CTRL
, TX_COL_THR(TX_COL_DEF
));
3412 /* Setup Transmit Flow Control Register. */
3413 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_TX_FLOW_CTRL
, 0xffff);
3415 /* Setup Transmit Parameter Register. */
3416 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_TX_PARAM
,
3417 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF
) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF
) |
3418 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF
) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF
));
3420 gmac
= DATA_BLIND_VAL(DATA_BLIND_DEF
) |
3421 GM_SMOD_VLAN_ENA
| IPG_DATA_VAL(IPG_DATA_DEF
);
3423 if (sc_if
->msk_framesize
> MSK_MAX_FRAMELEN
)
3424 gmac
|= GM_SMOD_JUMBO_ENA
;
3425 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_SERIAL_MODE
, gmac
);
3427 /* Set station address. */
3428 bcopy(IF_LLADDR(ifp
), eaddr
, ETHER_ADDR_LEN
);
3429 for (i
= 0; i
< ETHER_ADDR_LEN
/2; i
++)
3430 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_SRC_ADDR_1L
+ i
* 4,
3432 for (i
= 0; i
< ETHER_ADDR_LEN
/2; i
++)
3433 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_SRC_ADDR_2L
+ i
* 4,
3436 /* Disable interrupts for counter overflows. */
3437 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_TX_IRQ_MSK
, 0);
3438 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_RX_IRQ_MSK
, 0);
3439 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_TR_IRQ_MSK
, 0);
3441 /* Configure Rx MAC FIFO. */
3442 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, RX_GMF_CTRL_T
), GMF_RST_SET
);
3443 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, RX_GMF_CTRL_T
), GMF_RST_CLR
);
3444 reg
= GMF_OPER_ON
| GMF_RX_F_FL_ON
;
3445 if (sc
->msk_hw_id
== CHIP_ID_YUKON_FE_P
||
3446 sc
->msk_hw_id
== CHIP_ID_YUKON_EX
)
3447 reg
|= GMF_RX_OVER_ON
;
3448 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, RX_GMF_CTRL_T
), reg
);
3450 /* Set receive filter. */
3451 msk_rxfilter(sc_if
);
3453 if (sc
->msk_hw_id
== CHIP_ID_YUKON_XL
) {
3454 /* Clear flush mask - HW bug. */
3455 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, RX_GMF_FL_MSK
), 0);
3457 /* Flush Rx MAC FIFO on any flow control or error. */
3458 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, RX_GMF_FL_MSK
),
3463 * Set Rx FIFO flush threshold to 64 bytes 1 FIFO word
3464 * due to hardware hang on receipt of pause frames.
3466 reg
= RX_GMF_FL_THR_DEF
+ 1;
3467 /* Another magic for Yukon FE+ - From Linux. */
3468 if (sc
->msk_hw_id
== CHIP_ID_YUKON_FE_P
&&
3469 sc
->msk_hw_rev
== CHIP_REV_YU_FE_P_A0
)
3471 CSR_WRITE_2(sc
, MR_ADDR(sc_if
->msk_port
, RX_GMF_FL_THR
), reg
);
3474 /* Configure Tx MAC FIFO. */
3475 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, TX_GMF_CTRL_T
), GMF_RST_SET
);
3476 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, TX_GMF_CTRL_T
), GMF_RST_CLR
);
3477 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, TX_GMF_CTRL_T
), GMF_OPER_ON
);
3479 /* Configure hardware VLAN tag insertion/stripping. */
3480 msk_setvlan(sc_if
, ifp
);
3482 if ((sc_if
->msk_flags
& MSK_FLAG_RAMBUF
) == 0) {
3483 /* Set Rx Pause threshould. */
3484 CSR_WRITE_1(sc
, MR_ADDR(sc_if
->msk_port
, RX_GMF_LP_THR
),
3486 CSR_WRITE_1(sc
, MR_ADDR(sc_if
->msk_port
, RX_GMF_UP_THR
),
3488 /* Configure store-and-forward for Tx. */
3489 msk_set_tx_stfwd(sc_if
);
3492 if (sc
->msk_hw_id
== CHIP_ID_YUKON_FE_P
&&
3493 sc
->msk_hw_rev
== CHIP_REV_YU_FE_P_A0
) {
3494 /* Disable dynamic watermark - from Linux. */
3495 reg
= CSR_READ_4(sc
, MR_ADDR(sc_if
->msk_port
, TX_GMF_EA
));
3497 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, TX_GMF_EA
), reg
);
3501 * Disable Force Sync bit and Alloc bit in Tx RAM interface
3502 * arbiter as we don't use Sync Tx queue.
3504 CSR_WRITE_1(sc
, MR_ADDR(sc_if
->msk_port
, TXA_CTRL
),
3505 TXA_DIS_FSYNC
| TXA_DIS_ALLOC
| TXA_STOP_RC
);
3506 /* Enable the RAM Interface Arbiter. */
3507 CSR_WRITE_1(sc
, MR_ADDR(sc_if
->msk_port
, TXA_CTRL
), TXA_ENA_ARB
);
3509 /* Setup RAM buffer. */
3510 msk_set_rambuffer(sc_if
);
3512 /* Disable Tx sync Queue. */
3513 CSR_WRITE_1(sc
, RB_ADDR(sc_if
->msk_txsq
, RB_CTRL
), RB_RST_SET
);
3515 /* Setup Tx Queue Bus Memory Interface. */
3516 CSR_WRITE_4(sc
, Q_ADDR(sc_if
->msk_txq
, Q_CSR
), BMU_CLR_RESET
);
3517 CSR_WRITE_4(sc
, Q_ADDR(sc_if
->msk_txq
, Q_CSR
), BMU_OPER_INIT
);
3518 CSR_WRITE_4(sc
, Q_ADDR(sc_if
->msk_txq
, Q_CSR
), BMU_FIFO_OP_ON
);
3519 CSR_WRITE_2(sc
, Q_ADDR(sc_if
->msk_txq
, Q_WM
), MSK_BMU_TX_WM
);
3520 switch (sc
->msk_hw_id
) {
3521 case CHIP_ID_YUKON_EC_U
:
3522 if (sc
->msk_hw_rev
== CHIP_REV_YU_EC_U_A0
) {
3523 /* Fix for Yukon-EC Ultra: set BMU FIFO level */
3524 CSR_WRITE_2(sc
, Q_ADDR(sc_if
->msk_txq
, Q_AL
),
3528 case CHIP_ID_YUKON_EX
:
3530 * Yukon Extreme seems to have silicon bug for
3531 * automatic Tx checksum calculation capability.
3533 if (sc
->msk_hw_rev
== CHIP_REV_YU_EX_B0
) {
3534 CSR_WRITE_4(sc
, Q_ADDR(sc_if
->msk_txq
, Q_F
),
3540 /* Setup Rx Queue Bus Memory Interface. */
3541 CSR_WRITE_4(sc
, Q_ADDR(sc_if
->msk_rxq
, Q_CSR
), BMU_CLR_RESET
);
3542 CSR_WRITE_4(sc
, Q_ADDR(sc_if
->msk_rxq
, Q_CSR
), BMU_OPER_INIT
);
3543 CSR_WRITE_4(sc
, Q_ADDR(sc_if
->msk_rxq
, Q_CSR
), BMU_FIFO_OP_ON
);
3544 CSR_WRITE_2(sc
, Q_ADDR(sc_if
->msk_rxq
, Q_WM
), MSK_BMU_RX_WM
);
3545 if (sc
->msk_hw_id
== CHIP_ID_YUKON_EC_U
&&
3546 sc
->msk_hw_rev
>= CHIP_REV_YU_EC_U_A1
) {
3547 /* MAC Rx RAM Read is controlled by hardware. */
3548 CSR_WRITE_4(sc
, Q_ADDR(sc_if
->msk_rxq
, Q_F
), F_M_RX_RAM_DIS
);
3551 msk_set_prefetch(sc
, sc_if
->msk_txq
,
3552 sc_if
->msk_rdata
.msk_tx_ring_paddr
, MSK_TX_RING_CNT
- 1);
3553 msk_init_tx_ring(sc_if
);
3555 /* Disable Rx checksum offload and RSS hash. */
3556 CSR_WRITE_4(sc
, Q_ADDR(sc_if
->msk_rxq
, Q_CSR
),
3557 BMU_DIS_RX_CHKSUM
| BMU_DIS_RX_RSS_HASH
);
3559 if (sc_if
->msk_framesize
> (MCLBYTES
- ETHER_HDR_LEN
)) {
3560 msk_set_prefetch(sc
, sc_if
->msk_rxq
,
3561 sc_if
->msk_rdata
.msk_jumbo_rx_ring_paddr
,
3562 MSK_JUMBO_RX_RING_CNT
- 1);
3563 error
= msk_init_jumbo_rx_ring(sc_if
);
3567 msk_set_prefetch(sc
, sc_if
->msk_rxq
,
3568 sc_if
->msk_rdata
.msk_rx_ring_paddr
,
3569 MSK_RX_RING_CNT
- 1);
3570 error
= msk_init_rx_ring(sc_if
);
3573 device_printf(sc_if
->msk_if_dev
,
3574 "initialization failed: no memory for Rx buffers\n");
3579 /* Configure interrupt handling. */
3580 if (sc_if
->msk_port
== MSK_PORT_A
) {
3581 sc
->msk_intrmask
|= Y2_IS_PORT_A
;
3582 sc
->msk_intrhwemask
|= Y2_HWE_L1_MASK
;
3584 sc
->msk_intrmask
|= Y2_IS_PORT_B
;
3585 sc
->msk_intrhwemask
|= Y2_HWE_L2_MASK
;
3587 CSR_WRITE_4(sc
, B0_HWE_IMSK
, sc
->msk_intrhwemask
);
3588 CSR_READ_4(sc
, B0_HWE_IMSK
);
3589 CSR_WRITE_4(sc
, B0_IMSK
, sc
->msk_intrmask
);
3590 CSR_READ_4(sc
, B0_IMSK
);
3592 sc_if
->msk_link
= 0;
3595 mskc_set_imtimer(sc
);
3597 ifp
->if_flags
|= IFF_RUNNING
;
3598 ifp
->if_flags
&= ~IFF_OACTIVE
;
3600 callout_reset(&sc_if
->msk_tick_ch
, hz
, msk_tick
, sc_if
);
/*
 * msk_set_rambuffer: program the per-port external RAM buffer (packet
 * FIFO) for this interface's Rx and Tx queues.  Returns without touching
 * hardware when the chip has no RAM buffer (MSK_FLAG_RAMBUF clear).
 * All RAM-buffer pointers (start/end/read/write) and thresholds are
 * written in 8-byte units, hence the pervasive "/ 8" scaling.
 * NOTE(review): this extraction is lossy — the early-return statement,
 * local declarations (utpp/ltpp) and closing brace are not visible here;
 * confirm against the full source.
 */
3604 msk_set_rambuffer(struct msk_if_softc
*sc_if
)
3606 struct msk_softc
*sc
;
/* No RAM buffer on this chip — nothing to program. */
3609 if ((sc_if
->msk_flags
& MSK_FLAG_RAMBUF
) == 0)
3612 sc
= sc_if
->msk_softc
;
3614 /* Setup Rx Queue. */
/* Take the Rx RAM buffer out of reset, then set its start/end extent
 * and point both write and read pointers at the start (empty queue). */
3615 CSR_WRITE_1(sc
, RB_ADDR(sc_if
->msk_rxq
, RB_CTRL
), RB_RST_CLR
);
3616 CSR_WRITE_4(sc
, RB_ADDR(sc_if
->msk_rxq
, RB_START
),
3617 sc
->msk_rxqstart
[sc_if
->msk_port
] / 8);
3618 CSR_WRITE_4(sc
, RB_ADDR(sc_if
->msk_rxq
, RB_END
),
3619 sc
->msk_rxqend
[sc_if
->msk_port
] / 8);
3620 CSR_WRITE_4(sc
, RB_ADDR(sc_if
->msk_rxq
, RB_WP
),
3621 sc
->msk_rxqstart
[sc_if
->msk_port
] / 8);
3622 CSR_WRITE_4(sc
, RB_ADDR(sc_if
->msk_rxq
, RB_RP
),
3623 sc
->msk_rxqstart
[sc_if
->msk_port
] / 8);
/* Flow-control thresholds derived from the Rx queue extent:
 * utpp = upper threshold (queue size minus MSK_RB_ULPP),
 * ltpp = lower threshold (queue size minus MSK_RB_LLPP_B),
 * both converted to 8-byte units. */
3625 utpp
= (sc
->msk_rxqend
[sc_if
->msk_port
] + 1 -
3626 sc
->msk_rxqstart
[sc_if
->msk_port
] - MSK_RB_ULPP
) / 8;
3627 ltpp
= (sc
->msk_rxqend
[sc_if
->msk_port
] + 1 -
3628 sc
->msk_rxqstart
[sc_if
->msk_port
] - MSK_RB_LLPP_B
) / 8;
/* Small Rx queues use the smaller lower-pause padding (LLPP_S). */
3629 if (sc
->msk_rxqsize
< MSK_MIN_RXQ_SIZE
)
3630 ltpp
+= (MSK_RB_LLPP_B
- MSK_RB_LLPP_S
) / 8;
3631 CSR_WRITE_4(sc
, RB_ADDR(sc_if
->msk_rxq
, RB_RX_UTPP
), utpp
);
3632 CSR_WRITE_4(sc
, RB_ADDR(sc_if
->msk_rxq
, RB_RX_LTPP
), ltpp
);
3633 /* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */
/* Switch the Rx RAM buffer to operational mode; the read-back
 * presumably flushes the posted write — TODO confirm. */
3635 CSR_WRITE_1(sc
, RB_ADDR(sc_if
->msk_rxq
, RB_CTRL
), RB_ENA_OP_MD
);
3636 CSR_READ_1(sc
, RB_ADDR(sc_if
->msk_rxq
, RB_CTRL
));
3638 /* Setup Tx Queue. */
/* Same sequence for the Tx RAM buffer: clear reset, program extent,
 * reset both pointers to the queue start. */
3639 CSR_WRITE_1(sc
, RB_ADDR(sc_if
->msk_txq
, RB_CTRL
), RB_RST_CLR
);
3640 CSR_WRITE_4(sc
, RB_ADDR(sc_if
->msk_txq
, RB_START
),
3641 sc
->msk_txqstart
[sc_if
->msk_port
] / 8);
3642 CSR_WRITE_4(sc
, RB_ADDR(sc_if
->msk_txq
, RB_END
),
3643 sc
->msk_txqend
[sc_if
->msk_port
] / 8);
3644 CSR_WRITE_4(sc
, RB_ADDR(sc_if
->msk_txq
, RB_WP
),
3645 sc
->msk_txqstart
[sc_if
->msk_port
] / 8);
3646 CSR_WRITE_4(sc
, RB_ADDR(sc_if
->msk_txq
, RB_RP
),
3647 sc
->msk_txqstart
[sc_if
->msk_port
] / 8);
3648 /* Enable Store & Forward for Tx side. */
3649 CSR_WRITE_1(sc
, RB_ADDR(sc_if
->msk_txq
, RB_CTRL
), RB_ENA_STFWD
);
3650 CSR_WRITE_1(sc
, RB_ADDR(sc_if
->msk_txq
, RB_CTRL
), RB_ENA_OP_MD
);
/* Read-back to flush the control writes before returning. */
3651 CSR_READ_1(sc
, RB_ADDR(sc_if
->msk_txq
, RB_CTRL
));
/*
 * msk_set_prefetch: initialize the descriptor prefetch unit for queue
 * 'qaddr': reset it, give it the list-element (LE) ring base address
 * 'addr' (low/high halves written separately) and the ring's last index,
 * then enable it.  The final dummy read flushes the posted writes.
 * NOTE(review): extraction is lossy — the trailing last-index parameter,
 * the data arguments of each CSR_WRITE, and the braces are not visible
 * here; confirm against the full source.
 */
3655 msk_set_prefetch(struct msk_softc
*sc
, int qaddr
, bus_addr_t addr
,
3659 /* Reset the prefetch unit. */
3660 CSR_WRITE_4(sc
, Y2_PREF_Q_ADDR(qaddr
, PREF_UNIT_CTRL_REG
),
3662 CSR_WRITE_4(sc
, Y2_PREF_Q_ADDR(qaddr
, PREF_UNIT_CTRL_REG
),
3664 /* Set LE base address. */
3665 CSR_WRITE_4(sc
, Y2_PREF_Q_ADDR(qaddr
, PREF_UNIT_ADDR_LOW_REG
),
3667 CSR_WRITE_4(sc
, Y2_PREF_Q_ADDR(qaddr
, PREF_UNIT_ADDR_HI_REG
),
3669 /* Set the list last index. */
3670 CSR_WRITE_2(sc
, Y2_PREF_Q_ADDR(qaddr
, PREF_UNIT_LAST_IDX_REG
),
3672 /* Turn on prefetch unit. */
3673 CSR_WRITE_4(sc
, Y2_PREF_Q_ADDR(qaddr
, PREF_UNIT_CTRL_REG
),
3675 /* Dummy read to ensure write. */
3676 CSR_READ_4(sc
, Y2_PREF_Q_ADDR(qaddr
, PREF_UNIT_CTRL_REG
));
/*
 * msk_stop: bring the interface down.  Caller must hold the interface
 * serializer (asserted below).  Sequence: stop the tick callout, mask
 * this port's interrupt sources, disable the Tx/Rx MAC, stop and reset
 * the Tx BMU / RAM buffer / prefetch unit / MAC FIFO, then the Rx side
 * (RAM buffer stopped before the BMU reset — see the hardware note
 * inline), free any mbufs still attached to the Rx/jumbo-Rx/Tx rings,
 * and finally clear IFF_RUNNING/IFF_OACTIVE and the link state.
 * NOTE(review): extraction is lossy — several statements (else-branch
 * braces, bus_dmamap_sync/m_freem calls inside the loops, loop 'break's)
 * are not visible here; confirm against the full source.
 */
3680 msk_stop(struct msk_if_softc
*sc_if
)
3682 struct msk_softc
*sc
= sc_if
->msk_softc
;
3683 struct ifnet
*ifp
= sc_if
->msk_ifp
;
3684 struct msk_txdesc
*txd
;
3685 struct msk_rxdesc
*rxd
;
3687 struct msk_rxdesc
*jrxd
;
/* Must be entered with the per-interface serializer held. */
3692 ASSERT_SERIALIZED(ifp
->if_serializer
);
/* Stop the periodic tick timer before tearing down hardware. */
3694 callout_stop(&sc_if
->msk_tick_ch
);
3697 /* Disable interrupts. */
/* Clear this port's bits from the shared interrupt masks; the other
 * port's bits are left intact (dual-port chips). */
3698 if (sc_if
->msk_port
== MSK_PORT_A
) {
3699 sc
->msk_intrmask
&= ~Y2_IS_PORT_A
;
3700 sc
->msk_intrhwemask
&= ~Y2_HWE_L1_MASK
;
3702 sc
->msk_intrmask
&= ~Y2_IS_PORT_B
;
3703 sc
->msk_intrhwemask
&= ~Y2_HWE_L2_MASK
;
/* Write the updated masks; the reads flush the posted writes. */
3705 CSR_WRITE_4(sc
, B0_HWE_IMSK
, sc
->msk_intrhwemask
);
3706 CSR_READ_4(sc
, B0_HWE_IMSK
);
3707 CSR_WRITE_4(sc
, B0_IMSK
, sc
->msk_intrmask
);
3708 CSR_READ_4(sc
, B0_IMSK
);
3710 /* Disable Tx/Rx MAC. */
3711 val
= GMAC_READ_2(sc
, sc_if
->msk_port
, GM_GP_CTRL
);
3712 val
&= ~(GM_GPCR_RX_ENA
| GM_GPCR_TX_ENA
);
3713 GMAC_WRITE_2(sc
, sc_if
->msk_port
, GM_GP_CTRL
, val
);
3714 /* Read again to ensure writing. */
3715 GMAC_READ_2(sc
, sc_if
->msk_port
, GM_GP_CTRL
);
/* Ask the Tx BMU to stop, then poll Q_CSR until it reports stopped
 * or idle, re-issuing the stop while neither bit is set. */
3718 CSR_WRITE_4(sc
, Q_ADDR(sc_if
->msk_txq
, Q_CSR
), BMU_STOP
);
3719 val
= CSR_READ_4(sc
, Q_ADDR(sc_if
->msk_txq
, Q_CSR
));
3720 for (i
= 0; i
< MSK_TIMEOUT
; i
++) {
3721 if ((val
& (BMU_STOP
| BMU_IDLE
)) == 0) {
3722 CSR_WRITE_4(sc
, Q_ADDR(sc_if
->msk_txq
, Q_CSR
),
3724 val
= CSR_READ_4(sc
, Q_ADDR(sc_if
->msk_txq
, Q_CSR
));
/* Timed out: report, but carry on with the reset anyway. */
3729 if (i
== MSK_TIMEOUT
)
3730 device_printf(sc_if
->msk_if_dev
, "Tx BMU stop failed\n");
3731 CSR_WRITE_1(sc
, RB_ADDR(sc_if
->msk_txq
, RB_CTRL
),
3732 RB_RST_SET
| RB_DIS_OP_MD
);
3734 /* Disable all GMAC interrupt. */
3735 CSR_WRITE_1(sc
, MR_ADDR(sc_if
->msk_port
, GMAC_IRQ_MSK
), 0);
3736 /* Disable PHY interrupt. */
3737 msk_phy_writereg(sc_if
, PHY_ADDR_MARV
, PHY_MARV_INT_MASK
, 0);
3739 /* Disable the RAM Interface Arbiter. */
3740 CSR_WRITE_1(sc
, MR_ADDR(sc_if
->msk_port
, TXA_CTRL
), TXA_DIS_ARB
);
3742 /* Reset the PCI FIFO of the async Tx queue */
3743 CSR_WRITE_4(sc
, Q_ADDR(sc_if
->msk_txq
, Q_CSR
),
3744 BMU_RST_SET
| BMU_FIFO_RST
);
3746 /* Reset the Tx prefetch units. */
3747 CSR_WRITE_4(sc
, Y2_PREF_Q_ADDR(sc_if
->msk_txq
, PREF_UNIT_CTRL_REG
),
3750 /* Reset the RAM Buffer async Tx queue. */
3751 CSR_WRITE_1(sc
, RB_ADDR(sc_if
->msk_txq
, RB_CTRL
), RB_RST_SET
);
3753 /* Reset Tx MAC FIFO. */
3754 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, TX_GMF_CTRL_T
), GMF_RST_SET
);
3755 /* Set Pause Off. */
3756 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, GMAC_CTRL
), GMC_PAUSE_OFF
);
3759 * The Rx Stop command will not work for Yukon-2 if the BMU does not
3760 * reach the end of packet and since we can't make sure that we have
3761 * incoming data, we must reset the BMU while it is not during a DMA
3762 * transfer. Since it is possible that the Rx path is still active,
3763 * the Rx RAM buffer will be stopped first, so any possible incoming
3764 * data will not trigger a DMA. After the RAM buffer is stopped, the
3765 * BMU is polled until any DMA in progress is ended and only then it
3769 /* Disable the RAM Buffer receive queue. */
3770 CSR_WRITE_1(sc
, RB_ADDR(sc_if
->msk_rxq
, RB_CTRL
), RB_DIS_OP_MD
);
/* Wait until the RAM buffer's read level equals its read pointer,
 * i.e. any in-flight DMA has drained. */
3771 for (i
= 0; i
< MSK_TIMEOUT
; i
++) {
3772 if (CSR_READ_1(sc
, RB_ADDR(sc_if
->msk_rxq
, Q_RSL
)) ==
3773 CSR_READ_1(sc
, RB_ADDR(sc_if
->msk_rxq
, Q_RL
)))
3777 if (i
== MSK_TIMEOUT
)
3778 device_printf(sc_if
->msk_if_dev
, "Rx BMU stop failed\n");
3779 CSR_WRITE_4(sc
, Q_ADDR(sc_if
->msk_rxq
, Q_CSR
),
3780 BMU_RST_SET
| BMU_FIFO_RST
);
3781 /* Reset the Rx prefetch unit. */
3782 CSR_WRITE_4(sc
, Y2_PREF_Q_ADDR(sc_if
->msk_rxq
, PREF_UNIT_CTRL_REG
),
3784 /* Reset the RAM Buffer receive queue. */
3785 CSR_WRITE_1(sc
, RB_ADDR(sc_if
->msk_rxq
, RB_CTRL
), RB_RST_SET
);
3786 /* Reset Rx MAC FIFO. */
3787 CSR_WRITE_4(sc
, MR_ADDR(sc_if
->msk_port
, RX_GMF_CTRL_T
), GMF_RST_SET
);
3789 /* Free Rx and Tx mbufs still in the queues. */
/* Standard Rx ring: unload DMA maps for any descriptor that still
 * owns an mbuf (the m_freem presumably follows in dropped lines). */
3790 for (i
= 0; i
< MSK_RX_RING_CNT
; i
++) {
3791 rxd
= &sc_if
->msk_cdata
.msk_rxdesc
[i
];
3792 if (rxd
->rx_m
!= NULL
) {
3793 bus_dmamap_unload(sc_if
->msk_cdata
.msk_rx_tag
,
/* Jumbo Rx ring: sync for POSTREAD, unload, and free the mbuf. */
3800 for (i
= 0; i
< MSK_JUMBO_RX_RING_CNT
; i
++) {
3801 jrxd
= &sc_if
->msk_cdata
.msk_jumbo_rxdesc
[i
];
3802 if (jrxd
->rx_m
!= NULL
) {
3803 bus_dmamap_sync(sc_if
->msk_cdata
.msk_jumbo_rx_tag
,
3804 jrxd
->rx_dmamap
, BUS_DMASYNC_POSTREAD
);
3805 bus_dmamap_unload(sc_if
->msk_cdata
.msk_jumbo_rx_tag
,
3807 m_freem(jrxd
->rx_m
);
/* Tx ring: release any mbufs left from in-flight transmissions. */
3812 for (i
= 0; i
< MSK_TX_RING_CNT
; i
++) {
3813 txd
= &sc_if
->msk_cdata
.msk_txdesc
[i
];
3814 if (txd
->tx_m
!= NULL
) {
3815 bus_dmamap_unload(sc_if
->msk_cdata
.msk_tx_tag
,
3823 * Mark the interface down.
3825 ifp
->if_flags
&= ~(IFF_RUNNING
| IFF_OACTIVE
);
3826 sc_if
->msk_link
= 0;
/*
 * mskc_sysctl_proc_limit: sysctl handler that clamps the tunable's
 * value to [MSK_PROC_MIN, MSK_PROC_MAX] via sysctl_int_range().
 * Returns whatever sysctl_int_range() returns (0 or an errno).
 */
3830 mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS
)
3832 return sysctl_int_range(oidp
, arg1
, arg2
, req
,
3833 MSK_PROC_MIN
, MSK_PROC_MAX
);
/*
 * mskc_sysctl_intr_rate: sysctl handler for the interrupt-moderation
 * rate.  Runs under the softc's serializer.  Reads the current rate
 * into a local, hands it to sysctl_handle_int(); on a write that
 * changes the value it stores the new rate, checks whether any of the
 * (up to two) ports is IFF_RUNNING, and reprograms the interrupt
 * moderation timer via mskc_set_imtimer().
 * NOTE(review): extraction is lossy — the declarations of v/error/
 * flag/i, validation of v, the early-exit path, and the condition
 * guarding mskc_set_imtimer() are not visible here; confirm against
 * the full source.
 */
3837 mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS
)
3839 struct msk_softc
*sc
= arg1
;
3840 struct lwkt_serialize
*serializer
= &sc
->msk_serializer
;
/* Serialize against the interrupt handler and ioctl paths. */
3843 lwkt_serialize_enter(serializer
);
3845 v
= sc
->msk_intr_rate
;
3846 error
= sysctl_handle_int(oidp
, &v
, 0, req
);
/* Read-only access or error: nothing further to apply. */
3847 if (error
|| req
->newptr
== NULL
)
3854 if (sc
->msk_intr_rate
!= v
) {
3857 sc
->msk_intr_rate
= v
;
/* Check both possible ports for a running interface. */
3858 for (i
= 0; i
< 2; ++i
) {
3859 if (sc
->msk_if
[i
] != NULL
) {
3860 flag
|= sc
->msk_if
[i
]->
3861 arpcom
.ac_if
.if_flags
& IFF_RUNNING
;
/* Apply the new moderation setting to the hardware timer. */
3865 mskc_set_imtimer(sc
);
3868 lwkt_serialize_exit(serializer
);
/*
 * msk_dmamem_create: allocate a coherent, zeroed DMA memory block of
 * 'size' bytes under the interface's parent DMA tag and return its
 * tag, KVA, bus address and map through the out parameters.
 * Ownership of the allocation passes to the caller, who releases it
 * with msk_dmamem_destroy().
 * NOTE(review): extraction is lossy — the alignment/boundary arguments
 * to bus_dmamem_coherent(), the error check around the printf, and the
 * return statements are not visible here; confirm against the full
 * source.
 */
3873 msk_dmamem_create(device_t dev
, bus_size_t size
, bus_dma_tag_t
*dtag
,
3874 void **addr
, bus_addr_t
*paddr
, bus_dmamap_t
*dmap
)
3876 struct msk_if_softc
*sc_if
= device_get_softc(dev
);
/* BUS_DMA_ZERO gives zero-initialized memory; BUS_DMA_WAITOK may
 * sleep, so this must not be called from interrupt context. */
3880 error
= bus_dmamem_coherent(sc_if
->msk_cdata
.msk_parent_tag
,
3882 BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
,
3883 size
, BUS_DMA_WAITOK
| BUS_DMA_ZERO
, &dmem
);
3885 device_printf(dev
, "can't create coherent DMA memory\n");
/* Hand the pieces of the bus_dmamem result back to the caller. */
3889 *dtag
= dmem
.dmem_tag
;
3890 *dmap
= dmem
.dmem_map
;
3891 *addr
= dmem
.dmem_addr
;
3892 *paddr
= dmem
.dmem_busaddr
;
/*
 * msk_dmamem_destroy: release a DMA block created by
 * msk_dmamem_create() — unload the map, free the memory, and destroy
 * the tag, in that order.
 * NOTE(review): extraction is lossy — a guard such as
 * "if (dtag != NULL)" may precede these calls in the full source;
 * confirm before assuming NULL is tolerated.
 */
3898 msk_dmamem_destroy(bus_dma_tag_t dtag
, void *addr
, bus_dmamap_t dmap
)
3901 bus_dmamap_unload(dtag
, dmap
);
3902 bus_dmamem_free(dtag
, addr
, dmap
);
3903 bus_dma_tag_destroy(dtag
);
3908 mskc_set_imtimer(struct msk_softc
*sc
)
3910 if (sc
->msk_intr_rate
> 0) {
3912 * XXX myk(4) seems to use 125MHz for EC/FE/XL
3913 * and 78.125MHz for rest of chip types
3915 CSR_WRITE_4(sc
, B2_IRQM_INI
,
3916 MSK_USECS(sc
, 1000000 / sc
->msk_intr_rate
));
3917 CSR_WRITE_4(sc
, B2_IRQM_MSK
, sc
->msk_intrmask
);
3918 CSR_WRITE_4(sc
, B2_IRQM_CTRL
, TIM_START
);
3920 CSR_WRITE_4(sc
, B2_IRQM_CTRL
, TIM_STOP
);