/******************************************************************************
 *
 * Project:	Gigabit Ethernet Driver for FreeBSD 5.x/6.x
 * Version:	$Revision: 1.23 $
 * Date:	$Date: 2005/12/22 09:04:11 $
 * Purpose:	Main driver source file
 *
 *****************************************************************************/

/******************************************************************************
 *
 * Copyright (C) Marvell International Ltd. and/or its affiliates
 *
 * The computer program files contained in this folder ("Files")
 * are provided to you under the BSD-type license terms provided
 * below, and any use of such Files and any derivative works
 * thereof created by you shall be governed by the following terms
 * and conditions:
 *
 *	- Redistributions of source code must retain the above copyright
 *	  notice, this list of conditions and the following disclaimer.
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials provided
 *	  with the distribution.
 *	- Neither the name of Marvell nor the names of its contributors
 *	  may be used to endorse or promote products derived from this
 *	  software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
/*
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* $FreeBSD: src/sys/dev/msk/if_msk.c,v 1.26 2007/12/05 09:41:58 remko Exp $ */

/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to lack of documentation, this driver is based on the code from
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/in_cksum.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <netinet/ip.h>
#include <netinet/ip_var.h>

#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "if_mskreg.h"

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
/*
 * Devices supported by this driver.
 */
static const struct msk_product {
	uint16_t	msk_vendorid;
	uint16_t	msk_deviceid;
	const char	*msk_name;
} msk_products[] = {
	{ VENDORID_SK, DEVICEID_SK_YUKON2,
	    "SK-9Sxx Gigabit Ethernet" },
	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
	    "SK-9Exx Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
	    "Marvell Yukon 88E8035 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
	    "Marvell Yukon 88E8036 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
	    "Marvell Yukon 88E8038 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
	    "Marvell Yukon 88E8039 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8040,
	    "Marvell Yukon 88E8040 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8040T,
	    "Marvell Yukon 88E8040T Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8042,
	    "Marvell Yukon 88E8042 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8048,
	    "Marvell Yukon 88E8048 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4365,
	    "Marvell Yukon 88E8070 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436A,
	    "Marvell Yukon 88E8058 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436B,
	    "Marvell Yukon 88E8071 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436C,
	    "Marvell Yukon 88E8072 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436D,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4370,
	    "Marvell Yukon 88E8075 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4380,
	    "Marvell Yukon 88E8057 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4381,
	    "Marvell Yukon 88E8059 Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
	    "D-Link 550SX Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
	    "D-Link 560T Gigabit Ethernet" },
	{ 0, 0, NULL }
};
static const char *model_name[] = {
	"Yukon XL",
	"Yukon EC Ultra",
	"Yukon EX",
	"Yukon EC",
	"Yukon FE",
	"Yukon FE+",
	"Yukon Supreme",
	"Yukon Ultra 2",
	"Yukon Unknown",
	"Yukon Optima"
};
static int	mskc_probe(device_t);
static int	mskc_attach(device_t);
static int	mskc_detach(device_t);
static int	mskc_shutdown(device_t);
static int	mskc_suspend(device_t);
static int	mskc_resume(device_t);
static void	mskc_intr(void *);

static void	mskc_reset(struct msk_softc *);
static void	mskc_set_imtimer(struct msk_softc *);
static void	mskc_intr_hwerr(struct msk_softc *);
static int	mskc_handle_events(struct msk_softc *);
static void	mskc_phy_power(struct msk_softc *, int);
static int	mskc_setup_rambuffer(struct msk_softc *);
static int	mskc_status_dma_alloc(struct msk_softc *);
static void	mskc_status_dma_free(struct msk_softc *);
static int	mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS);
static int	mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);

static int	msk_probe(device_t);
static int	msk_attach(device_t);
static int	msk_detach(device_t);
static int	msk_miibus_readreg(device_t, int, int);
static int	msk_miibus_writereg(device_t, int, int, int);
static void	msk_miibus_statchg(device_t);

static void	msk_init(void *);
static int	msk_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	msk_start(struct ifnet *, struct ifaltq_subque *);
static void	msk_watchdog(struct ifnet *);
static int	msk_mediachange(struct ifnet *);
static void	msk_mediastatus(struct ifnet *, struct ifmediareq *);

static void	msk_tick(void *);
static void	msk_intr_phy(struct msk_if_softc *);
static void	msk_intr_gmac(struct msk_if_softc *);
static __inline void
		msk_rxput(struct msk_if_softc *);
static void	msk_handle_hwerr(struct msk_if_softc *, uint32_t);
static void	msk_rxeof(struct msk_if_softc *, uint32_t, int);
static void	msk_txeof(struct msk_if_softc *, int);
static void	msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
static void	msk_set_rambuffer(struct msk_if_softc *);
static void	msk_stop(struct msk_if_softc *);

static int	msk_txrx_dma_alloc(struct msk_if_softc *);
static void	msk_txrx_dma_free(struct msk_if_softc *);
static int	msk_init_rx_ring(struct msk_if_softc *);
static void	msk_init_tx_ring(struct msk_if_softc *);
static __inline void
		msk_discard_rxbuf(struct msk_if_softc *, int);
static int	msk_newbuf(struct msk_if_softc *, int, int);
static int	msk_encap(struct msk_if_softc *, struct mbuf **);

static int	msk_init_jumbo_rx_ring(struct msk_if_softc *);
static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
static int	msk_jumbo_newbuf(struct msk_if_softc *, int);
static void	msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
static void	*msk_jalloc(struct msk_if_softc *);
static void	msk_jfree(void *, void *);

static int	msk_phy_readreg(struct msk_if_softc *, int, int);
static int	msk_phy_writereg(struct msk_if_softc *, int, int, int);

static void	msk_rxfilter(struct msk_if_softc *);
static void	msk_setvlan(struct msk_if_softc *, struct ifnet *);
static void	msk_set_tx_stfwd(struct msk_if_softc *);

static int	msk_dmamem_create(device_t, bus_size_t, bus_dma_tag_t *,
		    void **, bus_addr_t *, bus_dmamap_t *);
static void	msk_dmamem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
static device_method_t mskc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mskc_probe),
	DEVMETHOD(device_attach,	mskc_attach),
	DEVMETHOD(device_detach,	mskc_detach),
	DEVMETHOD(device_suspend,	mskc_suspend),
	DEVMETHOD(device_resume,	mskc_resume),
	DEVMETHOD(device_shutdown,	mskc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	DEVMETHOD_END
};

static DEFINE_CLASS_0(mskc, mskc_driver, mskc_methods, sizeof(struct msk_softc));
static devclass_t mskc_devclass;
static device_method_t msk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		msk_probe),
	DEVMETHOD(device_attach,	msk_attach),
	DEVMETHOD(device_detach,	msk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),

	DEVMETHOD_END
};

static DEFINE_CLASS_0(msk, msk_driver, msk_methods, sizeof(struct msk_if_softc));
static devclass_t msk_devclass;
DECLARE_DUMMY_MODULE(if_msk);
DRIVER_MODULE(if_msk, pci, mskc_driver, mskc_devclass, NULL, NULL);
DRIVER_MODULE(if_msk, mskc, msk_driver, msk_devclass, NULL, NULL);
DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, NULL, NULL);
static int	mskc_msi_enable = 0;
static int	mskc_intr_rate = 0;
static int	mskc_process_limit = MSK_PROC_DEFAULT;

TUNABLE_INT("hw.mskc.intr_rate", &mskc_intr_rate);
TUNABLE_INT("hw.mskc.process_limit", &mskc_process_limit);
TUNABLE_INT("hw.mskc.msi.enable", &mskc_msi_enable);
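
/*
 * Added note, for illustration only: the TUNABLE_INT() registrations above
 * make these knobs settable from the loader before the driver attaches.
 * The values below are example settings, not driver defaults.
 *
 *	# /boot/loader.conf (example)
 *	hw.mskc.msi.enable=1
 *	hw.mskc.intr_rate=8000
 *	hw.mskc.process_limit=128
 */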
static int
msk_miibus_readreg(device_t dev, int phy, int reg)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_readreg(sc_if, phy, reg));
}
static int
msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
{
	struct msk_softc *sc;
	int i, val;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < MSK_TIMEOUT; i++) {
		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
		if ((val & GM_SMI_CT_RD_VAL) != 0) {
			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
			break;
		}
	}

	if (i == MSK_TIMEOUT) {
		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
		val = 0;
	}

	return (val);
}
static int
msk_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_writereg(sc_if, phy, reg, val));
}
static int
msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
{
	struct msk_softc *sc;
	int i;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
		    GM_SMI_CT_BUSY) == 0)
			break;
	}
	if (i == MSK_TIMEOUT)
		if_printf(sc_if->msk_ifp, "phy write timeout\n");

	return (0);
}
static void
msk_miibus_statchg(device_t dev)
{
	struct msk_if_softc *sc_if;
	struct msk_softc *sc;
	struct mii_data *mii;
	uint32_t gmac;

	sc_if = device_get_softc(dev);
	sc = sc_if->msk_softc;

	mii = device_get_softc(sc_if->msk_miibus);

	sc_if->msk_link = 0;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc_if->msk_link = 1;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
			if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
				sc_if->msk_link = 1;
			break;
		}
	}

	if (sc_if->msk_link != 0) {
		/* Enable Tx FIFO Underrun. */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
		/*
		 * Because mii(4) notifies msk(4) that it detected link status
		 * change, there is no need to enable automatic
		 * speed/flow-control/duplex updates.
		 */
		gmac = GM_GPCR_AU_ALL_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_T:
			gmac |= GM_GPCR_SPEED_1000;
			break;
		case IFM_100_TX:
			gmac |= GM_GPCR_SPEED_100;
			break;
		case IFM_10_T:
			break;
		}

		if ((mii->mii_media_active & IFM_GMASK) & IFM_FDX)
			gmac |= GM_GPCR_DUP_FULL;
		else
			gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS;
		/* Disable Rx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
			gmac |= GM_GPCR_FC_RX_DIS;
		/* Disable Tx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
			gmac |= GM_GPCR_FC_TX_DIS;
		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read again to ensure writing. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

		gmac = GMC_PAUSE_OFF;
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) &&
		    ((mii->mii_media_active & IFM_GMASK) & IFM_FDX))
			gmac = GMC_PAUSE_ON;
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

		/* Enable PHY interrupt for FIFO underrun/overflow. */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
	} else {
		/*
		 * Link state changed to down.
		 * Disable PHY interrupts.
		 */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
		/* Disable Rx/Tx MAC. */
		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		if (gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) {
			gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
			GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
			/* Read again to ensure writing. */
			GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		}
	}
}
static void
msk_rxfilter(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t mchash[2];
	uint32_t crc;
	uint16_t mode;

	sc = sc_if->msk_softc;
	ifp = sc_if->msk_ifp;

	bzero(mchash, sizeof(mchash));
	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	} else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
		mchash[0] = 0xffffffff;
		mchash[1] = 0xffffffff;
	} else {
		mode |= GM_RXCR_UCF_ENA;
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* Just want the 6 least significant bits. */
			crc &= 0x3f;
			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);
		}
		if (mchash[0] != 0 || mchash[1] != 0)
			mode |= GM_RXCR_MCF_ENA;
	}

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
	    mchash[0] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
	    (mchash[0] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
	    mchash[1] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
	    (mchash[1] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}
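
/*
 * Added worked example (illustrative only): for a multicast address whose
 * big-endian CRC32 yields low 6 bits 0x2b (43), the filter bit lands in
 * mchash[43 >> 5] = mchash[1], bit (43 & 0x1f) = 11.  That second 32-bit
 * hash word is then split into 16-bit halves and written to GM_MC_ADDR_H3
 * and GM_MC_ADDR_H4 above.
 */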
static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_ON);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_ON);
	} else {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_OFF);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_OFF);
	}
}
static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		if (msk_newbuf(sc_if, prod, 1) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_RX_RING_CNT);
	}

	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}
static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_jumbo_rx_ring,
	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		if (msk_jumbo_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}
static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_txdesc *txd;
	int i;

	sc_if->msk_cdata.msk_tx_prod = 0;
	sc_if->msk_cdata.msk_tx_cons = 0;
	sc_if->msk_cdata.msk_tx_cnt = 0;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_le = &rd->msk_tx_ring[i];
	}
}
static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
static int
msk_newbuf(struct msk_if_softc *sc_if, int idx, int init)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nseg;

	m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(sc_if->msk_cdata.msk_rx_tag,
	    sc_if->msk_cdata.msk_rx_sparemap,
	    m, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init)
			if_printf(&sc_if->arpcom.ac_if, "can't load RX mbuf\n");
		return (error);
	}

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
	}

	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
	sc_if->msk_cdata.msk_rx_sparemap = map;

	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(seg.ds_addr));
	rx_le->msk_control = htole32(seg.ds_len | OP_PACKET | HW_OWNER);

	return (0);
}
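
/*
 * Added note on the pattern above: the replacement mbuf is loaded into the
 * spare DMA map first, so a failed load leaves the descriptor owning its
 * old, still-valid buffer; only after a successful load are the spare map
 * and the descriptor's map swapped.  The jumbo variant below follows the
 * same scheme.
 */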
static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	void *buf;
	int nsegs;

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	buf = msk_jalloc(sc_if);
	if (buf == NULL) {
		m_freem(m);
		return (ENOBUFS);
	}
	/* Attach the buffer to the mbuf. */
	MEXTADD(m, buf, MSK_JLEN, msk_jfree, sc_if, 0, EXT_NET_DRV);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_pkthdr.len = m->m_len = MSK_JLEN;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);

	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control =
	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}
static int
msk_mediachange(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if = ifp->if_softc;
	struct mii_data *mii;
	int error;

	mii = device_get_softc(sc_if->msk_miibus);
	error = mii_mediachg(mii);

	return (error);
}
/*
 * Report current media status.
 */
static void
msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct msk_if_softc *sc_if = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc_if->msk_miibus);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}
static int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct msk_if_softc *sc_if;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error = 0, mask;

	sc_if = ifp->if_softc;
	ifr = (struct ifreq *)data;

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) {
			error = EINVAL;
			break;
		}
		if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_FE &&
		    ifr->ifr_mtu > MSK_MAX_FRAMELEN) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			msk_init(sc_if);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if (((ifp->if_flags ^ sc_if->msk_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					msk_rxfilter(sc_if);
			} else {
				if (sc_if->msk_detach == 0)
					msk_init(sc_if);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				msk_stop(sc_if);
		}
		sc_if->msk_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			msk_rxfilter(sc_if);
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->msk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist |= MSK_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			msk_setvlan(sc_if, ifp);
		}

		if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
		    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
			/*
			 * In Yukon EC Ultra, TSO & checksum offload is not
			 * supported for jumbo frames.
			 */
			ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		}
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
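
/*
 * Added illustration (not part of the original driver): the SIOCSIFCAP path
 * above is what runs when offload features are toggled from userland, e.g.
 *
 *	ifconfig msk0 -txcsum	# clear IFCAP_TXCSUM, drop if_hwassist bits
 *	ifconfig msk0 vlanhwtag	# re-enable hardware VLAN tagging
 *
 * The interface name is only an example; the flag names follow ifconfig(8).
 */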
static int
mskc_probe(device_t dev)
{
	const struct msk_product *mp;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (mp = msk_products; mp->msk_name != NULL; ++mp) {
		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
			device_set_desc(dev, mp->msk_name);
			return (0);
		}
	}
	return (ENXIO);
}
static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
	int next;
	int i;

	/* Get adapter SRAM size. */
	sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;

	device_printf(sc->msk_dev,
	    "RAM buffer size : %dKB\n", sc->msk_ramsize);

	if (sc->msk_ramsize == 0)
		return (0);
	sc->msk_pflags |= MSK_FLAG_RAMBUF;

	/*
	 * Give receiver 2/3 of memory and round down to the multiple
	 * of 1024. Tx/Rx RAM buffer size of Yukon II should be multiple
	 * of 1024.
	 */
	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
		sc->msk_rxqstart[i] = next;
		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
		next = sc->msk_rxqend[i] + 1;
		sc->msk_txqstart[i] = next;
		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
		next = sc->msk_txqend[i] + 1;

		device_printf(sc->msk_dev,
		    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
		    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
		    sc->msk_rxqend[i]);
		device_printf(sc->msk_dev,
		    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
		    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
		    sc->msk_txqend[i]);
	}

	return (0);
}
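
/*
 * Added worked example (illustrative numbers): with a 48KB SRAM reported
 * through B2_E_0, the receiver gets rounddown(48 * 1024 * 2 / 3, 1024) =
 * 32KB and the transmitter the remaining 16KB.  On a dual-port chip the
 * second port's Rx/Tx windows simply continue where the first port's Tx
 * window ended.
 */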
static void
mskc_phy_power(struct msk_softc *sc, int mode)
{
	uint32_t our, val;
	int i;

	switch (mode) {
	case MSK_PHY_POWERUP:
		/* Switch power to VCC (WA for VAUX problem). */
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		/* Disable Core Clock Division, set Clock Select to 0. */
		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

		val = 0;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		}
		/*
		 * Enable PCI & Core Clock, enable clock gating for both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

		our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
		our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
			if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
				/* Deassert Low Power for 1st PHY. */
				our |= PCI_Y2_PHY1_COMA;
				if (sc->msk_num_port > 1)
					our |= PCI_Y2_PHY2_COMA;
			}
		}
		if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U ||
		    sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) {
			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
			val &= (PCI_FORCE_ASPM_REQUEST |
			    PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY |
			    PCI_ASPM_CLKRUN_REQUEST);
			/* Set all bits to 0 except bits 15..12. */
			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val);
			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
			val &= PCI_CTL_TIM_VMAIN_AV_MSK;
			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val);
			CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0);
			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
			/*
			 * Disable status race, workaround for
			 * Yukon EC Ultra & Yukon EX.
			 */
			val = CSR_READ_4(sc, B2_GP_IO);
			val |= GLB_GPIO_STAT_RACE_DIS;
			CSR_WRITE_4(sc, B2_GP_IO, val);
			CSR_READ_4(sc, B2_GP_IO);
		}
		/* Release PHY from PowerDown/COMA mode. */
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our);

		for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_SET);
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_CLR);
		}
		break;
	case MSK_PHY_POWERDOWN:
		val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			val &= ~PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val &= ~PCI_Y2_PHY2_COMA;
		}
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);

		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
		    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
		    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = 0;
		}
		/*
		 * Disable PCI & Core Clock, disable clock gating for
		 * both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
		break;
	default:
		break;
	}
}
static void
mskc_reset(struct msk_softc *sc)
{
	bus_addr_t addr;
	uint16_t status;
	uint32_t val;
	int i;

	/* Disable ASF. */
	if (sc->msk_hw_id >= CHIP_ID_YUKON_XL &&
	    sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) {
		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
			status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
			/* Clear AHB bridge & microcontroller reset. */
			status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
			    Y2_ASF_HCU_CCSR_CPU_RST_MODE);
			/* Clear ASF microcontroller state. */
			status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
			status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK;
			CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
		} else {
			CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
		}
		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
		/*
		 * Since we disabled ASF, S/W reset is required for
		 * Power Management.
		 */
		CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
		CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	}

	/* Clear all error bits in the PCI status register. */
	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
	    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);

	switch (sc->msk_bustype) {
	case MSK_PEX_BUS:
		/* Clear all PEX errors. */
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((val & PEX_RX_OV) != 0) {
			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
		}
		break;
	case MSK_PCI_BUS:
	case MSK_PCIX_BUS:
		/* Set Cache Line Size to 2(8bytes) if configured to 0. */
		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
		if (val == 0)
			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
		if (sc->msk_bustype == MSK_PCIX_BUS) {
			/* Set Cache Line Size opt. */
			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
			val |= PCI_CLS_OPT;
			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);
		}
		break;
	}
	/* Set PHY power state. */
	mskc_phy_power(sc, MSK_PHY_POWERUP);

	/* Reset GPHY/GMAC Control */
	for (i = 0; i < sc->msk_num_port; i++) {
		/* GPHY Control reset. */
		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
		/* GMAC Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
			CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
			    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
			    GMC_BYP_RETR_ON);
		}
	}

	if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR &&
	    sc->msk_hw_rev > CHIP_REV_YU_SU_B0)
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS);
	if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) {
		/* Disable PCIe PHY powerdown(reg 0x80, bit7). */
		CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
	}
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);

	/* Clear TWSI IRQ. */
	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);

	/* Turn off hardware timer. */
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling. */
	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);

	/* Turn off time stamps. */
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	if (sc->msk_hw_id == CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id == CHIP_ID_YUKON_EC ||
	    sc->msk_hw_id == CHIP_ID_YUKON_FE) {
		/* Configure timeout values. */
		for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL),
			    RI_RST_SET);
			CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL),
			    RI_RST_CLR);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
			    MSK_RI_TO_53);
		}
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);

	/*
	 * On dual port PCI-X card, there is a problem where status
	 * can be received out of order due to split transactions.
	 */
	if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) {
		uint16_t pcix_cmd;

		pcix_cmd = pci_read_config(sc->msk_dev,
		    sc->msk_pcixcap + PCIXR_COMMAND, 2);
		/* Clear Max Outstanding Split Transactions. */
		pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev,
		    sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}
	if (sc->msk_pciecap != 0) {
		/* Change Max. Read Request Size to 2048 bytes. */
		if (pcie_get_max_readrq(sc->msk_dev) ==
		    PCIEM_DEVCTL_MAX_READRQ_512) {
			pcie_set_max_readrq(sc->msk_dev,
			    PCIEM_DEVCTL_MAX_READRQ_2048);
		}
	}

	/* Clear status list. */
	bzero(sc->msk_stat_ring,
	    sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
	sc->msk_stat_cons = 0;
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
	/* Set the status list base address. */
	addr = sc->msk_stat_ring_paddr;
	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
	/* Set the status list last index. */
	CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
		/* WA for dev. #4.3 */
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* WA for dev. #4.18 */
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
	} else {
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
		else
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
	}
	/*
	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
	 */
	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));

	/* Enable status unit. */
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);

	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
}
static int
msk_probe(device_t dev)
{
	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
	char desc[100];

	/*
	 * Not much to do here. We always know there will be
	 * at least one GMAC present, and if there are two,
	 * mskc_attach() will create a second device instance
	 * for us.
	 */
	ksnprintf(desc, sizeof(desc),
	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
	    sc->msk_hw_rev);
	device_set_desc_copy(dev, desc);

	return (0);
}
static int
msk_attach(device_t dev)
{
	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
	struct msk_if_softc *sc_if = device_get_softc(dev);
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int i, port, error;
	uint8_t eaddr[ETHER_ADDR_LEN];

	port = *(int *)device_get_ivars(dev);
	KKASSERT(port == MSK_PORT_A || port == MSK_PORT_B);

	kfree(device_get_ivars(dev), M_DEVBUF);
	device_set_ivars(dev, NULL);

	callout_init(&sc_if->msk_tick_ch);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	sc_if->msk_if_dev = dev;
	sc_if->msk_port = port;
	sc_if->msk_softc = sc;
	sc_if->msk_ifp = ifp;
	sc_if->msk_flags = sc->msk_pflags;
	sc->msk_if[port] = sc_if;

	/* Setup Tx/Rx queue register offsets. */
	if (port == MSK_PORT_A) {
		sc_if->msk_txq = Q_XA1;
		sc_if->msk_txsq = Q_XS1;
		sc_if->msk_rxq = Q_R1;
	} else {
		sc_if->msk_txq = Q_XA2;
		sc_if->msk_txsq = Q_XS2;
		sc_if->msk_rxq = Q_R2;
	}

	error = msk_txrx_dma_alloc(sc_if);
	if (error)
		goto fail;

	ifp->if_softc = sc_if;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = msk_init;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_watchdog = msk_watchdog;
	ifq_set_maxlen(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

	/*
	 * IFCAP_RXCSUM capability is intentionally disabled as the hardware
	 * has a serious bug in Rx checksum offload for all Yukon II family
	 * hardware. It seems there is a workaround to make it work sometimes.
	 * However, the workaround also has to check OP code sequences to
	 * verify whether the OP code is correct. Sometimes it should compute
	 * IP/TCP/UDP checksum in driver in order to verify correctness of
	 * checksum computed by hardware. If you have to compute checksum
	 * with software to verify the hardware's checksum why have hardware
	 * compute the checksum? I think there is no reason to spend time to
	 * make Rx checksum offload work on Yukon II hardware.
	 */
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	ifp->if_hwassist = MSK_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);

	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;

	/*
	 * Do miibus setup.
	 */
	error = mii_phy_probe(dev, &sc_if->msk_miibus,
	    msk_mediachange, msk_mediastatus);
	if (error) {
		device_printf(sc_if->msk_if_dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
	 */
	ether_ifattach(ifp, eaddr, &sc->msk_serializer);

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	return (0);

fail:
	sc->msk_if[port] = NULL;
	msk_detach(dev);

	return (error);
}
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
mskc_attach(device_t dev)
{
	struct msk_softc *sc;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	int error, *port, cpuid;
	u_int irq_flags;

	sc = device_get_softc(dev);
	sc->msk_dev = dev;
	lwkt_serialize_init(&sc->msk_serializer);

	/*
	 * Initialize sysctl variables
	 */
	sc->msk_process_limit = mskc_process_limit;
	sc->msk_intr_rate = mskc_intr_rate;

#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, bar0, bar1;

		/* Save important PCI config data. */
		bar0 = pci_read_config(dev, PCIR_BAR(0), 4);
		bar1 = pci_read_config(dev, PCIR_BAR(1), 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, PCIR_BAR(0), bar0, 4);
		pci_write_config(dev, PCIR_BAR(1), bar1, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* BURN_BRIDGES */

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Allocate I/O resource
	 */
#ifdef MSK_USEIOSPACE
	sc->msk_res_type = SYS_RES_IOPORT;
	sc->msk_res_rid = PCIR_BAR(1);
#else
	sc->msk_res_type = SYS_RES_MEMORY;
	sc->msk_res_rid = PCIR_BAR(0);
#endif
	sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
	    &sc->msk_res_rid, RF_ACTIVE);
	if (sc->msk_res == NULL) {
		if (sc->msk_res_type == SYS_RES_MEMORY) {
			sc->msk_res_type = SYS_RES_IOPORT;
			sc->msk_res_rid = PCIR_BAR(1);
		} else {
			sc->msk_res_type = SYS_RES_MEMORY;
			sc->msk_res_rid = PCIR_BAR(0);
		}
		sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
		    &sc->msk_res_rid, RF_ACTIVE);
		if (sc->msk_res == NULL) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->msk_res_type == SYS_RES_MEMORY ?
			    "memory" : "I/O");
			return (ENXIO);
		}
	}
	sc->msk_res_bt = rman_get_bustag(sc->msk_res);
	sc->msk_res_bh = rman_get_bushandle(sc->msk_res);

	/*
	 * Allocate IRQ
	 */
	sc->msk_irq_type = pci_alloc_1intr(dev, mskc_msi_enable,
	    &sc->msk_irq_rid, &irq_flags);
	sc->msk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->msk_irq_rid,
	    irq_flags);
	if (sc->msk_irq == NULL) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		error = ENXIO;
		goto fail;
	}

	/* Enable all clocks before accessing any registers. */
	CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
	/* Bail out if chip is not recognized. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
	    sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
		    sc->msk_hw_id, sc->msk_hw_rev);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->msk_process_limit, 0, mskc_sysctl_proc_limit,
	    "I", "max number of Rx events to process");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, mskc_sysctl_intr_rate,
	    "I", "max number of interrupts per second");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "defrag_avoided", CTLFLAG_RW, &sc->msk_defrag_avoided,
	    0, "# of avoided m_defrag on TX path");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "leading_copied", CTLFLAG_RW, &sc->msk_leading_copied,
	    0, "# of leading copies on TX path");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "trailing_copied", CTLFLAG_RW, &sc->msk_trailing_copied,
	    0, "# of trailing copies on TX path");

	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
		sc->msk_coppertype = 0;
	else
		sc->msk_coppertype = 1;
	/* Check number of MACs. */
	sc->msk_num_port = 1;
	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			sc->msk_num_port++;
	}

	/* Check bus type. */
	if (pci_is_pcie(sc->msk_dev) == 0) {
		sc->msk_bustype = MSK_PEX_BUS;
		sc->msk_pciecap = pci_get_pciecap_ptr(sc->msk_dev);
	} else if (pci_is_pcix(sc->msk_dev) == 0) {
		sc->msk_bustype = MSK_PCIX_BUS;
		sc->msk_pcixcap = pci_get_pcixcap_ptr(sc->msk_dev);
	} else {
		sc->msk_bustype = MSK_PCI_BUS;
	}

	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC:
	case CHIP_ID_YUKON_EC_U:
		sc->msk_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_EX:
		sc->msk_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_FE:
		sc->msk_clock = 100;	/* 100 Mhz */
		sc->msk_pflags |= MSK_FLAG_FASTETHER;
		break;
	case CHIP_ID_YUKON_FE_P:
		sc->msk_clock = 50;	/* 50 Mhz */
		sc->msk_pflags |= MSK_FLAG_FASTETHER;
		if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
			/*
			 * FE+ A0 has status LE writeback bug so msk(4)
			 * does not rely on status word of received frame
			 * in msk_rxeof() which in turn disables all
			 * hardware assistance bits reported by the status
			 * word as well as validity of the received frame.
			 * Just pass received frames to upper stack with
			 * minimal test and let upper stack handle them.
			 */
			sc->msk_pflags |= MSK_FLAG_NORXCHK;
		}
		break;
	case CHIP_ID_YUKON_XL:
		sc->msk_clock = 156;	/* 156 Mhz */
		break;
	case CHIP_ID_YUKON_SUPR:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_UL_2:
		sc->msk_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_OPT:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	default:
		sc->msk_clock = 156;	/* 156 Mhz */
		break;
	}

	error = mskc_status_dma_alloc(sc);
	if (error)
		goto fail;

	/* Set base interrupt mask. */
	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

	/* Reset the adapter. */
	mskc_reset(sc);

	error = mskc_setup_rambuffer(sc);
	if (error)
		goto fail;

	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
	if (sc->msk_devs[MSK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
	*port = MSK_PORT_A;
	device_set_ivars(sc->msk_devs[MSK_PORT_A], port);

	if (sc->msk_num_port > 1) {
		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
		if (sc->msk_devs[MSK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
		*port = MSK_PORT_B;
		device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
	}

	bus_generic_attach(dev);

	cpuid = rman_get_cpuid(sc->msk_irq);
	if (sc->msk_if[0] != NULL)
		ifq_set_cpuid(&sc->msk_if[0]->msk_ifp->if_snd, cpuid);
	if (sc->msk_if[1] != NULL)
		ifq_set_cpuid(&sc->msk_if[1]->msk_ifp->if_snd, cpuid);

	error = bus_setup_intr(dev, sc->msk_irq, INTR_MPSAFE,
	    mskc_intr, sc, &sc->msk_intrhand,
	    &sc->msk_serializer);
	if (error) {
		device_printf(dev, "couldn't set up interrupt handler\n");
		goto fail;
	}
	return (0);

fail:
	mskc_detach(dev);
	return (error);
}
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
msk_detach(device_t dev)
{
	struct msk_if_softc *sc_if = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct msk_softc *sc = sc_if->msk_softc;
		struct ifnet *ifp = &sc_if->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);

		if (sc->msk_intrhand != NULL) {
			if (sc->msk_if[MSK_PORT_A] != NULL)
				msk_stop(sc->msk_if[MSK_PORT_A]);
			if (sc->msk_if[MSK_PORT_B] != NULL)
				msk_stop(sc->msk_if[MSK_PORT_B]);

			bus_teardown_intr(sc->msk_dev, sc->msk_irq,
			    sc->msk_intrhand);
			sc->msk_intrhand = NULL;
		}

		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc_if->msk_miibus != NULL)
		device_delete_child(dev, sc_if->msk_miibus);

	msk_txrx_dma_free(sc_if);
	return (0);
}
static int
mskc_detach(device_t dev)
{
	struct msk_softc *sc = device_get_softc(dev);
	int *port, i;

	if (device_is_attached(dev)) {
		KASSERT(sc->msk_intrhand == NULL,
		    ("intr is not torn down yet"));
	}

	for (i = 0; i < sc->msk_num_port; ++i) {
		if (sc->msk_devs[i] != NULL) {
			port = device_get_ivars(sc->msk_devs[i]);
			kfree(port, M_DEVBUF);
			device_set_ivars(sc->msk_devs[i], NULL);
			device_delete_child(dev, sc->msk_devs[i]);
		}
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);

	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);

	/* Put hardware reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);

	mskc_status_dma_free(sc);

	if (sc->msk_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->msk_irq_rid,
		    sc->msk_irq);

		if (sc->msk_irq_type == PCI_INTR_TYPE_MSI)
			pci_release_msi(dev);
	}
	if (sc->msk_res != NULL) {
		bus_release_resource(dev, sc->msk_res_type, sc->msk_res_rid,
		    sc->msk_res);
	}

	return (0);
}
/* Create status DMA region. */
static int
mskc_status_dma_alloc(struct msk_softc *sc)
{
	struct bus_dmamem dmem;
	int error;

	error = bus_dmamem_coherent(NULL/* XXX parent */, MSK_STAT_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    MSK_STAT_RING_SZ, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->msk_dev,
		    "failed to create status coherent DMA memory\n");
		return (error);
	}
	sc->msk_stat_tag = dmem.dmem_tag;
	sc->msk_stat_map = dmem.dmem_map;
	sc->msk_stat_ring = dmem.dmem_addr;
	sc->msk_stat_ring_paddr = dmem.dmem_busaddr;

	return (0);
}
static void
mskc_status_dma_free(struct msk_softc *sc)
{
	/* Destroy status block. */
	if (sc->msk_stat_tag) {
		bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
		bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring,
		    sc->msk_stat_map);
		bus_dma_tag_destroy(sc->msk_stat_tag);
		sc->msk_stat_tag = NULL;
	}
}
1872 msk_txrx_dma_alloc(struct msk_if_softc
*sc_if
)
1876 struct msk_rxdesc
*jrxd
;
1877 struct msk_jpool_entry
*entry
;
1882 /* Create parent DMA tag. */
1885 * It seems that Yukon II supports full 64bits DMA operations. But
1886 * it needs two descriptors(list elements) for 64bits DMA operations.
1887 * Since we don't know what DMA address mappings(32bits or 64bits)
1888 * would be used in advance for each mbufs, we limits its DMA space
1889 * to be in range of 32bits address space. Otherwise, we should check
1890 * what DMA address is used and chain another descriptor for the
1891 * 64bits DMA operation. This also means descriptor ring size is
1892 * variable. Limiting DMA address to be in 32bit address space greatly
1893 * simplyfies descriptor handling and possibly would increase
1894 * performance a bit due to efficient handling of descriptors.
1895 * Apart from harassing checksum offloading mechanisms, it seems
1896 * it's really bad idea to use a seperate descriptor for 64bit
1897 * DMA operation to save small descriptor memory. Anyway, I've
1898 * never seen these exotic scheme on ethernet interface hardware.
1900 error
= bus_dma_tag_create(
1902 1, 0, /* alignment, boundary */
1903 BUS_SPACE_MAXADDR_32BIT
, /* lowaddr */
1904 BUS_SPACE_MAXADDR
, /* highaddr */
1905 NULL
, NULL
, /* filter, filterarg */
1906 BUS_SPACE_MAXSIZE_32BIT
, /* maxsize */
1908 BUS_SPACE_MAXSIZE_32BIT
, /* maxsegsize */
1910 &sc_if
->msk_cdata
.msk_parent_tag
);
1912 device_printf(sc_if
->msk_if_dev
,
1913 "failed to create parent DMA tag\n");
	/* Create DMA stuffs for Tx ring. */
	error = msk_dmamem_create(sc_if->msk_if_dev, MSK_TX_RING_SZ,
	    &sc_if->msk_cdata.msk_tx_ring_tag,
	    (void *)&sc_if->msk_rdata.msk_tx_ring,
	    &sc_if->msk_rdata.msk_tx_ring_paddr,
	    &sc_if->msk_cdata.msk_tx_ring_map);
	if (error) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create TX ring DMA stuffs\n");
		return (error);
	}

	/* Create DMA stuffs for Rx ring. */
	error = msk_dmamem_create(sc_if->msk_if_dev, MSK_RX_RING_SZ,
	    &sc_if->msk_cdata.msk_rx_ring_tag,
	    (void *)&sc_if->msk_rdata.msk_rx_ring,
	    &sc_if->msk_rdata.msk_rx_ring_paddr,
	    &sc_if->msk_cdata.msk_rx_ring_map);
	if (error) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create RX ring DMA stuffs\n");
		return (error);
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MSK_JUMBO_FRAMELEN,		/* maxsize */
	    MSK_MAXTXSEGS,		/* nsegments */
	    MSK_MAXSGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
	    BUS_DMA_ONEBPAGE,		/* flags */
	    &sc_if->msk_cdata.msk_tx_tag);
	if (error) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Tx DMA tag\n");
		return (error);
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		struct msk_txdesc *txd = &sc_if->msk_cdata.msk_txdesc[i];

		error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &txd->tx_dmamap);
		if (error) {
			device_printf(sc_if->msk_if_dev,
			    "failed to create %dth Tx dmamap\n", i);

			for (j = 0; j < i; ++j) {
				txd = &sc_if->msk_cdata.msk_txdesc[j];
				bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
				    txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
			sc_if->msk_cdata.msk_tx_tag = NULL;

			return (error);
		}
	}
	/*
	 * Workaround for a hardware hang which seems to happen when the Rx
	 * buffer is not aligned on a multiple of the FIFO word (8 bytes).
	 */
	if (sc_if->msk_flags & MSK_FLAG_RAMBUF)
		rxalign = MSK_RX_BUF_ALIGN;
	else
		rxalign = 1;
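	/*
	 * Added note: MSK_RX_BUF_ALIGN presumably matches the 8-byte FIFO
	 * word mentioned above, so with RAM buffers enabled every Rx
	 * cluster handed to the chip starts on an 8-byte boundary (enforced
	 * below via the tag alignment and BUS_DMA_ALIGNED); without RAM
	 * buffers the default byte alignment is kept.
	 */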
	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
	    rxalign, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED |
	    BUS_DMA_WAITOK,		/* flags */
	    &sc_if->msk_cdata.msk_rx_tag);
	if (error) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Rx DMA tag\n");
		return (error);
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, BUS_DMA_WAITOK,
	    &sc_if->msk_cdata.msk_rx_sparemap);
	if (error) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create spare Rx dmamap\n");
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
		sc_if->msk_cdata.msk_rx_tag = NULL;

		return (error);
	}
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		struct msk_rxdesc *rxd = &sc_if->msk_cdata.msk_rxdesc[i];

		error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag,
		    BUS_DMA_WAITOK, &rxd->rx_dmamap);
		if (error) {
			device_printf(sc_if->msk_if_dev,
			    "failed to create %dth Rx dmamap\n", i);

			for (j = 0; j < i; ++j) {
				rxd = &sc_if->msk_cdata.msk_rxdesc[j];
				bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
				    rxd->rx_dmamap);
			}
			bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
			    sc_if->msk_cdata.msk_rx_sparemap);
			bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
			sc_if->msk_cdata.msk_rx_tag = NULL;

			return (error);
		}
	}

	SLIST_INIT(&sc_if->msk_jfree_listhead);
	SLIST_INIT(&sc_if->msk_jinuse_listhead);
	/* Create tag for jumbo Rx ring. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
	    MSK_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MSK_JUMBO_RX_RING_SZ,	/* maxsize */
	    1,				/* nsegments */
	    MSK_JUMBO_RX_RING_SZ,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
	if (error) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create jumbo Rx ring DMA tag\n");
		return (error);
	}

	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
	if (error) {
		device_printf(sc_if->msk_if_dev,
		    "failed to allocate DMA'able memory for jumbo Rx ring\n");
		return (error);
	}

	ctx.msk_busaddr = 0;
	error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
	    sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
	    msk_dmamap_cb, &ctx, 0);
	if (error) {
		device_printf(sc_if->msk_if_dev,
		    "failed to load DMA'able memory for jumbo Rx ring\n");
		return (error);
	}
	sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
	/* Create tag for jumbo buffer blocks. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
	    PAGE_SIZE, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MSK_JMEM,			/* maxsize */
	    1,				/* nsegments */
	    MSK_JMEM,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->msk_cdata.msk_jumbo_tag);
	if (error) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create jumbo Rx buffer block DMA tag\n");
		return (error);
	}

	/* Create tag for jumbo Rx buffers. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
	    PAGE_SIZE, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * MSK_MAXRXSEGS,	/* maxsize */
	    MSK_MAXRXSEGS,		/* nsegments */
	    MSK_JLEN,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->msk_cdata.msk_jumbo_rx_tag);
	if (error) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create jumbo Rx DMA tag\n");
		return (error);
	}

	/* Create DMA maps for jumbo Rx buffers. */
	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
	    &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create spare jumbo Rx dmamap\n");
		return (error);
	}

	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];

		jrxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
		    &jrxd->rx_dmamap);
		if (error) {
			device_printf(sc_if->msk_if_dev,
			    "failed to create jumbo Rx dmamap\n");
			return (error);
		}
	}

	/* Allocate DMA'able memory and load the DMA map for jumbo buf. */
	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag,
	    (void **)&sc_if->msk_rdata.msk_jumbo_buf,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc_if->msk_cdata.msk_jumbo_map);
	if (error) {
		device_printf(sc_if->msk_if_dev,
		    "failed to allocate DMA'able memory for jumbo buf\n");
		return (error);
	}

	ctx.msk_busaddr = 0;
	error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag,
	    sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf,
	    MSK_JMEM, msk_dmamap_cb, &ctx, 0);
	if (error) {
		device_printf(sc_if->msk_if_dev,
		    "failed to load DMA'able memory for jumbobuf\n");
		return (error);
	}
	sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr;
	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc_if->msk_rdata.msk_jumbo_buf;
	for (i = 0; i < MSK_JSLOTS; i++) {
		sc_if->msk_cdata.msk_jslots[i] = ptr;
		ptr += MSK_JLEN;
		entry = malloc(sizeof(struct msk_jpool_entry),
		    M_DEVBUF, M_WAITOK);
		if (entry == NULL) {
			device_printf(sc_if->msk_if_dev,
			    "no memory for jumbo buffers!\n");
			return (ENOBUFS);
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
		    jpool_entries);
	}

	return (0);
}
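/*
 * Note on the jumbo pool layout (an added sketch, not from the original
 * comments): msk_jumbo_buf is one contiguous MSK_JMEM-byte DMA block, carved
 * into MSK_JSLOTS fixed-size slots of MSK_JLEN bytes (roughly 9KB each, per
 * the "9K pieces" comment above).  msk_jslots[i] simply points MSK_JLEN * i
 * bytes into that block, and the jpool entry carrying slot number i moves
 * between msk_jfree_listhead and msk_jinuse_listhead as the slot is handed
 * out by msk_jalloc() and released by msk_jfree().
 */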
static void
msk_txrx_dma_free(struct msk_if_softc *sc_if)
{
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
	struct msk_rxdesc *jrxd;
	struct msk_jpool_entry *entry;
	int i;

	MSK_JLIST_LOCK(sc_if);
	while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) {
		device_printf(sc_if->msk_if_dev,
		    "asked to free buffer that is in use!\n");
		SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
		SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
		    jpool_entries);
	}

	while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) {
		entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
		SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
		free(entry, M_DEVBUF);
	}
	MSK_JLIST_UNLOCK(sc_if);

	/* Destroy jumbo buffer block. */
	if (sc_if->msk_cdata.msk_jumbo_map)
		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag,
		    sc_if->msk_cdata.msk_jumbo_map);

	if (sc_if->msk_rdata.msk_jumbo_buf) {
		bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag,
		    sc_if->msk_rdata.msk_jumbo_buf,
		    sc_if->msk_cdata.msk_jumbo_map);
		sc_if->msk_rdata.msk_jumbo_buf = NULL;
		sc_if->msk_cdata.msk_jumbo_map = NULL;
	}

	/* Jumbo Rx ring. */
	if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
			bus_dmamap_unload(
			    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
		    sc_if->msk_rdata.msk_jumbo_rx_ring)
			bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
			    sc_if->msk_rdata.msk_jumbo_rx_ring,
			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
		sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
		sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
		sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
	}
	/* Jumbo Rx buffers. */
	if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
		for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
			jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
			if (jrxd->rx_dmamap) {
				bus_dmamap_destroy(
				    sc_if->msk_cdata.msk_jumbo_rx_tag,
				    jrxd->rx_dmamap);
				jrxd->rx_dmamap = NULL;
			}
		}
		if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
			bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    sc_if->msk_cdata.msk_jumbo_rx_sparemap);
			sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
		sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
	}

	/* Tx ring. */
	msk_dmamem_destroy(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_rdata.msk_tx_ring,
	    sc_if->msk_cdata.msk_tx_ring_map);

	/* Rx ring. */
	msk_dmamem_destroy(sc_if->msk_cdata.msk_rx_ring_tag,
	    sc_if->msk_rdata.msk_rx_ring,
	    sc_if->msk_cdata.msk_rx_ring_map);

	/* Tx buffers. */
	if (sc_if->msk_cdata.msk_tx_tag) {
		for (i = 0; i < MSK_TX_RING_CNT; i++) {
			txd = &sc_if->msk_cdata.msk_txdesc[i];
			bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
		sc_if->msk_cdata.msk_tx_tag = NULL;
	}

	/* Rx buffers. */
	if (sc_if->msk_cdata.msk_rx_tag) {
		for (i = 0; i < MSK_RX_RING_CNT; i++) {
			rxd = &sc_if->msk_cdata.msk_rxdesc[i];
			bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap);
		}
		bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
		    sc_if->msk_cdata.msk_rx_sparemap);
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
		sc_if->msk_cdata.msk_rx_tag = NULL;
	}

	if (sc_if->msk_cdata.msk_parent_tag) {
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
		sc_if->msk_cdata.msk_parent_tag = NULL;
	}
}
/*
 * Allocate a jumbo buffer.
 */
static void *
msk_jalloc(struct msk_if_softc *sc_if)
{
	struct msk_jpool_entry *entry;

	MSK_JLIST_LOCK(sc_if);

	entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
	if (entry == NULL) {
		MSK_JLIST_UNLOCK(sc_if);
		return (NULL);
	}

	SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries);

	MSK_JLIST_UNLOCK(sc_if);

	return (sc_if->msk_cdata.msk_jslots[entry->slot]);
}
/*
 * Release a jumbo buffer.
 */
static void
msk_jfree(void *buf, void *args)
{
	struct msk_if_softc *sc_if;
	struct msk_jpool_entry *entry;
	int i;

	/* Extract the softc struct pointer. */
	sc_if = (struct msk_if_softc *)args;
	KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__));

	MSK_JLIST_LOCK(sc_if);
	/* Calculate the slot this buffer belongs to. */
	i = ((vm_offset_t)buf
	    - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN;
	KASSERT(i >= 0 && i < MSK_JSLOTS,
	    ("%s: asked to free buffer that we don't manage!", __func__));

	entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead);
	KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries);
	if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead))
		wakeup(sc_if);

	MSK_JLIST_UNLOCK(sc_if);
}
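/*
 * Example of the slot math above (added, illustrative only): a buffer that
 * sits MSK_JLEN * 3 bytes past msk_jumbo_buf maps back to slot index 3, so
 * the corresponding msk_jslots[] pointer can be handed out again once its
 * jpool entry is moved back onto the free list.
 */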
static int
msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
{
	struct msk_txdesc *txd, *txd_last;
	struct msk_tx_desc *tx_le;
	struct mbuf *m;
	bus_dmamap_t map;
	bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
	uint32_t control, prod, si;
	uint16_t offset, tcp_offset;
	int error, i, nsegs, maxsegs, defrag;

	maxsegs = MSK_TX_RING_CNT - sc_if->msk_cdata.msk_tx_cnt -
	    MSK_RESERVED_TX_DESC_CNT;
	KASSERT(maxsegs >= MSK_SPARE_TX_DESC_CNT,
	    ("not enough spare TX desc"));
	if (maxsegs > MSK_MAXTXSEGS)
		maxsegs = MSK_MAXTXSEGS;

	/*
	 * Align the TX buffer on a 64-byte boundary.  This greatly improves
	 * bulk data TX performance on my 88E8053 (+100Mbps) at least.
	 * Try to avoid m_defrag() if the mbufs are not chained together
	 * by m_next (i.e. m->m_len == m->m_pkthdr.len).
	 */
#define MSK_TXBUF_ALIGN	64
#define MSK_TXBUF_MASK	(MSK_TXBUF_ALIGN - 1)
	defrag = 1;
	m = *m_head;
	if (m->m_len == m->m_pkthdr.len) {
		uint16_t space;

		space = ((uintptr_t)m->m_data & MSK_TXBUF_MASK);
		if (space) {
			if (M_WRITABLE(m)) {
				if (M_TRAILINGSPACE(m) >= space) {
					bcopy(m->m_data, m->m_data + space,
					    m->m_len);
					m->m_data += space;
					sc_if->msk_softc->msk_trailing_copied++;
					defrag = 0;
				} else {
					space = MSK_TXBUF_ALIGN - space;
					if (M_LEADINGSPACE(m) >= space) {
						/* e.g. Small UDP datagrams */
						bcopy(m->m_data,
						    m->m_data - space,
						    m->m_len);
						m->m_data -= space;
						sc_if->msk_softc->
						    msk_leading_copied++;
						defrag = 0;
					}
				}
			}
		} else {
			/* e.g. on forwarding path */
			defrag = 0;
		}
	}
	if (defrag) {
		m = m_defrag(*m_head, M_NOWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	} else {
		sc_if->msk_softc->msk_defrag_avoided++;
	}
#undef MSK_TXBUF_MASK
#undef MSK_TXBUF_ALIGN
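	/*
	 * Added note: the three msk_softc counters bumped above record which
	 * path a packet took, i.e. whether the data was slid within a
	 * writable single-mbuf chain using trailing or leading space, or
	 * whether the chain was already usable as-is.  Falling back to
	 * m_defrag(9) copies the whole packet into a fresh cluster, which is
	 * why the copy-in-place paths are tried first.
	 */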
	tcp_offset = offset = 0;
	if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
		/*
		 * Since the mbuf carries no protocol-specific structure
		 * information, we have to inspect the protocol headers here
		 * to set up TSO and checksum offload.  I don't know why
		 * Marvell made such a decision in the chip design; other
		 * GigE hardware normally takes care of all these chores in
		 * hardware.  However, TSO performance of the Yukon II is
		 * good enough that it is worth implementing.
		 */
		struct ether_header *eh;
		struct ip *ip;

		/* TODO check for M_WRITABLE(m) */

		offset = sizeof(struct ether_header);
		m = m_pullup(m, offset);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/* Check if hardware VLAN insertion is off. */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			offset = sizeof(struct ether_vlan_header);
			m = m_pullup(m, offset);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, offset + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + offset);
		offset += (ip->ip_hl << 2);
		tcp_offset = offset;
		/*
		 * It seems that the Yukon II has a Tx checksum offload bug
		 * for small TCP packets that are less than 60 bytes in size
		 * (e.g. TCP window probe packets, pure ACK packets).
		 * The common workaround of padding the frame with zeros to
		 * the minimum Ethernet frame size did not work at all.
		 * Instead of disabling checksum offload completely we
		 * resort to a software checksum routine when we encounter
		 * short TCP frames.
		 * Short UDP packets appear to be handled correctly by
		 * the hardware.
		 */
		if (m->m_pkthdr.len < MSK_MIN_FRAMELEN &&
		    (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
			uint16_t csum;

			csum = in_cksum_skip(m, ntohs(ip->ip_len) + offset -
			    (ip->ip_hl << 2), offset);
			*(uint16_t *)(m->m_data + offset +
			    m->m_pkthdr.csum_data) = csum;
			m->m_pkthdr.csum_flags &= ~CSUM_TCP;
		}
		*m_head = m;
	}
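	/*
	 * Worked example of the software fallback above (added note): for a
	 * bare TCP ACK the frame is well below MSK_MIN_FRAMELEN, so the
	 * driver computes the TCP checksum itself with in_cksum_skip(9) over
	 * the TCP header and payload (skipping the first 'offset' bytes of
	 * link and IP headers), stores it at the offset the stack passed in
	 * csum_data, and clears CSUM_TCP so the hardware leaves that frame's
	 * checksum alone.
	 */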
	prod = sc_if->msk_cdata.msk_tx_prod;
	txd = &sc_if->msk_cdata.msk_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf_defrag(sc_if->msk_cdata.msk_tx_tag, map,
	    m_head, txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(*m_head);
		*m_head = NULL;
		return (error);
	}
	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);

	m = *m_head;
	control = 0;

	/* Check if we have a VLAN tag to insert. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		tx_le->msk_addr = htole32(0);
		tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
		    htons(m->m_pkthdr.ether_vtag));
		sc_if->msk_cdata.msk_tx_cnt++;
		MSK_INC(prod, MSK_TX_RING_CNT);
		control |= INS_VLAN;
	}

	/* Check if we have to handle checksum offload. */
	if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data)
		    & 0xffff) | ((uint32_t)tcp_offset << 16));
		tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER));
		control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			control |= UDPTCP;
		sc_if->msk_cdata.msk_tx_cnt++;
		MSK_INC(prod, MSK_TX_RING_CNT);
	}

	si = prod;

	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
	tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
	tx_le->msk_control = htole32(txsegs[0].ds_len | control |
	    OP_PACKET);
	sc_if->msk_cdata.msk_tx_cnt++;
	MSK_INC(prod, MSK_TX_RING_CNT);

	for (i = 1; i < nsegs; i++) {
		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
		tx_le->msk_control = htole32(txsegs[i].ds_len | control |
		    OP_BUFFER | HW_OWNER);
		sc_if->msk_cdata.msk_tx_cnt++;
		MSK_INC(prod, MSK_TX_RING_CNT);
	}

	/* Update producer index. */
	sc_if->msk_cdata.msk_tx_prod = prod;

	/* Set EOP on the last descriptor. */
	prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
	tx_le->msk_control |= htole32(EOP);

	/* Turn the first descriptor ownership to hardware. */
	tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
	tx_le->msk_control |= htole32(HW_OWNER);

	txd = &sc_if->msk_cdata.msk_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	return (0);
}
static void
msk_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct msk_if_softc *sc_if;
	struct mbuf *m_head;
	int enq;

	sc_if = ifp->if_softc;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(ifp->if_serializer);

	if (!sc_if->msk_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	enq = 0;
	while (!ifq_is_empty(&ifp->if_snd)) {
		if (MSK_IS_OACTIVE(sc_if)) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (msk_encap(sc_if, &m_head) != 0) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			if (sc_if->msk_cdata.msk_tx_cnt == 0) {
				continue;
			} else {
				ifq_set_oactive(&ifp->if_snd);
				break;
			}
		}
		enq = 1;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (enq) {
		/* Transmit */
		CSR_WRITE_2(sc_if->msk_softc,
		    Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
		    sc_if->msk_cdata.msk_tx_prod);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = MSK_TX_TIMEOUT;
	}
}
static void
msk_watchdog(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if = ifp->if_softc;
	int ridx, idx;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (sc_if->msk_link == 0) {
		if_printf(sc_if->msk_ifp, "watchdog timeout "
		    "(missed link)\n");
		IFNET_STAT_INC(ifp, oerrors, 1);
		msk_init(sc_if);
		return;
	}

	/*
	 * Reclaim first as there is a possibility of losing Tx completion
	 * interrupts.
	 */
	ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
	idx = CSR_READ_2(sc_if->msk_softc, ridx);
	if (sc_if->msk_cdata.msk_tx_cons != idx) {
		msk_txeof(sc_if, idx);
		if (sc_if->msk_cdata.msk_tx_cnt == 0) {
			if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
			    "-- recovering\n");
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			return;
		}
	}

	if_printf(ifp, "watchdog timeout\n");
	IFNET_STAT_INC(ifp, oerrors, 1);
	msk_init(sc_if);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
static int
mskc_shutdown(device_t dev)
{
	struct msk_softc *sc = device_get_softc(dev);
	int i;

	lwkt_serialize_enter(&sc->msk_serializer);

	for (i = 0; i < sc->msk_num_port; i++) {
		if (sc->msk_if[i] != NULL)
			msk_stop(sc->msk_if[i]);
	}

	/* Put hardware reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);

	lwkt_serialize_exit(&sc->msk_serializer);

	return (0);
}
static int
mskc_suspend(device_t dev)
{
	struct msk_softc *sc = device_get_softc(dev);
	int i;

	lwkt_serialize_enter(&sc->msk_serializer);

	for (i = 0; i < sc->msk_num_port; i++) {
		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
		    ((sc->msk_if[i]->msk_ifp->if_flags & IFF_RUNNING) != 0))
			msk_stop(sc->msk_if[i]);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);

	mskc_phy_power(sc, MSK_PHY_POWERDOWN);

	/* Put hardware reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	sc->msk_suspended = 1;

	lwkt_serialize_exit(&sc->msk_serializer);

	return (0);
}
static int
mskc_resume(device_t dev)
{
	struct msk_softc *sc = device_get_softc(dev);
	int i;

	lwkt_serialize_enter(&sc->msk_serializer);

	/* Enable all clocks before accessing any registers. */
	CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);

	for (i = 0; i < sc->msk_num_port; i++) {
		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
		    ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0))
			msk_init(sc->msk_if[i]);
	}
	sc->msk_suspended = 0;

	lwkt_serialize_exit(&sc->msk_serializer);

	return (0);
}
static void
msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct msk_rxdesc *rxd;
	int cons, rxlen;

	ifp = sc_if->msk_ifp;

	cons = sc_if->msk_cdata.msk_rx_cons;
	do {
		rxlen = status >> 16;
		if ((status & GMR_FS_VLAN) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
			rxlen -= EVL_ENCAPLEN;
		if (sc_if->msk_flags & MSK_FLAG_NORXCHK) {
			/*
			 * For controllers that return a bogus status code,
			 * just do a minimal check and let the upper stack
			 * handle this frame.
			 */
			if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
				IFNET_STAT_INC(ifp, ierrors, 1);
				msk_discard_rxbuf(sc_if, cons);
				break;
			}
		} else if (len > sc_if->msk_framesize ||
		    ((status & GMR_FS_ANY_ERR) != 0) ||
		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
			/* Don't count flow-control packet as errors. */
			if ((status & GMR_FS_GOOD_FC) == 0)
				IFNET_STAT_INC(ifp, ierrors, 1);
			msk_discard_rxbuf(sc_if, cons);
			break;
		}
		rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
		m = rxd->rx_m;
		if (msk_newbuf(sc_if, cons, 0) != 0) {
			IFNET_STAT_INC(ifp, iqdrops, 1);
			/* Reuse old buffer. */
			msk_discard_rxbuf(sc_if, cons);
			break;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
		IFNET_STAT_INC(ifp, ipackets, 1);

		/* Check for VLAN tagged packets. */
		if ((status & GMR_FS_VLAN) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
			m->m_flags |= M_VLANTAG;
		}

		ifp->if_input(ifp, m, NULL, -1);
	} while (0);

	MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
	MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
}
static void
msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct msk_rxdesc *jrxd;
	int cons, rxlen;

	ifp = sc_if->msk_ifp;

	MSK_IF_LOCK_ASSERT(sc_if);

	cons = sc_if->msk_cdata.msk_rx_cons;
	do {
		rxlen = status >> 16;
		if ((status & GMR_FS_VLAN) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
			rxlen -= ETHER_VLAN_ENCAP_LEN;
		if (len > sc_if->msk_framesize ||
		    ((status & GMR_FS_ANY_ERR) != 0) ||
		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
			/* Don't count flow-control packet as errors. */
			if ((status & GMR_FS_GOOD_FC) == 0)
				IFNET_STAT_INC(ifp, ierrors, 1);
			msk_discard_jumbo_rxbuf(sc_if, cons);
			break;
		}
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
		m = jrxd->rx_m;
		if (msk_jumbo_newbuf(sc_if, cons) != 0) {
			IFNET_STAT_INC(ifp, iqdrops, 1);
			/* Reuse old buffer. */
			msk_discard_jumbo_rxbuf(sc_if, cons);
			break;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
		IFNET_STAT_INC(ifp, ipackets, 1);

		/* Check for VLAN tagged packets. */
		if ((status & GMR_FS_VLAN) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
			m->m_flags |= M_VLANTAG;
		}
		MSK_IF_UNLOCK(sc_if);
		ifp->if_input(ifp, m, NULL, -1);
		MSK_IF_LOCK(sc_if);
	} while (0);

	MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
	MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
}
static void
msk_txeof(struct msk_if_softc *sc_if, int idx)
{
	struct msk_txdesc *txd;
	struct msk_tx_desc *cur_tx;
	struct ifnet *ifp;
	uint32_t control;
	int cons;

	ifp = sc_if->msk_ifp;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	cons = sc_if->msk_cdata.msk_tx_cons;
	for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
		if (sc_if->msk_cdata.msk_tx_cnt <= 0)
			break;
		cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
		control = le32toh(cur_tx->msk_control);
		sc_if->msk_cdata.msk_tx_cnt--;
		if ((control & EOP) == 0)
			continue;
		txd = &sc_if->msk_cdata.msk_txdesc[cons];
		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);

		IFNET_STAT_INC(ifp, opackets, 1);
		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
		    __func__));
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	sc_if->msk_cdata.msk_tx_cons = cons;
	if (!MSK_IS_OACTIVE(sc_if))
		ifq_clr_oactive(&ifp->if_snd);
	if (sc_if->msk_cdata.msk_tx_cnt == 0)
		ifp->if_timer = 0;
	/* No need to sync LEs as we didn't update LEs. */
}
static void
msk_tick(void *xsc_if)
{
	struct msk_if_softc *sc_if = xsc_if;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct mii_data *mii;

	lwkt_serialize_enter(ifp->if_serializer);

	mii = device_get_softc(sc_if->msk_miibus);
	mii_tick(mii);
	if (!sc_if->msk_link)
		msk_miibus_statchg(sc_if->msk_if_dev);
	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);

	lwkt_serialize_exit(ifp->if_serializer);
}
static void
msk_intr_phy(struct msk_if_softc *sc_if)
{
	uint16_t status;

	msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
	status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);

	/* Handle FIFO Underrun/Overflow? */
	if (status & PHY_M_IS_FIFO_ERROR) {
		device_printf(sc_if->msk_if_dev,
		    "PHY FIFO underrun/overflow.\n");
	}
}
static void
msk_intr_gmac(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	uint8_t status;

	sc = sc_if->msk_softc;
	status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));

	/* GMAC Rx FIFO overrun. */
	if ((status & GM_IS_RX_FF_OR) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    GMF_CLI_RX_FO);
	}
	/* GMAC Tx FIFO underrun. */
	if ((status & GM_IS_TX_FF_UR) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_FU);
		device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
		/*
		 * XXX
		 * In case of Tx underrun, we may need to flush/reset the
		 * Tx MAC, but that would also require resynchronization
		 * with the status LEs.  Reinitializing the status LEs would
		 * affect the other port in a dual-MAC configuration, so it
		 * should be avoided as much as possible.  Due to the lack
		 * of documentation this is all vague guesswork and needs
		 * more investigation.
		 */
	}
}
static void
msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;

	if ((status & Y2_IS_PAR_RD1) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "RAM buffer read parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
		    RI_CLR_RD_PERR);
	}
	if ((status & Y2_IS_PAR_WR1) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "RAM buffer write parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
		    RI_CLR_WR_PERR);
	}
	if ((status & Y2_IS_PAR_MAC1) != 0) {
		device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_PE);
	}
	if ((status & Y2_IS_PAR_RX1) != 0) {
		device_printf(sc_if->msk_if_dev, "Rx parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
	}
	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
		device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
	}
}
static void
mskc_intr_hwerr(struct msk_softc *sc)
{
	uint32_t status;
	uint32_t tlphead[4];

	status = CSR_READ_4(sc, B0_HWE_ISRC);

	/* Time Stamp timer overflow. */
	if ((status & Y2_IS_TIST_OV) != 0)
		CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
	if ((status & Y2_IS_PCI_NEXP) != 0) {
		/*
		 * A PCI Express error occurred which is not described in
		 * the PEX spec.
		 * This error is also mapped either to Master Abort
		 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
		 * can only be cleared there.
		 */
		device_printf(sc->msk_dev,
		    "PCI Express protocol violation error\n");
	}

	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
		uint16_t v16;

		if ((status & Y2_IS_MST_ERR) != 0)
			device_printf(sc->msk_dev,
			    "unexpected IRQ Status error\n");
		else
			device_printf(sc->msk_dev,
			    "unexpected IRQ Master error\n");
		/* Reset all bits in the PCI status register. */
		v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
		    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
		    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	/* Check for PCI Express Uncorrectable Error. */
	if ((status & Y2_IS_PCI_EXP) != 0) {
		uint32_t v32;

		/*
		 * On PCI Express bus bridges are called root complexes (RC).
		 * PCI Express errors are recognized by the root complex too,
		 * which requests the system to handle the problem.  After
		 * error occurrence it may be that no access to the adapter
		 * may be performed any longer.
		 */
		v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((v32 & PEX_UNSUP_REQ) != 0) {
			/* Ignore unsupported request error. */
		} else {
			device_printf(sc->msk_dev,
			    "Uncorrectable PCI Express error\n");
		}
		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
			int i;

			/* Get TLP header from Log Registers. */
			for (i = 0; i < 4; i++)
				tlphead[i] = CSR_PCI_READ_4(sc,
				    PEX_HEADER_LOG + i * 4);
			/* Check for vendor defined broadcast message. */
			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
				sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
				CSR_WRITE_4(sc, B0_HWE_IMSK,
				    sc->msk_intrhwemask);
				CSR_READ_4(sc, B0_HWE_IMSK);
			}
		}
		/* Clear the interrupt. */
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
		msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
	if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
		msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
}
static __inline void
msk_rxput(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;

	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
		bus_dmamap_sync(
		    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
		    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
		    BUS_DMASYNC_PREWRITE);
	}
	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
	    PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
}
static int
mskc_handle_events(struct msk_softc *sc)
{
	struct msk_if_softc *sc_if;
	struct msk_stat_desc *sd;
	uint32_t control, status;
	int cons, idx, len, port, rxprog;
	int rxput[2];

	idx = CSR_READ_2(sc, STAT_PUT_IDX);
	if (idx == sc->msk_stat_cons)
		return (0);

	rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;

	rxprog = 0;
	for (cons = sc->msk_stat_cons; cons != idx;) {
		sd = &sc->msk_stat_ring[cons];
		control = le32toh(sd->msk_control);
		if ((control & HW_OWNER) == 0)
			break;
		/*
		 * Marvell's FreeBSD driver updates status LE after clearing
		 * HW_OWNER. However we don't have a way to sync single LE
		 * with bus_dma(9) API. bus_dma(9) provides a way to sync
		 * an entire DMA map. So don't sync LE until we have a better
		 * way in future.
		 */
		control &= ~HW_OWNER;
		sd->msk_control = htole32(control);
		status = le32toh(sd->msk_status);
		len = control & STLE_LEN_MASK;
		port = (control >> 16) & 0x01;
		sc_if = sc->msk_if[port];
		if (sc_if == NULL) {
			device_printf(sc->msk_dev, "invalid port opcode "
			    "0x%08x\n", control & STLE_OP_MASK);
			break;
		}

		switch (control & STLE_OP_MASK) {
		case OP_RXVLAN:
			sc_if->msk_vtag = ntohs(len);
			break;
		case OP_RXCHKSVLAN:
			sc_if->msk_vtag = ntohs(len);
			break;
		case OP_RXSTAT:
			if ((sc_if->msk_ifp->if_flags & IFF_RUNNING) == 0)
				break;
			if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
				msk_jumbo_rxeof(sc_if, status, len);
			else
				msk_rxeof(sc_if, status, len);
			rxprog++;
			/*
			 * Because there is no way to sync single Rx LE
			 * put the DMA sync operation off until the end of
			 * event processing.
			 */
			rxput[port]++;
			/* Update prefetch unit if we've passed water mark. */
			if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
				msk_rxput(sc_if);
				rxput[port] = 0;
			}
			break;
		case OP_TXINDEXLE:
			if (sc->msk_if[MSK_PORT_A] != NULL) {
				msk_txeof(sc->msk_if[MSK_PORT_A],
				    status & STLE_TXA1_MSKL);
			}
			if (sc->msk_if[MSK_PORT_B] != NULL) {
				msk_txeof(sc->msk_if[MSK_PORT_B],
				    ((status & STLE_TXA2_MSKL) >>
				     STLE_TXA2_SHIFTL) |
				    ((len & STLE_TXA2_MSKH) <<
				     STLE_TXA2_SHIFTH));
			}
			break;
		default:
			device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
			    control & STLE_OP_MASK);
			break;
		}
		MSK_INC(cons, MSK_STAT_RING_CNT);
		if (rxprog > sc->msk_process_limit)
			break;
	}

	sc->msk_stat_cons = cons;
	/* XXX We should sync status LEs here. See above notes. */

	if (rxput[MSK_PORT_A] > 0)
		msk_rxput(sc->msk_if[MSK_PORT_A]);
	if (rxput[MSK_PORT_B] > 0)
		msk_rxput(sc->msk_if[MSK_PORT_B]);

	return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
}
/* Legacy interrupt handler for shared interrupt. */
static void
mskc_intr(void *xsc)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if0, *sc_if1;
	struct ifnet *ifp0, *ifp1;
	uint32_t status;

	sc = xsc;
	ASSERT_SERIALIZED(&sc->msk_serializer);

	/* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
	status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
	if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 ||
	    (status & sc->msk_intrmask) == 0) {
		CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
		return;
	}

	sc_if0 = sc->msk_if[MSK_PORT_A];
	sc_if1 = sc->msk_if[MSK_PORT_B];
	ifp0 = ifp1 = NULL;
	if (sc_if0 != NULL)
		ifp0 = sc_if0->msk_ifp;
	if (sc_if1 != NULL)
		ifp1 = sc_if1->msk_ifp;

	if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
		msk_intr_phy(sc_if0);
	if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
		msk_intr_phy(sc_if1);
	if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
		msk_intr_gmac(sc_if0);
	if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
		msk_intr_gmac(sc_if1);
	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
		device_printf(sc->msk_dev, "Rx descriptor error\n");
		sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
		CSR_READ_4(sc, B0_IMSK);
	}
	if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
		device_printf(sc->msk_dev, "Tx descriptor error\n");
		sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
		CSR_READ_4(sc, B0_IMSK);
	}
	if ((status & Y2_IS_HW_ERR) != 0)
		mskc_intr_hwerr(sc);

	while (mskc_handle_events(sc) != 0)
		;
	if ((status & Y2_IS_STAT_BMU) != 0)
		CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);

	/* Reenable interrupts. */
	CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);

	if (ifp0 != NULL && (ifp0->if_flags & IFF_RUNNING) != 0 &&
	    !ifq_is_empty(&ifp0->if_snd))
		if_devstart(ifp0);
	if (ifp1 != NULL && (ifp1->if_flags & IFF_RUNNING) != 0 &&
	    !ifq_is_empty(&ifp1->if_snd))
		if_devstart(ifp1);
}
static void
msk_set_tx_stfwd(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc = sc_if->msk_softc;
	struct ifnet *ifp = sc_if->msk_ifp;

	if ((sc->msk_hw_id == CHIP_ID_YUKON_EX &&
	     sc->msk_hw_rev != CHIP_REV_YU_EX_A0) ||
	    sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_STFW_ENA);
	} else {
		if (ifp->if_mtu > ETHERMTU) {
			/* Set Tx GMAC FIFO Almost Empty Threshold. */
			CSR_WRITE_4(sc,
			    MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
			    MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
			/* Disable Store & Forward mode for Tx. */
			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
			    TX_STFW_DIS);
		} else {
			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
			    TX_STFW_ENA);
		}
	}
}
static void
msk_init(void *xsc)
{
	struct msk_if_softc *sc_if = xsc;
	struct msk_softc *sc = sc_if->msk_softc;
	struct ifnet *ifp = sc_if->msk_ifp;
	struct mii_data *mii;
	uint16_t eaddr[ETHER_ADDR_LEN / 2];
	uint16_t gmac;
	uint32_t reg;
	int error, i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc_if->msk_miibus);

	/* Cancel pending I/O and free all Rx/Tx buffers. */
	msk_stop(sc_if);

	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;
	if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
	    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
		/*
		 * In Yukon EC Ultra, TSO & checksum offload is not
		 * supported for jumbo frame.
		 */
		ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
		ifp->if_capenable &= ~IFCAP_TXCSUM;
	}

	/* GMAC Control reset. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
	    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
		    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
		    GMC_BYP_RETR_ON);
	}

	/*
	 * Initialize GMAC first such that speed/duplex/flow-control
	 * parameters are renegotiated when interface is brought up.
	 */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0);

	/* Dummy read the Interrupt Source Register. */
	CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));

	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
	/* Read all MIB Counters with Clear Mode set. */
	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
		GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);

	/* Setup Transmit Control Register. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* Setup Transmit Flow Control Register. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);

	/* Setup Transmit Parameter Register. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));

	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);

	if (sc_if->msk_framesize > MSK_MAX_FRAMELEN)
		gmac |= GM_SMOD_JUMBO_ENA;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);

	/* Set station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
		    eaddr[i]);
	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
		    eaddr[i]);

	/* Disable interrupts for counter overflows. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);

	/* Configure Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
	    sc->msk_hw_id == CHIP_ID_YUKON_EX)
		reg |= GMF_RX_OVER_ON;
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);

	/* Set receive filter. */
	msk_rxfilter(sc_if);

	if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
		/* Clear flush mask - HW bug. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
	} else {
		/* Flush Rx MAC FIFO on any flow control or error. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
		    GMR_FS_ANY_ERR);
	}

	/*
	 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
	 * due to hardware hang on receipt of pause frames.
	 */
	reg = RX_GMF_FL_THR_DEF + 1;
	/* Another magic for Yukon FE+ - From Linux. */
	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
		reg = 0x178;
	CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);
	/* Configure Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);

	/* Configure hardware VLAN tag insertion/stripping. */
	msk_setvlan(sc_if, ifp);

	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
		/* Set Rx Pause threshold. */
		CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
		    MSK_ECU_LLPP);
		CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
		    MSK_ECU_ULPP);
		/* Configure store-and-forward for Tx. */
		msk_set_tx_stfwd(sc_if);
	}

	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
		/* Disable dynamic watermark - from Linux. */
		reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
		reg &= ~0x03;
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
	}

	/*
	 * Disable Force Sync bit and Alloc bit in Tx RAM interface
	 * arbiter as we don't use Sync Tx queue.
	 */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
	/* Enable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);

	/* Setup RAM buffer. */
	msk_set_rambuffer(sc_if);

	/* Disable Tx sync Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);

	/* Setup Tx Queue Bus Memory Interface. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC_U:
		if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
			/* Fix for Yukon-EC Ultra: set BMU FIFO level */
			CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
			    MSK_ECU_TXFF_LEV);
		}
		break;
	case CHIP_ID_YUKON_EX:
		/*
		 * Yukon Extreme seems to have silicon bug for
		 * automatic Tx checksum calculation capability.
		 */
		if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) {
			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
			    F_TX_CHK_AUTO_OFF);
		}
		break;
	}

	/* Setup Rx Queue Bus Memory Interface. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
	    sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
		/* MAC Rx RAM Read is controlled by hardware. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
	}

	msk_set_prefetch(sc, sc_if->msk_txq,
	    sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
	msk_init_tx_ring(sc_if);

	/* Disable Rx checksum offload and RSS hash. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);

	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
		msk_set_prefetch(sc, sc_if->msk_rxq,
		    sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
		    MSK_JUMBO_RX_RING_CNT - 1);
		error = msk_init_jumbo_rx_ring(sc_if);
	} else {
		msk_set_prefetch(sc, sc_if->msk_rxq,
		    sc_if->msk_rdata.msk_rx_ring_paddr,
		    MSK_RX_RING_CNT - 1);
		error = msk_init_rx_ring(sc_if);
	}
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "initialization failed: no memory for Rx buffers\n");
		msk_stop(sc_if);
		return;
	}

	if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
	    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
		/* Disable flushing of non-ASF packets. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    GMF_RX_MACSEC_FLUSH_OFF);
	}

	/* Configure interrupt handling. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask |= Y2_IS_PORT_A;
		sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask |= Y2_IS_PORT_B;
		sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);

	sc_if->msk_link = 0;
	mii_mediachg(mii);

	mskc_set_imtimer(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
}
static void
msk_set_rambuffer(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	int ltpp, utpp;

	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		return;

	sc = sc_if->msk_softc;

	/* Setup Rx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
	    sc->msk_rxqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);

	utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
	ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
	if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
		ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
	/* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */

	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));

	/* Setup Tx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
	    sc->msk_txqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	/* Enable Store & Forward for Tx side. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
}
static void
msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
    uint32_t count)
{
	/* Reset the prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_CLR);
	/* Set LE base address. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
	    MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
	    MSK_ADDR_HI(addr));
	/* Set the list last index. */
	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
	    count);
	/* Turn on prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_OP_ON);
	/* Dummy read to ensure write. */
	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
}
static void
msk_stop(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc = sc_if->msk_softc;
	struct ifnet *ifp = sc_if->msk_ifp;
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
	struct msk_rxdesc *jrxd;
	uint32_t val;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc_if->msk_tick_ch);

	/* Disable interrupts. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask &= ~Y2_IS_PORT_A;
		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask &= ~Y2_IS_PORT_B;
		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);

	/* Disable Tx/Rx MAC. */
	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
	/* Read again to ensure writing. */
	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
			    BMU_STOP);
			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
		} else {
			break;
		}
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
	    RB_RST_SET | RB_DIS_OP_MD);

	/* Disable all GMAC interrupt. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
	/* Disable PHY interrupt. */
	msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);

	/* Disable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);

	/* Reset the PCI FIFO of the async Tx queue */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch units. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);

	/* Reset the RAM Buffer async Tx queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);

	/* Reset Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	/* Set Pause Off. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
	/*
	 * The Rx Stop command will not work for Yukon-2 if the BMU does not
	 * reach the end of packet and since we can't make sure that we have
	 * incoming data, we must reset the BMU while it is not during a DMA
	 * transfer. Since it is possible that the Rx path is still active,
	 * the Rx RAM buffer will be stopped first, so any possible incoming
	 * data will not trigger a DMA. After the RAM buffer is stopped, the
	 * BMU is polled until any DMA in progress is ended and only then it
	 * will be reset.
	 */

	/* Disable the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);
	/* Reset the Rx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	/* Reset the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
	/* Reset Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);

	/* Free Rx and Tx mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		if (jrxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap);
			m_freem(jrxd->rx_m);
			jrxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/*
	 * Mark the interface down.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	sc_if->msk_link = 0;
}
static int
mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS)
{
	return sysctl_int_range(oidp, arg1, arg2, req,
	    MSK_PROC_MIN, MSK_PROC_MAX);
}
static int
mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc = arg1;
	struct lwkt_serialize *serializer = &sc->msk_serializer;
	int error, v;

	lwkt_serialize_enter(serializer);

	v = sc->msk_intr_rate;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v < 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->msk_intr_rate != v) {
		int flag = 0, i;

		sc->msk_intr_rate = v;
		for (i = 0; i < 2; ++i) {
			if (sc->msk_if[i] != NULL) {
				flag |= sc->msk_if[i]->
				    arpcom.ac_if.if_flags & IFF_RUNNING;
			}
		}
		if (flag)
			mskc_set_imtimer(sc);
	}
back:
	lwkt_serialize_exit(serializer);
	return error;
}
static int
msk_dmamem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
{
	struct msk_if_softc *sc_if = device_get_softc(dev);
	bus_dmamem_t dmem;
	int error;

	error = bus_dmamem_coherent(sc_if->msk_cdata.msk_parent_tag,
	    MSK_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(dev, "can't create coherent DMA memory\n");
		return error;
	}

	*dtag = dmem.dmem_tag;
	*dmap = dmem.dmem_map;
	*addr = dmem.dmem_addr;
	*paddr = dmem.dmem_busaddr;

	return 0;
}
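/*
 * Added note: msk_dmamem_create() is a thin wrapper around
 * bus_dmamem_coherent(9); callers such as the Tx/Rx ring setup in
 * msk_txrx_dma_alloc() get back the tag, map, KVA and bus address of one
 * coherent allocation, and later hand the same tag/addr/map triple to
 * msk_dmamem_destroy() when the rings are torn down.
 */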
static void
msk_dmamem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
	if (dtag != NULL) {
		bus_dmamap_unload(dtag, dmap);
		bus_dmamem_free(dtag, addr, dmap);
		bus_dma_tag_destroy(dtag);
	}
}
static void
mskc_set_imtimer(struct msk_softc *sc)
{
	if (sc->msk_intr_rate > 0) {
		/*
		 * XXX myk(4) seems to use 125MHz for EC/FE/XL
		 * and 78.125MHz for rest of chip types
		 */
		CSR_WRITE_4(sc, B2_IRQM_INI,
		    MSK_USECS(sc, 1000000 / sc->msk_intr_rate));
		CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
		CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_START);
	} else {
		CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_STOP);
	}
}
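/*
 * Worked example for the interrupt moderation setup above (added note):
 * with msk_intr_rate set to 10000 via the sysctl handler, the initial count
 * written to B2_IRQM_INI corresponds to 1000000 / 10000 = 100 microseconds,
 * which MSK_USECS() scales by the core clock (125MHz or 78.125MHz depending
 * on the chip, per the XXX note), so at most roughly 10000 moderated
 * interrupts are delivered per second.
 */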