cxgbe/t4_tom: Read the chip's DDP page sizes and save them in a
[freebsd-src.git] / sys / arm / lpc / if_lpe.c
blob45ca363528c52f146bc23b4e20951c693a192647
1 /*-
2 * Copyright (c) 2011 Jakub Wojciech Klama <jceel@FreeBSD.org>
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
31 #include <sys/endian.h>
32 #include <sys/systm.h>
33 #include <sys/sockio.h>
34 #include <sys/mbuf.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
37 #include <sys/module.h>
38 #include <sys/lock.h>
39 #include <sys/mutex.h>
40 #include <sys/rman.h>
41 #include <sys/bus.h>
42 #include <sys/socket.h>
43 #include <machine/bus.h>
44 #include <machine/intr.h>
46 #include <net/if.h>
47 #include <net/if_arp.h>
48 #include <net/ethernet.h>
49 #include <net/if_dl.h>
50 #include <net/if_media.h>
51 #include <net/if_types.h>
52 #include <net/if_var.h>
54 #include <net/bpf.h>
56 #include <dev/ofw/ofw_bus.h>
57 #include <dev/ofw/ofw_bus_subr.h>
59 #include <dev/mii/mii.h>
60 #include <dev/mii/miivar.h>
62 #include <arm/lpc/lpcreg.h>
63 #include <arm/lpc/lpcvar.h>
64 #include <arm/lpc/if_lpereg.h>
66 #include "miibus_if.h"
/*
 * debugf(fmt, ...): debug printf prefixed with the calling function's
 * name.  Compiles to nothing unless DEBUG is defined.
 */
#ifdef DEBUG
#define debugf(fmt, args...) do { printf("%s(): ", __func__);	\
    printf(fmt,##args); } while (0)
#else
#define debugf(fmt, args...)
#endif
75 struct lpe_dmamap_arg {
76 bus_addr_t lpe_dma_busaddr;
79 struct lpe_rxdesc {
80 struct mbuf * lpe_rxdesc_mbuf;
81 bus_dmamap_t lpe_rxdesc_dmamap;
84 struct lpe_txdesc {
85 int lpe_txdesc_first;
86 struct mbuf * lpe_txdesc_mbuf;
87 bus_dmamap_t lpe_txdesc_dmamap;
90 struct lpe_chain_data {
91 bus_dma_tag_t lpe_parent_tag;
92 bus_dma_tag_t lpe_tx_ring_tag;
93 bus_dmamap_t lpe_tx_ring_map;
94 bus_dma_tag_t lpe_tx_status_tag;
95 bus_dmamap_t lpe_tx_status_map;
96 bus_dma_tag_t lpe_tx_buf_tag;
97 bus_dma_tag_t lpe_rx_ring_tag;
98 bus_dmamap_t lpe_rx_ring_map;
99 bus_dma_tag_t lpe_rx_status_tag;
100 bus_dmamap_t lpe_rx_status_map;
101 bus_dma_tag_t lpe_rx_buf_tag;
102 struct lpe_rxdesc lpe_rx_desc[LPE_RXDESC_NUM];
103 struct lpe_txdesc lpe_tx_desc[LPE_TXDESC_NUM];
104 int lpe_tx_prod;
105 int lpe_tx_last;
106 int lpe_tx_used;
109 struct lpe_ring_data {
110 struct lpe_hwdesc * lpe_rx_ring;
111 struct lpe_hwstatus * lpe_rx_status;
112 bus_addr_t lpe_rx_ring_phys;
113 bus_addr_t lpe_rx_status_phys;
114 struct lpe_hwdesc * lpe_tx_ring;
115 struct lpe_hwstatus * lpe_tx_status;
116 bus_addr_t lpe_tx_ring_phys;
117 bus_addr_t lpe_tx_status_phys;
120 struct lpe_softc {
121 struct ifnet * lpe_ifp;
122 struct mtx lpe_mtx;
123 phandle_t lpe_ofw;
124 device_t lpe_dev;
125 device_t lpe_miibus;
126 uint8_t lpe_enaddr[6];
127 struct resource * lpe_mem_res;
128 struct resource * lpe_irq_res;
129 void * lpe_intrhand;
130 bus_space_tag_t lpe_bst;
131 bus_space_handle_t lpe_bsh;
132 #define LPE_FLAG_LINK (1 << 0)
133 uint32_t lpe_flags;
134 int lpe_watchdog_timer;
135 struct callout lpe_tick;
136 struct lpe_chain_data lpe_cdata;
137 struct lpe_ring_data lpe_rdata;
140 static int lpe_probe(device_t);
141 static int lpe_attach(device_t);
142 static int lpe_detach(device_t);
143 static int lpe_miibus_readreg(device_t, int, int);
144 static int lpe_miibus_writereg(device_t, int, int, int);
145 static void lpe_miibus_statchg(device_t);
147 static void lpe_reset(struct lpe_softc *);
148 static void lpe_init(void *);
149 static void lpe_init_locked(struct lpe_softc *);
150 static void lpe_start(struct ifnet *);
151 static void lpe_start_locked(struct ifnet *);
152 static void lpe_stop(struct lpe_softc *);
153 static void lpe_stop_locked(struct lpe_softc *);
154 static int lpe_ioctl(struct ifnet *, u_long, caddr_t);
155 static void lpe_set_rxmode(struct lpe_softc *);
156 static void lpe_set_rxfilter(struct lpe_softc *);
157 static void lpe_intr(void *);
158 static void lpe_rxintr(struct lpe_softc *);
159 static void lpe_txintr(struct lpe_softc *);
160 static void lpe_tick(void *);
161 static void lpe_watchdog(struct lpe_softc *);
162 static int lpe_encap(struct lpe_softc *, struct mbuf **);
163 static int lpe_dma_alloc(struct lpe_softc *);
164 static int lpe_dma_alloc_rx(struct lpe_softc *);
165 static int lpe_dma_alloc_tx(struct lpe_softc *);
166 static int lpe_init_rx(struct lpe_softc *);
167 static int lpe_init_rxbuf(struct lpe_softc *, int);
168 static void lpe_discard_rxbuf(struct lpe_softc *, int);
169 static void lpe_dmamap_cb(void *, bus_dma_segment_t *, int, int);
170 static int lpe_ifmedia_upd(struct ifnet *);
171 static void lpe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
173 #define lpe_lock(_sc) mtx_lock(&(_sc)->lpe_mtx)
174 #define lpe_unlock(_sc) mtx_unlock(&(_sc)->lpe_mtx)
175 #define lpe_lock_assert(_sc) mtx_assert(&(_sc)->lpe_mtx, MA_OWNED)
177 #define lpe_read_4(_sc, _reg) \
178 bus_space_read_4((_sc)->lpe_bst, (_sc)->lpe_bsh, (_reg))
179 #define lpe_write_4(_sc, _reg, _val) \
180 bus_space_write_4((_sc)->lpe_bst, (_sc)->lpe_bsh, (_reg), (_val))
182 #define LPE_HWDESC_RXERRS (LPE_HWDESC_CRCERROR | LPE_HWDESC_SYMBOLERROR | \
183 LPE_HWDESC_LENGTHERROR | LPE_HWDESC_ALIGNERROR | LPE_HWDESC_OVERRUN | \
184 LPE_HWDESC_RXNODESCR)
186 #define LPE_HWDESC_TXERRS (LPE_HWDESC_EXCDEFER | LPE_HWDESC_EXCCOLL | \
187 LPE_HWDESC_LATECOLL | LPE_HWDESC_UNDERRUN | LPE_HWDESC_TXNODESCR)
189 static int
190 lpe_probe(device_t dev)
193 if (!ofw_bus_status_okay(dev))
194 return (ENXIO);
196 if (!ofw_bus_is_compatible(dev, "lpc,ethernet"))
197 return (ENXIO);
199 device_set_desc(dev, "LPC32x0 10/100 Ethernet");
200 return (BUS_PROBE_DEFAULT);
203 static int
204 lpe_attach(device_t dev)
206 struct lpe_softc *sc = device_get_softc(dev);
207 struct ifnet *ifp;
208 int rid, i;
209 uint32_t val;
211 sc->lpe_dev = dev;
212 sc->lpe_ofw = ofw_bus_get_node(dev);
214 i = OF_getprop(sc->lpe_ofw, "local-mac-address", (void *)&sc->lpe_enaddr, 6);
215 if (i != 6) {
216 sc->lpe_enaddr[0] = 0x00;
217 sc->lpe_enaddr[1] = 0x11;
218 sc->lpe_enaddr[2] = 0x22;
219 sc->lpe_enaddr[3] = 0x33;
220 sc->lpe_enaddr[4] = 0x44;
221 sc->lpe_enaddr[5] = 0x55;
224 mtx_init(&sc->lpe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
225 MTX_DEF);
227 callout_init_mtx(&sc->lpe_tick, &sc->lpe_mtx, 0);
229 rid = 0;
230 sc->lpe_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
231 RF_ACTIVE);
232 if (!sc->lpe_mem_res) {
233 device_printf(dev, "cannot allocate memory window\n");
234 goto fail;
237 sc->lpe_bst = rman_get_bustag(sc->lpe_mem_res);
238 sc->lpe_bsh = rman_get_bushandle(sc->lpe_mem_res);
240 rid = 0;
241 sc->lpe_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
242 RF_ACTIVE);
243 if (!sc->lpe_irq_res) {
244 device_printf(dev, "cannot allocate interrupt\n");
245 goto fail;
248 sc->lpe_ifp = if_alloc(IFT_ETHER);
249 if (!sc->lpe_ifp) {
250 device_printf(dev, "cannot allocated ifnet\n");
251 goto fail;
254 ifp = sc->lpe_ifp;
256 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
257 ifp->if_softc = sc;
258 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
259 ifp->if_start = lpe_start;
260 ifp->if_ioctl = lpe_ioctl;
261 ifp->if_init = lpe_init;
262 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
263 ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
264 IFQ_SET_READY(&ifp->if_snd);
266 ether_ifattach(ifp, sc->lpe_enaddr);
268 if (bus_setup_intr(dev, sc->lpe_irq_res, INTR_TYPE_NET, NULL,
269 lpe_intr, sc, &sc->lpe_intrhand)) {
270 device_printf(dev, "cannot establish interrupt handler\n");
271 ether_ifdetach(ifp);
272 goto fail;
275 /* Enable Ethernet clock */
276 lpc_pwr_write(dev, LPC_CLKPWR_MACCLK_CTRL,
277 LPC_CLKPWR_MACCLK_CTRL_REG |
278 LPC_CLKPWR_MACCLK_CTRL_SLAVE |
279 LPC_CLKPWR_MACCLK_CTRL_MASTER |
280 LPC_CLKPWR_MACCLK_CTRL_HDWINF(3));
282 /* Reset chip */
283 lpe_reset(sc);
285 /* Initialize MII */
286 val = lpe_read_4(sc, LPE_COMMAND);
287 lpe_write_4(sc, LPE_COMMAND, val | LPE_COMMAND_RMII);
289 if (mii_attach(dev, &sc->lpe_miibus, ifp, lpe_ifmedia_upd,
290 lpe_ifmedia_sts, BMSR_DEFCAPMASK, 0x01,
291 MII_OFFSET_ANY, 0)) {
292 device_printf(dev, "cannot find PHY\n");
293 goto fail;
296 lpe_dma_alloc(sc);
298 return (0);
300 fail:
301 if (sc->lpe_ifp)
302 if_free(sc->lpe_ifp);
303 if (sc->lpe_intrhand)
304 bus_teardown_intr(dev, sc->lpe_irq_res, sc->lpe_intrhand);
305 if (sc->lpe_irq_res)
306 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lpe_irq_res);
307 if (sc->lpe_mem_res)
308 bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lpe_mem_res);
309 return (ENXIO);
312 static int
313 lpe_detach(device_t dev)
315 struct lpe_softc *sc = device_get_softc(dev);
317 lpe_stop(sc);
319 if_free(sc->lpe_ifp);
320 bus_teardown_intr(dev, sc->lpe_irq_res, sc->lpe_intrhand);
321 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lpe_irq_res);
322 bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lpe_mem_res);
324 return (0);
327 static int
328 lpe_miibus_readreg(device_t dev, int phy, int reg)
330 struct lpe_softc *sc = device_get_softc(dev);
331 uint32_t val;
332 int result;
334 lpe_write_4(sc, LPE_MCMD, LPE_MCMD_READ);
335 lpe_write_4(sc, LPE_MADR,
336 (reg & LPE_MADR_REGMASK) << LPE_MADR_REGSHIFT |
337 (phy & LPE_MADR_PHYMASK) << LPE_MADR_PHYSHIFT);
339 val = lpe_read_4(sc, LPE_MIND);
341 /* Wait until request is completed */
342 while (val & LPE_MIND_BUSY) {
343 val = lpe_read_4(sc, LPE_MIND);
344 DELAY(10);
347 if (val & LPE_MIND_INVALID)
348 return (0);
350 lpe_write_4(sc, LPE_MCMD, 0);
351 result = (lpe_read_4(sc, LPE_MRDD) & LPE_MRDD_DATAMASK);
352 debugf("phy=%d reg=%d result=0x%04x\n", phy, reg, result);
354 return (result);
357 static int
358 lpe_miibus_writereg(device_t dev, int phy, int reg, int data)
360 struct lpe_softc *sc = device_get_softc(dev);
361 uint32_t val;
363 debugf("phy=%d reg=%d data=0x%04x\n", phy, reg, data);
365 lpe_write_4(sc, LPE_MCMD, LPE_MCMD_WRITE);
366 lpe_write_4(sc, LPE_MADR,
367 (reg & LPE_MADR_REGMASK) << LPE_MADR_REGSHIFT |
368 (phy & LPE_MADR_PHYMASK) << LPE_MADR_PHYSHIFT);
370 lpe_write_4(sc, LPE_MWTD, (data & LPE_MWTD_DATAMASK));
372 val = lpe_read_4(sc, LPE_MIND);
374 /* Wait until request is completed */
375 while (val & LPE_MIND_BUSY) {
376 val = lpe_read_4(sc, LPE_MIND);
377 DELAY(10);
380 return (0);
383 static void
384 lpe_miibus_statchg(device_t dev)
386 struct lpe_softc *sc = device_get_softc(dev);
387 struct mii_data *mii = device_get_softc(sc->lpe_miibus);
389 lpe_lock(sc);
391 if ((mii->mii_media_status & IFM_ACTIVE) &&
392 (mii->mii_media_status & IFM_AVALID))
393 sc->lpe_flags |= LPE_FLAG_LINK;
394 else
395 sc->lpe_flags &= ~LPE_FLAG_LINK;
397 lpe_unlock(sc);
400 static void
401 lpe_reset(struct lpe_softc *sc)
403 uint32_t mac1;
405 /* Enter soft reset mode */
406 mac1 = lpe_read_4(sc, LPE_MAC1);
407 lpe_write_4(sc, LPE_MAC1, mac1 | LPE_MAC1_SOFTRESET | LPE_MAC1_RESETTX |
408 LPE_MAC1_RESETMCSTX | LPE_MAC1_RESETRX | LPE_MAC1_RESETMCSRX);
410 /* Reset registers, Tx path and Rx path */
411 lpe_write_4(sc, LPE_COMMAND, LPE_COMMAND_REGRESET |
412 LPE_COMMAND_TXRESET | LPE_COMMAND_RXRESET);
414 /* Set station address */
415 lpe_write_4(sc, LPE_SA2, sc->lpe_enaddr[1] << 8 | sc->lpe_enaddr[0]);
416 lpe_write_4(sc, LPE_SA1, sc->lpe_enaddr[3] << 8 | sc->lpe_enaddr[2]);
417 lpe_write_4(sc, LPE_SA0, sc->lpe_enaddr[5] << 8 | sc->lpe_enaddr[4]);
419 /* Leave soft reset mode */
420 mac1 = lpe_read_4(sc, LPE_MAC1);
421 lpe_write_4(sc, LPE_MAC1, mac1 & ~(LPE_MAC1_SOFTRESET | LPE_MAC1_RESETTX |
422 LPE_MAC1_RESETMCSTX | LPE_MAC1_RESETRX | LPE_MAC1_RESETMCSRX));
/* if_init entry point: take the lock and run the locked initializer. */
static void
lpe_init(void *arg)
{
	struct lpe_softc *sc = arg;

	lpe_lock(sc);
	lpe_init_locked(sc);
	lpe_unlock(sc);
}
435 static void
436 lpe_init_locked(struct lpe_softc *sc)
438 struct ifnet *ifp = sc->lpe_ifp;
439 uint32_t cmd, mac1;
441 lpe_lock_assert(sc);
443 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
444 return;
446 /* Enable Tx and Rx */
447 cmd = lpe_read_4(sc, LPE_COMMAND);
448 lpe_write_4(sc, LPE_COMMAND, cmd | LPE_COMMAND_RXENABLE |
449 LPE_COMMAND_TXENABLE | LPE_COMMAND_PASSRUNTFRAME);
451 /* Enable receive */
452 mac1 = lpe_read_4(sc, LPE_MAC1);
453 lpe_write_4(sc, LPE_MAC1, /*mac1 |*/ LPE_MAC1_RXENABLE | LPE_MAC1_PASSALL);
455 lpe_write_4(sc, LPE_MAC2, LPE_MAC2_CRCENABLE | LPE_MAC2_PADCRCENABLE |
456 LPE_MAC2_FULLDUPLEX);
458 lpe_write_4(sc, LPE_MCFG, LPE_MCFG_CLKSEL(7));
460 /* Set up Rx filter */
461 lpe_set_rxmode(sc);
463 /* Enable interrupts */
464 lpe_write_4(sc, LPE_INTENABLE, LPE_INT_RXOVERRUN | LPE_INT_RXERROR |
465 LPE_INT_RXFINISH | LPE_INT_RXDONE | LPE_INT_TXUNDERRUN |
466 LPE_INT_TXERROR | LPE_INT_TXFINISH | LPE_INT_TXDONE);
468 sc->lpe_cdata.lpe_tx_prod = 0;
469 sc->lpe_cdata.lpe_tx_last = 0;
470 sc->lpe_cdata.lpe_tx_used = 0;
472 lpe_init_rx(sc);
474 /* Initialize Rx packet and status descriptor heads */
475 lpe_write_4(sc, LPE_RXDESC, sc->lpe_rdata.lpe_rx_ring_phys);
476 lpe_write_4(sc, LPE_RXSTATUS, sc->lpe_rdata.lpe_rx_status_phys);
477 lpe_write_4(sc, LPE_RXDESC_NUMBER, LPE_RXDESC_NUM - 1);
478 lpe_write_4(sc, LPE_RXDESC_CONS, 0);
480 /* Initialize Tx packet and status descriptor heads */
481 lpe_write_4(sc, LPE_TXDESC, sc->lpe_rdata.lpe_tx_ring_phys);
482 lpe_write_4(sc, LPE_TXSTATUS, sc->lpe_rdata.lpe_tx_status_phys);
483 lpe_write_4(sc, LPE_TXDESC_NUMBER, LPE_TXDESC_NUM - 1);
484 lpe_write_4(sc, LPE_TXDESC_PROD, 0);
486 ifp->if_drv_flags |= IFF_DRV_RUNNING;
487 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
489 callout_reset(&sc->lpe_tick, hz, lpe_tick, sc);
492 static void
493 lpe_start(struct ifnet *ifp)
495 struct lpe_softc *sc = (struct lpe_softc *)ifp->if_softc;
497 lpe_lock(sc);
498 lpe_start_locked(ifp);
499 lpe_unlock(sc);
502 static void
503 lpe_start_locked(struct ifnet *ifp)
505 struct lpe_softc *sc = (struct lpe_softc *)ifp->if_softc;
506 struct mbuf *m_head;
507 int encap = 0;
509 lpe_lock_assert(sc);
511 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
512 if (lpe_read_4(sc, LPE_TXDESC_PROD) ==
513 lpe_read_4(sc, LPE_TXDESC_CONS) - 5)
514 break;
516 /* Dequeue first packet */
517 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
518 if (!m_head)
519 break;
521 lpe_encap(sc, &m_head);
523 encap++;
526 /* Submit new descriptor list */
527 if (encap) {
528 lpe_write_4(sc, LPE_TXDESC_PROD, sc->lpe_cdata.lpe_tx_prod);
529 sc->lpe_watchdog_timer = 5;
534 static int
535 lpe_encap(struct lpe_softc *sc, struct mbuf **m_head)
537 struct lpe_txdesc *txd;
538 struct lpe_hwdesc *hwd;
539 bus_dma_segment_t segs[LPE_MAXFRAGS];
540 int i, err, nsegs, prod;
542 lpe_lock_assert(sc);
543 M_ASSERTPKTHDR((*m_head));
545 prod = sc->lpe_cdata.lpe_tx_prod;
546 txd = &sc->lpe_cdata.lpe_tx_desc[prod];
548 debugf("starting with prod=%d\n", prod);
550 err = bus_dmamap_load_mbuf_sg(sc->lpe_cdata.lpe_tx_buf_tag,
551 txd->lpe_txdesc_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
553 if (err)
554 return (err);
556 if (nsegs == 0) {
557 m_freem(*m_head);
558 *m_head = NULL;
559 return (EIO);
562 bus_dmamap_sync(sc->lpe_cdata.lpe_tx_buf_tag, txd->lpe_txdesc_dmamap,
563 BUS_DMASYNC_PREREAD);
564 bus_dmamap_sync(sc->lpe_cdata.lpe_tx_ring_tag, sc->lpe_cdata.lpe_tx_ring_map,
565 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
567 txd->lpe_txdesc_first = 1;
568 txd->lpe_txdesc_mbuf = *m_head;
570 for (i = 0; i < nsegs; i++) {
571 hwd = &sc->lpe_rdata.lpe_tx_ring[prod];
572 hwd->lhr_data = segs[i].ds_addr;
573 hwd->lhr_control = segs[i].ds_len - 1;
575 if (i == nsegs - 1) {
576 hwd->lhr_control |= LPE_HWDESC_LASTFLAG;
577 hwd->lhr_control |= LPE_HWDESC_INTERRUPT;
578 hwd->lhr_control |= LPE_HWDESC_CRC;
579 hwd->lhr_control |= LPE_HWDESC_PAD;
582 LPE_INC(prod, LPE_TXDESC_NUM);
585 bus_dmamap_sync(sc->lpe_cdata.lpe_tx_ring_tag, sc->lpe_cdata.lpe_tx_ring_map,
586 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
588 sc->lpe_cdata.lpe_tx_used += nsegs;
589 sc->lpe_cdata.lpe_tx_prod = prod;
591 return (0);
/* Stop the interface, acquiring the softc lock first. */
static void
lpe_stop(struct lpe_softc *sc)
{

	lpe_lock(sc);
	lpe_stop_locked(sc);
	lpe_unlock(sc);
}
602 static void
603 lpe_stop_locked(struct lpe_softc *sc)
605 lpe_lock_assert(sc);
607 callout_stop(&sc->lpe_tick);
609 /* Disable interrupts */
610 lpe_write_4(sc, LPE_INTCLEAR, 0xffffffff);
612 /* Stop EMAC */
613 lpe_write_4(sc, LPE_MAC1, 0);
614 lpe_write_4(sc, LPE_MAC2, 0);
615 lpe_write_4(sc, LPE_COMMAND, 0);
617 sc->lpe_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
618 sc->lpe_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
621 static int
622 lpe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
624 struct lpe_softc *sc = ifp->if_softc;
625 struct mii_data *mii = device_get_softc(sc->lpe_miibus);
626 struct ifreq *ifr = (struct ifreq *)data;
627 int err = 0;
629 switch (cmd) {
630 case SIOCSIFFLAGS:
631 lpe_lock(sc);
632 if (ifp->if_flags & IFF_UP) {
633 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
634 lpe_set_rxmode(sc);
635 lpe_set_rxfilter(sc);
636 } else
637 lpe_init_locked(sc);
638 } else
639 lpe_stop(sc);
640 lpe_unlock(sc);
641 break;
642 case SIOCADDMULTI:
643 case SIOCDELMULTI:
644 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
645 lpe_lock(sc);
646 lpe_set_rxfilter(sc);
647 lpe_unlock(sc);
649 break;
650 case SIOCGIFMEDIA:
651 case SIOCSIFMEDIA:
652 err = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
653 break;
654 default:
655 err = ether_ioctl(ifp, cmd, data);
656 break;
659 return (err);
662 static void lpe_set_rxmode(struct lpe_softc *sc)
664 struct ifnet *ifp = sc->lpe_ifp;
665 uint32_t rxfilt;
667 rxfilt = LPE_RXFILTER_UNIHASH | LPE_RXFILTER_MULTIHASH | LPE_RXFILTER_PERFECT;
669 if (ifp->if_flags & IFF_BROADCAST)
670 rxfilt |= LPE_RXFILTER_BROADCAST;
672 if (ifp->if_flags & IFF_PROMISC)
673 rxfilt |= LPE_RXFILTER_UNICAST | LPE_RXFILTER_MULTICAST;
675 if (ifp->if_flags & IFF_ALLMULTI)
676 rxfilt |= LPE_RXFILTER_MULTICAST;
678 lpe_write_4(sc, LPE_RXFILTER_CTRL, rxfilt);
681 static void lpe_set_rxfilter(struct lpe_softc *sc)
683 struct ifnet *ifp = sc->lpe_ifp;
684 struct ifmultiaddr *ifma;
685 int index;
686 uint32_t hashl, hashh;
688 hashl = 0;
689 hashh = 0;
691 if_maddr_rlock(ifp);
692 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
693 if (ifma->ifma_addr->sa_family != AF_LINK)
694 continue;
696 index = ether_crc32_be(LLADDR((struct sockaddr_dl *)
697 ifma->ifma_addr), ETHER_ADDR_LEN) >> 23 & 0x3f;
699 if (index > 31)
700 hashh |= (1 << (index - 32));
701 else
702 hashl |= (1 << index);
704 if_maddr_runlock(ifp);
706 /* Program new hash filter */
707 lpe_write_4(sc, LPE_HASHFILTER_L, hashl);
708 lpe_write_4(sc, LPE_HASHFILTER_H, hashh);
711 static void
712 lpe_intr(void *arg)
714 struct lpe_softc *sc = (struct lpe_softc *)arg;
715 uint32_t intstatus;
717 debugf("status=0x%08x\n", lpe_read_4(sc, LPE_INTSTATUS));
719 lpe_lock(sc);
721 while ((intstatus = lpe_read_4(sc, LPE_INTSTATUS))) {
722 if (intstatus & LPE_INT_RXDONE)
723 lpe_rxintr(sc);
725 if (intstatus & LPE_INT_TXDONE)
726 lpe_txintr(sc);
728 lpe_write_4(sc, LPE_INTCLEAR, 0xffff);
731 lpe_unlock(sc);
734 static void
735 lpe_rxintr(struct lpe_softc *sc)
737 struct ifnet *ifp = sc->lpe_ifp;
738 struct lpe_hwdesc *hwd;
739 struct lpe_hwstatus *hws;
740 struct lpe_rxdesc *rxd;
741 struct mbuf *m;
742 int prod, cons;
744 for (;;) {
745 prod = lpe_read_4(sc, LPE_RXDESC_PROD);
746 cons = lpe_read_4(sc, LPE_RXDESC_CONS);
748 if (prod == cons)
749 break;
751 rxd = &sc->lpe_cdata.lpe_rx_desc[cons];
752 hwd = &sc->lpe_rdata.lpe_rx_ring[cons];
753 hws = &sc->lpe_rdata.lpe_rx_status[cons];
755 /* Check received frame for errors */
756 if (hws->lhs_info & LPE_HWDESC_RXERRS) {
757 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
758 lpe_discard_rxbuf(sc, cons);
759 lpe_init_rxbuf(sc, cons);
760 goto skip;
763 m = rxd->lpe_rxdesc_mbuf;
764 m->m_pkthdr.rcvif = ifp;
765 m->m_data += 2;
767 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
769 lpe_unlock(sc);
770 (*ifp->if_input)(ifp, m);
771 lpe_lock(sc);
773 lpe_init_rxbuf(sc, cons);
774 skip:
775 LPE_INC(cons, LPE_RXDESC_NUM);
776 lpe_write_4(sc, LPE_RXDESC_CONS, cons);
780 static void
781 lpe_txintr(struct lpe_softc *sc)
783 struct ifnet *ifp = sc->lpe_ifp;
784 struct lpe_hwdesc *hwd;
785 struct lpe_hwstatus *hws;
786 struct lpe_txdesc *txd;
787 int cons, last;
789 for (;;) {
790 cons = lpe_read_4(sc, LPE_TXDESC_CONS);
791 last = sc->lpe_cdata.lpe_tx_last;
793 if (cons == last)
794 break;
796 txd = &sc->lpe_cdata.lpe_tx_desc[last];
797 hwd = &sc->lpe_rdata.lpe_tx_ring[last];
798 hws = &sc->lpe_rdata.lpe_tx_status[last];
800 bus_dmamap_sync(sc->lpe_cdata.lpe_tx_buf_tag,
801 txd->lpe_txdesc_dmamap, BUS_DMASYNC_POSTWRITE);
803 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, LPE_HWDESC_COLLISIONS(hws->lhs_info));
805 if (hws->lhs_info & LPE_HWDESC_TXERRS)
806 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
807 else
808 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
810 if (txd->lpe_txdesc_first) {
811 bus_dmamap_unload(sc->lpe_cdata.lpe_tx_buf_tag,
812 txd->lpe_txdesc_dmamap);
814 m_freem(txd->lpe_txdesc_mbuf);
815 txd->lpe_txdesc_mbuf = NULL;
816 txd->lpe_txdesc_first = 0;
819 sc->lpe_cdata.lpe_tx_used--;
820 LPE_INC(sc->lpe_cdata.lpe_tx_last, LPE_TXDESC_NUM);
823 if (!sc->lpe_cdata.lpe_tx_used)
824 sc->lpe_watchdog_timer = 0;
827 static void
828 lpe_tick(void *arg)
830 struct lpe_softc *sc = (struct lpe_softc *)arg;
831 struct mii_data *mii = device_get_softc(sc->lpe_miibus);
833 lpe_lock_assert(sc);
835 mii_tick(mii);
836 lpe_watchdog(sc);
838 callout_reset(&sc->lpe_tick, hz, lpe_tick, sc);
841 static void
842 lpe_watchdog(struct lpe_softc *sc)
844 struct ifnet *ifp = sc->lpe_ifp;
846 lpe_lock_assert(sc);
848 if (sc->lpe_watchdog_timer == 0 || sc->lpe_watchdog_timer--)
849 return;
851 /* Chip has stopped responding */
852 device_printf(sc->lpe_dev, "WARNING: chip hangup, restarting...\n");
853 lpe_stop_locked(sc);
854 lpe_init_locked(sc);
856 /* Try to resend packets */
857 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
858 lpe_start_locked(ifp);
861 static int
862 lpe_dma_alloc(struct lpe_softc *sc)
864 int err;
866 /* Create parent DMA tag */
867 err = bus_dma_tag_create(
868 bus_get_dma_tag(sc->lpe_dev),
869 1, 0, /* alignment, boundary */
870 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
871 BUS_SPACE_MAXADDR, /* highaddr */
872 NULL, NULL, /* filter, filterarg */
873 BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */
874 BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsegsize, flags */
875 NULL, NULL, /* lockfunc, lockarg */
876 &sc->lpe_cdata.lpe_parent_tag);
878 if (err) {
879 device_printf(sc->lpe_dev, "cannot create parent DMA tag\n");
880 return (err);
883 err = lpe_dma_alloc_rx(sc);
884 if (err)
885 return (err);
887 err = lpe_dma_alloc_tx(sc);
888 if (err)
889 return (err);
891 return (0);
894 static int
895 lpe_dma_alloc_rx(struct lpe_softc *sc)
897 struct lpe_rxdesc *rxd;
898 struct lpe_dmamap_arg ctx;
899 int err, i;
901 /* Create tag for Rx ring */
902 err = bus_dma_tag_create(
903 sc->lpe_cdata.lpe_parent_tag,
904 LPE_DESC_ALIGN, 0, /* alignment, boundary */
905 BUS_SPACE_MAXADDR, /* lowaddr */
906 BUS_SPACE_MAXADDR, /* highaddr */
907 NULL, NULL, /* filter, filterarg */
908 LPE_RXDESC_SIZE, 1, /* maxsize, nsegments */
909 LPE_RXDESC_SIZE, 0, /* maxsegsize, flags */
910 NULL, NULL, /* lockfunc, lockarg */
911 &sc->lpe_cdata.lpe_rx_ring_tag);
913 if (err) {
914 device_printf(sc->lpe_dev, "cannot create Rx ring DMA tag\n");
915 goto fail;
918 /* Create tag for Rx status ring */
919 err = bus_dma_tag_create(
920 sc->lpe_cdata.lpe_parent_tag,
921 LPE_DESC_ALIGN, 0, /* alignment, boundary */
922 BUS_SPACE_MAXADDR, /* lowaddr */
923 BUS_SPACE_MAXADDR, /* highaddr */
924 NULL, NULL, /* filter, filterarg */
925 LPE_RXSTATUS_SIZE, 1, /* maxsize, nsegments */
926 LPE_RXSTATUS_SIZE, 0, /* maxsegsize, flags */
927 NULL, NULL, /* lockfunc, lockarg */
928 &sc->lpe_cdata.lpe_rx_status_tag);
930 if (err) {
931 device_printf(sc->lpe_dev, "cannot create Rx status ring DMA tag\n");
932 goto fail;
935 /* Create tag for Rx buffers */
936 err = bus_dma_tag_create(
937 sc->lpe_cdata.lpe_parent_tag,
938 LPE_DESC_ALIGN, 0, /* alignment, boundary */
939 BUS_SPACE_MAXADDR, /* lowaddr */
940 BUS_SPACE_MAXADDR, /* highaddr */
941 NULL, NULL, /* filter, filterarg */
942 MCLBYTES * LPE_RXDESC_NUM, /* maxsize */
943 LPE_RXDESC_NUM, /* segments */
944 MCLBYTES, 0, /* maxsegsize, flags */
945 NULL, NULL, /* lockfunc, lockarg */
946 &sc->lpe_cdata.lpe_rx_buf_tag);
948 if (err) {
949 device_printf(sc->lpe_dev, "cannot create Rx buffers DMA tag\n");
950 goto fail;
953 /* Allocate Rx DMA ring */
954 err = bus_dmamem_alloc(sc->lpe_cdata.lpe_rx_ring_tag,
955 (void **)&sc->lpe_rdata.lpe_rx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
956 BUS_DMA_ZERO, &sc->lpe_cdata.lpe_rx_ring_map);
958 err = bus_dmamap_load(sc->lpe_cdata.lpe_rx_ring_tag,
959 sc->lpe_cdata.lpe_rx_ring_map, sc->lpe_rdata.lpe_rx_ring,
960 LPE_RXDESC_SIZE, lpe_dmamap_cb, &ctx, 0);
962 sc->lpe_rdata.lpe_rx_ring_phys = ctx.lpe_dma_busaddr;
964 /* Allocate Rx status ring */
965 err = bus_dmamem_alloc(sc->lpe_cdata.lpe_rx_status_tag,
966 (void **)&sc->lpe_rdata.lpe_rx_status, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
967 BUS_DMA_ZERO, &sc->lpe_cdata.lpe_rx_status_map);
969 err = bus_dmamap_load(sc->lpe_cdata.lpe_rx_status_tag,
970 sc->lpe_cdata.lpe_rx_status_map, sc->lpe_rdata.lpe_rx_status,
971 LPE_RXDESC_SIZE, lpe_dmamap_cb, &ctx, 0);
973 sc->lpe_rdata.lpe_rx_status_phys = ctx.lpe_dma_busaddr;
976 /* Create Rx buffers DMA map */
977 for (i = 0; i < LPE_RXDESC_NUM; i++) {
978 rxd = &sc->lpe_cdata.lpe_rx_desc[i];
979 rxd->lpe_rxdesc_mbuf = NULL;
980 rxd->lpe_rxdesc_dmamap = NULL;
982 err = bus_dmamap_create(sc->lpe_cdata.lpe_rx_buf_tag, 0,
983 &rxd->lpe_rxdesc_dmamap);
985 if (err) {
986 device_printf(sc->lpe_dev, "cannot create Rx DMA map\n");
987 return (err);
991 return (0);
992 fail:
993 return (err);
996 static int
997 lpe_dma_alloc_tx(struct lpe_softc *sc)
999 struct lpe_txdesc *txd;
1000 struct lpe_dmamap_arg ctx;
1001 int err, i;
1003 /* Create tag for Tx ring */
1004 err = bus_dma_tag_create(
1005 sc->lpe_cdata.lpe_parent_tag,
1006 LPE_DESC_ALIGN, 0, /* alignment, boundary */
1007 BUS_SPACE_MAXADDR, /* lowaddr */
1008 BUS_SPACE_MAXADDR, /* highaddr */
1009 NULL, NULL, /* filter, filterarg */
1010 LPE_TXDESC_SIZE, 1, /* maxsize, nsegments */
1011 LPE_TXDESC_SIZE, 0, /* maxsegsize, flags */
1012 NULL, NULL, /* lockfunc, lockarg */
1013 &sc->lpe_cdata.lpe_tx_ring_tag);
1015 if (err) {
1016 device_printf(sc->lpe_dev, "cannot create Tx ring DMA tag\n");
1017 goto fail;
1020 /* Create tag for Tx status ring */
1021 err = bus_dma_tag_create(
1022 sc->lpe_cdata.lpe_parent_tag,
1023 LPE_DESC_ALIGN, 0, /* alignment, boundary */
1024 BUS_SPACE_MAXADDR, /* lowaddr */
1025 BUS_SPACE_MAXADDR, /* highaddr */
1026 NULL, NULL, /* filter, filterarg */
1027 LPE_TXSTATUS_SIZE, 1, /* maxsize, nsegments */
1028 LPE_TXSTATUS_SIZE, 0, /* maxsegsize, flags */
1029 NULL, NULL, /* lockfunc, lockarg */
1030 &sc->lpe_cdata.lpe_tx_status_tag);
1032 if (err) {
1033 device_printf(sc->lpe_dev, "cannot create Tx status ring DMA tag\n");
1034 goto fail;
1037 /* Create tag for Tx buffers */
1038 err = bus_dma_tag_create(
1039 sc->lpe_cdata.lpe_parent_tag,
1040 LPE_DESC_ALIGN, 0, /* alignment, boundary */
1041 BUS_SPACE_MAXADDR, /* lowaddr */
1042 BUS_SPACE_MAXADDR, /* highaddr */
1043 NULL, NULL, /* filter, filterarg */
1044 MCLBYTES * LPE_TXDESC_NUM, /* maxsize */
1045 LPE_TXDESC_NUM, /* segments */
1046 MCLBYTES, 0, /* maxsegsize, flags */
1047 NULL, NULL, /* lockfunc, lockarg */
1048 &sc->lpe_cdata.lpe_tx_buf_tag);
1050 if (err) {
1051 device_printf(sc->lpe_dev, "cannot create Tx buffers DMA tag\n");
1052 goto fail;
1055 /* Allocate Tx DMA ring */
1056 err = bus_dmamem_alloc(sc->lpe_cdata.lpe_tx_ring_tag,
1057 (void **)&sc->lpe_rdata.lpe_tx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1058 BUS_DMA_ZERO, &sc->lpe_cdata.lpe_tx_ring_map);
1060 err = bus_dmamap_load(sc->lpe_cdata.lpe_tx_ring_tag,
1061 sc->lpe_cdata.lpe_tx_ring_map, sc->lpe_rdata.lpe_tx_ring,
1062 LPE_RXDESC_SIZE, lpe_dmamap_cb, &ctx, 0);
1064 sc->lpe_rdata.lpe_tx_ring_phys = ctx.lpe_dma_busaddr;
1066 /* Allocate Tx status ring */
1067 err = bus_dmamem_alloc(sc->lpe_cdata.lpe_tx_status_tag,
1068 (void **)&sc->lpe_rdata.lpe_tx_status, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1069 BUS_DMA_ZERO, &sc->lpe_cdata.lpe_tx_status_map);
1071 err = bus_dmamap_load(sc->lpe_cdata.lpe_tx_status_tag,
1072 sc->lpe_cdata.lpe_tx_status_map, sc->lpe_rdata.lpe_tx_status,
1073 LPE_RXDESC_SIZE, lpe_dmamap_cb, &ctx, 0);
1075 sc->lpe_rdata.lpe_tx_status_phys = ctx.lpe_dma_busaddr;
1078 /* Create Tx buffers DMA map */
1079 for (i = 0; i < LPE_TXDESC_NUM; i++) {
1080 txd = &sc->lpe_cdata.lpe_tx_desc[i];
1081 txd->lpe_txdesc_mbuf = NULL;
1082 txd->lpe_txdesc_dmamap = NULL;
1083 txd->lpe_txdesc_first = 0;
1085 err = bus_dmamap_create(sc->lpe_cdata.lpe_tx_buf_tag, 0,
1086 &txd->lpe_txdesc_dmamap);
1088 if (err) {
1089 device_printf(sc->lpe_dev, "cannot create Tx DMA map\n");
1090 return (err);
1094 return (0);
1095 fail:
1096 return (err);
1099 static int
1100 lpe_init_rx(struct lpe_softc *sc)
1102 int i, err;
1104 for (i = 0; i < LPE_RXDESC_NUM; i++) {
1105 err = lpe_init_rxbuf(sc, i);
1106 if (err)
1107 return (err);
1110 return (0);
1113 static int
1114 lpe_init_rxbuf(struct lpe_softc *sc, int n)
1116 struct lpe_rxdesc *rxd;
1117 struct lpe_hwdesc *hwd;
1118 struct lpe_hwstatus *hws;
1119 struct mbuf *m;
1120 bus_dma_segment_t segs[1];
1121 int nsegs;
1123 rxd = &sc->lpe_cdata.lpe_rx_desc[n];
1124 hwd = &sc->lpe_rdata.lpe_rx_ring[n];
1125 hws = &sc->lpe_rdata.lpe_rx_status[n];
1126 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1128 if (!m) {
1129 device_printf(sc->lpe_dev, "WARNING: mbufs exhausted!\n");
1130 return (ENOBUFS);
1133 m->m_len = m->m_pkthdr.len = MCLBYTES;
1135 bus_dmamap_unload(sc->lpe_cdata.lpe_rx_buf_tag, rxd->lpe_rxdesc_dmamap);
1137 if (bus_dmamap_load_mbuf_sg(sc->lpe_cdata.lpe_rx_buf_tag,
1138 rxd->lpe_rxdesc_dmamap, m, segs, &nsegs, 0)) {
1139 m_freem(m);
1140 return (ENOBUFS);
1143 bus_dmamap_sync(sc->lpe_cdata.lpe_rx_buf_tag, rxd->lpe_rxdesc_dmamap,
1144 BUS_DMASYNC_PREREAD);
1146 rxd->lpe_rxdesc_mbuf = m;
1147 hwd->lhr_data = segs[0].ds_addr + 2;
1148 hwd->lhr_control = (segs[0].ds_len - 1) | LPE_HWDESC_INTERRUPT;
1150 return (0);
1153 static void
1154 lpe_discard_rxbuf(struct lpe_softc *sc, int n)
1156 struct lpe_rxdesc *rxd;
1157 struct lpe_hwdesc *hwd;
1159 rxd = &sc->lpe_cdata.lpe_rx_desc[n];
1160 hwd = &sc->lpe_rdata.lpe_rx_ring[n];
1162 bus_dmamap_unload(sc->lpe_cdata.lpe_rx_buf_tag, rxd->lpe_rxdesc_dmamap);
1164 hwd->lhr_data = 0;
1165 hwd->lhr_control = 0;
1167 if (rxd->lpe_rxdesc_mbuf) {
1168 m_freem(rxd->lpe_rxdesc_mbuf);
1169 rxd->lpe_rxdesc_mbuf = NULL;
1173 static void
1174 lpe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1176 struct lpe_dmamap_arg *ctx;
1178 if (error)
1179 return;
1181 ctx = (struct lpe_dmamap_arg *)arg;
1182 ctx->lpe_dma_busaddr = segs[0].ds_addr;
/* Media change callback: nothing to do for this hardware. */
static int
lpe_ifmedia_upd(struct ifnet *ifp)
{

	return (0);
}
1191 static void
1192 lpe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1194 struct lpe_softc *sc = ifp->if_softc;
1195 struct mii_data *mii = device_get_softc(sc->lpe_miibus);
1197 lpe_lock(sc);
1198 mii_pollstat(mii);
1199 ifmr->ifm_active = mii->mii_media_active;
1200 ifmr->ifm_status = mii->mii_media_status;
1201 lpe_unlock(sc);
1204 static device_method_t lpe_methods[] = {
1205 /* Device interface */
1206 DEVMETHOD(device_probe, lpe_probe),
1207 DEVMETHOD(device_attach, lpe_attach),
1208 DEVMETHOD(device_detach, lpe_detach),
1210 /* Bus interface */
1211 DEVMETHOD(bus_print_child, bus_generic_print_child),
1213 /* MII interface */
1214 DEVMETHOD(miibus_readreg, lpe_miibus_readreg),
1215 DEVMETHOD(miibus_writereg, lpe_miibus_writereg),
1216 DEVMETHOD(miibus_statchg, lpe_miibus_statchg),
1217 { 0, 0 }
1220 static driver_t lpe_driver = {
1221 "lpe",
1222 lpe_methods,
1223 sizeof(struct lpe_softc),
1226 static devclass_t lpe_devclass;
1228 DRIVER_MODULE(lpe, simplebus, lpe_driver, lpe_devclass, 0, 0);
1229 DRIVER_MODULE(miibus, lpe, miibus_driver, miibus_devclass, 0, 0);
1230 MODULE_DEPEND(lpe, obio, 1, 1, 1);
1231 MODULE_DEPEND(lpe, miibus, 1, 1, 1);
1232 MODULE_DEPEND(lpe, ether, 1, 1, 1);