/* sys/powerpc/ps3/if_glc.c */
/*-
 * Copyright (C) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/endian.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_dl.h>

#include <machine/pio.h>
#include <machine/bus.h>
#include <machine/platform.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include "ps3bus.h"
#include "ps3-hvcall.h"
#include "if_glcreg.h"

static int glc_probe(device_t);
static int glc_attach(device_t);
static void glc_init(void *xsc);
static void glc_start(struct ifnet *ifp);
static int glc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void glc_set_multicast(struct glc_softc *sc);
static int glc_add_rxbuf(struct glc_softc *sc, int idx);
static int glc_add_rxbuf_dma(struct glc_softc *sc, int idx);
static int glc_encap(struct glc_softc *sc, struct mbuf **m_head,
            bus_addr_t *pktdesc);
static int glc_intr_filter(void *xsc);
static void glc_intr(void *xsc);
static void glc_tick(void *xsc);
static void glc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static int glc_media_change(struct ifnet *ifp);

static MALLOC_DEFINE(M_GLC, "gelic", "PS3 GELIC ethernet");

static device_method_t glc_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         glc_probe),
        DEVMETHOD(device_attach,        glc_attach),

        { 0, 0 }
};

static driver_t glc_driver = {
        "glc",
        glc_methods,
        sizeof(struct glc_softc)
};

static devclass_t glc_devclass;

DRIVER_MODULE(glc, ps3bus, glc_driver, glc_devclass, 0, 0);

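/*
 * Match only the GELIC network device on the PS3 system bus; everything
 * else is left for other drivers to claim.
 */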
static int
glc_probe(device_t dev)
{

        if (ps3bus_get_bustype(dev) != PS3_BUSTYPE_SYSBUS ||
            ps3bus_get_devtype(dev) != PS3_DEVTYPE_GELIC)
                return (ENXIO);

        device_set_desc(dev, "Playstation 3 GELIC Network Controller");
        return (BUS_PROBE_SPECIFIC);
}

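/*
 * bus_dmamap_load() callback: record the physical address of the single
 * mapped segment so the caller can pass it to the hypervisor later.
 */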
static void
glc_getphys(void *xaddr, bus_dma_segment_t *segs, int nsegs, int error)
{

        if (error != 0)
                return;

        *(bus_addr_t *)xaddr = segs[0].ds_addr;
}

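/*
 * Attach: stop any DMA the device may still be running, fetch the MAC
 * address and VLAN ids via lv1_net_control(), set up the interrupt
 * handler, create the TX/RX descriptor rings and per-packet DMA maps,
 * and attach the interface to the network stack.
 */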
static int
glc_attach(device_t dev)
{
        struct glc_softc *sc;
        struct glc_txsoft *txs;
        uint64_t mac64, val, junk;
        int i, err;

        sc = device_get_softc(dev);

        sc->sc_bus = ps3bus_get_bus(dev);
        sc->sc_dev = ps3bus_get_device(dev);
        sc->sc_self = dev;

        mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
            MTX_DEF);
        callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
        sc->next_txdma_slot = 0;
        sc->bsy_txdma_slots = 0;
        sc->sc_next_rxdma_slot = 0;
        sc->first_used_txdma_slot = -1;

        /*
         * Shut down existing tasks.
         */

        lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
        lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);

        sc->sc_ifp = if_alloc(IFT_ETHER);
        sc->sc_ifp->if_softc = sc;

        /*
         * Get MAC address and VLAN id
         */

        lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_MAC_ADDRESS,
            0, 0, 0, &mac64, &junk);
        memcpy(sc->sc_enaddr, &((uint8_t *)&mac64)[2], sizeof(sc->sc_enaddr));
        sc->sc_tx_vlan = sc->sc_rx_vlan = -1;
        err = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_VLAN_ID,
            GELIC_VLAN_TX_ETHERNET, 0, 0, &val, &junk);
        if (err == 0)
                sc->sc_tx_vlan = val;
        err = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_VLAN_ID,
            GELIC_VLAN_RX_ETHERNET, 0, 0, &val, &junk);
        if (err == 0)
                sc->sc_rx_vlan = val;

        /*
         * Set up interrupt handler
         */
        sc->sc_irqid = 0;
        sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqid,
            RF_ACTIVE);
        if (sc->sc_irq == NULL) {
                device_printf(dev, "Could not allocate IRQ!\n");
                mtx_destroy(&sc->sc_mtx);
                return (ENXIO);
        }

        bus_setup_intr(dev, sc->sc_irq,
            INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY,
            glc_intr_filter, glc_intr, sc, &sc->sc_irqctx);
        sc->sc_hwirq_status = (uint64_t *)contigmalloc(8, M_GLC, M_ZERO, 0,
            BUS_SPACE_MAXADDR_32BIT, 8, PAGE_SIZE);
        lv1_net_set_interrupt_status_indicator(sc->sc_bus, sc->sc_dev,
            vtophys(sc->sc_hwirq_status), 0);
        lv1_net_set_interrupt_mask(sc->sc_bus, sc->sc_dev,
            GELIC_INT_RXDONE | GELIC_INT_RXFRAME | GELIC_INT_PHY |
            GELIC_INT_TX_CHAIN_END, 0);

        /*
         * Set up DMA.
         */

        err = bus_dma_tag_create(bus_get_dma_tag(dev), 32, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            129*sizeof(struct glc_dmadesc), 1, 128*sizeof(struct glc_dmadesc),
            0, NULL, NULL, &sc->sc_dmadesc_tag);

        err = bus_dmamem_alloc(sc->sc_dmadesc_tag, (void **)&sc->sc_txdmadesc,
            BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
            &sc->sc_txdmadesc_map);
        err = bus_dmamap_load(sc->sc_dmadesc_tag, sc->sc_txdmadesc_map,
            sc->sc_txdmadesc, 128*sizeof(struct glc_dmadesc), glc_getphys,
            &sc->sc_txdmadesc_phys, 0);
        err = bus_dmamem_alloc(sc->sc_dmadesc_tag, (void **)&sc->sc_rxdmadesc,
            BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
            &sc->sc_rxdmadesc_map);
        err = bus_dmamap_load(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
            sc->sc_rxdmadesc, 128*sizeof(struct glc_dmadesc), glc_getphys,
            &sc->sc_rxdmadesc_phys, 0);

        err = bus_dma_tag_create(bus_get_dma_tag(dev), 128, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
            &sc->sc_rxdma_tag);
        err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            BUS_SPACE_MAXSIZE_32BIT, 16, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
            &sc->sc_txdma_tag);

        /* init transmit descriptors */
        STAILQ_INIT(&sc->sc_txfreeq);
        STAILQ_INIT(&sc->sc_txdirtyq);

        /* create TX DMA maps */
        err = ENOMEM;
        for (i = 0; i < GLC_MAX_TX_PACKETS; i++) {
                txs = &sc->sc_txsoft[i];
                txs->txs_mbuf = NULL;
                err = bus_dmamap_create(sc->sc_txdma_tag, 0, &txs->txs_dmamap);
                if (err) {
                        device_printf(dev,
                            "unable to create TX DMA map %d, error = %d\n",
                            i, err);
                }
                STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
        }

        /* Create the receive buffer DMA maps. */
        for (i = 0; i < GLC_MAX_RX_PACKETS; i++) {
                err = bus_dmamap_create(sc->sc_rxdma_tag, 0,
                    &sc->sc_rxsoft[i].rxs_dmamap);
                if (err) {
                        device_printf(dev,
                            "unable to create RX DMA map %d, error = %d\n",
                            i, err);
                }
                sc->sc_rxsoft[i].rxs_mbuf = NULL;
        }

        /*
         * Attach to network stack
         */

        if_initname(sc->sc_ifp, device_get_name(dev), device_get_unit(dev));
        sc->sc_ifp->if_mtu = ETHERMTU;
        sc->sc_ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        sc->sc_ifp->if_hwassist = CSUM_TCP | CSUM_UDP;
        sc->sc_ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_RXCSUM;
        sc->sc_ifp->if_capenable = IFCAP_HWCSUM | IFCAP_RXCSUM;
        sc->sc_ifp->if_start = glc_start;
        sc->sc_ifp->if_ioctl = glc_ioctl;
        sc->sc_ifp->if_init = glc_init;

        ifmedia_init(&sc->sc_media, IFM_IMASK, glc_media_change,
            glc_media_status);
        ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
        ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
        ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
        ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
        ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
        ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
        ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

        IFQ_SET_MAXLEN(&sc->sc_ifp->if_snd, GLC_MAX_TX_PACKETS);
        sc->sc_ifp->if_snd.ifq_drv_maxlen = GLC_MAX_TX_PACKETS;
        IFQ_SET_READY(&sc->sc_ifp->if_snd);

        ether_ifattach(sc->sc_ifp, sc->sc_enaddr);
        sc->sc_ifp->if_hwassist = 0;

        return (0);

        mtx_destroy(&sc->sc_mtx);
        if_free(sc->sc_ifp);
        return (ENXIO);
}

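/*
 * (Re)initialize the interface: reprogram the multicast filter, refill
 * and rechain the RX descriptor ring, drop any pending TX packets, and
 * restart RX DMA. Called with the softc mutex held.
 */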
static void
glc_init_locked(struct glc_softc *sc)
{
        int i, error;
        struct glc_rxsoft *rxs;
        struct glc_txsoft *txs;

        mtx_assert(&sc->sc_mtx, MA_OWNED);

        lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
        lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);

        glc_set_multicast(sc);

        for (i = 0; i < GLC_MAX_RX_PACKETS; i++) {
                rxs = &sc->sc_rxsoft[i];
                rxs->rxs_desc_slot = i;

                if (rxs->rxs_mbuf == NULL) {
                        glc_add_rxbuf(sc, i);

                        if (rxs->rxs_mbuf == NULL) {
                                rxs->rxs_desc_slot = -1;
                                break;
                        }
                }

                glc_add_rxbuf_dma(sc, i);
                bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
                    BUS_DMASYNC_PREREAD);
        }

        /* Clear TX dirty queue */
        while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
                STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
                bus_dmamap_unload(sc->sc_txdma_tag, txs->txs_dmamap);

                if (txs->txs_mbuf != NULL) {
                        m_freem(txs->txs_mbuf);
                        txs->txs_mbuf = NULL;
                }

                STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
        }
        sc->first_used_txdma_slot = -1;
        sc->bsy_txdma_slots = 0;

        error = lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
            sc->sc_rxsoft[0].rxs_desc, 0);
        if (error != 0)
                device_printf(sc->sc_self,
                    "lv1_net_start_rx_dma error: %d\n", error);

        sc->sc_ifp->if_drv_flags |= IFF_DRV_RUNNING;
        sc->sc_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        sc->sc_ifpflags = sc->sc_ifp->if_flags;

        sc->sc_wdog_timer = 0;
        callout_reset(&sc->sc_tick_ch, hz, glc_tick, sc);
}

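/* Stop both DMA engines. Called with the softc mutex held. */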
static void
glc_stop(void *xsc)
{
        struct glc_softc *sc = xsc;

        mtx_assert(&sc->sc_mtx, MA_OWNED);

        lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
        lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);
}

static void
glc_init(void *xsc)
{
        struct glc_softc *sc = xsc;

        mtx_lock(&sc->sc_mtx);
        glc_init_locked(sc);
        mtx_unlock(&sc->sc_mtx);
}

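/*
 * Once-a-second callout: re-poke the RX queue (see the XXX below) and
 * run the TX watchdog; if the watchdog expires, reinitialize the device.
 */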
static void
glc_tick(void *xsc)
{
        struct glc_softc *sc = xsc;

        mtx_assert(&sc->sc_mtx, MA_OWNED);

        /*
         * XXX: Sometimes the RX queue gets stuck. Poke it periodically until
         * we figure out why. This will fail harmlessly if the RX queue is
         * already running.
         */
        lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
            sc->sc_rxsoft[sc->sc_next_rxdma_slot].rxs_desc, 0);

        if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) {
                callout_reset(&sc->sc_tick_ch, hz, glc_tick, sc);
                return;
        }

        /* Problems */
        device_printf(sc->sc_self, "device timeout\n");

        glc_init_locked(sc);
}

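/*
 * Dequeue packets from the interface send queue and encapsulate them
 * onto the TX descriptor ring, stopping when the ring is nearly full.
 * DMA is only kicked off here when the dirty queue was empty on entry;
 * otherwise the hardware is already working through a chain and will
 * be restarted from glc_txintr() if needed.
 */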
static void
glc_start_locked(struct ifnet *ifp)
{
        struct glc_softc *sc = ifp->if_softc;
        bus_addr_t first, pktdesc;
        int kickstart = 0;
        int error;
        struct mbuf *mb_head;

        mtx_assert(&sc->sc_mtx, MA_OWNED);
        first = 0;

        if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING)
                return;

        if (STAILQ_EMPTY(&sc->sc_txdirtyq))
                kickstart = 1;

        while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
                IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head);

                if (mb_head == NULL)
                        break;

                /* Check if the ring buffer is full */
                if (sc->bsy_txdma_slots > 125) {
                        /* Put the packet back and stop */
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        IFQ_DRV_PREPEND(&ifp->if_snd, mb_head);
                        break;
                }

                BPF_MTAP(ifp, mb_head);

                if (sc->sc_tx_vlan >= 0)
                        mb_head = ether_vlanencap(mb_head, sc->sc_tx_vlan);

                if (glc_encap(sc, &mb_head, &pktdesc)) {
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        break;
                }

                if (first == 0)
                        first = pktdesc;
        }

        if (kickstart && first != 0) {
                error = lv1_net_start_tx_dma(sc->sc_bus, sc->sc_dev, first, 0);
                if (error != 0)
                        device_printf(sc->sc_self,
                            "lv1_net_start_tx_dma error: %d\n", error);
                sc->sc_wdog_timer = 5;
        }
}

static void
glc_start(struct ifnet *ifp)
{
        struct glc_softc *sc = ifp->if_softc;

        mtx_lock(&sc->sc_mtx);
        glc_start_locked(ifp);
        mtx_unlock(&sc->sc_mtx);
}

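/* Standard ioctl handler: interface flags, multicast and media changes. */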
static int
glc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        struct glc_softc *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
        int err = 0;

        switch (cmd) {
        case SIOCSIFFLAGS:
                mtx_lock(&sc->sc_mtx);
                if ((ifp->if_flags & IFF_UP) != 0) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
                            ((ifp->if_flags ^ sc->sc_ifpflags) &
                            (IFF_ALLMULTI | IFF_PROMISC)) != 0)
                                glc_set_multicast(sc);
                        else
                                glc_init_locked(sc);
                }
                else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                        glc_stop(sc);
                sc->sc_ifpflags = ifp->if_flags;
                mtx_unlock(&sc->sc_mtx);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                mtx_lock(&sc->sc_mtx);
                glc_set_multicast(sc);
                mtx_unlock(&sc->sc_mtx);
                break;
        case SIOCGIFMEDIA:
        case SIOCSIFMEDIA:
                err = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
                break;
        default:
                err = ether_ioctl(ifp, cmd, data);
                break;
        }

        return (err);
}

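/*
 * Reprogram the hardware multicast filter: always accept broadcast, and
 * either list each group address individually or, for IFF_ALLMULTI or
 * once the 32-entry filter would overflow, tell the hypervisor to accept
 * all multicast traffic.
 */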
static void
glc_set_multicast(struct glc_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;
        struct ifmultiaddr *inm;
        uint64_t addr;
        int naddrs;

        /* Clear multicast filter */
        lv1_net_remove_multicast_address(sc->sc_bus, sc->sc_dev, 0, 1);

        /* Add broadcast */
        lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev,
            0xffffffffffffL, 0);

        if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
                lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev, 0, 1);
        } else {
                if_maddr_rlock(ifp);
                naddrs = 1; /* Include broadcast */
                TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
                        if (inm->ifma_addr->sa_family != AF_LINK)
                                continue;
                        addr = 0;
                        memcpy(&((uint8_t *)(&addr))[2],
                            LLADDR((struct sockaddr_dl *)inm->ifma_addr),
                            ETHER_ADDR_LEN);

                        lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev,
                            addr, 0);

                        /*
                         * Filter can only hold 32 addresses, so fall back to
                         * the IFF_ALLMULTI case if we have too many.
                         */
                        if (++naddrs >= 32) {
                                lv1_net_add_multicast_address(sc->sc_bus,
                                    sc->sc_dev, 0, 1);
                                break;
                        }
                }
                if_maddr_runlock(ifp);
        }
}

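/*
 * Allocate a receive cluster mbuf for ring slot 'idx' and load it into
 * that slot's DMA map, replacing (and unmapping) any previous buffer.
 */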
static int
glc_add_rxbuf(struct glc_softc *sc, int idx)
{
        struct glc_rxsoft *rxs = &sc->sc_rxsoft[idx];
        struct mbuf *m;
        bus_dma_segment_t segs[1];
        int error, nsegs;

        m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL)
                return (ENOBUFS);
        m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

        if (rxs->rxs_mbuf != NULL) {
                bus_dmamap_sync(sc->sc_rxdma_tag, rxs->rxs_dmamap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->sc_rxdma_tag, rxs->rxs_dmamap);
        }

        error = bus_dmamap_load_mbuf_sg(sc->sc_rxdma_tag, rxs->rxs_dmamap, m,
            segs, &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) {
                device_printf(sc->sc_self,
                    "cannot load RX DMA map %d, error = %d\n", idx, error);
                m_freem(m);
                return (error);
        }
        /* If nsegs is wrong then the stack is corrupt. */
        KASSERT(nsegs == 1,
            ("%s: too many DMA segments (%d)", __func__, nsegs));
        rxs->rxs_mbuf = m;
        rxs->segment = segs[0];

        bus_dmamap_sync(sc->sc_rxdma_tag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

        return (0);
}

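/*
 * Rewrite the hardware RX descriptor for slot 'idx' to point at the
 * currently loaded mbuf, chain it to the following slot, and hand
 * ownership back to the GELIC.
 */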
static int
glc_add_rxbuf_dma(struct glc_softc *sc, int idx)
{
        struct glc_rxsoft *rxs = &sc->sc_rxsoft[idx];

        bzero(&sc->sc_rxdmadesc[idx], sizeof(sc->sc_rxdmadesc[idx]));
        sc->sc_rxdmadesc[idx].paddr = rxs->segment.ds_addr;
        sc->sc_rxdmadesc[idx].len = rxs->segment.ds_len;
        sc->sc_rxdmadesc[idx].next = sc->sc_rxdmadesc_phys +
            ((idx + 1) % GLC_MAX_RX_PACKETS)*sizeof(sc->sc_rxdmadesc[idx]);
        sc->sc_rxdmadesc[idx].cmd_stat = GELIC_DESCR_OWNED;

        rxs->rxs_desc_slot = idx;
        rxs->rxs_desc = sc->sc_rxdmadesc_phys + idx*sizeof(struct glc_dmadesc);

        return (0);
}

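/*
 * Map a packet onto consecutive TX ring slots (collapsing the mbuf chain
 * if it has more segments than slots available), mark the last
 * descriptor, and link the new chain after the previously queued one.
 * Returns the bus address of the first descriptor in *pktdesc.
 */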
static int
glc_encap(struct glc_softc *sc, struct mbuf **m_head, bus_addr_t *pktdesc)
{
        bus_dma_segment_t segs[16];
        struct glc_txsoft *txs;
        struct mbuf *m;
        bus_addr_t firstslotphys;
        int i, idx, nsegs, nsegs_max;
        int err = 0;

        /* Max number of segments is the number of free DMA slots */
        nsegs_max = 128 - sc->bsy_txdma_slots;

        if (nsegs_max > 16 || sc->first_used_txdma_slot < 0)
                nsegs_max = 16;

        /* Get a work queue entry. */
        if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
                /* Ran out of descriptors. */
                return (ENOBUFS);
        }

        nsegs = 0;
        for (m = *m_head; m != NULL; m = m->m_next)
                nsegs++;

        if (nsegs > nsegs_max) {
                m = m_collapse(*m_head, M_NOWAIT, nsegs_max);
                if (m == NULL) {
                        m_freem(*m_head);
                        *m_head = NULL;
                        return (ENOBUFS);
                }
                *m_head = m;
        }

        err = bus_dmamap_load_mbuf_sg(sc->sc_txdma_tag, txs->txs_dmamap,
            *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
        if (err != 0) {
                m_freem(*m_head);
                *m_head = NULL;
                return (err);
        }

        KASSERT(nsegs <= 128 - sc->bsy_txdma_slots,
            ("GLC: Mapped too many (%d) DMA segments with %d available",
            nsegs, 128 - sc->bsy_txdma_slots));

        if (nsegs == 0) {
                m_freem(*m_head);
                *m_head = NULL;
                return (EIO);
        }

        txs->txs_ndescs = nsegs;
        txs->txs_firstdesc = sc->next_txdma_slot;

        idx = txs->txs_firstdesc;
        firstslotphys = sc->sc_txdmadesc_phys +
            txs->txs_firstdesc*sizeof(struct glc_dmadesc);

        for (i = 0; i < nsegs; i++) {
                bzero(&sc->sc_txdmadesc[idx], sizeof(sc->sc_txdmadesc[idx]));
                sc->sc_txdmadesc[idx].paddr = segs[i].ds_addr;
                sc->sc_txdmadesc[idx].len = segs[i].ds_len;
                sc->sc_txdmadesc[idx].next = sc->sc_txdmadesc_phys +
                    ((idx + 1) % GLC_MAX_TX_PACKETS)*sizeof(struct glc_dmadesc);
                sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_NOIPSEC;

                if (i+1 == nsegs) {
                        txs->txs_lastdesc = idx;
                        sc->sc_txdmadesc[idx].next = 0;
                        sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_LAST;
                }

                if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
                        sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_CSUM_TCP;
                if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
                        sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_CSUM_UDP;
                sc->sc_txdmadesc[idx].cmd_stat |= GELIC_DESCR_OWNED;

                idx = (idx + 1) % GLC_MAX_TX_PACKETS;
        }
        sc->next_txdma_slot = idx;
        sc->bsy_txdma_slots += nsegs;
        if (txs->txs_firstdesc != 0)
                idx = txs->txs_firstdesc - 1;
        else
                idx = GLC_MAX_TX_PACKETS - 1;

        if (sc->first_used_txdma_slot < 0)
                sc->first_used_txdma_slot = txs->txs_firstdesc;

        bus_dmamap_sync(sc->sc_txdma_tag, txs->txs_dmamap,
            BUS_DMASYNC_PREWRITE);
        sc->sc_txdmadesc[idx].next = firstslotphys;

        STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
        STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
        txs->txs_mbuf = *m_head;
        *pktdesc = firstslotphys;

        return (0);
}

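/*
 * Harvest completed RX descriptors: pass good frames up the stack (after
 * stripping the 2-byte VLAN tag), count errors, requeue each slot, and
 * restart RX DMA if the hardware reached the end of the chain.
 */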
static void
glc_rxintr(struct glc_softc *sc)
{
        int i, restart_rxdma, error;
        struct mbuf *m;
        struct ifnet *ifp = sc->sc_ifp;

        bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
            BUS_DMASYNC_POSTREAD);

        restart_rxdma = 0;
        while ((sc->sc_rxdmadesc[sc->sc_next_rxdma_slot].cmd_stat &
            GELIC_DESCR_OWNED) == 0) {
                i = sc->sc_next_rxdma_slot;
                sc->sc_next_rxdma_slot++;
                if (sc->sc_next_rxdma_slot >= GLC_MAX_RX_PACKETS)
                        sc->sc_next_rxdma_slot = 0;

                if (sc->sc_rxdmadesc[i].cmd_stat & GELIC_CMDSTAT_CHAIN_END)
                        restart_rxdma = 1;

                if (sc->sc_rxdmadesc[i].rxerror & GELIC_RXERRORS) {
                        if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
                        goto requeue;
                }

                m = sc->sc_rxsoft[i].rxs_mbuf;
                if (sc->sc_rxdmadesc[i].data_stat & GELIC_RX_IPCSUM) {
                        m->m_pkthdr.csum_flags |=
                            CSUM_IP_CHECKED | CSUM_IP_VALID;
                }
                if (sc->sc_rxdmadesc[i].data_stat & GELIC_RX_TCPUDPCSUM) {
                        m->m_pkthdr.csum_flags |=
                            CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                        m->m_pkthdr.csum_data = 0xffff;
                }

                if (glc_add_rxbuf(sc, i)) {
                        if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
                        goto requeue;
                }

                if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
                m->m_pkthdr.rcvif = ifp;
                m->m_len = sc->sc_rxdmadesc[i].valid_size;
                m->m_pkthdr.len = m->m_len;

                /*
                 * Remove VLAN tag. Even on early firmwares that do not allow
                 * multiple VLANs, the VLAN tag is still in place here.
                 */
                m_adj(m, 2);

                mtx_unlock(&sc->sc_mtx);
                (*ifp->if_input)(ifp, m);
                mtx_lock(&sc->sc_mtx);

            requeue:
                glc_add_rxbuf_dma(sc, i);
        }

        bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
            BUS_DMASYNC_PREWRITE);

        if (restart_rxdma) {
                error = lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
                    sc->sc_rxsoft[sc->sc_next_rxdma_slot].rxs_desc, 0);
                if (error != 0)
                        device_printf(sc->sc_self,
                            "lv1_net_start_rx_dma error: %d\n", error);
        }
}

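/*
 * Reclaim completed TX descriptors, free their mbufs and DMA maps, and
 * restart the TX engine if it stopped (chain end or error) or if
 * descriptors are still outstanding.
 */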
static void
glc_txintr(struct glc_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;
        struct glc_txsoft *txs;
        int progress = 0, kickstart = 0, error;

        bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_txdmadesc_map,
            BUS_DMASYNC_POSTREAD);

        while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
                if (sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat
                    & GELIC_DESCR_OWNED)
                        break;

                STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
                bus_dmamap_unload(sc->sc_txdma_tag, txs->txs_dmamap);
                sc->bsy_txdma_slots -= txs->txs_ndescs;

                if (txs->txs_mbuf != NULL) {
                        m_freem(txs->txs_mbuf);
                        txs->txs_mbuf = NULL;
                }

                if ((sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat & 0xf0000000)
                    != 0) {
                        lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
                        kickstart = 1;
                        if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
                }

                if (sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat &
                    GELIC_CMDSTAT_CHAIN_END)
                        kickstart = 1;

                STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
                if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
                progress = 1;
        }

        if (txs != NULL)
                sc->first_used_txdma_slot = txs->txs_firstdesc;
        else
                sc->first_used_txdma_slot = -1;

        if (kickstart || txs != NULL) {
                /* Speculatively (or necessarily) start the TX queue again */
                error = lv1_net_start_tx_dma(sc->sc_bus, sc->sc_dev,
                    sc->sc_txdmadesc_phys +
                    txs->txs_firstdesc*sizeof(struct glc_dmadesc), 0);
                if (error != 0)
                        device_printf(sc->sc_self,
                            "lv1_net_start_tx_dma error: %d\n", error);
        }

        if (progress) {
                /*
                 * We freed some descriptors, so reset IFF_DRV_OACTIVE
                 * and restart.
                 */
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;

                if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
                    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                        glc_start_locked(ifp);
        }
}

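/*
 * Fast interrupt filter: latch the hypervisor-updated interrupt status
 * word atomically and defer the real work to the ithread (glc_intr).
 */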
static int
glc_intr_filter(void *xsc)
{
        struct glc_softc *sc = xsc;

        powerpc_sync();
        atomic_set_64(&sc->sc_interrupt_status, *sc->sc_hwirq_status);
        return (FILTER_SCHEDULE_THREAD);
}

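/*
 * Interrupt thread: dispatch RX/TX completions and track PHY link state
 * changes based on the latched status bits.
 */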
static void
glc_intr(void *xsc)
{
        struct glc_softc *sc = xsc;
        uint64_t status, linkstat, junk;

        mtx_lock(&sc->sc_mtx);

        status = atomic_readandclear_64(&sc->sc_interrupt_status);

        if (status == 0) {
                mtx_unlock(&sc->sc_mtx);
                return;
        }

        if (status & (GELIC_INT_RXDONE | GELIC_INT_RXFRAME))
                glc_rxintr(sc);

        if (status & (GELIC_INT_TXDONE | GELIC_INT_TX_CHAIN_END))
                glc_txintr(sc);

        if (status & GELIC_INT_PHY) {
                lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_LINK_STATUS,
                    GELIC_VLAN_TX_ETHERNET, 0, 0, &linkstat, &junk);

                linkstat = (linkstat & GELIC_LINK_UP) ?
                    LINK_STATE_UP : LINK_STATE_DOWN;
                if (linkstat != sc->sc_ifp->if_link_state)
                        if_link_state_change(sc->sc_ifp, linkstat);
        }

        mtx_unlock(&sc->sc_mtx);
}

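/* Report link state, speed and duplex as read back from the hypervisor. */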
static void
glc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct glc_softc *sc = ifp->if_softc;
        uint64_t status, junk;

        ifmr->ifm_status = IFM_AVALID;
        ifmr->ifm_active = IFM_ETHER;

        lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_LINK_STATUS,
            GELIC_VLAN_TX_ETHERNET, 0, 0, &status, &junk);

        if (status & GELIC_LINK_UP)
                ifmr->ifm_status |= IFM_ACTIVE;

        if (status & GELIC_SPEED_10)
                ifmr->ifm_active |= IFM_10_T;
        else if (status & GELIC_SPEED_100)
                ifmr->ifm_active |= IFM_100_TX;
        else if (status & GELIC_SPEED_1000)
                ifmr->ifm_active |= IFM_1000_T;

        if (status & GELIC_FULL_DUPLEX)
                ifmr->ifm_active |= IFM_FDX;
        else
                ifmr->ifm_active |= IFM_HDX;
}

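/* Program the requested link mode (or autonegotiation) via lv1_net_control(). */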
static int
glc_media_change(struct ifnet *ifp)
{
        struct glc_softc *sc = ifp->if_softc;
        uint64_t mode, junk;
        int result;

        if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
                return (EINVAL);

        switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
        case IFM_AUTO:
                mode = GELIC_AUTO_NEG;
                break;
        case IFM_10_T:
                mode = GELIC_SPEED_10;
                break;
        case IFM_100_TX:
                mode = GELIC_SPEED_100;
                break;
        case IFM_1000_T:
                mode = GELIC_SPEED_1000 | GELIC_FULL_DUPLEX;
                break;
        default:
                return (EINVAL);
        }

        if (IFM_OPTIONS(sc->sc_media.ifm_media) & IFM_FDX)
                mode |= GELIC_FULL_DUPLEX;

        result = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_SET_LINK_MODE,
            GELIC_VLAN_TX_ETHERNET, mode, 0, &junk, &junk);

        return (result ? EIO : 0);
}