release/src/shared/hnddma.c

/*
 * Generic Broadcom Home Networking Division (HND) DMA module.
 * This supports the following chips: BCM42xx, 44xx, 47xx.
 *
 * Copyright 2005, Broadcom Corporation
 * All Rights Reserved.
 *
 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
 *
 * $Id: hnddma.c,v 1.7 2005/03/07 08:35:32 kanki Exp $
 */

#include <typedefs.h>
#include <osl.h>
#include <bcmendian.h>
#include <sbconfig.h>
#include <bcmutils.h>

struct dma_info;	/* forward declaration */
#define di_t struct dma_info
#include <hnddma.h>

/* debug/trace */
#define DMA_ERROR(args)
#define DMA_TRACE(args)

/* default dma message level (if the msg_level pointer passed to dma_attach() is null) */
static uint dma_msg_level = 0;

#define MAXNAMEL	8
#define MAXDD		(DMAMAXRINGSZ / sizeof (dmadd_t))
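
/*
 * Sketch of the ring-size bound (assumed values, not defined in this file):
 * with the typical DMAMAXRINGSZ of 4096 bytes and an 8-byte dmadd_t (32-bit
 * ctrl plus 32-bit addr), MAXDD comes to 4096 / 8 = 512 descriptors.
 */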

/* dma engine software state */
typedef struct dma_info {
	hnddma_t hnddma;	/* exported structure */
	uint *msg_level;	/* message level pointer */

	char name[MAXNAMEL];	/* caller's name for diag msgs */
	void *drv;		/* driver handle */
	void *osh;		/* os handle */
	dmaregs_t *regs;	/* dma engine registers */

	dmadd_t *txd;		/* pointer to chip-specific tx descriptor ring */
	uint txin;		/* index of next descriptor to reclaim */
	uint txout;		/* index of next descriptor to post */
	uint txavail;		/* # free tx descriptors */
	void **txp;		/* pointer to parallel array of pointers to packets */
	ulong txdpa;		/* physical address of descriptor ring */
	uint txdalign;		/* # bytes added to alloc'd mem to align txd */
	uint txdalloc;		/* # bytes allocated for the ring */

	dmadd_t *rxd;		/* pointer to chip-specific rx descriptor ring */
	uint rxin;		/* index of next descriptor to reclaim */
	uint rxout;		/* index of next descriptor to post */
	void **rxp;		/* pointer to parallel array of pointers to packets */
	ulong rxdpa;		/* physical address of descriptor ring */
	uint rxdalign;		/* # bytes added to alloc'd mem to align rxd */
	uint rxdalloc;		/* # bytes allocated for the ring */

	/* tunables */
	uint ntxd;		/* # tx descriptors */
	uint nrxd;		/* # rx descriptors */
	uint rxbufsize;		/* rx buffer size in bytes */
	uint nrxpost;		/* # rx buffers to keep posted */
	uint rxoffset;		/* rxcontrol offset */
	uint ddoffset;		/* add to get dma address of descriptor ring */
	uint dataoffset;	/* add to get dma address of data buffer */
} dma_info_t;

/* descriptor bumping macros */
#define XXD(x, n)	((x) & ((n) - 1))
#define TXD(x)		XXD((x), di->ntxd)
#define RXD(x)		XXD((x), di->nrxd)
#define NEXTTXD(i)	TXD(i + 1)
#define PREVTXD(i)	TXD(i - 1)
#define NEXTRXD(i)	RXD(i + 1)
#define NTXDACTIVE(h, t)	TXD(t - h)
#define NRXDACTIVE(h, t)	RXD(t - h)
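
/*
 * Worked example (illustrative, ntxd == 4; ring sizes are asserted to be
 * powers of 2 in dma_attach): TXD(5) == (5 & 3) == 1, so indexes wrap
 * around the ring, and NTXDACTIVE(3, 1) == ((1 - 3) & 3) == 2 descriptors
 * in flight between head txin == 3 and tail txout == 1.
 */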

/* macros to convert between byte offsets and indexes */
#define B2I(bytes)	((bytes) / sizeof (dmadd_t))
#define I2B(index)	((index) * sizeof (dmadd_t))

/*
 * This assumes the largest i/o address is, in fact, the pci big window
 * and that the pci core sb2pcitranslation2 register has been left with
 * the default 0x0 pci base address.
 */
#define MAXDMAADDR	SB_PCI_DMA_SZ
#define DMA_ADDRESSABLE(x)	!((x) & ~(MAXDMAADDR - 1))
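
/*
 * Illustration, assuming the usual SB_PCI_DMA_SZ of 0x40000000 (1 GB):
 * DMA_ADDRESSABLE(0x3fffffff) is true and DMA_ADDRESSABLE(0x40000000) is
 * false, i.e. only physical addresses inside the pci big window pass.
 */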

/* prototypes */

void*
dma_attach(void *drv, void *osh, char *name, dmaregs_t *regs, uint ntxd, uint nrxd,
	uint rxbufsize, uint nrxpost, uint rxoffset, uint ddoffset, uint dataoffset, uint *msg_level)
{
	dma_info_t *di;
	uint size;
	void *va;

	ASSERT(ntxd <= MAXDD);
	ASSERT(ISPOWEROF2(ntxd));
	ASSERT(nrxd <= MAXDD);
	ASSERT(ISPOWEROF2(nrxd));

	/* allocate private info structure */
	if ((di = MALLOC(osh, sizeof (dma_info_t))) == NULL) {
		return (NULL);
	}
	bzero((char*)di, sizeof (dma_info_t));

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof (void*);
		if ((di->txp = MALLOC(osh, size)) == NULL)
			goto fail;
		bzero((char*)di->txp, size);
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof (void*);
		if ((di->rxp = MALLOC(osh, size)) == NULL)
			goto fail;
		bzero((char*)di->rxp, size);
	}

	/* set message level */
	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	DMA_TRACE(("%s: dma_attach: drv %p osh %p regs %p ntxd %d nrxd %d rxbufsize %d "
	           "nrxpost %d rxoffset %d ddoffset 0x%x dataoffset 0x%x\n",
	           name, drv, osh, regs, ntxd, nrxd, rxbufsize, nrxpost, rxoffset,
	           ddoffset, dataoffset));

	/* make a private copy of our caller's name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL-1] = '\0';

	di->drv = drv;
	di->osh = osh;
	di->regs = regs;

	/* allocate transmit descriptor ring */
	if (ntxd) {
		/* only need ntxd descriptors but it must be DMARINGALIGNed */
		size = ntxd * sizeof (dmadd_t);
		if (!ISALIGNED(DMA_CONSISTENT_ALIGN, DMARINGALIGN))
			size += DMARINGALIGN;
		if ((va = DMA_ALLOC_CONSISTENT(osh, size, &di->txdpa)) == NULL)
			goto fail;
		di->txd = (dmadd_t*) ROUNDUP((uintptr)va, DMARINGALIGN);
		di->txdalign = (uint)((int8*)di->txd - (int8*)va);
		di->txdpa += di->txdalign;
		di->txdalloc = size;
		ASSERT(ISALIGNED((uintptr)di->txd, DMARINGALIGN));
		ASSERT(DMA_ADDRESSABLE(di->txdpa));
	}
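
	/*
	 * Alignment arithmetic, with illustrative values (DMARINGALIGN is
	 * assumed to be 4096 here): if DMA_ALLOC_CONSISTENT returned
	 * va == 0x80101010, ROUNDUP puts txd at 0x80102000 and
	 * txdalign == 0xff0; txdpa is bumped by the same amount so the
	 * physical and virtual views stay in step.
	 */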

	/* allocate receive descriptor ring */
	if (nrxd) {
		/* only need nrxd descriptors but it must be DMARINGALIGNed */
		size = nrxd * sizeof (dmadd_t);
		if (!ISALIGNED(DMA_CONSISTENT_ALIGN, DMARINGALIGN))
			size += DMARINGALIGN;
		if ((va = DMA_ALLOC_CONSISTENT(osh, size, &di->rxdpa)) == NULL)
			goto fail;
		di->rxd = (dmadd_t*) ROUNDUP((uintptr)va, DMARINGALIGN);
		di->rxdalign = (uint)((int8*)di->rxd - (int8*)va);
		di->rxdpa += di->rxdalign;
		di->rxdalloc = size;
		ASSERT(ISALIGNED((uintptr)di->rxd, DMARINGALIGN));
		ASSERT(DMA_ADDRESSABLE(di->rxdpa));
	}

	/* save tunables */
	di->ntxd = ntxd;
	di->nrxd = nrxd;
	di->rxbufsize = rxbufsize;
	di->nrxpost = nrxpost;
	di->rxoffset = rxoffset;
	di->ddoffset = ddoffset;
	di->dataoffset = dataoffset;

	return ((void*)di);

fail:
	dma_detach((void*)di);
	return (NULL);
}

/* may be called with core in reset */
void
dma_detach(dma_info_t *di)
{
	if (di == NULL)
		return;

	DMA_TRACE(("%s: dma_detach\n", di->name));

	/* shouldn't be here if descriptors are unreclaimed */
	ASSERT(di->txin == di->txout);
	ASSERT(di->rxin == di->rxout);

	/* free dma descriptor rings */
	if (di->txd)
		DMA_FREE_CONSISTENT(di->osh, ((int8*)di->txd - di->txdalign),
		                    di->txdalloc, (di->txdpa - di->txdalign));
	if (di->rxd)
		DMA_FREE_CONSISTENT(di->osh, ((int8*)di->rxd - di->rxdalign),
		                    di->rxdalloc, (di->rxdpa - di->rxdalign));

	/* free packet pointer vectors */
	if (di->txp)
		MFREE(di->osh, (void*)di->txp, (di->ntxd * sizeof (void*)));
	if (di->rxp)
		MFREE(di->osh, (void*)di->rxp, (di->nrxd * sizeof (void*)));

	/* free our private info structure */
	MFREE(di->osh, (void*)di, sizeof (dma_info_t));
}

void
dma_txreset(dma_info_t *di)
{
	uint32 status;

	DMA_TRACE(("%s: dma_txreset\n", di->name));

	/* suspend tx DMA first */
	W_REG(&di->regs->xmtcontrol, XC_SE);
	SPINWAIT((status = (R_REG(&di->regs->xmtstatus) & XS_XS_MASK)) != XS_XS_DISABLED &&
	         status != XS_XS_IDLE &&
	         status != XS_XS_STOPPED,
	         10000);

	W_REG(&di->regs->xmtcontrol, 0);
	SPINWAIT((status = (R_REG(&di->regs->xmtstatus) & XS_XS_MASK)) != XS_XS_DISABLED,
	         10000);

	if (status != XS_XS_DISABLED) {
		DMA_ERROR(("%s: dma_txreset: dma cannot be stopped\n", di->name));
	}

	/* wait for the last transaction to complete */
	OSL_DELAY(300);
}

void
dma_rxreset(dma_info_t *di)
{
	uint32 status;

	DMA_TRACE(("%s: dma_rxreset\n", di->name));

	W_REG(&di->regs->rcvcontrol, 0);
	SPINWAIT((status = (R_REG(&di->regs->rcvstatus) & RS_RS_MASK)) != RS_RS_DISABLED,
	         10000);

	if (status != RS_RS_DISABLED) {
		DMA_ERROR(("%s: dma_rxreset: dma cannot be stopped\n", di->name));
	}
}

void
dma_txinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txinit\n", di->name));

	di->txin = di->txout = 0;
	di->txavail = di->ntxd - 1;
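
	/*
	 * One slot is deliberately kept empty: with txin == txout meaning
	 * "ring empty", a full ring must stop at ntxd - 1 posted descriptors
	 * or it would look empty again (see the NEXTTXD(txout) == di->txin
	 * check in dma_txfast and dma_tx).
	 */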

	/* clear tx descriptor ring */
	BZERO_SM((void*)di->txd, (di->ntxd * sizeof (dmadd_t)));

	W_REG(&di->regs->xmtcontrol, XC_XE);
	W_REG(&di->regs->xmtaddr, (di->txdpa + di->ddoffset));
}

bool
dma_txenabled(dma_info_t *di)
{
	uint32 xc;

	/* If the chip is dead, it is not enabled :-) */
	xc = R_REG(&di->regs->xmtcontrol);
	return ((xc != 0xffffffff) && (xc & XC_XE));
}

void
dma_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));
	OR_REG(&di->regs->xmtcontrol, XC_SE);
}

void
dma_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));
	AND_REG(&di->regs->xmtcontrol, ~XC_SE);
}

bool
dma_txsuspended(dma_info_t *di)
{
	if (!(R_REG(&di->regs->xmtcontrol) & XC_SE))
		return 0;

	if ((R_REG(&di->regs->xmtstatus) & XS_XS_MASK) != XS_XS_IDLE)
		return 0;
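
	/*
	 * Double-check: status is read again after a short delay, presumably
	 * so a transient idle reading (taken mid descriptor fetch) is not
	 * mistaken for a completed suspend.
	 */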

	OSL_DELAY(2);
	return ((R_REG(&di->regs->xmtstatus) & XS_XS_MASK) == XS_XS_IDLE);
}

bool
dma_txstopped(dma_info_t *di)
{
	return ((R_REG(&di->regs->xmtstatus) & XS_XS_MASK) == XS_XS_STOPPED);
}

bool
dma_rxstopped(dma_info_t *di)
{
	return ((R_REG(&di->regs->rcvstatus) & RS_RS_MASK) == RS_RS_STOPPED);
}

void
dma_fifoloopbackenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
	OR_REG(&di->regs->xmtcontrol, XC_LE);
}

void
dma_rxinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxinit\n", di->name));

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	BZERO_SM((void*)di->rxd, (di->nrxd * sizeof (dmadd_t)));

	dma_rxenable(di);
	W_REG(&di->regs->rcvaddr, (di->rxdpa + di->ddoffset));
}

void
dma_rxenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxenable\n", di->name));
	W_REG(&di->regs->rcvcontrol, ((di->rxoffset << RC_RO_SHIFT) | RC_RE));
}

bool
dma_rxenabled(dma_info_t *di)
{
	uint32 rc;

	rc = R_REG(&di->regs->rcvcontrol);
	return ((rc != 0xffffffff) && (rc & RC_RE));
}

/*
 * The BCM47XX family supports full 32bit dma engine buffer addressing so
 * dma buffers can cross 4 Kbyte page boundaries.
 */
int
dma_txfast(dma_info_t *di, void *p0, uint32 coreflags)
{
	void *p, *next;
	uchar *data;
	uint len;
	uint txout;
	uint32 ctrl;
	uint32 pa;

	DMA_TRACE(("%s: dma_txfast\n", di->name));

	txout = di->txout;
	ctrl = 0;

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
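
	/*
	 * Flag placement (as built below): the first buffer's descriptor gets
	 * CTRL_SOF, the last gets CTRL_EOF | CTRL_IOC so completion raises an
	 * interrupt, and whichever descriptor lands on index ntxd - 1 also
	 * carries CTRL_EOT to mark the physical end of the ring.
	 */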
	for (p = p0; p; p = next) {
		data = PKTDATA(di->drv, p);
		len = PKTLEN(di->drv, p);
		next = PKTNEXT(di->drv, p);

		/* return nonzero if out of tx descriptors */
		if (NEXTTXD(txout) == di->txin)
			goto outoftxd;

		if (len == 0)
			continue;

		/* get physical address of buffer start */
		pa = (uint32) DMA_MAP(di->osh, data, len, DMA_TX, p);
		ASSERT(DMA_ADDRESSABLE(pa));

		/* build the descriptor control value */
		ctrl = len & CTRL_BC_MASK;

		ctrl |= coreflags;

		if (p == p0)
			ctrl |= CTRL_SOF;
		if (next == NULL)
			ctrl |= (CTRL_IOC | CTRL_EOF);
		if (txout == (di->ntxd - 1))
			ctrl |= CTRL_EOT;

		/* init the tx descriptor */
		W_SM(&di->txd[txout].ctrl, BUS_SWAP32(ctrl));
		W_SM(&di->txd[txout].addr, BUS_SWAP32(pa + di->dataoffset));

		ASSERT(di->txp[txout] == NULL);

		txout = NEXTTXD(txout);
	}

	/* if last txd eof not set, fix it */
	if (!(ctrl & CTRL_EOF))
		W_SM(&di->txd[PREVTXD(txout)].ctrl, BUS_SWAP32(ctrl | CTRL_IOC | CTRL_EOF));

	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	W_REG(&di->regs->xmtptr, I2B(txout));

	/* tx flow control */
	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (0);

outoftxd:
	DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
	PKTFREE(di->drv, p0, TRUE);
	di->txavail = 0;
	di->hnddma.txnobuf++;
	return (-1);
}

#define PAGESZ		4096
#define PAGEBASE(x)	((uintptr)(x) & ~4095)
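
/*
 * Split example (illustrative addresses): a 100-byte buffer at 0x1fc0 ends
 * at 0x2023 and crosses the page boundary at 0x2000, so dma_tx() below
 * posts two descriptors for it: 0x1fc0..0x1fff (64 bytes) and
 * 0x2000..0x2023 (36 bytes); dma_txfast() would post it as one descriptor.
 */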

/*
 * Just like above except go through the extra effort of splitting
 * buffers that cross 4 Kbyte boundaries into multiple tx descriptors.
 */
int
dma_tx(dma_info_t *di, void *p0, uint32 coreflags)
{
	void *p, *next;
	uchar *data;
	uint plen, len;
	uchar *page, *start, *end;
	uint txout;
	uint32 ctrl;
	uint32 pa;

	DMA_TRACE(("%s: dma_tx\n", di->name));

	txout = di->txout;
	ctrl = 0;

	/*
	 * Walk the chain of packet buffers
	 * splitting those that cross 4 Kbyte boundaries
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		data = PKTDATA(di->drv, p);
		plen = PKTLEN(di->drv, p);
		next = PKTNEXT(di->drv, p);

		if (plen == 0)
			continue;

		for (page = (uchar*)PAGEBASE(data);
		     page <= (uchar*)PAGEBASE(data + plen - 1);
		     page += PAGESZ) {

			/* return nonzero if out of tx descriptors */
			if (NEXTTXD(txout) == di->txin)
				goto outoftxd;

			start = (page == (uchar*)PAGEBASE(data))? data: page;
			end = (page == (uchar*)PAGEBASE(data + plen))?
			      (data + plen): (page + PAGESZ);
			len = (uint)(end - start);

			/* build the descriptor control value */
			ctrl = len & CTRL_BC_MASK;

			ctrl |= coreflags;

			if ((p == p0) && (start == data))
				ctrl |= CTRL_SOF;
			if ((next == NULL) && (end == (data + plen)))
				ctrl |= (CTRL_IOC | CTRL_EOF);
			if (txout == (di->ntxd - 1))
				ctrl |= CTRL_EOT;

			/* get physical address of buffer start */
			pa = (uint32) DMA_MAP(di->osh, start, len, DMA_TX, p);
			ASSERT(DMA_ADDRESSABLE(pa));

			/* init the tx descriptor */
			W_SM(&di->txd[txout].ctrl, BUS_SWAP32(ctrl));
			W_SM(&di->txd[txout].addr, BUS_SWAP32(pa + di->dataoffset));

			ASSERT(di->txp[txout] == NULL);

			txout = NEXTTXD(txout);
		}
	}

	/* if last txd eof not set, fix it */
	if (!(ctrl & CTRL_EOF))
		W_SM(&di->txd[PREVTXD(txout)].ctrl, BUS_SWAP32(ctrl | CTRL_IOC | CTRL_EOF));

	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	W_REG(&di->regs->xmtptr, I2B(txout));

	/* tx flow control */
	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (0);

outoftxd:
	DMA_ERROR(("%s: dma_tx: out of txds\n", di->name));
	PKTFREE(di->drv, p0, TRUE);
	di->txavail = 0;
	di->hnddma.txnobuf++;
	return (-1);
}

/* returns a pointer to the next frame received, or NULL if there are no more */
void*
dma_rx(dma_info_t *di)
{
	void *p;
	uint len;
	int skiplen = 0;

	while ((p = dma_getnextrxp(di, FALSE))) {
		/* skip giant packets which span multiple rx descriptors */
		if (skiplen > 0) {
			skiplen -= di->rxbufsize;
			if (skiplen < 0)
				skiplen = 0;
			PKTFREE(di->drv, p, FALSE);
			continue;
		}

		len = ltoh16(*(uint16*)(PKTDATA(di->drv, p)));
		DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));

		/* bad frame length check */
		if (len > (di->rxbufsize - di->rxoffset)) {
			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di->name, len));
			if (len > 0)
				skiplen = len - (di->rxbufsize - di->rxoffset);
			PKTFREE(di->drv, p, FALSE);
			di->hnddma.rxgiants++;
			continue;
		}

		/* set actual length */
		PKTSETLEN(di->drv, p, (di->rxoffset + len));

		break;
	}

	return (p);
}
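
/*
 * Typical receive-path usage (a sketch; process_frame() is a hypothetical
 * driver routine, not part of this module):
 *
 *	void *p;
 *
 *	while ((p = dma_rx(di)) != NULL)
 *		process_frame(drv, p);
 *	dma_rxfill(di);		// keep the ring topped up
 */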

/* post receive buffers */
void
dma_rxfill(dma_info_t *di)
{
	void *p;
	uint rxin, rxout;
	uint ctrl;
	uint n;
	uint i;
	uint32 pa;
	uint rxbufsize;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */
	rxin = di->rxin;
	rxout = di->rxout;
	rxbufsize = di->rxbufsize;

	n = di->nrxpost - NRXDACTIVE(rxin, rxout);
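
	/*
	 * E.g. with nrxpost == 64 (an illustrative tunable) and 10 buffers
	 * still outstanding, n == 54 fresh buffers get posted below.
	 */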

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

	for (i = 0; i < n; i++) {
		if ((p = PKTGET(di->drv, rxbufsize, FALSE)) == NULL) {
			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", di->name));
			di->hnddma.rxnobuf++;
			break;
		}

		*(uint32*)(OSL_UNCACHED(PKTDATA(di->drv, p))) = 0;

		pa = (uint32) DMA_MAP(di->osh, PKTDATA(di->drv, p), rxbufsize, DMA_RX, p);
		ASSERT(ISALIGNED(pa, 4));
		ASSERT(DMA_ADDRESSABLE(pa));

		/* save the free packet pointer */
		ASSERT(di->rxp[rxout] == NULL);
		di->rxp[rxout] = p;

		/* prep the descriptor control value */
		ctrl = rxbufsize;
		if (rxout == (di->nrxd - 1))
			ctrl |= CTRL_EOT;

		/* init the rx descriptor */
		W_SM(&di->rxd[rxout].ctrl, BUS_SWAP32(ctrl));
		W_SM(&di->rxd[rxout].addr, BUS_SWAP32(pa + di->dataoffset));

		rxout = NEXTRXD(rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	W_REG(&di->regs->rcvptr, I2B(rxout));
}

void
dma_txreclaim(dma_info_t *di, bool forceall)
{
	void *p;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));

	while ((p = dma_getnexttxp(di, forceall)))
		PKTFREE(di->drv, p, TRUE);
}

/*
 * Reclaim next completed txd (txds if using chained buffers) and
 * return associated packet.
 * If 'force' is true, reclaim txd(s) and return associated packet
 * regardless of the value of the hardware "curr" pointer.
 */
void*
dma_getnexttxp(dma_info_t *di, bool forceall)
{
	uint start, end, i;
	void *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));

	txp = NULL;

	start = di->txin;
	if (forceall)
		end = di->txout;
	else
		end = B2I(R_REG(&di->regs->xmtstatus) & XS_CD_MASK);

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->txd[i].addr)) - di->dataoffset),
		          (BUS_SWAP32(R_SM(&di->txd[i].ctrl)) & CTRL_BC_MASK), DMA_TX, di->txp[i]);
		W_SM(&di->txd[i].addr, 0xdeadbeef);
		txp = di->txp[i];
		di->txp[i] = NULL;
	}

	di->txin = i;

	/* tx flow control */
	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (txp);

bogus:
	DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
	           start, end, di->txout, forceall));
	return (NULL);
}

/* like getnexttxp but no reclaim */
void*
dma_peeknexttxp(dma_info_t *di)
{
	uint end, i;

	end = B2I(R_REG(&di->regs->xmtstatus) & XS_CD_MASK);

	for (i = di->txin; i != end; i = NEXTTXD(i))
		if (di->txp[i])
			return (di->txp[i]);

	return (NULL);
}

void
dma_rxreclaim(dma_info_t *di)
{
	void *p;

	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

	while ((p = dma_getnextrxp(di, TRUE)))
		PKTFREE(di->drv, p, FALSE);
}

void *
dma_getnextrxp(dma_info_t *di, bool forceall)
{
	uint i;
	void *rxp;

	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma_rxenabled(di));

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return (NULL);

	/* ignore curr if forceall */
	if (!forceall && (i == B2I(R_REG(&di->regs->rcvstatus) & RS_CD_MASK)))
		return (NULL);

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	ASSERT(rxp);
	di->rxp[i] = NULL;

	/* clear this packet from the descriptor ring */
	DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->rxd[i].addr)) - di->dataoffset),
	          di->rxbufsize, DMA_RX, rxp);
	W_SM(&di->rxd[i].addr, 0xdeadbeef);

	di->rxin = NEXTRXD(i);

	return (rxp);
}

uintptr
dma_getvar(dma_info_t *di, char *name)
{
	if (!strcmp(name, "&txavail"))
		return ((uintptr) &di->txavail);
	else {
		ASSERT(0);
	}
	return (0);
}

void
dma_txblock(dma_info_t *di)
{
	di->txavail = 0;
}

void
dma_txunblock(dma_info_t *di)
{
	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}

uint
dma_txactive(dma_info_t *di)
{
	return (NTXDACTIVE(di->txin, di->txout));
}

/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
void
dma_txrotate(di_t *di)
{
	uint ad;
	uint nactive;
	uint rot;
	uint old, new;
	uint32 w;
	uint first, last;

	ASSERT(dma_txsuspended(di));

	nactive = dma_txactive(di);
	ad = B2I((R_REG(&di->regs->xmtstatus) & XS_AD_MASK) >> XS_AD_SHIFT);
	rot = TXD(ad - di->txin);

	ASSERT(rot < di->ntxd);
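
	/*
	 * Example (illustrative): with ntxd == 8, txin == 2 and hardware
	 * active descriptor ad == 5, rot == 3; each posted entry is copied
	 * from old to TXD(old + 3) and txin/txout advance by 3 (mod 8),
	 * realigning the software state with where the chip actually is.
	 */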

	/* full-ring case is a lot harder - don't worry about this */
	if (rot >= (di->ntxd - nactive)) {
		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
		return;
	}

	first = di->txin;
	last = PREVTXD(di->txout);

	/* move entries starting at last and moving backwards to first */
	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
		new = TXD(old + rot);

		/*
		 * Move the tx dma descriptor.
		 * EOT is set only in the last entry in the ring.
		 */
		w = R_SM(&di->txd[old].ctrl) & ~CTRL_EOT;
		if (new == (di->ntxd - 1))
			w |= CTRL_EOT;
		W_SM(&di->txd[new].ctrl, w);
		W_SM(&di->txd[new].addr, R_SM(&di->txd[old].addr));

		/* zap the old tx dma descriptor address field */
		W_SM(&di->txd[old].addr, 0xdeadbeef);

		/* move the corresponding txp[] entry */
		ASSERT(di->txp[new] == NULL);
		di->txp[new] = di->txp[old];
		di->txp[old] = NULL;
	}

	/* update txin and txout */
	di->txin = ad;
	di->txout = TXD(di->txout + rot);
	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	/* kick the chip */
	W_REG(&di->regs->xmtptr, I2B(di->txout));
}