/*
 * Generic Broadcom Home Networking Division (HND) DMA module.
 * This supports the following chips: BCM42xx, 44xx, 47xx.
 *
 * Copyright 2005, Broadcom Corporation
 *
 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
 *
 * $Id: hnddma.c,v 1.7 2005/03/07 08:35:32 kanki Exp $
 */
#include <bcmendian.h>
struct dma_info;	/* forward declaration */
#define di_t struct dma_info
#define DMA_ERROR(args)
#define DMA_TRACE(args)
/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level = 0;
#define MAXDD	(DMAMAXRINGSZ / sizeof (dmadd_t))
/* dma engine software state */
typedef struct dma_info {
	hnddma_t hnddma;	/* exported structure */
	uint *msg_level;	/* message level pointer */

	char name[MAXNAMEL];	/* callers name for diag msgs */
	void *drv;		/* driver handle */
	void *osh;		/* os handle */
	dmaregs_t *regs;	/* dma engine registers */

	dmadd_t *txd;		/* pointer to chip-specific tx descriptor ring */
	uint txin;		/* index of next descriptor to reclaim */
	uint txout;		/* index of next descriptor to post */
	uint txavail;		/* # free tx descriptors */
	void **txp;		/* pointer to parallel array of pointers to packets */
	ulong txdpa;		/* physical address of descriptor ring */
	uint txdalign;		/* # bytes added to alloc'd mem to align txd */
	uint txdalloc;		/* # bytes allocated for the ring */

	dmadd_t *rxd;		/* pointer to chip-specific rx descriptor ring */
	uint rxin;		/* index of next descriptor to reclaim */
	uint rxout;		/* index of next descriptor to post */
	void **rxp;		/* pointer to parallel array of pointers to packets */
	ulong rxdpa;		/* physical address of descriptor ring */
	uint rxdalign;		/* # bytes added to alloc'd mem to align rxd */
	uint rxdalloc;		/* # bytes allocated for the ring */

	uint ntxd;		/* # tx descriptors */
	uint nrxd;		/* # rx descriptors */
	uint rxbufsize;		/* rx buffer size in bytes */
	uint nrxpost;		/* # rx buffers to keep posted */
	uint rxoffset;		/* rxcontrol offset */
	uint ddoffset;		/* add to get dma address of descriptor ring */
	uint dataoffset;	/* add to get dma address of data buffer */
} dma_info_t;
/* descriptor bumping macros */
#define XXD(x, n)	((x) & ((n) - 1))
#define TXD(x)		XXD((x), di->ntxd)
#define RXD(x)		XXD((x), di->nrxd)
#define NEXTTXD(i)	TXD(i + 1)
#define PREVTXD(i)	TXD(i - 1)
#define NEXTRXD(i)	RXD(i + 1)
#define NTXDACTIVE(h, t)	TXD(t - h)
#define NRXDACTIVE(h, t)	RXD(t - h)
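
/*
 * Worked example (illustrative, not from the original source): with
 * di->ntxd == 64, NEXTTXD(63) == TXD(64) == 0, so the index wraps, and
 * NTXDACTIVE(60, 2) == TXD(2 - 60) == 6 because the unsigned difference
 * is masked by (ntxd - 1). This masking only works for power-of-2 ring
 * sizes, which is what the ISPOWEROF2() ASSERTs in dma_attach() enforce.
 */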
/* macros to convert between byte offsets and indexes */
#define B2I(bytes)	((bytes) / sizeof (dmadd_t))
#define I2B(index)	((index) * sizeof (dmadd_t))
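
/*
 * Example (illustrative; assumes the two-word ctrl/addr descriptor layout
 * used here, i.e. sizeof (dmadd_t) == 8): I2B(5) == 40 and B2I(40) == 5,
 * which is how the byte-offset "curr descriptor" fields in xmtstatus and
 * rcvstatus are converted to and from ring indexes below.
 */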
/*
 * This assumes the largest i/o address is, in fact, the pci big window
 * and that the pci core sb2pcitranslation2 register has been left with
 * the default 0x0 pci base address.
 */
#define MAXDMAADDR	SB_PCI_DMA_SZ
#define DMA_ADDRESSABLE(x)	!((x) & ~(MAXDMAADDR - 1))
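
/*
 * Example (illustrative; assumes SB_PCI_DMA_SZ is a power of 2 such as
 * 0x40000000): DMA_ADDRESSABLE(0x3fffffff) is nonzero while
 * DMA_ADDRESSABLE(0x40000000) is zero, so the ASSERTs below reject any
 * physical address that falls outside the pci big window.
 */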
void *
dma_attach(void *drv, void *osh, char *name, dmaregs_t *regs, uint ntxd, uint nrxd,
           uint rxbufsize, uint nrxpost, uint rxoffset, uint ddoffset, uint dataoffset,
           uint *msg_level)
{
	dma_info_t *di;
	void *va;
	uint size;

	ASSERT(ntxd <= MAXDD);
	ASSERT(ISPOWEROF2(ntxd));
	ASSERT(nrxd <= MAXDD);
	ASSERT(ISPOWEROF2(nrxd));
	/* allocate private info structure */
	if ((di = MALLOC(osh, sizeof (dma_info_t))) == NULL) {
		return (NULL);
	}
	bzero((char*)di, sizeof (dma_info_t));
	/* allocate tx packet pointer vector */
	size = ntxd * sizeof (void*);
	if ((di->txp = MALLOC(osh, size)) == NULL)
		goto fail;
	bzero((char*)di->txp, size);
	/* allocate rx packet pointer vector */
	size = nrxd * sizeof (void*);
	if ((di->rxp = MALLOC(osh, size)) == NULL)
		goto fail;
	bzero((char*)di->rxp, size);
	/* set message level */
	di->msg_level = msg_level ? msg_level : &dma_msg_level;
	DMA_TRACE(("%s: dma_attach: drv %p osh %p regs %p ntxd %d nrxd %d rxbufsize %d nrxpost %d rxoffset %d ddoffset 0x%x dataoffset 0x%x\n",
	           name, drv, osh, regs, ntxd, nrxd, rxbufsize, nrxpost, rxoffset, ddoffset, dataoffset));
	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL-1] = '\0';

	di->drv = drv;
	di->osh = osh;
	di->regs = regs;
	di->ntxd = ntxd;
	di->nrxd = nrxd;
	/* allocate transmit descriptor ring */
	/* only need ntxd descriptors but it must be DMARINGALIGNed */
	size = ntxd * sizeof (dmadd_t);
	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, DMARINGALIGN))
		size += DMARINGALIGN;
	if ((va = DMA_ALLOC_CONSISTENT(osh, size, &di->txdpa)) == NULL)
		goto fail;
	di->txd = (dmadd_t*) ROUNDUP((uintptr)va, DMARINGALIGN);
	di->txdalign = (uint)((int8*)di->txd - (int8*)va);
	di->txdpa += di->txdalign;
	di->txdalloc = size;

	ASSERT(ISALIGNED((uintptr)di->txd, DMARINGALIGN));
	ASSERT(DMA_ADDRESSABLE(di->txdpa));
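
	/*
	 * Worked example of the alignment fixup above (illustrative values):
	 * if DMA_ALLOC_CONSISTENT() returned va == 0x80001010 and DMARINGALIGN
	 * == 0x1000, then txd == 0x80002000, txdalign == 0xff0, and txdpa is
	 * advanced by the same 0xff0 so the virtual and physical ring bases
	 * stay paired.
	 */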
	/* allocate receive descriptor ring */
	/* only need nrxd descriptors but it must be DMARINGALIGNed */
	size = nrxd * sizeof (dmadd_t);
	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, DMARINGALIGN))
		size += DMARINGALIGN;
	if ((va = DMA_ALLOC_CONSISTENT(osh, size, &di->rxdpa)) == NULL)
		goto fail;
	di->rxd = (dmadd_t*) ROUNDUP((uintptr)va, DMARINGALIGN);
	di->rxdalign = (uint)((int8*)di->rxd - (int8*)va);
	di->rxdpa += di->rxdalign;
	di->rxdalloc = size;

	ASSERT(ISALIGNED((uintptr)di->rxd, DMARINGALIGN));
	ASSERT(DMA_ADDRESSABLE(di->rxdpa));
	di->rxbufsize = rxbufsize;
	di->nrxpost = nrxpost;
	di->rxoffset = rxoffset;
	di->ddoffset = ddoffset;
	di->dataoffset = dataoffset;

	return ((void*)di);
fail:
	dma_detach((void*)di);
	return (NULL);
}
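
/*
 * Example attach/detach usage (a sketch; the ring sizes and rx constants
 * shown are hypothetical, not taken from this file):
 *
 *	void *di;
 *
 *	if ((di = dma_attach(drv, osh, "etx", regs, 256, 256, 2048, 256, 30,
 *	                     ddoffset, dataoffset, NULL)) == NULL)
 *		return (NULL);
 *	...
 *	dma_detach((dma_info_t *)di);
 */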
/* may be called with core in reset */
void
dma_detach(dma_info_t *di)
{
	if (di == NULL)
		return;

	DMA_TRACE(("%s: dma_detach\n", di->name));
	/* shouldn't be here if descriptors are unreclaimed */
	ASSERT(di->txin == di->txout);
	ASSERT(di->rxin == di->rxout);
	/* free dma descriptor rings */
	if (di->txd)
		DMA_FREE_CONSISTENT(di->osh, ((int8*)di->txd - di->txdalign),
		                    di->txdalloc, (di->txdpa - di->txdalign));
	if (di->rxd)
		DMA_FREE_CONSISTENT(di->osh, ((int8*)di->rxd - di->rxdalign),
		                    di->rxdalloc, (di->rxdpa - di->rxdalign));
	/* free packet pointer vectors */
	if (di->txp)
		MFREE(di->osh, (void*)di->txp, (di->ntxd * sizeof (void*)));
	if (di->rxp)
		MFREE(di->osh, (void*)di->rxp, (di->nrxd * sizeof (void*)));

	/* free our private info structure */
	MFREE(di->osh, (void*)di, sizeof (dma_info_t));
}
void
dma_txreset(dma_info_t *di)
{
	uint32 status;

	DMA_TRACE(("%s: dma_txreset\n", di->name));

	/* suspend tx DMA first */
	W_REG(&di->regs->xmtcontrol, XC_SE);
	SPINWAIT((status = (R_REG(&di->regs->xmtstatus) & XS_XS_MASK)) != XS_XS_DISABLED &&
	         status != XS_XS_IDLE &&
	         status != XS_XS_STOPPED,
	         10000);

	W_REG(&di->regs->xmtcontrol, 0);
	SPINWAIT((status = (R_REG(&di->regs->xmtstatus) & XS_XS_MASK)) != XS_XS_DISABLED,
	         10000);

	if (status != XS_XS_DISABLED) {
		DMA_ERROR(("%s: dma_txreset: dma cannot be stopped\n", di->name));
	}

	/* wait for the last transaction to complete */
	OSL_DELAY(300);
}
void
dma_rxreset(dma_info_t *di)
{
	uint32 status;

	DMA_TRACE(("%s: dma_rxreset\n", di->name));

	W_REG(&di->regs->rcvcontrol, 0);
	SPINWAIT((status = (R_REG(&di->regs->rcvstatus) & RS_RS_MASK)) != RS_RS_DISABLED,
	         10000);

	if (status != RS_RS_DISABLED) {
		DMA_ERROR(("%s: dma_rxreset: dma cannot be stopped\n", di->name));
	}
}
void
dma_txinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txinit\n", di->name));

	di->txin = di->txout = 0;
	di->txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	BZERO_SM((void*)di->txd, (di->ntxd * sizeof (dmadd_t)));

	W_REG(&di->regs->xmtcontrol, XC_XE);
	W_REG(&di->regs->xmtaddr, (di->txdpa + di->ddoffset));
}
bool
dma_txenabled(dma_info_t *di)
{
	uint32 xc;

	/* If the chip is dead, it is not enabled :-) */
	xc = R_REG(&di->regs->xmtcontrol);
	return ((xc != 0xffffffff) && (xc & XC_XE));
}
void
dma_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));
	OR_REG(&di->regs->xmtcontrol, XC_SE);
}
void
dma_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));
	AND_REG(&di->regs->xmtcontrol, ~XC_SE);
}
bool
dma_txsuspended(dma_info_t *di)
{
	if (!(R_REG(&di->regs->xmtcontrol) & XC_SE))
		return (0);

	if ((R_REG(&di->regs->xmtstatus) & XS_XS_MASK) != XS_XS_IDLE)
		OSL_DELAY(2);

	return ((R_REG(&di->regs->xmtstatus) & XS_XS_MASK) == XS_XS_IDLE);
}
bool
dma_txstopped(dma_info_t *di)
{
	return ((R_REG(&di->regs->xmtstatus) & XS_XS_MASK) == XS_XS_STOPPED);
}
bool
dma_rxstopped(dma_info_t *di)
{
	return ((R_REG(&di->regs->rcvstatus) & RS_RS_MASK) == RS_RS_STOPPED);
}
void
dma_fifoloopbackenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
	OR_REG(&di->regs->xmtcontrol, XC_LE);
}
void
dma_rxinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxinit\n", di->name));

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	BZERO_SM((void*)di->rxd, (di->nrxd * sizeof (dmadd_t)));

	dma_rxenable(di);
	W_REG(&di->regs->rcvaddr, (di->rxdpa + di->ddoffset));
}
void
dma_rxenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxenable\n", di->name));
	W_REG(&di->regs->rcvcontrol, ((di->rxoffset << RC_RO_SHIFT) | RC_RE));
}
bool
dma_rxenabled(dma_info_t *di)
{
	uint32 rc;

	rc = R_REG(&di->regs->rcvcontrol);
	return ((rc != 0xffffffff) && (rc & RC_RE));
}
/*
 * The BCM47XX family supports full 32bit dma engine buffer addressing so
 * dma buffers can cross 4 Kbyte page boundaries.
 */
int
dma_txfast(dma_info_t *di, void *p0, uint32 coreflags)
{
	void *p, *next;
	uchar *data;
	uint len;
	uint txout;
	uint32 ctrl;
	uint32 pa;

	DMA_TRACE(("%s: dma_txfast\n", di->name));

	txout = di->txout;
	ctrl = 0;
	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		data = PKTDATA(di->drv, p);
		len = PKTLEN(di->drv, p);
		next = PKTNEXT(di->drv, p);
		/* return nonzero if out of tx descriptors */
		if (NEXTTXD(txout) == di->txin)
			goto outoftxd;

		if (len == 0)
			continue;
		/* get physical address of buffer start */
		pa = (uint32) DMA_MAP(di->osh, data, len, DMA_TX, p);
		ASSERT(DMA_ADDRESSABLE(pa));
		/* build the descriptor control value */
		ctrl = len & CTRL_BC_MASK;

		ctrl |= coreflags;

		if (p == p0)
			ctrl |= CTRL_SOF;
		if (next == NULL)
			ctrl |= (CTRL_IOC | CTRL_EOF);
		if (txout == (di->ntxd - 1))
			ctrl |= CTRL_EOT;
		/* init the tx descriptor */
		W_SM(&di->txd[txout].ctrl, BUS_SWAP32(ctrl));
		W_SM(&di->txd[txout].addr, BUS_SWAP32(pa + di->dataoffset));

		ASSERT(di->txp[txout] == NULL);

		txout = NEXTTXD(txout);
	}
	/* if last txd eof not set, fix it */
	if (!(ctrl & CTRL_EOF))
		W_SM(&di->txd[PREVTXD(txout)].ctrl, BUS_SWAP32(ctrl | CTRL_IOC | CTRL_EOF));
	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;
	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	W_REG(&di->regs->xmtptr, I2B(txout));
	/* tx flow control */
	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (0);

outoftxd:
	DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
	PKTFREE(di->drv, p0, TRUE);
	di->txavail = 0;
	di->hnddma.txnobuf++;
	return (-1);
}
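
/*
 * Example transmit usage (a sketch; the caller-side queueing shown is
 * hypothetical). A driver typically caches the flow-control word once via
 * dma_getvar(di, "&txavail") and checks it before posting, since
 * dma_txfast() frees the packet and returns -1 when the ring is full:
 *
 *	uint *txavail = (uint *) dma_getvar(di, "&txavail");
 *	...
 *	if (*txavail == 0 || dma_txfast(di, p, 0) != 0) {
 *		// out of descriptors: stall the upper layer until
 *		// dma_getnexttxp()/dma_txreclaim() frees some up
 *	}
 */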
#define PAGESZ		4096
#define PAGEBASE(x)	((uintptr)(x) & ~4095)
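
/*
 * Example (illustrative): PAGEBASE(0x80001f00) == 0x80001000, so a
 * 3000-byte buffer starting at 0x80001f00 spans two pages; dma_tx() below
 * splits it into one descriptor ending at 0x80002000 and a second for the
 * remainder.
 */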
/*
 * Just like above except go through the extra effort of splitting
 * buffers that cross 4Kbyte boundaries into multiple tx descriptors.
 */
int
dma_tx(dma_info_t *di, void *p0, uint32 coreflags)
{
	void *p, *next;
	uchar *data;
	uint plen, len;
	uchar *page, *start, *end;
	uint txout;
	uint32 ctrl;
	uint32 pa;
	DMA_TRACE(("%s: dma_tx\n", di->name));

	txout = di->txout;
	ctrl = 0;
	/*
	 * Walk the chain of packet buffers
	 * splitting those that cross 4 Kbyte boundaries
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		data = PKTDATA(di->drv, p);
		plen = PKTLEN(di->drv, p);
		next = PKTNEXT(di->drv, p);

		if (plen == 0)
			continue;
		for (page = (uchar*)PAGEBASE(data);
		     page <= (uchar*)PAGEBASE(data + plen - 1);
		     page += PAGESZ) {

			/* return nonzero if out of tx descriptors */
			if (NEXTTXD(txout) == di->txin)
				goto outoftxd;
			start = (page == (uchar*)PAGEBASE(data))? data: page;
			end = (page == (uchar*)PAGEBASE(data + plen))?
			      (data + plen): (page + PAGESZ);
			len = (uint)(end - start);
			/* build the descriptor control value */
			ctrl = len & CTRL_BC_MASK;

			ctrl |= coreflags;

			if ((p == p0) && (start == data))
				ctrl |= CTRL_SOF;
			if ((next == NULL) && (end == (data + plen)))
				ctrl |= (CTRL_IOC | CTRL_EOF);
			if (txout == (di->ntxd - 1))
				ctrl |= CTRL_EOT;
			/* get physical address of buffer start */
			pa = (uint32) DMA_MAP(di->osh, start, len, DMA_TX, p);
			ASSERT(DMA_ADDRESSABLE(pa));
			/* init the tx descriptor */
			W_SM(&di->txd[txout].ctrl, BUS_SWAP32(ctrl));
			W_SM(&di->txd[txout].addr, BUS_SWAP32(pa + di->dataoffset));

			ASSERT(di->txp[txout] == NULL);

			txout = NEXTTXD(txout);
		}
	}
	/* if last txd eof not set, fix it */
	if (!(ctrl & CTRL_EOF))
		W_SM(&di->txd[PREVTXD(txout)].ctrl, BUS_SWAP32(ctrl | CTRL_IOC | CTRL_EOF));
	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;
	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	W_REG(&di->regs->xmtptr, I2B(txout));
	/* tx flow control */
	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (0);

outoftxd:
	DMA_ERROR(("%s: dma_tx: out of txds\n", di->name));
	PKTFREE(di->drv, p0, TRUE);
	di->txavail = 0;
	di->hnddma.txnobuf++;
	return (-1);
}
/* returns a pointer to the next frame received, or NULL if there are no more */
void *
dma_rx(dma_info_t *di)
{
	void *p;
	uint len;
	int skiplen = 0;

	while ((p = dma_getnextrxp(di, FALSE))) {
		/* skip giant packets which span multiple rx descriptors */
		if (skiplen > 0) {
			skiplen -= di->rxbufsize;
			if (skiplen < 0)
				skiplen = 0;
			PKTFREE(di->drv, p, FALSE);
			continue;
		}
		len = ltoh16(*(uint16*)(PKTDATA(di->drv, p)));
		DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
		/* bad frame length check */
		if (len > (di->rxbufsize - di->rxoffset)) {
			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di->name, len));
			if (len > 0)
				skiplen = len - (di->rxbufsize - di->rxoffset);
			PKTFREE(di->drv, p, FALSE);
			di->hnddma.rxgiants++;
			continue;
		}
		/* set actual length */
		PKTSETLEN(di->drv, p, (di->rxoffset + len));

		break;
	}

	return (p);
}
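
/*
 * Example rx service loop (a sketch; "mydrv_sendup" is a hypothetical
 * driver hook). The chip writes the frame length into the first uint16 of
 * each buffer, dma_rx() trims the packet to rxoffset + len, and the caller
 * strips the rxoffset header before handing the frame up, then reposts
 * fresh buffers:
 *
 *	while ((p = dma_rx(di)) != NULL) {
 *		PKTPULL(di->drv, p, di->rxoffset);
 *		mydrv_sendup(di->drv, p);
 *	}
 *	dma_rxfill(di);
 */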
/* post receive buffers */
void
dma_rxfill(dma_info_t *di)
{
	void *p;
	uint rxin, rxout;
	uint32 ctrl;
	uint n;
	uint i;
	uint32 pa;
	uint rxbufsize;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */
	rxin = di->rxin;
	rxout = di->rxout;
	rxbufsize = di->rxbufsize;

	n = di->nrxpost - NRXDACTIVE(rxin, rxout);

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
	for (i = 0; i < n; i++) {
		if ((p = PKTGET(di->drv, rxbufsize, FALSE)) == NULL) {
			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", di->name));
			di->hnddma.rxnobuf++;
			break;
		}

		*(uint32*)(OSL_UNCACHED(PKTDATA(di->drv, p))) = 0;
		pa = (uint32) DMA_MAP(di->osh, PKTDATA(di->drv, p), rxbufsize, DMA_RX, p);
		ASSERT(ISALIGNED(pa, 4));
		ASSERT(DMA_ADDRESSABLE(pa));
		/* save the free packet pointer */
		ASSERT(di->rxp[rxout] == NULL);
		di->rxp[rxout] = p;

		/* prep the descriptor control value */
		ctrl = rxbufsize;
		if (rxout == (di->nrxd - 1))
			ctrl |= CTRL_EOT;
		/* init the rx descriptor */
		W_SM(&di->rxd[rxout].ctrl, BUS_SWAP32(ctrl));
		W_SM(&di->rxd[rxout].addr, BUS_SWAP32(pa + di->dataoffset));

		rxout = NEXTRXD(rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	W_REG(&di->regs->rcvptr, I2B(rxout));
}
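
/*
 * Worked example of the posting arithmetic above (illustrative): with
 * nrxpost == 256, rxin == 10, and rxout == 200, NRXDACTIVE(10, 200) == 190
 * descriptors are still posted, so n == 66 fresh buffers are allocated and
 * handed to the chip to restore the full complement.
 */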
void
dma_txreclaim(dma_info_t *di, bool forceall)
{
	void *p;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));

	while ((p = dma_getnexttxp(di, forceall)))
		PKTFREE(di->drv, p, TRUE);
}
/*
 * Reclaim next completed txd (txds if using chained buffers) and
 * return associated packet.
 * If 'force' is true, reclaim txd(s) and return associated packet
 * regardless of the value of the hardware "curr" pointer.
 */
void *
dma_getnexttxp(dma_info_t *di, bool forceall)
{
	uint start, end, i;
	void *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));

	txp = NULL;

	start = di->txin;
	if (forceall)
		end = di->txout;
	else
		end = B2I(R_REG(&di->regs->xmtstatus) & XS_CD_MASK);

	if ((start == 0) && (end > di->txout))
		goto bogus;
	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->txd[i].addr)) - di->dataoffset),
		          (BUS_SWAP32(R_SM(&di->txd[i].ctrl)) & CTRL_BC_MASK), DMA_TX, di->txp[i]);
		W_SM(&di->txd[i].addr, 0xdeadbeef);

		txp = di->txp[i];
		di->txp[i] = NULL;
	}

	di->txin = i;
	/* tx flow control */
	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (txp);

bogus:
	DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
	           start, end, di->txout, forceall));
	return (NULL);
}
/* like getnexttxp but no reclaim */
void *
dma_peeknexttxp(dma_info_t *di)
{
	uint end, i;

	end = B2I(R_REG(&di->regs->xmtstatus) & XS_CD_MASK);

	for (i = di->txin; i != end; i = NEXTTXD(i))
		if (di->txp[i])
			return (di->txp[i]);

	return (NULL);
}
void
dma_rxreclaim(dma_info_t *di)
{
	void *p;

	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

	while ((p = dma_getnextrxp(di, TRUE)))
		PKTFREE(di->drv, p, FALSE);
}
void *
dma_getnextrxp(dma_info_t *di, bool forceall)
{
	uint i;
	void *rxp;

	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma_rxenabled(di));

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return (NULL);

	/* ignore curr if forceall */
	if (!forceall && (i == B2I(R_REG(&di->regs->rcvstatus) & RS_CD_MASK)))
		return (NULL);
	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	ASSERT(rxp);
	di->rxp[i] = NULL;

	/* clear this packet from the descriptor ring */
	DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->rxd[i].addr)) - di->dataoffset),
	          di->rxbufsize, DMA_RX, rxp);
	W_SM(&di->rxd[i].addr, 0xdeadbeef);

	di->rxin = NEXTRXD(i);

	return (rxp);
}
uintptr
dma_getvar(dma_info_t *di, char *name)
{
	if (!strcmp(name, "&txavail"))
		return ((uintptr) &di->txavail);
	else {
		ASSERT(0);
	}
	return (0);
}
void
dma_txblock(dma_info_t *di)
{
	di->txavail = 0;
}
void
dma_txunblock(dma_info_t *di)
{
	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}
uint
dma_txactive(dma_info_t *di)
{
	return (NTXDACTIVE(di->txin, di->txout));
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
void
dma_txrotate(di_t *di)
{
	uint ad;
	uint nactive;
	uint rot;
	uint old, new;
	uint32 w;
	uint first, last;

	ASSERT(dma_txsuspended(di));

	nactive = dma_txactive(di);
	ad = B2I((R_REG(&di->regs->xmtstatus) & XS_AD_MASK) >> XS_AD_SHIFT);
	rot = TXD(ad - di->txin);

	ASSERT(rot < di->ntxd);
	/* full-ring case is a lot harder - don't worry about this */
	if (rot >= (di->ntxd - nactive)) {
		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
		return;
	}

	first = di->txin;
	last = PREVTXD(di->txout);
	/* move entries starting at last and moving backwards to first */
	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
		new = TXD(old + rot);

		/*
		 * Move the tx dma descriptor.
		 * EOT is set only in the last entry in the ring.
		 */
		w = R_SM(&di->txd[old].ctrl) & ~CTRL_EOT;
		if (new == (di->ntxd - 1))
			w |= CTRL_EOT;
		W_SM(&di->txd[new].ctrl, w);
		W_SM(&di->txd[new].addr, R_SM(&di->txd[old].addr));
		/* zap the old tx dma descriptor address field */
		W_SM(&di->txd[old].addr, 0xdeadbeef);

		/* move the corresponding txp[] entry */
		ASSERT(di->txp[new] == NULL);
		di->txp[new] = di->txp[old];
		di->txp[old] = NULL;
	}
	/* update txin and txout */
	di->txin = ad;
	di->txout = TXD(di->txout + rot);
	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	/* kick the chip */
	W_REG(&di->regs->xmtptr, I2B(di->txout));
}
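
/*
 * Worked example of the rotation above (illustrative): with ntxd == 64,
 * txin == 60, txout == 2 (six active entries, 60..1) and a hardware active
 * descriptor ad == 62, rot == TXD(62 - 60) == 2. Entries 1, 0, 63, ..., 60
 * are copied (last to first) into 3, 2, 1, ..., 62, txin becomes 62 and
 * txout becomes 4, re-synchronizing the software ring with the descriptor
 * the hardware is actually parked on.
 */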