2 * Generic Broadcom Home Networking Division (HND) DMA module.
3 * This supports the following chips: BCM42xx, 44xx, 47xx .
5 * Copyright 2004, Broadcom Corporation
8 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
9 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
10 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
11 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
19 #include <bcmendian.h>
29 #define DMA_ERROR(args)
30 #define DMA_TRACE(args)
32 /* default dma message level (if input msg_level pointer is null in dma_attach()) */
33 static uint dma_msg_level
=
36 #define MAXNAMEL 8 /* 8 char names */
38 #define DI_INFO(dmah) (dma_info_t *)dmah
40 /* dma engine software state */
41 typedef struct dma_info
{
42 struct hnddma_pub hnddma
; /* exported structure, don't use hnddma_t,
43 * which could be const
45 uint
*msg_level
; /* message level pointer */
46 char name
[MAXNAMEL
]; /* callers name for diag msgs */
48 void *osh
; /* os handle */
49 sb_t
*sbh
; /* sb handle */
51 bool dma64
; /* dma64 enabled */
52 bool addrext
; /* this dma engine supports DmaExtendedAddrChanges */
54 dma32regs_t
*d32txregs
; /* 32 bits dma tx engine registers */
55 dma32regs_t
*d32rxregs
; /* 32 bits dma rx engine registers */
56 dma64regs_t
*d64txregs
; /* 64 bits dma tx engine registers */
57 dma64regs_t
*d64rxregs
; /* 64 bits dma rx engine registers */
59 uint32 dma64align
; /* either 8k or 4k depends on number of dd */
60 dma32dd_t
*txd32
; /* pointer to dma32 tx descriptor ring */
61 dma64dd_t
*txd64
; /* pointer to dma64 tx descriptor ring */
62 uint ntxd
; /* # tx descriptors tunable */
63 uint txin
; /* index of next descriptor to reclaim */
64 uint txout
; /* index of next descriptor to post */
65 void **txp
; /* pointer to parallel array of pointers to packets */
66 osldma_t
*tx_dmah
; /* DMA TX descriptor ring handle */
67 osldma_t
**txp_dmah
; /* DMA TX packet data handle */
68 ulong txdpa
; /* physical address of descriptor ring */
69 uint txdalign
; /* #bytes added to alloc'd mem to align txd */
70 uint txdalloc
; /* #bytes allocated for the ring */
72 dma32dd_t
*rxd32
; /* pointer to dma32 rx descriptor ring */
73 dma64dd_t
*rxd64
; /* pointer to dma64 rx descriptor ring */
74 uint nrxd
; /* # rx descriptors tunable */
75 uint rxin
; /* index of next descriptor to reclaim */
76 uint rxout
; /* index of next descriptor to post */
77 void **rxp
; /* pointer to parallel array of pointers to packets */
78 osldma_t
*rx_dmah
; /* DMA RX descriptor ring handle */
79 osldma_t
**rxp_dmah
; /* DMA RX packet data handle */
80 ulong rxdpa
; /* physical address of descriptor ring */
81 uint rxdalign
; /* #bytes added to alloc'd mem to align rxd */
82 uint rxdalloc
; /* #bytes allocated for the ring */
85 uint rxbufsize
; /* rx buffer size in bytes,
86 not including the extra headroom
88 uint nrxpost
; /* # rx buffers to keep posted */
89 uint rxoffset
; /* rxcontrol offset */
90 uint ddoffsetlow
; /* add to get dma address of descriptor ring, low 32 bits */
91 uint ddoffsethigh
; /* high 32 bits */
92 uint dataoffsetlow
; /* add to get dma address of data buffer, low 32 bits */
93 uint dataoffsethigh
; /* high 32 bits */
/* With BCMDMA64 the runtime flag in the sw state selects 64-bit operation;
 * without it the 64-bit paths are compiled out entirely.
 * (The #ifdef/#else/#endif lines were lost in extraction; the pair of
 * conflicting definitions implies exactly this structure.)
 */
#ifdef BCMDMA64
#define	DMA64_ENAB(di)	((di)->dma64)
#define	DMA64_CAP	TRUE
#else
#define	DMA64_ENAB(di)	(0)
#define	DMA64_CAP	FALSE
#endif
/* descriptor bumping macros */
#define	XXD(x, n)	((x) & ((n) - 1))	/* faster than %, but n must be power of 2 */
#define	TXD(x)		XXD((x), di->ntxd)
#define	RXD(x)		XXD((x), di->nrxd)
#define	NEXTTXD(i)	TXD(i + 1)
#define	PREVTXD(i)	TXD(i - 1)
#define	NEXTRXD(i)	RXD(i + 1)
#define	NTXDACTIVE(h, t)	TXD(t - h)	/* # in-flight tx descriptors between head and tail */
#define	NRXDACTIVE(h, t)	RXD(t - h)	/* # posted rx descriptors between head and tail */

/* macros to convert between byte offsets and indexes */
#define	B2I(bytes, type)	((bytes) / sizeof(type))
#define	I2B(index, type)	((index) * sizeof(type))

/* address bits [31:30] do not fit the PCI window and are carried in the AE field instead */
#define	PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define	PCI32ADDR_HIGH_SHIFT	30		/* address[31:30] */
122 /* common prototypes */
123 static bool _dma_isaddrext(dma_info_t
*di
);
124 static bool _dma_alloc(dma_info_t
*di
, uint direction
);
125 static void _dma_detach(dma_info_t
*di
);
126 static void _dma_ddtable_init(dma_info_t
*di
, uint direction
, ulong pa
);
127 static void _dma_rxinit(dma_info_t
*di
);
128 static void *_dma_rx(dma_info_t
*di
);
129 static void _dma_rxfill(dma_info_t
*di
);
130 static void _dma_rxreclaim(dma_info_t
*di
);
131 static void _dma_rxenable(dma_info_t
*di
);
132 static void * _dma_getnextrxp(dma_info_t
*di
, bool forceall
);
134 static void _dma_txblock(dma_info_t
*di
);
135 static void _dma_txunblock(dma_info_t
*di
);
136 static uint
_dma_txactive(dma_info_t
*di
);
138 static void* _dma_peeknexttxp(dma_info_t
*di
);
139 static uintptr
_dma_getvar(dma_info_t
*di
, const char *name
);
140 static void _dma_counterreset(dma_info_t
*di
);
141 static void _dma_fifoloopbackenable(dma_info_t
*di
);
143 /* ** 32 bit DMA prototypes */
144 static bool dma32_alloc(dma_info_t
*di
, uint direction
);
145 static bool dma32_txreset(dma_info_t
*di
);
146 static bool dma32_rxreset(dma_info_t
*di
);
147 static bool dma32_txsuspendedidle(dma_info_t
*di
);
148 static int dma32_txfast(dma_info_t
*di
, void *p0
, bool commit
);
149 static void *dma32_getnexttxp(dma_info_t
*di
, bool forceall
);
150 static void *dma32_getnextrxp(dma_info_t
*di
, bool forceall
);
151 static void dma32_txrotate(dma_info_t
*di
);
152 static bool dma32_rxidle(dma_info_t
*di
);
153 static void dma32_txinit(dma_info_t
*di
);
154 static bool dma32_txenabled(dma_info_t
*di
);
155 static void dma32_txsuspend(dma_info_t
*di
);
156 static void dma32_txresume(dma_info_t
*di
);
157 static bool dma32_txsuspended(dma_info_t
*di
);
158 static void dma32_txreclaim(dma_info_t
*di
, bool forceall
);
159 static bool dma32_txstopped(dma_info_t
*di
);
160 static bool dma32_rxstopped(dma_info_t
*di
);
161 static bool dma32_rxenabled(dma_info_t
*di
);
162 static bool _dma32_addrext(osl_t
*osh
, dma32regs_t
*dma32regs
);
164 /* ** 64 bit DMA prototypes and stubs */
166 static bool dma64_alloc(dma_info_t
*di
, uint direction
);
167 static bool dma64_txreset(dma_info_t
*di
);
168 static bool dma64_rxreset(dma_info_t
*di
);
169 static bool dma64_txsuspendedidle(dma_info_t
*di
);
170 static int dma64_txfast(dma_info_t
*di
, void *p0
, bool commit
);
171 static void *dma64_getnexttxp(dma_info_t
*di
, bool forceall
);
172 static void *dma64_getnextrxp(dma_info_t
*di
, bool forceall
);
173 static void dma64_txrotate(dma_info_t
*di
);
175 static bool dma64_rxidle(dma_info_t
*di
);
176 static void dma64_txinit(dma_info_t
*di
);
177 static bool dma64_txenabled(dma_info_t
*di
);
178 static void dma64_txsuspend(dma_info_t
*di
);
179 static void dma64_txresume(dma_info_t
*di
);
180 static bool dma64_txsuspended(dma_info_t
*di
);
181 static void dma64_txreclaim(dma_info_t
*di
, bool forceall
);
182 static bool dma64_txstopped(dma_info_t
*di
);
183 static bool dma64_rxstopped(dma_info_t
*di
);
184 static bool dma64_rxenabled(dma_info_t
*di
);
185 static bool _dma64_addrext(osl_t
*osh
, dma64regs_t
*dma64regs
);
188 static bool dma64_alloc(dma_info_t
*di
, uint direction
) { return FALSE
; }
189 static bool dma64_txreset(dma_info_t
*di
) { return FALSE
; }
190 static bool dma64_rxreset(dma_info_t
*di
) { return FALSE
; }
191 static bool dma64_txsuspendedidle(dma_info_t
*di
) { return FALSE
;}
192 static int dma64_txfast(dma_info_t
*di
, void *p0
, bool commit
) { return 0; }
193 static void *dma64_getnexttxp(dma_info_t
*di
, bool forceall
) { return NULL
; }
194 static void *dma64_getnextrxp(dma_info_t
*di
, bool forceall
) { return NULL
; }
195 static void dma64_txrotate(dma_info_t
*di
) { return; }
197 static bool dma64_rxidle(dma_info_t
*di
) { return FALSE
; }
198 static void dma64_txinit(dma_info_t
*di
) { return; }
199 static bool dma64_txenabled(dma_info_t
*di
) { return FALSE
; }
200 static void dma64_txsuspend(dma_info_t
*di
) { return; }
201 static void dma64_txresume(dma_info_t
*di
) { return; }
202 static bool dma64_txsuspended(dma_info_t
*di
) {return FALSE
; }
203 static void dma64_txreclaim(dma_info_t
*di
, bool forceall
) { return; }
204 static bool dma64_txstopped(dma_info_t
*di
) { return FALSE
; }
205 static bool dma64_rxstopped(dma_info_t
*di
) { return FALSE
; }
206 static bool dma64_rxenabled(dma_info_t
*di
) { return FALSE
; }
207 static bool _dma64_addrext(osl_t
*osh
, dma64regs_t
*dma64regs
) { return FALSE
; }
209 #endif /* BCMDMA64 */
213 static di_fcn_t dma64proc
= {
214 (di_detach_t
)_dma_detach
,
215 (di_txinit_t
)dma64_txinit
,
216 (di_txreset_t
)dma64_txreset
,
217 (di_txenabled_t
)dma64_txenabled
,
218 (di_txsuspend_t
)dma64_txsuspend
,
219 (di_txresume_t
)dma64_txresume
,
220 (di_txsuspended_t
)dma64_txsuspended
,
221 (di_txsuspendedidle_t
)dma64_txsuspendedidle
,
222 (di_txfast_t
)dma64_txfast
,
223 (di_txstopped_t
)dma64_txstopped
,
224 (di_txreclaim_t
)dma64_txreclaim
,
225 (di_getnexttxp_t
)dma64_getnexttxp
,
226 (di_peeknexttxp_t
)_dma_peeknexttxp
,
227 (di_txblock_t
)_dma_txblock
,
228 (di_txunblock_t
)_dma_txunblock
,
229 (di_txactive_t
)_dma_txactive
,
230 (di_txrotate_t
)dma64_txrotate
,
232 (di_rxinit_t
)_dma_rxinit
,
233 (di_rxreset_t
)dma64_rxreset
,
234 (di_rxidle_t
)dma64_rxidle
,
235 (di_rxstopped_t
)dma64_rxstopped
,
236 (di_rxenable_t
)_dma_rxenable
,
237 (di_rxenabled_t
)dma64_rxenabled
,
239 (di_rxfill_t
)_dma_rxfill
,
240 (di_rxreclaim_t
)_dma_rxreclaim
,
241 (di_getnextrxp_t
)_dma_getnextrxp
,
243 (di_fifoloopbackenable_t
)_dma_fifoloopbackenable
,
244 (di_getvar_t
)_dma_getvar
,
245 (di_counterreset_t
)_dma_counterreset
,
253 static di_fcn_t dma32proc
= {
254 (di_detach_t
)_dma_detach
,
255 (di_txinit_t
)dma32_txinit
,
256 (di_txreset_t
)dma32_txreset
,
257 (di_txenabled_t
)dma32_txenabled
,
258 (di_txsuspend_t
)dma32_txsuspend
,
259 (di_txresume_t
)dma32_txresume
,
260 (di_txsuspended_t
)dma32_txsuspended
,
261 (di_txsuspendedidle_t
)dma32_txsuspendedidle
,
262 (di_txfast_t
)dma32_txfast
,
263 (di_txstopped_t
)dma32_txstopped
,
264 (di_txreclaim_t
)dma32_txreclaim
,
265 (di_getnexttxp_t
)dma32_getnexttxp
,
266 (di_peeknexttxp_t
)_dma_peeknexttxp
,
267 (di_txblock_t
)_dma_txblock
,
268 (di_txunblock_t
)_dma_txunblock
,
269 (di_txactive_t
)_dma_txactive
,
270 (di_txrotate_t
)dma32_txrotate
,
272 (di_rxinit_t
)_dma_rxinit
,
273 (di_rxreset_t
)dma32_rxreset
,
274 (di_rxidle_t
)dma32_rxidle
,
275 (di_rxstopped_t
)dma32_rxstopped
,
276 (di_rxenable_t
)_dma_rxenable
,
277 (di_rxenabled_t
)dma32_rxenabled
,
279 (di_rxfill_t
)_dma_rxfill
,
280 (di_rxreclaim_t
)_dma_rxreclaim
,
281 (di_getnextrxp_t
)_dma_getnextrxp
,
283 (di_fifoloopbackenable_t
)_dma_fifoloopbackenable
,
284 (di_getvar_t
)_dma_getvar
,
285 (di_counterreset_t
)_dma_counterreset
,
294 dma_attach(osl_t
*osh
, char *name
, sb_t
*sbh
, void *dmaregstx
, void *dmaregsrx
,
295 uint ntxd
, uint nrxd
, uint rxbufsize
, uint nrxpost
, uint rxoffset
, uint
*msg_level
)
300 /* allocate private info structure */
301 if ((di
= MALLOC(osh
, sizeof (dma_info_t
))) == NULL
) {
304 bzero((char *)di
, sizeof(dma_info_t
));
306 di
->msg_level
= msg_level
? msg_level
: &dma_msg_level
;
308 /* old chips w/o sb is no longer supported */
311 di
->dma64
= ((sb_coreflagshi(sbh
, 0, 0) & SBTMH_DMA64
) == SBTMH_DMA64
);
315 DMA_ERROR(("dma_attach: driver doesn't have the capability to support "
321 /* check arguments */
322 ASSERT(ISPOWEROF2(ntxd
));
323 ASSERT(ISPOWEROF2(nrxd
));
325 ASSERT(dmaregsrx
== NULL
);
327 ASSERT(dmaregstx
== NULL
);
330 /* init dma reg pointer */
332 ASSERT(ntxd
<= D64MAXDD
);
333 ASSERT(nrxd
<= D64MAXDD
);
334 di
->d64txregs
= (dma64regs_t
*)dmaregstx
;
335 di
->d64rxregs
= (dma64regs_t
*)dmaregsrx
;
337 di
->dma64align
= D64RINGALIGN
;
338 if ((ntxd
< D64MAXDD
/ 2) && (nrxd
< D64MAXDD
/ 2)) {
339 /* for smaller dd table, HW relax the alignment requirement */
340 di
->dma64align
= D64RINGALIGN
/ 2;
343 ASSERT(ntxd
<= D32MAXDD
);
344 ASSERT(nrxd
<= D32MAXDD
);
345 di
->d32txregs
= (dma32regs_t
*)dmaregstx
;
346 di
->d32rxregs
= (dma32regs_t
*)dmaregsrx
;
349 DMA_TRACE(("%s: dma_attach: %s osh %p ntxd %d nrxd %d rxbufsize %d nrxpost %d "
350 "rxoffset %d dmaregstx %p dmaregsrx %p\n",
351 name
, (di
->dma64
? "DMA64" : "DMA32"), osh
, ntxd
, nrxd
, rxbufsize
,
352 nrxpost
, rxoffset
, dmaregstx
, dmaregsrx
));
354 /* make a private copy of our callers name */
355 strncpy(di
->name
, name
, MAXNAMEL
);
356 di
->name
[MAXNAMEL
-1] = '\0';
365 /* the actual dma size doesn't include the extra headroom */
366 if (rxbufsize
> BCMEXTRAHDROOM
)
367 di
->rxbufsize
= rxbufsize
- BCMEXTRAHDROOM
;
369 di
->rxbufsize
= rxbufsize
;
371 di
->nrxpost
= nrxpost
;
372 di
->rxoffset
= rxoffset
;
375 * figure out the DMA physical address offset for dd and data
376 * for old chips w/o sb, use zero
377 * for new chips w sb,
378 * PCI/PCIE: they map silicon backplace address to zero based memory, need offset
379 * Other bus: use zero
380 * SB_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
383 di
->dataoffsetlow
= 0;
384 /* for pci bus, add offset */
385 if (sbh
->bustype
== PCI_BUS
) {
386 if ((sbh
->buscoretype
== SB_PCIE
) && di
->dma64
) {
387 /* pcie with DMA64 */
389 di
->ddoffsethigh
= SB_PCIE_DMA_H32
;
391 /* pci(DMA32/DMA64) or pcie with DMA32 */
392 di
->ddoffsetlow
= SB_PCI_DMA
;
393 di
->ddoffsethigh
= 0;
395 di
->dataoffsetlow
= di
->ddoffsetlow
;
396 di
->dataoffsethigh
= di
->ddoffsethigh
;
399 #if defined(__mips__) && defined(IL_BIGENDIAN)
400 di
->dataoffsetlow
= di
->dataoffsetlow
+ SB_SDRAM_SWAPPED
;
403 di
->addrext
= _dma_isaddrext(di
);
405 /* allocate tx packet pointer vector */
407 size
= ntxd
* sizeof(void *);
408 if ((di
->txp
= MALLOC(osh
, size
)) == NULL
) {
409 DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n",
410 di
->name
, MALLOCED(osh
)));
413 bzero((char *)di
->txp
, size
);
416 /* allocate rx packet pointer vector */
418 size
= nrxd
* sizeof(void *);
419 if ((di
->rxp
= MALLOC(osh
, size
)) == NULL
) {
420 DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n",
421 di
->name
, MALLOCED(osh
)));
424 bzero((char *)di
->rxp
, size
);
427 /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
429 if (!_dma_alloc(di
, DMA_TX
))
433 /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
435 if (!_dma_alloc(di
, DMA_RX
))
439 if ((di
->ddoffsetlow
== SB_PCI_DMA
) && (di
->txdpa
> SB_PCI_DMA_SZ
) && !di
->addrext
) {
440 DMA_ERROR(("%s: dma_attach: txdpa 0x%lx: addrext not supported\n",
441 di
->name
, di
->txdpa
));
444 if ((di
->ddoffsetlow
== SB_PCI_DMA
) && (di
->rxdpa
> SB_PCI_DMA_SZ
) && !di
->addrext
) {
445 DMA_ERROR(("%s: dma_attach: rxdpa 0x%lx: addrext not supported\n",
446 di
->name
, di
->rxdpa
));
450 DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh "
451 "0x%x addrext %d\n", di
->ddoffsetlow
, di
->ddoffsethigh
, di
->dataoffsetlow
,
452 di
->dataoffsethigh
, di
->addrext
));
454 /* allocate tx packet pointer vector and DMA mapping vectors */
457 size
= ntxd
* sizeof(osldma_t
**);
458 if ((di
->txp_dmah
= (osldma_t
**)MALLOC(osh
, size
)) == NULL
)
460 bzero((char*)di
->txp_dmah
, size
);
464 /* allocate rx packet pointer vector and DMA mapping vectors */
467 size
= nrxd
* sizeof(osldma_t
**);
468 if ((di
->rxp_dmah
= (osldma_t
**)MALLOC(osh
, size
)) == NULL
)
470 bzero((char*)di
->rxp_dmah
, size
);
475 /* initialize opsvec of function pointers */
476 di
->hnddma
.di_fn
= DMA64_ENAB(di
) ? dma64proc
: dma32proc
;
478 return ((hnddma_t
*)di
);
485 /* init the tx or rx descriptor */
487 dma32_dd_upd(dma_info_t
*di
, dma32dd_t
*ddring
, ulong pa
, uint outidx
, uint32
*flags
,
490 /* dma32 uses 32 bits control to fit both flags and bufcounter */
491 *flags
= *flags
| (bufcount
& CTRL_BC_MASK
);
493 if ((di
->dataoffsetlow
!= SB_PCI_DMA
) || !(pa
& PCI32ADDR_HIGH
)) {
494 W_SM(&ddring
[outidx
].addr
, BUS_SWAP32(pa
+ di
->dataoffsetlow
));
495 W_SM(&ddring
[outidx
].ctrl
, BUS_SWAP32(*flags
));
497 /* address extension */
500 ae
= (pa
& PCI32ADDR_HIGH
) >> PCI32ADDR_HIGH_SHIFT
;
501 pa
&= ~PCI32ADDR_HIGH
;
503 *flags
|= (ae
<< CTRL_AE_SHIFT
);
504 W_SM(&ddring
[outidx
].addr
, BUS_SWAP32(pa
+ di
->dataoffsetlow
));
505 W_SM(&ddring
[outidx
].ctrl
, BUS_SWAP32(*flags
));
510 dma64_dd_upd(dma_info_t
*di
, dma64dd_t
*ddring
, ulong pa
, uint outidx
, uint32
*flags
,
513 uint32 ctrl2
= bufcount
& D64_CTRL2_BC_MASK
;
515 /* PCI bus with big(>1G) physical address, use address extension */
516 if ((di
->dataoffsetlow
!= SB_PCI_DMA
) || !(pa
& PCI32ADDR_HIGH
)) {
517 W_SM(&ddring
[outidx
].addrlow
, BUS_SWAP32(pa
+ di
->dataoffsetlow
));
518 W_SM(&ddring
[outidx
].addrhigh
, BUS_SWAP32(0 + di
->dataoffsethigh
));
519 W_SM(&ddring
[outidx
].ctrl1
, BUS_SWAP32(*flags
));
520 W_SM(&ddring
[outidx
].ctrl2
, BUS_SWAP32(ctrl2
));
522 /* address extension */
526 ae
= (pa
& PCI32ADDR_HIGH
) >> PCI32ADDR_HIGH_SHIFT
;
527 pa
&= ~PCI32ADDR_HIGH
;
529 ctrl2
|= (ae
<< D64_CTRL2_AE_SHIFT
) & D64_CTRL2_AE
;
530 W_SM(&ddring
[outidx
].addrlow
, BUS_SWAP32(pa
+ di
->dataoffsetlow
));
531 W_SM(&ddring
[outidx
].addrhigh
, BUS_SWAP32(0 + di
->dataoffsethigh
));
532 W_SM(&ddring
[outidx
].ctrl1
, BUS_SWAP32(*flags
));
533 W_SM(&ddring
[outidx
].ctrl2
, BUS_SWAP32(ctrl2
));
538 _dma32_addrext(osl_t
*osh
, dma32regs_t
*dma32regs
)
542 OR_REG(osh
, &dma32regs
->control
, XC_AE
);
543 w
= R_REG(osh
, &dma32regs
->control
);
544 AND_REG(osh
, &dma32regs
->control
, ~XC_AE
);
545 return ((w
& XC_AE
) == XC_AE
);
549 _dma_alloc(dma_info_t
*di
, uint direction
)
551 if (DMA64_ENAB(di
)) {
552 return dma64_alloc(di
, direction
);
554 return dma32_alloc(di
, direction
);
558 /* !! may be called with core in reset */
560 _dma_detach(dma_info_t
*di
)
565 DMA_TRACE(("%s: dma_detach\n", di
->name
));
567 /* shouldn't be here if descriptors are unreclaimed */
568 ASSERT(di
->txin
== di
->txout
);
569 ASSERT(di
->rxin
== di
->rxout
);
571 /* free dma descriptor rings */
572 if (DMA64_ENAB(di
)) {
574 DMA_FREE_CONSISTENT(di
->osh
, ((int8
*)(uintptr
)di
->txd64
- di
->txdalign
),
575 di
->txdalloc
, (di
->txdpa
- di
->txdalign
), &di
->tx_dmah
);
577 DMA_FREE_CONSISTENT(di
->osh
, ((int8
*)(uintptr
)di
->rxd64
- di
->rxdalign
),
578 di
->rxdalloc
, (di
->rxdpa
- di
->rxdalign
), &di
->rx_dmah
);
581 DMA_FREE_CONSISTENT(di
->osh
, ((int8
*)(uintptr
)di
->txd32
- di
->txdalign
),
582 di
->txdalloc
, (di
->txdpa
- di
->txdalign
), &di
->tx_dmah
);
584 DMA_FREE_CONSISTENT(di
->osh
, ((int8
*)(uintptr
)di
->rxd32
- di
->rxdalign
),
585 di
->rxdalloc
, (di
->rxdpa
- di
->rxdalign
), &di
->rx_dmah
);
588 /* free packet pointer vectors */
590 MFREE(di
->osh
, (void *)di
->txp
, (di
->ntxd
* sizeof(void *)));
592 MFREE(di
->osh
, (void *)di
->rxp
, (di
->nrxd
* sizeof(void *)));
594 /* free tx packet DMA handles */
596 MFREE(di
->osh
, (void *)di
->txp_dmah
, di
->ntxd
* sizeof(osldma_t
**));
598 /* free rx packet DMA handles */
600 MFREE(di
->osh
, (void *)di
->rxp_dmah
, di
->nrxd
* sizeof(osldma_t
**));
602 /* free our private info structure */
603 MFREE(di
->osh
, (void *)di
, sizeof(dma_info_t
));
607 /* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
609 _dma_isaddrext(dma_info_t
*di
)
611 if (DMA64_ENAB(di
)) {
612 /* DMA64 supports full 32 bits or 64 bits. AE is always valid */
614 /* not all tx or rx channel are available */
615 if (di
->d64txregs
!= NULL
) {
616 if (!_dma64_addrext(di
->osh
, di
->d64txregs
)) {
617 DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n",
622 } else if (di
->d64rxregs
!= NULL
) {
623 if (!_dma64_addrext(di
->osh
, di
->d64rxregs
)) {
624 DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n",
631 } else if (di
->d32txregs
)
632 return (_dma32_addrext(di
->osh
, di
->d32txregs
));
633 else if (di
->d32rxregs
)
634 return (_dma32_addrext(di
->osh
, di
->d32rxregs
));
638 /* initialize descriptor table base address */
640 _dma_ddtable_init(dma_info_t
*di
, uint direction
, ulong pa
)
642 if (DMA64_ENAB(di
)) {
644 if ((di
->ddoffsetlow
!= SB_PCI_DMA
) || !(pa
& PCI32ADDR_HIGH
)) {
645 if (direction
== DMA_TX
) {
646 W_REG(di
->osh
, &di
->d64txregs
->addrlow
, (pa
+ di
->ddoffsetlow
));
647 W_REG(di
->osh
, &di
->d64txregs
->addrhigh
, di
->ddoffsethigh
);
649 W_REG(di
->osh
, &di
->d64rxregs
->addrlow
, (pa
+ di
->ddoffsetlow
));
650 W_REG(di
->osh
, &di
->d64rxregs
->addrhigh
, di
->ddoffsethigh
);
653 /* DMA64 32bits address extension */
657 /* shift the high bit(s) from pa to ae */
658 ae
= (pa
& PCI32ADDR_HIGH
) >> PCI32ADDR_HIGH_SHIFT
;
659 pa
&= ~PCI32ADDR_HIGH
;
661 if (direction
== DMA_TX
) {
662 W_REG(di
->osh
, &di
->d64txregs
->addrlow
, (pa
+ di
->ddoffsetlow
));
663 W_REG(di
->osh
, &di
->d64txregs
->addrhigh
, di
->ddoffsethigh
);
664 SET_REG(di
->osh
, &di
->d64txregs
->control
, D64_XC_AE
,
665 (ae
<< D64_XC_AE_SHIFT
));
667 W_REG(di
->osh
, &di
->d64rxregs
->addrlow
, (pa
+ di
->ddoffsetlow
));
668 W_REG(di
->osh
, &di
->d64rxregs
->addrhigh
, di
->ddoffsethigh
);
669 SET_REG(di
->osh
, &di
->d64rxregs
->control
, D64_RC_AE
,
670 (ae
<< D64_RC_AE_SHIFT
));
675 if ((di
->ddoffsetlow
!= SB_PCI_DMA
) || !(pa
& PCI32ADDR_HIGH
)) {
676 if (direction
== DMA_TX
)
677 W_REG(di
->osh
, &di
->d32txregs
->addr
, (pa
+ di
->ddoffsetlow
));
679 W_REG(di
->osh
, &di
->d32rxregs
->addr
, (pa
+ di
->ddoffsetlow
));
681 /* dma32 address extension */
685 /* shift the high bit(s) from pa to ae */
686 ae
= (pa
& PCI32ADDR_HIGH
) >> PCI32ADDR_HIGH_SHIFT
;
687 pa
&= ~PCI32ADDR_HIGH
;
689 if (direction
== DMA_TX
) {
690 W_REG(di
->osh
, &di
->d32txregs
->addr
, (pa
+ di
->ddoffsetlow
));
691 SET_REG(di
->osh
, &di
->d32txregs
->control
, XC_AE
, ae
<<XC_AE_SHIFT
);
693 W_REG(di
->osh
, &di
->d32rxregs
->addr
, (pa
+ di
->ddoffsetlow
));
694 SET_REG(di
->osh
, &di
->d32rxregs
->control
, RC_AE
, ae
<<RC_AE_SHIFT
);
701 _dma_fifoloopbackenable(dma_info_t
*di
)
703 DMA_TRACE(("%s: dma_fifoloopbackenable\n", di
->name
));
705 OR_REG(di
->osh
, &di
->d64txregs
->control
, D64_XC_LE
);
707 OR_REG(di
->osh
, &di
->d32txregs
->control
, XC_LE
);
711 _dma_rxinit(dma_info_t
*di
)
713 DMA_TRACE(("%s: dma_rxinit\n", di
->name
));
718 di
->rxin
= di
->rxout
= 0;
720 /* clear rx descriptor ring */
722 BZERO_SM((void *)(uintptr
)di
->rxd64
, (di
->nrxd
* sizeof(dma64dd_t
)));
724 BZERO_SM((void *)(uintptr
)di
->rxd32
, (di
->nrxd
* sizeof(dma32dd_t
)));
727 _dma_ddtable_init(di
, DMA_RX
, di
->rxdpa
);
731 _dma_rxenable(dma_info_t
*di
)
733 DMA_TRACE(("%s: dma_rxenable\n", di
->name
));
736 W_REG(di
->osh
, &di
->d64rxregs
->control
,
737 ((di
->rxoffset
<< D64_RC_RO_SHIFT
) | D64_RC_RE
));
739 W_REG(di
->osh
, &di
->d32rxregs
->control
, ((di
->rxoffset
<< RC_RO_SHIFT
) | RC_RE
));
742 /* !! rx entry routine, returns a pointer to the next frame received,
743 * or NULL if there are no more
746 _dma_rx(dma_info_t
*di
)
752 while ((p
= _dma_getnextrxp(di
, FALSE
))) {
753 /* skip giant packets which span multiple rx descriptors */
755 skiplen
-= di
->rxbufsize
;
758 PKTFREE(di
->osh
, p
, FALSE
);
762 len
= ltoh16(*(uint16
*)(PKTDATA(di
->osh
, p
)));
763 DMA_TRACE(("%s: dma_rx len %d\n", di
->name
, len
));
765 /* bad frame length check */
766 if (len
> (di
->rxbufsize
- di
->rxoffset
)) {
767 DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di
->name
, len
));
769 skiplen
= len
- (di
->rxbufsize
- di
->rxoffset
);
770 PKTFREE(di
->osh
, p
, FALSE
);
771 di
->hnddma
.rxgiants
++;
775 /* set actual length */
776 PKTSETLEN(di
->osh
, p
, (di
->rxoffset
+ len
));
784 /* post receive buffers */
786 _dma_rxfill(dma_info_t
*di
)
794 uint extra_offset
= 0;
797 * Determine how many receive buffers we're lacking
798 * from the full complement, allocate, initialize,
799 * and post them, then update the chip rx lastdscr.
805 n
= di
->nrxpost
- NRXDACTIVE(rxin
, rxout
);
807 DMA_TRACE(("%s: dma_rxfill: post %d\n", di
->name
, n
));
809 if (di
->rxbufsize
> BCMEXTRAHDROOM
)
810 extra_offset
= BCMEXTRAHDROOM
;
812 for (i
= 0; i
< n
; i
++) {
813 /* the di->rxbufsize doesn't include the extra headroom, we need to add it to the
816 if ((p
= PKTGET(di
->osh
, di
->rxbufsize
+ extra_offset
,
818 DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", di
->name
));
819 di
->hnddma
.rxnobuf
++;
822 /* reserve an extra headroom, if applicable */
824 PKTPULL(di
->osh
, p
, extra_offset
);
826 /* Do a cached write instead of uncached write since DMA_MAP
827 * will flush the cache.
829 *(uint32
*)(PKTDATA(di
->osh
, p
)) = 0;
831 pa
= (uint32
) DMA_MAP(di
->osh
, PKTDATA(di
->osh
, p
),
832 di
->rxbufsize
, DMA_RX
, p
,
833 &di
->rxp_dmah
[rxout
]);
835 ASSERT(ISALIGNED(pa
, 4));
837 /* save the free packet pointer */
838 ASSERT(di
->rxp
[rxout
] == NULL
);
841 /* reset flags for each descriptor */
843 if (DMA64_ENAB(di
)) {
844 if (rxout
== (di
->nrxd
- 1))
845 flags
= D64_CTRL1_EOT
;
847 dma64_dd_upd(di
, di
->rxd64
, pa
, rxout
, &flags
, di
->rxbufsize
);
849 if (rxout
== (di
->nrxd
- 1))
852 dma32_dd_upd(di
, di
->rxd32
, pa
, rxout
, &flags
, di
->rxbufsize
);
854 rxout
= NEXTRXD(rxout
);
859 /* update the chip lastdscr pointer */
860 if (DMA64_ENAB(di
)) {
861 W_REG(di
->osh
, &di
->d64rxregs
->ptr
, I2B(rxout
, dma64dd_t
));
863 W_REG(di
->osh
, &di
->d32rxregs
->ptr
, I2B(rxout
, dma32dd_t
));
867 /* like getnexttxp but no reclaim */
869 _dma_peeknexttxp(dma_info_t
*di
)
876 if (DMA64_ENAB(di
)) {
877 end
= B2I(R_REG(di
->osh
, &di
->d64txregs
->status0
) & D64_XS0_CD_MASK
, dma64dd_t
);
879 end
= B2I(R_REG(di
->osh
, &di
->d32txregs
->status
) & XS_CD_MASK
, dma32dd_t
);
882 for (i
= di
->txin
; i
!= end
; i
= NEXTTXD(i
))
890 _dma_rxreclaim(dma_info_t
*di
)
894 /* "unused local" warning suppression for OSLs that
895 * define PKTFREE() without using the di->osh arg
899 DMA_TRACE(("%s: dma_rxreclaim\n", di
->name
));
901 while ((p
= _dma_getnextrxp(di
, TRUE
)))
902 PKTFREE(di
->osh
, p
, FALSE
);
906 _dma_getnextrxp(dma_info_t
*di
, bool forceall
)
911 if (DMA64_ENAB(di
)) {
912 return dma64_getnextrxp(di
, forceall
);
914 return dma32_getnextrxp(di
, forceall
);
919 _dma_txblock(dma_info_t
*di
)
921 di
->hnddma
.txavail
= 0;
925 _dma_txunblock(dma_info_t
*di
)
927 di
->hnddma
.txavail
= di
->ntxd
- NTXDACTIVE(di
->txin
, di
->txout
) - 1;
931 _dma_txactive(dma_info_t
*di
)
933 return (NTXDACTIVE(di
->txin
, di
->txout
));
937 _dma_counterreset(dma_info_t
*di
)
939 /* reset all software counter */
940 di
->hnddma
.rxgiants
= 0;
941 di
->hnddma
.rxnobuf
= 0;
942 di
->hnddma
.txnobuf
= 0;
945 /* get the address of the var in order to change later */
947 _dma_getvar(dma_info_t
*di
, const char *name
)
949 if (!strcmp(name
, "&txavail"))
950 return ((uintptr
) &(di
->hnddma
.txavail
));
958 dma_txpioloopback(osl_t
*osh
, dma32regs_t
*regs
)
960 OR_REG(osh
, ®s
->control
, XC_LE
);
965 /* 32 bits DMA functions */
967 dma32_txinit(dma_info_t
*di
)
969 DMA_TRACE(("%s: dma_txinit\n", di
->name
));
974 di
->txin
= di
->txout
= 0;
975 di
->hnddma
.txavail
= di
->ntxd
- 1;
977 /* clear tx descriptor ring */
978 BZERO_SM((void *)(uintptr
)di
->txd32
, (di
->ntxd
* sizeof(dma32dd_t
)));
979 W_REG(di
->osh
, &di
->d32txregs
->control
, XC_XE
);
980 _dma_ddtable_init(di
, DMA_TX
, di
->txdpa
);
984 dma32_txenabled(dma_info_t
*di
)
988 /* If the chip is dead, it is not enabled :-) */
989 xc
= R_REG(di
->osh
, &di
->d32txregs
->control
);
990 return ((xc
!= 0xffffffff) && (xc
& XC_XE
));
994 dma32_txsuspend(dma_info_t
*di
)
996 DMA_TRACE(("%s: dma_txsuspend\n", di
->name
));
1001 OR_REG(di
->osh
, &di
->d32txregs
->control
, XC_SE
);
1005 dma32_txresume(dma_info_t
*di
)
1007 DMA_TRACE(("%s: dma_txresume\n", di
->name
));
1012 AND_REG(di
->osh
, &di
->d32txregs
->control
, ~XC_SE
);
1016 dma32_txsuspended(dma_info_t
*di
)
1018 return (di
->ntxd
== 0) || ((R_REG(di
->osh
, &di
->d32txregs
->control
) & XC_SE
) == XC_SE
);
1022 dma32_txreclaim(dma_info_t
*di
, bool forceall
)
1026 DMA_TRACE(("%s: dma_txreclaim %s\n", di
->name
, forceall
? "all" : ""));
1028 while ((p
= dma32_getnexttxp(di
, forceall
)))
1029 PKTFREE(di
->osh
, p
, TRUE
);
1033 dma32_txstopped(dma_info_t
*di
)
1035 return ((R_REG(di
->osh
, &di
->d32txregs
->status
) & XS_XS_MASK
) == XS_XS_STOPPED
);
1039 dma32_rxstopped(dma_info_t
*di
)
1041 return ((R_REG(di
->osh
, &di
->d32rxregs
->status
) & RS_RS_MASK
) == RS_RS_STOPPED
);
1045 dma32_alloc(dma_info_t
*di
, uint direction
)
1051 ddlen
= sizeof(dma32dd_t
);
1053 size
= (direction
== DMA_TX
) ? (di
->ntxd
* ddlen
) : (di
->nrxd
* ddlen
);
1055 if (!ISALIGNED(DMA_CONSISTENT_ALIGN
, D32RINGALIGN
))
1056 size
+= D32RINGALIGN
;
1059 if (direction
== DMA_TX
) {
1060 if ((va
= DMA_ALLOC_CONSISTENT(di
->osh
, size
, &di
->txdpa
, &di
->tx_dmah
)) == NULL
) {
1061 DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
1066 di
->txd32
= (dma32dd_t
*) ROUNDUP((uintptr
)va
, D32RINGALIGN
);
1067 di
->txdalign
= (uint
)((int8
*)(uintptr
)di
->txd32
- (int8
*)va
);
1068 di
->txdpa
+= di
->txdalign
;
1069 di
->txdalloc
= size
;
1070 ASSERT(ISALIGNED((uintptr
)di
->txd32
, D32RINGALIGN
));
1072 if ((va
= DMA_ALLOC_CONSISTENT(di
->osh
, size
, &di
->rxdpa
, &di
->rx_dmah
)) == NULL
) {
1073 DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
1077 di
->rxd32
= (dma32dd_t
*) ROUNDUP((uintptr
)va
, D32RINGALIGN
);
1078 di
->rxdalign
= (uint
)((int8
*)(uintptr
)di
->rxd32
- (int8
*)va
);
1079 di
->rxdpa
+= di
->rxdalign
;
1080 di
->rxdalloc
= size
;
1081 ASSERT(ISALIGNED((uintptr
)di
->rxd32
, D32RINGALIGN
));
1088 dma32_txreset(dma_info_t
*di
)
1095 /* suspend tx DMA first */
1096 W_REG(di
->osh
, &di
->d32txregs
->control
, XC_SE
);
1097 SPINWAIT(((status
= (R_REG(di
->osh
, &di
->d32txregs
->status
) & XS_XS_MASK
))
1098 != XS_XS_DISABLED
) &&
1099 (status
!= XS_XS_IDLE
) &&
1100 (status
!= XS_XS_STOPPED
),
1103 W_REG(di
->osh
, &di
->d32txregs
->control
, 0);
1104 SPINWAIT(((status
= (R_REG(di
->osh
,
1105 &di
->d32txregs
->status
) & XS_XS_MASK
)) != XS_XS_DISABLED
),
1108 /* wait for the last transaction to complete */
1111 return (status
== XS_XS_DISABLED
);
1115 dma32_rxidle(dma_info_t
*di
)
1117 DMA_TRACE(("%s: dma_rxidle\n", di
->name
));
1122 return ((R_REG(di
->osh
, &di
->d32rxregs
->status
) & RS_CD_MASK
) ==
1123 R_REG(di
->osh
, &di
->d32rxregs
->ptr
));
1127 dma32_rxreset(dma_info_t
*di
)
1134 W_REG(di
->osh
, &di
->d32rxregs
->control
, 0);
1135 SPINWAIT(((status
= (R_REG(di
->osh
,
1136 &di
->d32rxregs
->status
) & RS_RS_MASK
)) != RS_RS_DISABLED
),
1139 return (status
== RS_RS_DISABLED
);
1143 dma32_rxenabled(dma_info_t
*di
)
1147 rc
= R_REG(di
->osh
, &di
->d32rxregs
->control
);
1148 return ((rc
!= 0xffffffff) && (rc
& RC_RE
));
1152 dma32_txsuspendedidle(dma_info_t
*di
)
1157 if (!(R_REG(di
->osh
, &di
->d32txregs
->control
) & XC_SE
))
1160 if ((R_REG(di
->osh
, &di
->d32txregs
->status
) & XS_XS_MASK
) != XS_XS_IDLE
)
1164 return ((R_REG(di
->osh
, &di
->d32txregs
->status
) & XS_XS_MASK
) == XS_XS_IDLE
);
1167 /* !! tx entry routine
1168 * supports full 32bit dma engine buffer addressing so
1169 * dma buffers can cross 4 Kbyte page boundaries.
1172 dma32_txfast(dma_info_t
*di
, void *p0
, bool commit
)
1181 DMA_TRACE(("%s: dma_txfast\n", di
->name
));
1186 * Walk the chain of packet buffers
1187 * allocating and initializing transmit descriptor entries.
1189 for (p
= p0
; p
; p
= next
) {
1190 data
= PKTDATA(di
->osh
, p
);
1191 len
= PKTLEN(di
->osh
, p
);
1192 next
= PKTNEXT(di
->osh
, p
);
1194 /* return nonzero if out of tx descriptors */
1195 if (NEXTTXD(txout
) == di
->txin
)
1201 /* get physical address of buffer start */
1202 pa
= (uint32
) DMA_MAP(di
->osh
, data
, len
, DMA_TX
, p
, &di
->txp_dmah
[txout
]);
1208 flags
|= (CTRL_IOC
| CTRL_EOF
);
1209 if (txout
== (di
->ntxd
- 1))
1212 dma32_dd_upd(di
, di
->txd32
, pa
, txout
, &flags
, len
);
1213 ASSERT(di
->txp
[txout
] == NULL
);
1215 txout
= NEXTTXD(txout
);
1218 /* if last txd eof not set, fix it */
1219 if (!(flags
& CTRL_EOF
))
1220 W_SM(&di
->txd32
[PREVTXD(txout
)].ctrl
, BUS_SWAP32(flags
| CTRL_IOC
| CTRL_EOF
));
1222 /* save the packet */
1223 di
->txp
[PREVTXD(txout
)] = p0
;
1225 /* bump the tx descriptor index */
1230 W_REG(di
->osh
, &di
->d32txregs
->ptr
, I2B(txout
, dma32dd_t
));
1232 /* tx flow control */
1233 di
->hnddma
.txavail
= di
->ntxd
- NTXDACTIVE(di
->txin
, di
->txout
) - 1;
1238 DMA_ERROR(("%s: dma_txfast: out of txds\n", di
->name
));
1239 PKTFREE(di
->osh
, p0
, TRUE
);
1240 di
->hnddma
.txavail
= 0;
1241 di
->hnddma
.txnobuf
++;
1246 * Reclaim next completed txd (txds if using chained buffers) and
1247 * return associated packet.
1248 * If 'force' is true, reclaim txd(s) and return associated packet
1249 * regardless of the value of the hardware "curr" pointer.
1252 dma32_getnexttxp(dma_info_t
*di
, bool forceall
)
1257 DMA_TRACE(("%s: dma_getnexttxp %s\n", di
->name
, forceall
? "all" : ""));
1268 end
= B2I(R_REG(di
->osh
, &di
->d32txregs
->status
) & XS_CD_MASK
, dma32dd_t
);
1270 if ((start
== 0) && (end
> di
->txout
))
1273 for (i
= start
; i
!= end
&& !txp
; i
= NEXTTXD(i
)) {
1274 DMA_UNMAP(di
->osh
, (BUS_SWAP32(R_SM(&di
->txd32
[i
].addr
)) - di
->dataoffsetlow
),
1275 (BUS_SWAP32(R_SM(&di
->txd32
[i
].ctrl
)) & CTRL_BC_MASK
),
1276 DMA_TX
, di
->txp
[i
], &di
->txp_dmah
[i
]);
1278 W_SM(&di
->txd32
[i
].addr
, 0xdeadbeef);
1285 /* tx flow control */
1286 di
->hnddma
.txavail
= di
->ntxd
- NTXDACTIVE(di
->txin
, di
->txout
) - 1;
1292 DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
1293 start, end, di->txout, forceall));
1299 dma32_getnextrxp(dma_info_t
*di
, bool forceall
)
1304 /* if forcing, dma engine must be disabled */
1305 ASSERT(!forceall
|| !dma32_rxenabled(di
));
1309 /* return if no packets posted */
1313 /* ignore curr if forceall */
1314 if (!forceall
&& (i
== B2I(R_REG(di
->osh
, &di
->d32rxregs
->status
) & RS_CD_MASK
, dma32dd_t
)))
1317 /* get the packet pointer that corresponds to the rx descriptor */
1322 /* clear this packet from the descriptor ring */
1323 DMA_UNMAP(di
->osh
, (BUS_SWAP32(R_SM(&di
->rxd32
[i
].addr
)) - di
->dataoffsetlow
),
1324 di
->rxbufsize
, DMA_RX
, rxp
, &di
->rxp_dmah
[i
]);
1326 W_SM(&di
->rxd32
[i
].addr
, 0xdeadbeef);
1328 di
->rxin
= NEXTRXD(i
);
1334 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
1337 dma32_txrotate(dma_info_t
*di
)
1346 ASSERT(dma32_txsuspendedidle(di
));
1348 nactive
= _dma_txactive(di
);
1349 ad
= B2I(((R_REG(di
->osh
, &di
->d32txregs
->status
) & XS_AD_MASK
) >> XS_AD_SHIFT
), dma32dd_t
);
1350 rot
= TXD(ad
- di
->txin
);
1352 ASSERT(rot
< di
->ntxd
);
1354 /* full-ring case is a lot harder - don't worry about this */
1355 if (rot
>= (di
->ntxd
- nactive
)) {
1356 DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di
->name
));
1361 last
= PREVTXD(di
->txout
);
1363 /* move entries starting at last and moving backwards to first */
1364 for (old
= last
; old
!= PREVTXD(first
); old
= PREVTXD(old
)) {
1365 new = TXD(old
+ rot
);
1368 * Move the tx dma descriptor.
1369 * EOT is set only in the last entry in the ring.
1371 w
= BUS_SWAP32(R_SM(&di
->txd32
[old
].ctrl
)) & ~CTRL_EOT
;
1372 if (new == (di
->ntxd
- 1))
1374 W_SM(&di
->txd32
[new].ctrl
, BUS_SWAP32(w
));
1375 W_SM(&di
->txd32
[new].addr
, R_SM(&di
->txd32
[old
].addr
));
1377 /* zap the old tx dma descriptor address field */
1378 W_SM(&di
->txd32
[old
].addr
, BUS_SWAP32(0xdeadbeef));
1380 /* move the corresponding txp[] entry */
1381 ASSERT(di
->txp
[new] == NULL
);
1382 di
->txp
[new] = di
->txp
[old
];
1383 di
->txp
[old
] = NULL
;
1386 /* update txin and txout */
1388 di
->txout
= TXD(di
->txout
+ rot
);
1389 di
->hnddma
.txavail
= di
->ntxd
- NTXDACTIVE(di
->txin
, di
->txout
) - 1;
1392 W_REG(di
->osh
, &di
->d32txregs
->ptr
, I2B(di
->txout
, dma32dd_t
));
1395 /* 64 bits DMA functions */
1399 dma64_txinit(dma_info_t
*di
)
1401 DMA_TRACE(("%s: dma_txinit\n", di
->name
));
1406 di
->txin
= di
->txout
= 0;
1407 di
->hnddma
.txavail
= di
->ntxd
- 1;
1409 /* clear tx descriptor ring */
1410 BZERO_SM((void *)(uintptr
)di
->txd64
, (di
->ntxd
* sizeof(dma64dd_t
)));
1411 W_REG(di
->osh
, &di
->d64txregs
->control
, D64_XC_XE
);
1412 _dma_ddtable_init(di
, DMA_TX
, di
->txdpa
);
1416 dma64_txenabled(dma_info_t
*di
)
1420 /* If the chip is dead, it is not enabled :-) */
1421 xc
= R_REG(di
->osh
, &di
->d64txregs
->control
);
1422 return ((xc
!= 0xffffffff) && (xc
& D64_XC_XE
));
1426 dma64_txsuspend(dma_info_t
*di
)
1428 DMA_TRACE(("%s: dma_txsuspend\n", di
->name
));
1433 OR_REG(di
->osh
, &di
->d64txregs
->control
, D64_XC_SE
);
1437 dma64_txresume(dma_info_t
*di
)
1439 DMA_TRACE(("%s: dma_txresume\n", di
->name
));
1444 AND_REG(di
->osh
, &di
->d64txregs
->control
, ~D64_XC_SE
);
1448 dma64_txsuspended(dma_info_t
*di
)
1450 return (di
->ntxd
== 0) || ((R_REG(di
->osh
, &di
->d64txregs
->control
) & D64_XC_SE
)
1455 dma64_txreclaim(dma_info_t
*di
, bool forceall
)
1459 DMA_TRACE(("%s: dma_txreclaim %s\n", di
->name
, forceall
? "all" : ""));
1461 while ((p
= dma64_getnexttxp(di
, forceall
)))
1462 PKTFREE(di
->osh
, p
, TRUE
);
1466 dma64_txstopped(dma_info_t
*di
)
1468 return ((R_REG(di
->osh
, &di
->d64txregs
->status0
) & D64_XS0_XS_MASK
) == D64_XS0_XS_STOPPED
);
1472 dma64_rxstopped(dma_info_t
*di
)
1474 return ((R_REG(di
->osh
, &di
->d64rxregs
->status0
) & D64_RS0_RS_MASK
) == D64_RS0_RS_STOPPED
);
1478 dma64_alloc(dma_info_t
*di
, uint direction
)
1485 ddlen
= sizeof(dma64dd_t
);
1487 size
= (direction
== DMA_TX
) ? (di
->ntxd
* ddlen
) : (di
->nrxd
* ddlen
);
1489 alignbytes
= di
->dma64align
;
1491 if (!ISALIGNED(DMA_CONSISTENT_ALIGN
, alignbytes
))
1494 if (direction
== DMA_TX
) {
1495 if ((va
= DMA_ALLOC_CONSISTENT(di
->osh
, size
, &di
->txdpa
, &di
->tx_dmah
)) == NULL
) {
1496 DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
1501 di
->txd64
= (dma64dd_t
*) ROUNDUP((uintptr
)va
, alignbytes
);
1502 di
->txdalign
= (uint
)((int8
*)(uintptr
)di
->txd64
- (int8
*)va
);
1503 di
->txdpa
+= di
->txdalign
;
1504 di
->txdalloc
= size
;
1505 ASSERT(ISALIGNED((uintptr
)di
->txd64
, alignbytes
));
1507 if ((va
= DMA_ALLOC_CONSISTENT(di
->osh
, size
, &di
->rxdpa
, &di
->rx_dmah
)) == NULL
) {
1508 DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
1512 di
->rxd64
= (dma64dd_t
*) ROUNDUP((uintptr
)va
, alignbytes
);
1513 di
->rxdalign
= (uint
)((int8
*)(uintptr
)di
->rxd64
- (int8
*)va
);
1514 di
->rxdpa
+= di
->rxdalign
;
1515 di
->rxdalloc
= size
;
1516 ASSERT(ISALIGNED((uintptr
)di
->rxd64
, alignbytes
));
1523 dma64_txreset(dma_info_t
*di
)
1530 /* suspend tx DMA first */
1531 W_REG(di
->osh
, &di
->d64txregs
->control
, D64_XC_SE
);
1532 SPINWAIT(((status
= (R_REG(di
->osh
, &di
->d64txregs
->status0
) & D64_XS0_XS_MASK
)) !=
1533 D64_XS0_XS_DISABLED
) &&
1534 (status
!= D64_XS0_XS_IDLE
) &&
1535 (status
!= D64_XS0_XS_STOPPED
),
1538 W_REG(di
->osh
, &di
->d64txregs
->control
, 0);
1539 SPINWAIT(((status
= (R_REG(di
->osh
, &di
->d64txregs
->status0
) & D64_XS0_XS_MASK
)) !=
1540 D64_XS0_XS_DISABLED
),
1543 /* wait for the last transaction to complete */
1546 return (status
== D64_XS0_XS_DISABLED
);
1550 dma64_rxidle(dma_info_t
*di
)
1552 DMA_TRACE(("%s: dma_rxidle\n", di
->name
));
1557 return ((R_REG(di
->osh
, &di
->d64rxregs
->status0
) & D64_RS0_CD_MASK
) ==
1558 R_REG(di
->osh
, &di
->d64rxregs
->ptr
));
1562 dma64_rxreset(dma_info_t
*di
)
1569 W_REG(di
->osh
, &di
->d64rxregs
->control
, 0);
1570 SPINWAIT(((status
= (R_REG(di
->osh
, &di
->d64rxregs
->status0
) & D64_RS0_RS_MASK
)) !=
1571 D64_RS0_RS_DISABLED
),
1574 return (status
== D64_RS0_RS_DISABLED
);
1578 dma64_rxenabled(dma_info_t
*di
)
1582 rc
= R_REG(di
->osh
, &di
->d64rxregs
->control
);
1583 return ((rc
!= 0xffffffff) && (rc
& D64_RC_RE
));
1587 dma64_txsuspendedidle(dma_info_t
*di
)
1593 if (!(R_REG(di
->osh
, &di
->d64txregs
->control
) & D64_XC_SE
))
1596 if ((R_REG(di
->osh
, &di
->d64txregs
->status0
) & D64_XS0_XS_MASK
) == D64_XS0_XS_IDLE
)
1603 /* !! tx entry routine */
1605 dma64_txfast(dma_info_t
*di
, void *p0
, bool commit
)
1614 DMA_TRACE(("%s: dma_txfast\n", di
->name
));
1619 * Walk the chain of packet buffers
1620 * allocating and initializing transmit descriptor entries.
1622 for (p
= p0
; p
; p
= next
) {
1623 data
= PKTDATA(di
->osh
, p
);
1624 len
= PKTLEN(di
->osh
, p
);
1625 next
= PKTNEXT(di
->osh
, p
);
1627 /* return nonzero if out of tx descriptors */
1628 if (NEXTTXD(txout
) == di
->txin
)
1634 /* get physical address of buffer start */
1635 pa
= (uint32
) DMA_MAP(di
->osh
, data
, len
, DMA_TX
, p
, &di
->txp_dmah
[txout
]);
1639 flags
|= D64_CTRL1_SOF
;
1641 flags
|= (D64_CTRL1_IOC
| D64_CTRL1_EOF
);
1642 if (txout
== (di
->ntxd
- 1))
1643 flags
|= D64_CTRL1_EOT
;
1645 dma64_dd_upd(di
, di
->txd64
, pa
, txout
, &flags
, len
);
1646 ASSERT(di
->txp
[txout
] == NULL
);
1648 txout
= NEXTTXD(txout
);
1651 /* if last txd eof not set, fix it */
1652 if (!(flags
& D64_CTRL1_EOF
))
1653 W_SM(&di
->txd64
[PREVTXD(txout
)].ctrl1
,
1654 BUS_SWAP32(flags
| D64_CTRL1_IOC
| D64_CTRL1_EOF
));
1656 /* save the packet */
1657 di
->txp
[PREVTXD(txout
)] = p0
;
1659 /* bump the tx descriptor index */
1664 W_REG(di
->osh
, &di
->d64txregs
->ptr
, I2B(txout
, dma64dd_t
));
1666 /* tx flow control */
1667 di
->hnddma
.txavail
= di
->ntxd
- NTXDACTIVE(di
->txin
, di
->txout
) - 1;
1672 DMA_ERROR(("%s: dma_txfast: out of txds\n", di
->name
));
1673 PKTFREE(di
->osh
, p0
, TRUE
);
1674 di
->hnddma
.txavail
= 0;
1675 di
->hnddma
.txnobuf
++;
1680 * Reclaim next completed txd (txds if using chained buffers) and
1681 * return associated packet.
1682 * If 'force' is true, reclaim txd(s) and return associated packet
1683 * regardless of the value of the hardware "curr" pointer.
1686 dma64_getnexttxp(dma_info_t
*di
, bool forceall
)
1691 DMA_TRACE(("%s: dma_getnexttxp %s\n", di
->name
, forceall
? "all" : ""));
1702 end
= B2I(R_REG(di
->osh
, &di
->d64txregs
->status0
) & D64_XS0_CD_MASK
, dma64dd_t
);
1704 if ((start
== 0) && (end
> di
->txout
))
1707 for (i
= start
; i
!= end
&& !txp
; i
= NEXTTXD(i
)) {
1708 DMA_UNMAP(di
->osh
, (BUS_SWAP32(R_SM(&di
->txd64
[i
].addrlow
)) - di
->dataoffsetlow
),
1709 (BUS_SWAP32(R_SM(&di
->txd64
[i
].ctrl2
)) & D64_CTRL2_BC_MASK
),
1710 DMA_TX
, di
->txp
[i
], &di
->txp_dmah
[i
]);
1712 W_SM(&di
->txd64
[i
].addrlow
, 0xdeadbeef);
1713 W_SM(&di
->txd64
[i
].addrhigh
, 0xdeadbeef);
1721 /* tx flow control */
1722 di
->hnddma
.txavail
= di
->ntxd
- NTXDACTIVE(di
->txin
, di
->txout
) - 1;
1728 DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
1729 start, end, di->txout, forceall));
1735 dma64_getnextrxp(dma_info_t
*di
, bool forceall
)
1740 /* if forcing, dma engine must be disabled */
1741 ASSERT(!forceall
|| !dma64_rxenabled(di
));
1745 /* return if no packets posted */
1749 /* ignore curr if forceall */
1751 (i
== B2I(R_REG(di
->osh
, &di
->d64rxregs
->status0
) & D64_RS0_CD_MASK
, dma64dd_t
)))
1754 /* get the packet pointer that corresponds to the rx descriptor */
1759 /* clear this packet from the descriptor ring */
1760 DMA_UNMAP(di
->osh
, (BUS_SWAP32(R_SM(&di
->rxd64
[i
].addrlow
)) - di
->dataoffsetlow
),
1761 di
->rxbufsize
, DMA_RX
, rxp
, &di
->rxp_dmah
[i
]);
1763 W_SM(&di
->rxd64
[i
].addrlow
, 0xdeadbeef);
1764 W_SM(&di
->rxd64
[i
].addrhigh
, 0xdeadbeef);
1766 di
->rxin
= NEXTRXD(i
);
1772 _dma64_addrext(osl_t
*osh
, dma64regs_t
*dma64regs
)
1775 OR_REG(osh
, &dma64regs
->control
, D64_XC_AE
);
1776 w
= R_REG(osh
, &dma64regs
->control
);
1777 AND_REG(osh
, &dma64regs
->control
, ~D64_XC_AE
);
1778 return ((w
& D64_XC_AE
) == D64_XC_AE
);
1782 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
1785 dma64_txrotate(dma_info_t
*di
)
1794 ASSERT(dma64_txsuspendedidle(di
));
1796 nactive
= _dma_txactive(di
);
1797 ad
= B2I((R_REG(di
->osh
, &di
->d64txregs
->status1
) & D64_XS1_AD_MASK
), dma64dd_t
);
1798 rot
= TXD(ad
- di
->txin
);
1800 ASSERT(rot
< di
->ntxd
);
1802 /* full-ring case is a lot harder - don't worry about this */
1803 if (rot
>= (di
->ntxd
- nactive
)) {
1804 DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di
->name
));
1809 last
= PREVTXD(di
->txout
);
1811 /* move entries starting at last and moving backwards to first */
1812 for (old
= last
; old
!= PREVTXD(first
); old
= PREVTXD(old
)) {
1813 new = TXD(old
+ rot
);
1816 * Move the tx dma descriptor.
1817 * EOT is set only in the last entry in the ring.
1819 w
= BUS_SWAP32(R_SM(&di
->txd64
[old
].ctrl1
)) & ~D64_CTRL1_EOT
;
1820 if (new == (di
->ntxd
- 1))
1822 W_SM(&di
->txd64
[new].ctrl1
, BUS_SWAP32(w
));
1824 w
= BUS_SWAP32(R_SM(&di
->txd64
[old
].ctrl2
));
1825 W_SM(&di
->txd64
[new].ctrl2
, BUS_SWAP32(w
));
1827 W_SM(&di
->txd64
[new].addrlow
, R_SM(&di
->txd64
[old
].addrlow
));
1828 W_SM(&di
->txd64
[new].addrhigh
, R_SM(&di
->txd64
[old
].addrhigh
));
1830 /* zap the old tx dma descriptor address field */
1831 W_SM(&di
->txd64
[old
].addrlow
, BUS_SWAP32(0xdeadbeef));
1832 W_SM(&di
->txd64
[old
].addrhigh
, BUS_SWAP32(0xdeadbeef));
1834 /* move the corresponding txp[] entry */
1835 ASSERT(di
->txp
[new] == NULL
);
1836 di
->txp
[new] = di
->txp
[old
];
1837 di
->txp
[old
] = NULL
;
1840 /* update txin and txout */
1842 di
->txout
= TXD(di
->txout
+ rot
);
1843 di
->hnddma
.txavail
= di
->ntxd
- NTXDACTIVE(di
->txin
, di
->txout
) - 1;
1846 W_REG(di
->osh
, &di
->d64txregs
->ptr
, I2B(di
->txout
, dma64dd_t
));
1849 #endif /* BCMDMA64 */
1852 dma_addrwidth(sb_t
*sbh
, void *dmaregs
)
1854 dma32regs_t
*dma32regs
;
1860 /* DMA engine is 64-bit capable */
1861 if (((sb_coreflagshi(sbh
, 0, 0) & SBTMH_DMA64
) == SBTMH_DMA64
)) {
1862 /* backplane are 64 bits capable */
1863 if (sb_backplane64(sbh
))
1864 /* If bus is System Backplane or PCIE then we can access 64-bits */
1865 if ((BUSTYPE(sbh
->bustype
) == SB_BUS
) ||
1866 ((BUSTYPE(sbh
->bustype
) == PCI_BUS
) &&
1867 sbh
->buscoretype
== SB_PCIE
))
1868 return (DMADDRWIDTH_64
);
1870 /* DMA64 is always 32 bits capable, AE is always TRUE */
1872 ASSERT(_dma64_addrext(osh
, (dma64regs_t
*)dmaregs
));
1874 return (DMADDRWIDTH_32
);
1878 /* Start checking for 32-bit / 30-bit addressing */
1879 dma32regs
= (dma32regs_t
*)dmaregs
;
1881 /* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
1882 if ((BUSTYPE(sbh
->bustype
) == SB_BUS
) ||
1883 ((BUSTYPE(sbh
->bustype
) == PCI_BUS
) && sbh
->buscoretype
== SB_PCIE
) ||
1884 (_dma32_addrext(osh
, dma32regs
)))
1885 return (DMADDRWIDTH_32
);
1888 return (DMADDRWIDTH_30
);