/*
 * Generic Broadcom Home Networking Division (HND) DMA module.
 * This supports the following chips: BCM42xx, 44xx, 47xx.
 *
 * Copyright (C) 2010, Broadcom Corporation. All Rights Reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $Id: hnddma.c,v 1.231.10.17 2011-01-27 19:03:18 Exp $
 */
#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmendian.h>
#include <bcmutils.h>
#include <siutils.h>
#include <sbhnddma.h>
#include <hnddma.h>
#ifdef BCMDBG
#define	DMA_ERROR(args)	if (!(*di->msg_level & 1)); else printf args
#define	DMA_TRACE(args)	if (!(*di->msg_level & 2)); else printf args
#elif defined(BCMDBG_ERR)
#define	DMA_ERROR(args)	if (!(*di->msg_level & 1)); else printf args
#define	DMA_TRACE(args)
#else
#define	DMA_ERROR(args)
#define	DMA_TRACE(args)
#endif /* BCMDBG */

#define	DMA_NONE(args)
#define	d32txregs	dregs.d32_u.txregs_32
#define	d32rxregs	dregs.d32_u.rxregs_32
#define	txd32		dregs.d32_u.txd_32
#define	rxd32		dregs.d32_u.rxd_32

#define	d64txregs	dregs.d64_u.txregs_64
#define	d64rxregs	dregs.d64_u.rxregs_64
#define	txd64		dregs.d64_u.txd_64
#define	rxd64		dregs.d64_u.rxd_64
/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level =
#ifdef BCMDBG_ERR
	1;
#else
	0;
#endif /* BCMDBG_ERR */
#define	MAXNAMEL	8		/* 8 char names */

#define	DI_INFO(dmah)	((dma_info_t *)dmah)

#ifdef BCMPKTPOOL
#define POOL_ENAB(di)	((di)->pktpool->inited)
#else /* BCMPKTPOOL */
#define POOL_ENAB(di)	0
#endif /* BCMPKTPOOL */
/* dma engine software state */
typedef struct dma_info {
	struct hnddma_pub hnddma;	/* exported structure, don't use hnddma_t,
					 * which could be const
					 */
	uint *msg_level;	/* message level pointer */
	char name[MAXNAMEL];	/* callers name for diag msgs */

	void *osh;		/* os handle */
	si_t *sih;		/* sb handle */

	bool dma64;		/* this dma engine is operating in 64-bit mode */
	bool addrext;		/* this dma engine supports DmaExtendedAddrChanges */

	union {
		struct {
			dma32regs_t *txregs_32;	/* 32-bit dma tx engine registers */
			dma32regs_t *rxregs_32;	/* 32-bit dma rx engine registers */
			dma32dd_t *txd_32;	/* pointer to dma32 tx descriptor ring */
			dma32dd_t *rxd_32;	/* pointer to dma32 rx descriptor ring */
		} d32_u;
		struct {
			dma64regs_t *txregs_64;	/* 64-bit dma tx engine registers */
			dma64regs_t *rxregs_64;	/* 64-bit dma rx engine registers */
			dma64dd_t *txd_64;	/* pointer to dma64 tx descriptor ring */
			dma64dd_t *rxd_64;	/* pointer to dma64 rx descriptor ring */
		} d64_u;
	} dregs;

	uint16 dmadesc_align;	/* alignment requirement for dma descriptors */

	uint16 ntxd;		/* # tx descriptors tunable */
	uint16 txin;		/* index of next descriptor to reclaim */
	uint16 txout;		/* index of next descriptor to post */
	void **txp;		/* pointer to parallel array of pointers to packets */
	osldma_t *tx_dmah;	/* DMA TX descriptor ring handle */
	hnddma_seg_map_t *txp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t txdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t txdpaorig;	/* Original physical address of descriptor ring */
	uint16 txdalign;	/* #bytes added to alloc'd mem to align txd */
	uint32 txdalloc;	/* #bytes allocated for the ring */
	uint32 xmtptrbase;	/* When using unaligned descriptors, the ptr register
				 * is not just an index, it needs all 13 bits to be
				 * an offset from the addr register.
				 */

	uint16 nrxd;		/* # rx descriptors tunable */
	uint16 rxin;		/* index of next descriptor to reclaim */
	uint16 rxout;		/* index of next descriptor to post */
	void **rxp;		/* pointer to parallel array of pointers to packets */
	osldma_t *rx_dmah;	/* DMA RX descriptor ring handle */
	hnddma_seg_map_t *rxp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t rxdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t rxdpaorig;	/* Original physical address of descriptor ring */
	uint16 rxdalign;	/* #bytes added to alloc'd mem to align rxd */
	uint32 rxdalloc;	/* #bytes allocated for the ring */
	uint32 rcvptrbase;	/* Base for ptr reg when using unaligned descriptors */

	uint16 rxbufsize;	/* rx buffer size in bytes,
				 * not including the extra headroom
				 */
	uint rxextrahdrroom;	/* extra rx headroom, reserved to assist upper stack
				 * e.g. some rx pkt buffers will be bridged to tx side
				 * without byte copying. The extra headroom needs to be
				 * large enough to fit txheader needs.
				 * Some dongle driver may not need it.
				 */
	uint nrxpost;		/* # rx buffers to keep posted */
	uint rxoffset;		/* rxcontrol offset */
	uint ddoffsetlow;	/* add to get dma address of descriptor ring, low 32 bits */
	uint ddoffsethigh;	/*   high 32 bits */
	uint dataoffsetlow;	/* add to get dma address of data buffer, low 32 bits */
	uint dataoffsethigh;	/*   high 32 bits */
	bool aligndesc_4k;	/* descriptor base need to be aligned or not */
	pktpool_t *pktpool;	/* pktpool */
	uint dma_avoidance_cnt;
} dma_info_t;
/*
 * If BCMDMA32 is defined, hnddma will support both 32-bit and 64-bit DMA engines.
 * Otherwise it will support only 64-bit.
 *
 * DMA32_ENAB indicates whether hnddma is compiled with support for 32-bit DMA engines.
 * DMA64_ENAB indicates whether hnddma is compiled with support for 64-bit DMA engines.
 *
 * DMA64_MODE indicates whether the current DMA engine is running as 64-bit.
 */
#ifdef BCMDMA32
#define	DMA32_ENAB(di)	1
#define	DMA64_ENAB(di)	1
#define	DMA64_MODE(di)	((di)->dma64)
#else /* !BCMDMA32 */
#define	DMA32_ENAB(di)	0
#define	DMA64_ENAB(di)	1
#define	DMA64_MODE(di)	1
#endif /* !BCMDMA32 */
/* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
#ifdef BCMDMASGLISTOSL
#define DMASGLIST_ENAB TRUE
#else
#define DMASGLIST_ENAB FALSE
#endif /* BCMDMASGLISTOSL */

/* descriptor bumping macros */
#define	XXD(x, n)	((x) & ((n) - 1))	/* faster than %, but n must be power of 2 */
#define	TXD(x)		XXD((x), di->ntxd)
#define	RXD(x)		XXD((x), di->nrxd)
#define	NEXTTXD(i)	TXD((i) + 1)
#define	PREVTXD(i)	TXD((i) - 1)
#define	NEXTRXD(i)	RXD((i) + 1)
#define	PREVRXD(i)	RXD((i) - 1)

#define	NTXDACTIVE(h, t)	TXD((t) - (h))
#define	NRXDACTIVE(h, t)	RXD((t) - (h))

/* macros to convert between byte offsets and indexes */
#define	B2I(bytes, type)	((bytes) / sizeof(type))
#define	I2B(index, type)	((index) * sizeof(type))
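/*
 * Worked example (illustrative, not part of the driver): with di->ntxd == 512,
 * TXD(511 + 1) == (512 & 511) == 0, so NEXTTXD() wraps the ring, and
 * NTXDACTIVE(510, 2) == ((2 - 510) & 511) == 4, i.e. descriptors 510, 511, 0
 * and 1 are outstanding. The mask (n - 1) only equals "mod n" when n is a
 * power of 2, which is why dma_attach() asserts ISPOWEROF2() on ntxd/nrxd.
 */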
#define	PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define	PCI32ADDR_HIGH_SHIFT	30		/* address[31:30] */

#define	PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define	PCI64ADDR_HIGH_SHIFT	31		/* address[63] */
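/*
 * Worked example (illustrative only): a 32-bit descriptor carries address
 * bits [29:0] directly, and bits [31:30] travel in the AE control field. For
 * PHYSADDRLO(pa) == 0xC0001000, the address-extension paths below compute
 * ae = (0xC0001000 & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT == 3, clear the
 * top two bits to leave 0x00001000, and program ae into the CTRL_AE (or
 * D64_CTRL2_AE) field so the engine reassembles the full address.
 */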
/* Common prototypes */
static bool _dma_isaddrext(dma_info_t *di);
static bool _dma_descriptor_align(dma_info_t *di);
static bool _dma_alloc(dma_info_t *di, uint direction);
static void _dma_detach(dma_info_t *di);
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
static void _dma_rxinit(dma_info_t *di);
static void *_dma_rx(dma_info_t *di);
static bool _dma_rxfill(dma_info_t *di);
static void _dma_rxreclaim(dma_info_t *di);
static void _dma_rxenable(dma_info_t *di);
static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
static void _dma_rx_param_get(dma_info_t *di, uint16 *rxoffset, uint16 *rxbufsize);

static void _dma_txblock(dma_info_t *di);
static void _dma_txunblock(dma_info_t *di);
static uint _dma_txactive(dma_info_t *di);
static uint _dma_rxactive(dma_info_t *di);
static uint _dma_txpending(dma_info_t *di);
static uint _dma_txcommitted(dma_info_t *di);

static void *_dma_peeknexttxp(dma_info_t *di);
static void *_dma_peeknextrxp(dma_info_t *di);
static uintptr _dma_getvar(dma_info_t *di, const char *name);
static void _dma_counterreset(dma_info_t *di);
static void _dma_fifoloopbackenable(dma_info_t *di);
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
static uint8 dma_align_sizetobits(uint size);
static void *dma_ringalloc(osl_t *osh, uint32 boundary, uint size, uint16 *alignbits,
	uint *alloced, dmaaddr_t *descpa, osldma_t **dmah);
static uint _dma_avoidancecnt(dma_info_t *di);
/* Prototypes for 32-bit routines */
static bool dma32_alloc(dma_info_t *di, uint direction);
static bool dma32_txreset(dma_info_t *di);
static bool dma32_rxreset(dma_info_t *di);
static bool dma32_txsuspendedidle(dma_info_t *di);
static int dma32_txfast(dma_info_t *di, void *p0, bool commit);
static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma32_getnextrxp(dma_info_t *di, bool forceall);
static void dma32_txrotate(dma_info_t *di);
static bool dma32_rxidle(dma_info_t *di);
static void dma32_txinit(dma_info_t *di);
static bool dma32_txenabled(dma_info_t *di);
static void dma32_txsuspend(dma_info_t *di);
static void dma32_txresume(dma_info_t *di);
static bool dma32_txsuspended(dma_info_t *di);
#ifdef WL_MULTIQUEUE
static void dma32_txflush(dma_info_t *di);
static void dma32_txflush_clear(dma_info_t *di);
#endif /* WL_MULTIQUEUE */
static void dma32_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma32_txstopped(dma_info_t *di);
static bool dma32_rxstopped(dma_info_t *di);
static bool dma32_rxenabled(dma_info_t *di);
#if defined(BCMDBG) || defined(BCMDBG_DUMP)
static void dma32_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma32dd_t *ring, uint start,
	uint end, uint max_num);
static void dma32_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma32_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma32_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
#endif /* defined(BCMDBG) || defined(BCMDBG_DUMP) */

static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs);
/* Prototypes for 64-bit routines */
static bool dma64_alloc(dma_info_t *di, uint direction);
static bool dma64_txreset(dma_info_t *di);
static bool dma64_rxreset(dma_info_t *di);
static bool dma64_txsuspendedidle(dma_info_t *di);
static int dma64_txfast(dma_info_t *di, void *p0, bool commit);
static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
static void *dma64_getpos(dma_info_t *di, bool direction);
static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
static void dma64_txrotate(dma_info_t *di);
static bool dma64_rxidle(dma_info_t *di);
static void dma64_txinit(dma_info_t *di);
static bool dma64_txenabled(dma_info_t *di);
static void dma64_txsuspend(dma_info_t *di);
static void dma64_txresume(dma_info_t *di);
static bool dma64_txsuspended(dma_info_t *di);
#ifdef WL_MULTIQUEUE
static void dma64_txflush(dma_info_t *di);
static void dma64_txflush_clear(dma_info_t *di);
#endif /* WL_MULTIQUEUE */
static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma64_txstopped(dma_info_t *di);
static bool dma64_rxstopped(dma_info_t *di);
static bool dma64_rxenabled(dma_info_t *di);
static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs);

STATIC INLINE uint32 parity32(uint32 data);

#if defined(BCMDBG) || defined(BCMDBG_DUMP)
static void dma64_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma64dd_t *ring, uint start,
	uint end, uint max_num);
static void dma64_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma64_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma64_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
#endif /* defined(BCMDBG) || defined(BCMDBG_DUMP) */
const di_fcn_t dma64proc = {
	(di_detach_t)_dma_detach,
	(di_txinit_t)dma64_txinit,
	(di_txreset_t)dma64_txreset,
	(di_txenabled_t)dma64_txenabled,
	(di_txsuspend_t)dma64_txsuspend,
	(di_txresume_t)dma64_txresume,
	(di_txsuspended_t)dma64_txsuspended,
	(di_txsuspendedidle_t)dma64_txsuspendedidle,
#ifdef WL_MULTIQUEUE
	(di_txflush_t)dma64_txflush,
	(di_txflush_clear_t)dma64_txflush_clear,
#endif /* WL_MULTIQUEUE */
	(di_txfast_t)dma64_txfast,
	(di_txunframed_t)dma64_txunframed,
	(di_getpos_t)dma64_getpos,
	(di_txstopped_t)dma64_txstopped,
	(di_txreclaim_t)dma64_txreclaim,
	(di_getnexttxp_t)dma64_getnexttxp,
	(di_peeknexttxp_t)_dma_peeknexttxp,
	(di_txblock_t)_dma_txblock,
	(di_txunblock_t)_dma_txunblock,
	(di_txactive_t)_dma_txactive,
	(di_txrotate_t)dma64_txrotate,

	(di_rxinit_t)_dma_rxinit,
	(di_rxreset_t)dma64_rxreset,
	(di_rxidle_t)dma64_rxidle,
	(di_rxstopped_t)dma64_rxstopped,
	(di_rxenable_t)_dma_rxenable,
	(di_rxenabled_t)dma64_rxenabled,
	(di_rxfill_t)_dma_rxfill,
	(di_rxreclaim_t)_dma_rxreclaim,
	(di_getnextrxp_t)_dma_getnextrxp,
	(di_peeknextrxp_t)_dma_peeknextrxp,
	(di_rxparam_get_t)_dma_rx_param_get,

	(di_fifoloopbackenable_t)_dma_fifoloopbackenable,
	(di_getvar_t)_dma_getvar,
	(di_counterreset_t)_dma_counterreset,
	(di_ctrlflags_t)_dma_ctrlflags,

#if defined(BCMDBG) || defined(BCMDBG_DUMP)
	(di_dump_t)dma64_dump,
	(di_dumptx_t)dma64_dumptx,
	(di_dumprx_t)dma64_dumprx,
#else
	NULL,
	NULL,
	NULL,
#endif /* defined(BCMDBG) || defined(BCMDBG_DUMP) */
	(di_rxactive_t)_dma_rxactive,
	(di_txpending_t)_dma_txpending,
	(di_txcommitted_t)_dma_txcommitted,
	(di_avoidancecnt_t)_dma_avoidancecnt,
};
static const di_fcn_t dma32proc = {
	(di_detach_t)_dma_detach,
	(di_txinit_t)dma32_txinit,
	(di_txreset_t)dma32_txreset,
	(di_txenabled_t)dma32_txenabled,
	(di_txsuspend_t)dma32_txsuspend,
	(di_txresume_t)dma32_txresume,
	(di_txsuspended_t)dma32_txsuspended,
	(di_txsuspendedidle_t)dma32_txsuspendedidle,
#ifdef WL_MULTIQUEUE
	(di_txflush_t)dma32_txflush,
	(di_txflush_clear_t)dma32_txflush_clear,
#endif /* WL_MULTIQUEUE */
	(di_txfast_t)dma32_txfast,
	NULL,		/* txunframed: 64-bit engines only */
	NULL,		/* getpos: 64-bit engines only */
	(di_txstopped_t)dma32_txstopped,
	(di_txreclaim_t)dma32_txreclaim,
	(di_getnexttxp_t)dma32_getnexttxp,
	(di_peeknexttxp_t)_dma_peeknexttxp,
	(di_txblock_t)_dma_txblock,
	(di_txunblock_t)_dma_txunblock,
	(di_txactive_t)_dma_txactive,
	(di_txrotate_t)dma32_txrotate,

	(di_rxinit_t)_dma_rxinit,
	(di_rxreset_t)dma32_rxreset,
	(di_rxidle_t)dma32_rxidle,
	(di_rxstopped_t)dma32_rxstopped,
	(di_rxenable_t)_dma_rxenable,
	(di_rxenabled_t)dma32_rxenabled,
	(di_rxfill_t)_dma_rxfill,
	(di_rxreclaim_t)_dma_rxreclaim,
	(di_getnextrxp_t)_dma_getnextrxp,
	(di_peeknextrxp_t)_dma_peeknextrxp,
	(di_rxparam_get_t)_dma_rx_param_get,

	(di_fifoloopbackenable_t)_dma_fifoloopbackenable,
	(di_getvar_t)_dma_getvar,
	(di_counterreset_t)_dma_counterreset,
	(di_ctrlflags_t)_dma_ctrlflags,

#if defined(BCMDBG) || defined(BCMDBG_DUMP)
	(di_dump_t)dma32_dump,
	(di_dumptx_t)dma32_dumptx,
	(di_dumprx_t)dma32_dumprx,
#else
	NULL,
	NULL,
	NULL,
#endif /* defined(BCMDBG) || defined(BCMDBG_DUMP) */
	(di_rxactive_t)_dma_rxactive,
	(di_txpending_t)_dma_txpending,
	(di_txcommitted_t)_dma_txcommitted,
	(di_avoidancecnt_t)_dma_avoidancecnt,
};
hnddma_t *
dma_attach(osl_t *osh, char *name, si_t *sih, void *dmaregstx, void *dmaregsrx,
	uint ntxd, uint nrxd, uint rxbufsize, int rxextheadroom, uint nrxpost, uint rxoffset,
	uint *msg_level)
{
	dma_info_t *di;
	uint size;

	/* allocate private info structure */
	if ((di = MALLOC(osh, sizeof(dma_info_t))) == NULL) {
		printf("dma_attach: out of memory, malloced %d bytes\n", MALLOCED(osh));
		return (NULL);
	}

	bzero((char *)di, sizeof(dma_info_t));

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	/* old chips w/o sb is no longer supported */
	ASSERT(sih != NULL);

	di->dma64 = ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);

	/* check arguments */
	ASSERT(ISPOWEROF2(ntxd));
	ASSERT(ISPOWEROF2(nrxd));

	if (nrxd == 0)
		ASSERT(dmaregsrx == NULL);
	if (ntxd == 0)
		ASSERT(dmaregstx == NULL);

	/* init dma reg pointer */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		ASSERT(ntxd <= D64MAXDD);
		ASSERT(nrxd <= D64MAXDD);
		di->d64txregs = (dma64regs_t *)dmaregstx;
		di->d64rxregs = (dma64regs_t *)dmaregsrx;
		di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;
	} else if (DMA32_ENAB(di)) {
		ASSERT(ntxd <= D32MAXDD);
		ASSERT(nrxd <= D32MAXDD);
		di->d32txregs = (dma32regs_t *)dmaregstx;
		di->d32rxregs = (dma32regs_t *)dmaregsrx;
		di->hnddma.di_fn = (const di_fcn_t *)&dma32proc;
	} else {
		DMA_ERROR(("dma_attach: driver doesn't support 32-bit DMA\n"));
		ASSERT(0);
		goto fail;
	}

	/* init the pktpool code if need be */
#ifdef BCMPKTPOOL
	di->pktpool = &pktpool_shared;
#endif /* BCMPKTPOOL */

	/* Default flags (which can be changed by the driver calling dma_ctrlflags
	 * before enable): For backwards compatibility both Rx Overflow Continue
	 * and Parity are DISABLED.
	 */
	di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

	DMA_TRACE(("%s: dma_attach: %s osh %p flags 0x%x ntxd %d nrxd %d rxbufsize %d "
	           "rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n",
	           name, (DMA64_MODE(di) ? "DMA64" : "DMA32"),
	           osh, di->hnddma.dmactrlflags, ntxd, nrxd,
	           rxbufsize, rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));

	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL-1] = '\0';

	di->osh = osh;
	di->sih = sih;

	/* save tunables */
	di->ntxd = (uint16)ntxd;
	di->nrxd = (uint16)nrxd;

	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom = (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (uint16)(rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (uint16)rxbufsize;

	di->nrxpost = (uint16)nrxpost;
	di->rxoffset = (uint8)rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map silicon backplane address to zero based memory, need offset
	 *     Other bus: use zero
	 *     SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sih->bustype == PCI_BUS) {
		if ((sih->buscoretype == PCIE_CORE_ID) && DMA64_MODE(di)) {
			/* pcie with DMA64 */
			di->ddoffsetlow = 0;
			di->ddoffsethigh = SI_PCIE_DMA_H32;
		} else {
			/* pci(DMA32/DMA64) or pcie with DMA32 */
			if ((CHIPID(sih->chip) == BCM4322_CHIP_ID) ||
			    (CHIPID(sih->chip) == BCM4342_CHIP_ID) ||
			    (CHIPID(sih->chip) == BCM43221_CHIP_ID) ||
			    (CHIPID(sih->chip) == BCM43231_CHIP_ID) ||
			    (CHIPID(sih->chip) == BCM43111_CHIP_ID) ||
			    (CHIPID(sih->chip) == BCM43112_CHIP_ID) ||
			    (CHIPID(sih->chip) == BCM43222_CHIP_ID) ||
			    (CHIPID(sih->chip) == BCM43420_CHIP_ID))
				di->ddoffsetlow = SI_PCI_DMA2;
			else
				di->ddoffsetlow = SI_PCI_DMA;
			di->ddoffsethigh = 0;
		}
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}

#if defined(__mips__) && defined(IL_BIGENDIAN)
	di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
#endif /* defined(__mips__) && defined(IL_BIGENDIAN) */

	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((si_coreid(sih) == SDIOD_CORE_ID) && ((si_corerev(sih) > 0) && (si_corerev(sih) <= 2)))
		di->addrext = 0;
	else if ((si_coreid(sih) == I2S_CORE_ID) &&
	         ((si_corerev(sih) == 0) || (si_corerev(sih) == 1)))
		di->addrext = 0;
	else
		di->addrext = _dma_isaddrext(di);

	/* does the descriptors need to be aligned and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		if (DMA64_MODE(di)) {
			di->dmadesc_align = D64RINGALIGN_BITS;
			if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
				/* for smaller dd table, HW relax the alignment requirement */
				di->dmadesc_align = D64RINGALIGN_BITS - 1;
			}
		} else
			di->dmadesc_align = D32RINGALIGN_BITS;
	} else {
		/* The start address of descriptor table should be aligned to cache line size,
		 * or other structure may share a cache line with it, which can lead to memory
		 * overlapping due to cache write-back operation. In the case of MIPS 74k, the
		 * cache line size is 32 bytes.
		 */
#ifdef __mips__
		di->dmadesc_align = 5;	/* 32 byte alignment */
#else
		di->dmadesc_align = 4;	/* 16 byte alignment */
#endif
	}

	DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
	          di->aligndesc_4k, di->dmadesc_align));

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		if ((di->txp = MALLOC(osh, size)) == NULL) {
			DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n",
			           di->name, MALLOCED(osh)));
			goto fail;
		}
		bzero((char *)di->txp, size);
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		if ((di->rxp = MALLOC(osh, size)) == NULL) {
			DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n",
			           di->name, MALLOCED(osh)));
			goto fail;
		}
		bzero((char *)di->rxp, size);
	}

	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}

	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n",
			           di->name, (uint32)PHYSADDRLO(di->txdpa)));
			goto fail;
		}
		if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n",
			           di->name, (uint32)PHYSADDRLO(di->rxdpa)));
			goto fail;
		}
	}

	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh "
	           "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow,
	           di->dataoffsethigh, di->addrext));

	/* allocate DMA mapping vectors */
	if (DMASGLIST_ENAB) {
		if (ntxd) {
			size = ntxd * sizeof(hnddma_seg_map_t);
			if ((di->txp_dmah = (hnddma_seg_map_t *)MALLOC(osh, size)) == NULL)
				goto fail;
			bzero((char *)di->txp_dmah, size);
		}

		if (nrxd) {
			size = nrxd * sizeof(hnddma_seg_map_t);
			if ((di->rxp_dmah = (hnddma_seg_map_t *)MALLOC(osh, size)) == NULL)
				goto fail;
			bzero((char *)di->rxp_dmah, size);
		}
	}

	return ((hnddma_t *)di);

fail:
	_dma_detach(di);
	return (NULL);
}
/* init the tx or rx descriptor */
static INLINE void
dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, dmaaddr_t pa, uint outidx, uint32 *flags,
	uint32 bufcount)
{
	/* dma32 uses 32-bit control to fit both flags and bufcounter */
	*flags = *flags | (bufcount & CTRL_BC_MASK);

	if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
		W_SM(&ddring[outidx].addr, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	} else {
		/* address extension */
		uint32 ae;
		ASSERT(di->addrext);
		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

		*flags |= (ae << CTRL_AE_SHIFT);
		W_SM(&ddring[outidx].addr, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	}
}
/* Check for odd number of 1's */
STATIC INLINE uint32
parity32(uint32 data)
{
	data ^= data >> 16;
	data ^= data >> 8;
	data ^= data >> 4;
	data ^= data >> 2;
	data ^= data >> 1;

	return (data & 1);
}

#define DMA64_DD_PARITY(dd)  parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
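/*
 * Illustrative check (not part of the driver): parity32() xor-folds the word
 * onto itself, so it returns the xor of all 32 bits, e.g.
 * parity32(0x00010000) == 1 while parity32(0x80000001) == 0. dma64_dd_upd()
 * below uses DMA64_DD_PARITY() to force each descriptor to even parity by
 * setting D64_CTRL2_PARITY whenever the four descriptor words xor to odd.
 */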
static INLINE void
dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx, uint32 *flags,
	uint32 bufcount)
{
	uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
#if defined(__mips__) && defined(IL_BIGENDIAN)
	if ((di->dataoffsetlow == SI_SDRAM_SWAPPED) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#else
	if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
		ASSERT((PHYSADDRHI(pa) & PCI64ADDR_HIGH) == 0);

		W_SM(&ddring[outidx].addrlow, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	} else {
		/* address extension for 32-bit PCI */
		uint32 ae;
		ASSERT(di->addrext);

		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
		ASSERT(PHYSADDRHI(pa) == 0);

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		W_SM(&ddring[outidx].addrlow, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(0 + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	}
	if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) {
		if (DMA64_DD_PARITY(&ddring[outidx])) {
			W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
		}
	}
}
static bool
_dma32_addrext(osl_t *osh, dma32regs_t *dma32regs)
{
	uint32 w;

	OR_REG(osh, &dma32regs->control, XC_AE);
	w = R_REG(osh, &dma32regs->control);
	AND_REG(osh, &dma32regs->control, ~XC_AE);
	return ((w & XC_AE) == XC_AE);
}
static bool
_dma_alloc(dma_info_t *di, uint direction)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		return dma64_alloc(di, direction);
	} else if (DMA32_ENAB(di)) {
		return dma32_alloc(di, direction);
	} else
		ASSERT(0);
	return FALSE;
}
/* !! may be called with core in reset */
static void
_dma_detach(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_detach\n", di->name));

	/* shouldn't be here if descriptors are unreclaimed */
	ASSERT(di->txin == di->txout);
	ASSERT(di->rxin == di->rxout);

	/* free dma descriptor rings */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		if (di->txd64)
			DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->txd64 - di->txdalign),
			                    di->txdalloc, (di->txdpaorig), &di->tx_dmah);
		if (di->rxd64)
			DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->rxd64 - di->rxdalign),
			                    di->rxdalloc, (di->rxdpaorig), &di->rx_dmah);
	} else if (DMA32_ENAB(di)) {
		if (di->txd32)
			DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->txd32 - di->txdalign),
			                    di->txdalloc, (di->txdpaorig), &di->tx_dmah);
		if (di->rxd32)
			DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->rxd32 - di->rxdalign),
			                    di->rxdalloc, (di->rxdpaorig), &di->rx_dmah);
	} else
		ASSERT(0);

	/* free packet pointer vectors */
	if (di->txp)
		MFREE(di->osh, (void *)di->txp, (di->ntxd * sizeof(void *)));
	if (di->rxp)
		MFREE(di->osh, (void *)di->rxp, (di->nrxd * sizeof(void *)));

	/* free tx packet DMA handles */
	if (di->txp_dmah)
		MFREE(di->osh, (void *)di->txp_dmah, di->ntxd * sizeof(hnddma_seg_map_t));

	/* free rx packet DMA handles */
	if (di->rxp_dmah)
		MFREE(di->osh, (void *)di->rxp_dmah, di->nrxd * sizeof(hnddma_seg_map_t));

	/* free our private info structure */
	MFREE(di->osh, (void *)di, sizeof(dma_info_t));
}
static bool
_dma_descriptor_align(dma_info_t *di)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		uint32 addrl;

		/* Check to see if the descriptors need to be aligned on 4K/8K or not */
		if (di->d64txregs != NULL) {
			W_REG(di->osh, &di->d64txregs->addrlow, 0xff0);
			addrl = R_REG(di->osh, &di->d64txregs->addrlow);
			if (addrl != 0)
				return FALSE;
		} else if (di->d64rxregs != NULL) {
			W_REG(di->osh, &di->d64rxregs->addrlow, 0xff0);
			addrl = R_REG(di->osh, &di->d64rxregs->addrlow);
			if (addrl != 0)
				return FALSE;
		}
	}
	return TRUE;
}
/* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
static bool
_dma_isaddrext(dma_info_t *di)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		/* DMA64 supports full 32- or 64-bit operation. AE is always valid */

		/* not all tx or rx channel are available */
		if (di->d64txregs != NULL) {
			if (!_dma64_addrext(di->osh, di->d64txregs)) {
				DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n",
				           di->name));
				ASSERT(0);
			}
			return TRUE;
		} else if (di->d64rxregs != NULL) {
			if (!_dma64_addrext(di->osh, di->d64rxregs)) {
				DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n",
				           di->name));
				ASSERT(0);
			}
			return TRUE;
		}
		return FALSE;
	} else if (DMA32_ENAB(di)) {
		if (di->d32txregs)
			return (_dma32_addrext(di->osh, di->d32txregs));
		else if (di->d32rxregs)
			return (_dma32_addrext(di->osh, di->d32rxregs));
	} else
		ASSERT(0);

	return FALSE;
}
/* initialize descriptor table base address */
static void
_dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		if (!di->aligndesc_4k) {
			if (direction == DMA_TX)
				di->xmtptrbase = PHYSADDRLO(pa);
			else
				di->rcvptrbase = PHYSADDRLO(pa);
		}

		if ((di->ddoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d64txregs->addrlow, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
				W_REG(di->osh, &di->d64txregs->addrhigh, (PHYSADDRHI(pa) +
				      di->ddoffsethigh));
			} else {
				W_REG(di->osh, &di->d64rxregs->addrlow, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
				W_REG(di->osh, &di->d64rxregs->addrhigh, (PHYSADDRHI(pa) +
				      di->ddoffsethigh));
			}
		} else {
			/* DMA64 32bits address extension */
			uint32 ae;
			ASSERT(di->addrext);
			ASSERT(PHYSADDRHI(pa) == 0);

			/* shift the high bit(s) from pa to ae */
			ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
			PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d64txregs->addrlow, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
				W_REG(di->osh, &di->d64txregs->addrhigh, di->ddoffsethigh);
				SET_REG(di->osh, &di->d64txregs->control, D64_XC_AE,
				        (ae << D64_XC_AE_SHIFT));
			} else {
				W_REG(di->osh, &di->d64rxregs->addrlow, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
				W_REG(di->osh, &di->d64rxregs->addrhigh, di->ddoffsethigh);
				SET_REG(di->osh, &di->d64rxregs->control, D64_RC_AE,
				        (ae << D64_RC_AE_SHIFT));
			}
		}

	} else if (DMA32_ENAB(di)) {
		ASSERT(PHYSADDRHI(pa) == 0);
		if ((di->ddoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
			if (direction == DMA_TX)
				W_REG(di->osh, &di->d32txregs->addr, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
			else
				W_REG(di->osh, &di->d32rxregs->addr, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
		} else {
			/* dma32 address extension */
			uint32 ae;
			ASSERT(di->addrext);

			/* shift the high bit(s) from pa to ae */
			ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
			PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d32txregs->addr, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
				SET_REG(di->osh, &di->d32txregs->control, XC_AE, ae << XC_AE_SHIFT);
			} else {
				W_REG(di->osh, &di->d32rxregs->addr, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
				SET_REG(di->osh, &di->d32rxregs->control, RC_AE, ae << RC_AE_SHIFT);
			}
		}
	} else
		ASSERT(0);
}
static void
_dma_fifoloopbackenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));

	if (DMA64_ENAB(di) && DMA64_MODE(di))
		OR_REG(di->osh, &di->d64txregs->control, D64_XC_LE);
	else if (DMA32_ENAB(di))
		OR_REG(di->osh, &di->d32txregs->control, XC_LE);
	else
		ASSERT(0);
}
static void
_dma_rxinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxinit\n", di->name));

	if (di->nrxd == 0)
		return;

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		BZERO_SM((void *)(uintptr)di->rxd64, (di->nrxd * sizeof(dma64dd_t)));

		/* DMA engine without alignment requirement requires table to be inited
		 * before enabling the engine
		 */
		if (!di->aligndesc_4k)
			_dma_ddtable_init(di, DMA_RX, di->rxdpa);

		_dma_rxenable(di);

		if (di->aligndesc_4k)
			_dma_ddtable_init(di, DMA_RX, di->rxdpa);
	} else if (DMA32_ENAB(di)) {
		BZERO_SM((void *)(uintptr)di->rxd32, (di->nrxd * sizeof(dma32dd_t)));
		_dma_rxenable(di);
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
	} else
		ASSERT(0);
}
static void
_dma_rxenable(dma_info_t *di)
{
	uint dmactrlflags = di->hnddma.dmactrlflags;

	DMA_TRACE(("%s: dma_rxenable\n", di->name));

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		uint32 control = (R_REG(di->osh, &di->d64rxregs->control) & D64_RC_AE) | D64_RC_RE;

		if ((dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= D64_RC_PD;

		if (dmactrlflags & DMA_CTRL_ROC)
			control |= D64_RC_OC;

		W_REG(di->osh, &di->d64rxregs->control,
		      ((di->rxoffset << D64_RC_RO_SHIFT) | control));
	} else if (DMA32_ENAB(di)) {
		uint32 control = (R_REG(di->osh, &di->d32rxregs->control) & RC_AE) | RC_RE;

		if ((dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= RC_PD;

		if (dmactrlflags & DMA_CTRL_ROC)
			control |= RC_OC;

		W_REG(di->osh, &di->d32rxregs->control,
		      ((di->rxoffset << RC_RO_SHIFT) | control));
	} else
		ASSERT(0);
}
static void
_dma_rx_param_get(dma_info_t *di, uint16 *rxoffset, uint16 *rxbufsize)
{
	/* the normal values fit into 16 bits */
	*rxoffset = (uint16)di->rxoffset;
	*rxbufsize = (uint16)di->rxbufsize;
}
/* !! rx entry routine
 * returns a pointer to the next frame received, or NULL if there are no more
 *   if DMA_CTRL_RXMULTI is defined, DMA scattering(multiple buffers) is supported
 *      with pkts chain
 *   otherwise, it's treated as giant pkt and will be tossed.
 *   The DMA scattering starts with normal DMA header, followed by first buffer data.
 *   After it reaches the max size of buffer, the data continues in next DMA descriptor
 *   buffer WITHOUT DMA header
 */
static void * BCMFASTPATH
_dma_rx(dma_info_t *di)
{
	void *p, *head, *tail;
	uint len;
	uint pkt_len;
	int resid = 0;

next_frame:
	head = _dma_getnextrxp(di, FALSE);
	if (head == NULL)
		return (NULL);

#if !defined(__mips__)
	len = ltoh16(*(uint16 *)(PKTDATA(di->osh, head)));
#else
	{
	int read_count;
	for (read_count = 200; (!(len = ltoh16(*(uint16 *)OSL_UNCACHED(PKTDATA(di->osh, head)))) &&
	     read_count); read_count--) {
		if (CHIPID(di->sih->chip) == BCM5356_CHIP_ID)
			break;
		OSL_DELAY(1);
	}

	if (!len) {
		DMA_ERROR(("%s: dma_rx: frame length (%d)\n", di->name, len));
		PKTFREE(di->osh, head, FALSE);
		goto next_frame;
	}
	}
#endif /* defined(__mips__) */

	DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));

	/* set actual length */
	pkt_len = MIN((di->rxoffset + len), di->rxbufsize);
	PKTSETLEN(di->osh, head, pkt_len);
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	if (resid > 0) {
		tail = head;
		while ((resid > 0) && (p = _dma_getnextrxp(di, FALSE))) {
			PKTSETNEXT(di->osh, tail, p);
			pkt_len = MIN(resid, (int)di->rxbufsize);
			PKTSETLEN(di->osh, p, pkt_len);

			tail = p;
			resid -= di->rxbufsize;
		}

#ifdef BCMDBG
		if (resid > 0) {
			uint cur;
			ASSERT(p == NULL);
			cur = (DMA64_ENAB(di) && DMA64_MODE(di)) ?
			      B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
			           di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t) :
			      B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK,
			          dma32dd_t);
			DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
			           di->rxin, di->rxout, cur));
		}
#endif /* BCMDBG */

		if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di->name, len));
			PKTFREE(di->osh, head, FALSE);
			di->hnddma.rxgiants++;
			goto next_frame;
		}
	}

	return (head);
}
/* post receive buffers
 *  return FALSE if refill failed completely and ring is empty
 *  this will stall the rx dma and user might want to call rxfill again asap
 *  This unlikely happens on memory-rich NIC, but often on memory-constrained dongle
 */
static bool BCMFASTPATH
_dma_rxfill(dma_info_t *di)
{
	void *p;
	uint16 rxin, rxout;
	uint32 flags = 0;
	uint n;
	uint i;
	dmaaddr_t pa;
	uint extra_offset = 0;
	bool ring_empty = FALSE;
	uint alignment_req = (di->hnddma.dmactrlflags & DMA_CTRL_USB_BOUNDRY4KB_WAR) ?
	                     16 : 1;	/* MUST BE POWER of 2 */

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */
	rxin = di->rxin;
	rxout = di->rxout;

	n = di->nrxpost - NRXDACTIVE(rxin, rxout);

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

	for (i = 0; i < n; i++) {
		/* the di->rxbufsize doesn't include the extra headroom, we need to add it to the
		 * size to be allocated
		 */
		if (POOL_ENAB(di) && di->rxbufsize > PKTPOOL_MIN_LEN) {
			ASSERT(di->pktpool);
			p = pktpool_get(di->pktpool);
#ifdef BCMDBG_POOL
			if (p)
				PKTPOOLSETSTATE(p, POOL_RXFILL);
#endif /* BCMDBG_POOL */
		} else {
			if (di->rxbufsize > BCMEXTRAHDROOM)
				extra_offset = di->rxextrahdrroom;

			p = PKTGET(di->osh, (di->rxbufsize + extra_offset + alignment_req - 1),
			           FALSE);
		}
		if (p == NULL) {
			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", di->name));
			if (i == 0) {
				if (DMA64_ENAB(di) && DMA64_MODE(di)) {
					if (dma64_rxidle(di)) {
						DMA_ERROR(("%s: rxfill64: ring is empty !\n",
						           di->name));
						ring_empty = TRUE;
					}
				} else if (DMA32_ENAB(di)) {
					if (dma32_rxidle(di)) {
						DMA_ERROR(("%s: rxfill32: ring is empty !\n",
						           di->name));
						ring_empty = TRUE;
					}
				} else
					ASSERT(0);
			}
			di->hnddma.rxnobuf++;
			break;
		}

		/* reserve an extra headroom, if applicable */
		if (di->hnddma.dmactrlflags & DMA_CTRL_USB_BOUNDRY4KB_WAR) {
			extra_offset += ((alignment_req - (unsigned long)PKTDATA(di->osh, p))
			                 & (alignment_req - 1));
		}
		if (extra_offset)
			PKTPULL(di->osh, p, extra_offset);

#ifdef CTFMAP
		/* mark as ctf buffer for fast mapping */
		if (CTF_ENAB(kcih)) {
			ASSERT((((uint32)PKTDATA(di->osh, p)) & 31) == 0);
			PKTSETCTF(di->osh, p);
		}
#endif /* CTFMAP */

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(uint32 *)(PKTDATA(di->osh, p)) = 0;

		if (DMASGLIST_ENAB)
			bzero(&di->rxp_dmah[rxout], sizeof(hnddma_seg_map_t));

		pa = DMA_MAP(di->osh, PKTDATA(di->osh, p),
		             di->rxbufsize, DMA_RX, p,
		             &di->rxp_dmah[rxout]);

#ifdef __mips__
		/* DMA_MAP will invalidate the cache but may or may not write-back
		 * to cache. So update len uncached.
		 */
		*(uint32 *)OSL_UNCACHED((PKTDATA(di->osh, p))) = 0;
#endif /* __mips__ */

		ASSERT(ISALIGNED(PHYSADDRLO(pa), 4));

		/* save the free packet pointer */
		ASSERT(di->rxp[rxout] == NULL);
		di->rxp[rxout] = p;

		/* reset flags for each descriptor */
		flags = 0;
		if (DMA64_ENAB(di) && DMA64_MODE(di)) {
			if (rxout == (di->nrxd - 1))
				flags = D64_CTRL1_EOT;

			dma64_dd_upd(di, di->rxd64, pa, rxout, &flags, di->rxbufsize);
		} else if (DMA32_ENAB(di)) {
			if (rxout == (di->nrxd - 1))
				flags = CTRL_EOT;

			ASSERT(PHYSADDRHI(pa) == 0);
			dma32_dd_upd(di, di->rxd32, pa, rxout, &flags, di->rxbufsize);
		} else
			ASSERT(0);
		rxout = NEXTRXD(rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		W_REG(di->osh, &di->d64rxregs->ptr, di->rcvptrbase + I2B(rxout, dma64dd_t));
	} else if (DMA32_ENAB(di)) {
		W_REG(di->osh, &di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
	} else
		ASSERT(0);

	return !ring_empty;
}
/* like getnexttxp but no reclaim */
static void *
_dma_peeknexttxp(dma_info_t *di)
{
	uint end, i;

	if (di->ntxd == 0)
		return (NULL);

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		end = B2I(((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK) -
		           di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		end = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
	} else
		ASSERT(0);

	for (i = di->txin; i != end; i = NEXTTXD(i))
		if (di->txp[i])
			return (di->txp[i]);

	return (NULL);
}
/* like getnextrxp but does not take the packet off the ring */
static void *
_dma_peeknextrxp(dma_info_t *di)
{
	uint end, i;

	if (di->nrxd == 0)
		return (NULL);

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		end = B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
		           di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		end = B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t);
	} else
		ASSERT(0);

	for (i = di->rxin; i != end; i = NEXTRXD(i))
		if (di->rxp[i])
			return (di->rxp[i]);

	return (NULL);
}
static void
_dma_rxreclaim(dma_info_t *di)
{
	void *p;

	/* "unused local" warning suppression for OSLs that
	 * define PKTFREE() without using the di->osh arg
	 */
	di = di;

	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

	while ((p = _dma_getnextrxp(di, TRUE)))
		PKTFREE(di->osh, p, FALSE);
}
static void * BCMFASTPATH
_dma_getnextrxp(dma_info_t *di, bool forceall)
{
	if (di->nrxd == 0)
		return (NULL);

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		return dma64_getnextrxp(di, forceall);
	} else if (DMA32_ENAB(di)) {
		return dma32_getnextrxp(di, forceall);
	} else
		ASSERT(0);
	return (NULL);
}
static void
_dma_txblock(dma_info_t *di)
{
	di->hnddma.txavail = 0;
}

static void
_dma_txunblock(dma_info_t *di)
{
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}

static uint
_dma_txactive(dma_info_t *di)
{
	return NTXDACTIVE(di->txin, di->txout);
}
static uint
_dma_txpending(dma_info_t *di)
{
	uint curr;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		curr = B2I(((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK) -
		            di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		curr = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
	} else
		ASSERT(0);

	return NTXDACTIVE(curr, di->txout);
}
static uint
_dma_txcommitted(dma_info_t *di)
{
	uint ptr;
	uint txin = di->txin;

	if (txin == di->txout)
		return 0;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		ptr = B2I(R_REG(di->osh, &di->d64txregs->ptr), dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		ptr = B2I(R_REG(di->osh, &di->d32txregs->ptr), dma32dd_t);
	} else
		ASSERT(0);

	return NTXDACTIVE(di->txin, ptr);
}
static uint
_dma_rxactive(dma_info_t *di)
{
	return NRXDACTIVE(di->rxin, di->rxout);
}

static void
_dma_counterreset(dma_info_t *di)
{
	/* reset all software counters */
	di->hnddma.rxgiants = 0;
	di->hnddma.rxnobuf = 0;
	di->hnddma.txnobuf = 0;
}
static uint
_dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
{
	uint dmactrlflags;

	if (di == NULL) {
		/* don't dereference di->name here: di is NULL */
		DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
		return (0);
	}

	dmactrlflags = di->hnddma.dmactrlflags;
	ASSERT((flags & ~mask) == 0);

	dmactrlflags &= ~mask;
	dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (dmactrlflags & DMA_CTRL_PEN) {
		uint32 control;

		if (DMA64_ENAB(di) && DMA64_MODE(di)) {
			control = R_REG(di->osh, &di->d64txregs->control);
			W_REG(di->osh, &di->d64txregs->control, control | D64_XC_PD);
			if (R_REG(di->osh, &di->d64txregs->control) & D64_XC_PD) {
				/* We *can* disable it so it is supported,
				 * restore control register
				 */
				W_REG(di->osh, &di->d64txregs->control, control);
			} else {
				/* Not supported, don't allow it to be enabled */
				dmactrlflags &= ~DMA_CTRL_PEN;
			}
		} else if (DMA32_ENAB(di)) {
			control = R_REG(di->osh, &di->d32txregs->control);
			W_REG(di->osh, &di->d32txregs->control, control | XC_PD);
			if (R_REG(di->osh, &di->d32txregs->control) & XC_PD) {
				W_REG(di->osh, &di->d32txregs->control, control);
			} else {
				/* Not supported, don't allow it to be enabled */
				dmactrlflags &= ~DMA_CTRL_PEN;
			}
		} else
			ASSERT(0);
	}

	di->hnddma.dmactrlflags = dmactrlflags;

	return (dmactrlflags);
}
/* get the address of the var in order to change later */
static uintptr
_dma_getvar(dma_info_t *di, const char *name)
{
	if (!strcmp(name, "&txavail"))
		return ((uintptr) &(di->hnddma.txavail));
	else {
		ASSERT(0);
	}
	return (0);
}

static uint
_dma_avoidancecnt(dma_info_t *di)
{
	return (di->dma_avoidance_cnt);
}
void
dma_txpioloopback(osl_t *osh, dma32regs_t *regs)
{
	OR_REG(osh, &regs->control, XC_LE);
}
uint8
dma_align_sizetobits(uint size)
{
	uint8 bitpos = 0;

	ASSERT(!(size & (size-1)));
	while (size >>= 1) {
		bitpos++;
	}
	return (bitpos);
}
/* This function ensures that the DMA descriptor ring will not get allocated
 * across Page boundary. If the allocation is done across the page boundary
 * at the first time, then it is freed and the allocation is done at
 * descriptor ring size aligned location. This will ensure that the ring will
 * not cross page boundary
 */
static void *
dma_ringalloc(osl_t *osh, uint32 boundary, uint size, uint16 *alignbits, uint *alloced,
	dmaaddr_t *descpa, osldma_t **dmah)
{
	void *va;
	uint32 desc_strtaddr;
	uint32 alignbytes = 1 << *alignbits;

	if (NULL == (va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced, descpa, dmah)))
		return NULL;

	desc_strtaddr = (uint32)ROUNDUP((uintptr)va, alignbytes);
	if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr & boundary)) {
		*alignbits = dma_align_sizetobits(size);
		DMA_FREE_CONSISTENT(osh, va,
		                    size, *descpa, dmah);
		va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced, descpa, dmah);
	}
	return va;
}
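/*
 * Example (illustrative only): a ring of 256 16-byte dma64 descriptors gives
 * size == 4096, so on the retry path above dma_align_sizetobits(4096) == 12
 * and the ring is re-allocated on a 4 KB boundary; a block aligned to its own
 * (power-of-2) size can never straddle a same-sized boundary.
 */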
#if defined(BCMDBG) || defined(BCMDBG_DUMP)
static void
dma32_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma32dd_t *ring, uint start, uint end,
	uint max_num)
{
	uint i;

	for (i = start; i != end; i = XXD((i + 1), max_num)) {
		/* in the format of high->low 8 bytes */
		bcm_bprintf(b, "ring index %d: 0x%x %x\n",
		            i, R_SM(&ring[i].addr), R_SM(&ring[i].ctrl));
	}
}

static void
dma32_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
	if (di->ntxd == 0)
		return;

	bcm_bprintf(b, "DMA32: txd32 %p txdpa 0x%lx txp %p txin %d txout %d "
	            "txavail %d txnodesc %d\n", di->txd32, PHYSADDRLO(di->txdpa), di->txp,
	            di->txin, di->txout, di->hnddma.txavail, di->hnddma.txnodesc);

	bcm_bprintf(b, "xmtcontrol 0x%x xmtaddr 0x%x xmtptr 0x%x xmtstatus 0x%x\n",
	            R_REG(di->osh, &di->d32txregs->control),
	            R_REG(di->osh, &di->d32txregs->addr),
	            R_REG(di->osh, &di->d32txregs->ptr),
	            R_REG(di->osh, &di->d32txregs->status));

	if (dumpring && di->txd32)
		dma32_dumpring(di, b, di->txd32, di->txin, di->txout, di->ntxd);
}

static void
dma32_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
	if (di->nrxd == 0)
		return;

	bcm_bprintf(b, "DMA32: rxd32 %p rxdpa 0x%lx rxp %p rxin %d rxout %d\n",
	            di->rxd32, PHYSADDRLO(di->rxdpa), di->rxp, di->rxin, di->rxout);

	bcm_bprintf(b, "rcvcontrol 0x%x rcvaddr 0x%x rcvptr 0x%x rcvstatus 0x%x\n",
	            R_REG(di->osh, &di->d32rxregs->control),
	            R_REG(di->osh, &di->d32rxregs->addr),
	            R_REG(di->osh, &di->d32rxregs->ptr),
	            R_REG(di->osh, &di->d32rxregs->status));
	if (di->rxd32 && dumpring)
		dma32_dumpring(di, b, di->rxd32, di->rxin, di->rxout, di->nrxd);
}

static void
dma32_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
	dma32_dumptx(di, b, dumpring);
	dma32_dumprx(di, b, dumpring);
}

static void
dma64_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma64dd_t *ring, uint start, uint end,
	uint max_num)
{
	uint i;

	for (i = start; i != end; i = XXD((i + 1), max_num)) {
		/* in the format of high->low 16 bytes */
		bcm_bprintf(b, "ring index %d: 0x%x %x %x %x\n",
		            i, R_SM(&ring[i].addrhigh), R_SM(&ring[i].addrlow),
		            R_SM(&ring[i].ctrl2), R_SM(&ring[i].ctrl1));
	}
}

static void
dma64_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
	if (di->ntxd == 0)
		return;

	bcm_bprintf(b, "DMA64: txd64 %p txdpa 0x%lx txdpahi 0x%lx txp %p txin %d txout %d "
	            "txavail %d txnodesc %d\n", di->txd64, PHYSADDRLO(di->txdpa),
	            PHYSADDRHI(di->txdpaorig), di->txp, di->txin, di->txout, di->hnddma.txavail,
	            di->hnddma.txnodesc);

	bcm_bprintf(b, "xmtcontrol 0x%x xmtaddrlow 0x%x xmtaddrhigh 0x%x "
	            "xmtptr 0x%x xmtstatus0 0x%x xmtstatus1 0x%x\n",
	            R_REG(di->osh, &di->d64txregs->control),
	            R_REG(di->osh, &di->d64txregs->addrlow),
	            R_REG(di->osh, &di->d64txregs->addrhigh),
	            R_REG(di->osh, &di->d64txregs->ptr),
	            R_REG(di->osh, &di->d64txregs->status0),
	            R_REG(di->osh, &di->d64txregs->status1));

	bcm_bprintf(b, "DMA64: DMA avoidance applied %d\n", di->dma_avoidance_cnt);

	if (dumpring && di->txd64) {
		dma64_dumpring(di, b, di->txd64, di->txin, di->txout, di->ntxd);
	}
}

static void
dma64_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
	if (di->nrxd == 0)
		return;

	bcm_bprintf(b, "DMA64: rxd64 %p rxdpa 0x%lx rxdpahi 0x%lx rxp %p rxin %d rxout %d\n",
	            di->rxd64, PHYSADDRLO(di->rxdpa), PHYSADDRHI(di->rxdpaorig), di->rxp,
	            di->rxin, di->rxout);

	bcm_bprintf(b, "rcvcontrol 0x%x rcvaddrlow 0x%x rcvaddrhigh 0x%x rcvptr "
	            "0x%x rcvstatus0 0x%x rcvstatus1 0x%x\n",
	            R_REG(di->osh, &di->d64rxregs->control),
	            R_REG(di->osh, &di->d64rxregs->addrlow),
	            R_REG(di->osh, &di->d64rxregs->addrhigh),
	            R_REG(di->osh, &di->d64rxregs->ptr),
	            R_REG(di->osh, &di->d64rxregs->status0),
	            R_REG(di->osh, &di->d64rxregs->status1));
	if (di->rxd64 && dumpring) {
		dma64_dumpring(di, b, di->rxd64, di->rxin, di->rxout, di->nrxd);
	}
}

static void
dma64_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
	dma64_dumptx(di, b, dumpring);
	dma64_dumprx(di, b, dumpring);
}

#endif /* BCMDBG || BCMDBG_DUMP */
/* 32-bit DMA functions */

static void
dma32_txinit(dma_info_t *di)
{
	uint32 control = XC_XE;

	DMA_TRACE(("%s: dma_txinit\n", di->name));

	if (di->ntxd == 0)
		return;

	di->txin = di->txout = 0;
	di->hnddma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	BZERO_SM((void *)(uintptr)di->txd32, (di->ntxd * sizeof(dma32dd_t)));

	if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= XC_PD;
	W_REG(di->osh, &di->d32txregs->control, control);
	_dma_ddtable_init(di, DMA_TX, di->txdpa);
}

static bool
dma32_txenabled(dma_info_t *di)
{
	uint32 xc;

	/* If the chip is dead, it is not enabled :-) */
	xc = R_REG(di->osh, &di->d32txregs->control);
	return ((xc != 0xffffffff) && (xc & XC_XE));
}

static void
dma32_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));

	if (di->ntxd == 0)
		return;

	OR_REG(di->osh, &di->d32txregs->control, XC_SE);
}

static void
dma32_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));

	if (di->ntxd == 0)
		return;

	AND_REG(di->osh, &di->d32txregs->control, ~XC_SE);
}

static bool
dma32_txsuspended(dma_info_t *di)
{
	return (di->ntxd == 0) || ((R_REG(di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
}
#ifdef WL_MULTIQUEUE
static void
dma32_txflush(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txflush\n", di->name));

	if (di->ntxd == 0)
		return;

	OR_REG(di->osh, &di->d32txregs->control, XC_SE | XC_FL);
}

static void
dma32_txflush_clear(dma_info_t *di)
{
	uint32 status;

	DMA_TRACE(("%s: dma_txflush_clear\n", di->name));

	if (di->ntxd == 0)
		return;

	SPINWAIT(((status = (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
	          != XS_XS_DISABLED) &&
	         (status != XS_XS_IDLE) &&
	         (status != XS_XS_STOPPED),
	         10000);
	AND_REG(di->osh, &di->d32txregs->control, ~XC_FL);
}
#endif /* WL_MULTIQUEUE */
static void
dma32_txreclaim(dma_info_t *di, txd_range_t range)
{
	void *p;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
	           (range == HNDDMA_RANGE_ALL) ? "all" :
	           ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : "transfered")));

	if (di->txin == di->txout)
		return;

	while ((p = dma32_getnexttxp(di, range)))
		PKTFREE(di->osh, p, TRUE);
}

static bool
dma32_txstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == XS_XS_STOPPED);
}

static bool
dma32_rxstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d32rxregs->status) & RS_RS_MASK) == RS_RS_STOPPED);
}
static bool
dma32_alloc(dma_info_t *di, uint direction)
{
	uint size;
	uint ddlen;
	void *va;
	uint alloced = 0;
	uint16 align;
	uint16 align_bits;

	ddlen = sizeof(dma32dd_t);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

	align_bits = di->dmadesc_align;
	align = (1 << align_bits);

	if (direction == DMA_TX) {
		if ((va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits, &alloced,
		                        &di->txdpaorig, &di->tx_dmah)) == NULL) {
			DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
			           di->name));
			return FALSE;
		}

		PHYSADDRHISET(di->txdpa, 0);
		ASSERT(PHYSADDRHI(di->txdpaorig) == 0);
		di->txd32 = (dma32dd_t *)ROUNDUP((uintptr)va, align);
		di->txdalign = (uint)((int8 *)(uintptr)di->txd32 - (int8 *)va);

		PHYSADDRLOSET(di->txdpa, PHYSADDRLO(di->txdpaorig) + di->txdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));

		di->txdalloc = alloced;
		ASSERT(ISALIGNED((uintptr)di->txd32, align));
	} else {
		if ((va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits, &alloced,
		                        &di->rxdpaorig, &di->rx_dmah)) == NULL) {
			DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
			           di->name));
			return FALSE;
		}

		PHYSADDRHISET(di->rxdpa, 0);
		ASSERT(PHYSADDRHI(di->rxdpaorig) == 0);
		di->rxd32 = (dma32dd_t *)ROUNDUP((uintptr)va, align);
		di->rxdalign = (uint)((int8 *)(uintptr)di->rxd32 - (int8 *)va);

		PHYSADDRLOSET(di->rxdpa, PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));
		di->rxdalloc = alloced;
		ASSERT(ISALIGNED((uintptr)di->rxd32, align));
	}

	return TRUE;
}
static bool
dma32_txreset(dma_info_t *di)
{
	uint32 status;

	if (di->ntxd == 0)
		return TRUE;

	/* suspend tx DMA first */
	W_REG(di->osh, &di->d32txregs->control, XC_SE);
	SPINWAIT(((status = (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
	          != XS_XS_DISABLED) &&
	         (status != XS_XS_IDLE) &&
	         (status != XS_XS_STOPPED),
	         10000);

	W_REG(di->osh, &di->d32txregs->control, 0);
	SPINWAIT(((status = (R_REG(di->osh,
	          &di->d32txregs->status) & XS_XS_MASK)) != XS_XS_DISABLED),
	         10000);

	/* wait for the last transaction to complete */
	OSL_DELAY(300);

	return (status == XS_XS_DISABLED);
}

static bool
dma32_rxidle(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxidle\n", di->name));

	if (di->nrxd == 0)
		return TRUE;

	return ((R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
	        R_REG(di->osh, &di->d32rxregs->ptr));
}

static bool
dma32_rxreset(dma_info_t *di)
{
	uint32 status;

	if (di->nrxd == 0)
		return TRUE;

	W_REG(di->osh, &di->d32rxregs->control, 0);
	SPINWAIT(((status = (R_REG(di->osh,
	          &di->d32rxregs->status) & RS_RS_MASK)) != RS_RS_DISABLED),
	         10000);

	return (status == RS_RS_DISABLED);
}

static bool
dma32_rxenabled(dma_info_t *di)
{
	uint32 rc;

	rc = R_REG(di->osh, &di->d32rxregs->control);
	return ((rc != 0xffffffff) && (rc & RC_RE));
}

static bool
dma32_txsuspendedidle(dma_info_t *di)
{
	if (di->ntxd == 0)
		return TRUE;

	if (!(R_REG(di->osh, &di->d32txregs->control) & XC_SE))
		return 0;

	if ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
		return 0;

	OSL_DELAY(2);
	return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == XS_XS_IDLE);
}
/* !! tx entry routine
 * supports full 32bit dma engine buffer addressing so
 * dma buffers can cross 4 Kbyte page boundaries.
 *
 * WARNING: call must check the return value for error.
 *   the error(toss frames) could be fatal and cause many subsequent hard to debug problems
 */
static int
dma32_txfast(dma_info_t *di, void *p0, bool commit)
{
	void *p, *next;
	uchar *data;
	uint len;
	uint16 txout;
	uint32 flags = 0;
	dmaaddr_t pa;

	DMA_TRACE(("%s: dma_txfast\n", di->name));

	txout = di->txout;

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		uint nsegs, j;
		hnddma_seg_map_t *map;

		data = PKTDATA(di->osh, p);
		len = PKTLEN(di->osh, p);
		len += PKTDMAPAD(di->osh, p);
		next = PKTNEXT(di->osh, p);

		/* return nonzero if out of tx descriptors */
		if (NEXTTXD(txout) == di->txin)
			goto outoftxd;

		if (len == 0)
			continue;

		if (DMASGLIST_ENAB)
			bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));

		/* get physical address of buffer start */
		pa = DMA_MAP(di->osh, data, len, DMA_TX, p, &di->txp_dmah[txout]);

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[txout];

			/* See if all the segments can be accounted for */
			if (map->nsegs > (uint)(di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1))
				goto outoftxd;

			nsegs = map->nsegs;
		} else
			nsegs = 1;

		for (j = 1; j <= nsegs; j++) {
			flags = 0;
			if (p == p0 && j == 1)
				flags |= CTRL_SOF;

			/* With a DMA segment list, Descriptor table is filled
			 * using the segment list instead of looping over
			 * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
			 * end of segment list is reached.
			 */
			if ((!DMASGLIST_ENAB && next == NULL) ||
			    (DMASGLIST_ENAB && j == nsegs))
				flags |= (CTRL_IOC | CTRL_EOF);
			if (txout == (di->ntxd - 1))
				flags |= CTRL_EOT;

			if (DMASGLIST_ENAB) {
				len = map->segs[j - 1].length;
				pa = map->segs[j - 1].addr;
			}
			ASSERT(PHYSADDRHI(pa) == 0);

			dma32_dd_upd(di, di->txd32, pa, txout, &flags, len);
			ASSERT(di->txp[txout] == NULL);

			txout = NEXTTXD(txout);
		}

		/* See above. No need to loop over individual buffers */
		if (DMASGLIST_ENAB)
			break;
	}

	/* if last txd eof not set, fix it */
	if (!(flags & CTRL_EOF))
		W_SM(&di->txd32[PREVTXD(txout)].ctrl,
		     BUS_SWAP32(flags | CTRL_IOC | CTRL_EOF));

	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit)
		W_REG(di->osh, &di->d32txregs->ptr, I2B(txout, dma32dd_t));

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (0);

outoftxd:
	DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
	PKTFREE(di->osh, p0, TRUE);
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
	di->hnddma.txnodesc++;
	return (-1);
}
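/*
 * Caller sketch (hypothetical, for illustration): dma32_txfast is reached
 * through the exported di_fn table, and per the WARNING above its return
 * value must be checked:
 *
 *	hnddma_t *dmah = dma_attach(osh, "wl0", sih, txregs, rxregs,
 *	                            NTXD, NRXD, RXBUFSZ, -1, NRXPOST, HWRXOFF, NULL);
 *	if (dmah->di_fn->txfast(dmah, p, TRUE) < 0)
 *		printf("tx ring full, frame tossed\n");
 *
 * NTXD, NRXD, RXBUFSZ, NRXPOST and HWRXOFF are placeholder tunables, not
 * values taken from this file.
 */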
/*
 * Reclaim the next completed txd (txds if using chained buffers) in the range
 * specified and return the associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return the associated packet regardless of the value of the hardware pointers.
 */
static void * BCMFASTPATH
dma32_getnexttxp(dma_info_t *di, txd_range_t range)
{
	uint16 start, end, i;
	uint16 active_desc;
	void *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
	           (range == HNDDMA_RANGE_ALL) ? "all" :
	           ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : "transferred")));

	if (di->ntxd == 0)
		return (NULL);

	txp = NULL;

	start = di->txin;
	if (range == HNDDMA_RANGE_ALL)
		end = di->txout;
	else {
		dma32regs_t *dregs = di->d32txregs;

		end = (uint16)B2I(R_REG(di->osh, &dregs->status) & XS_CD_MASK, dma32dd_t);

		if (range == HNDDMA_RANGE_TRANSFERED) {
			active_desc = (uint16)((R_REG(di->osh, &dregs->status) & XS_AD_MASK) >>
			                       XS_AD_SHIFT);
			active_desc = (uint16)B2I(active_desc, dma32dd_t);
			if (end != active_desc)
				end = PREVTXD(active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		dmaaddr_t pa;
		hnddma_seg_map_t *map = NULL;
		uint size, j, nsegs;

		PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->txd32[i].addr)) - di->dataoffsetlow));
		PHYSADDRHISET(pa, 0);

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[i];
			size = map->origsize;
			nsegs = map->nsegs;
		} else {
			size = (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) & CTRL_BC_MASK);
			nsegs = 1;
		}

		for (j = nsegs; j > 0; j--) {
			W_SM(&di->txd32[i].addr, 0xdeadbeef);

			txp = di->txp[i];
			di->txp[i] = NULL;
			if (j > 1)
				i = NEXTTXD(i);
		}

		DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
	}

	di->txin = i;

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (txp);

bogus:
	DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
	          start, end, di->txout, forceall));
	return (NULL);
}
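
/* Illustrative sketch only: a hypothetical completion handler reclaiming
 * finished frames, mirroring what the txreclaim helpers in this file do.
 * HNDDMA_RANGE_TRANSMITTED only walks descriptors the hardware "CurrDescr"
 * pointer has moved past; free_fn stands in for whatever the caller does
 * with a completed packet.
 *
 *	void *p;
 *	while ((p = dma32_getnexttxp(di, HNDDMA_RANGE_TRANSMITTED)) != NULL)
 *		free_fn(p);
 */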
static void * BCMFASTPATH
dma32_getnextrxp(dma_info_t *di, bool forceall)
{
	uint16 i, curr;
	void *rxp;
	dmaaddr_t pa;

	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma32_rxenabled(di));

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return (NULL);

	curr = B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return (NULL);

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	ASSERT(rxp);
	di->rxp[i] = NULL;

	PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) - di->dataoffsetlow));
	PHYSADDRHISET(pa, 0);

	/* clear this packet from the descriptor ring */
	DMA_UNMAP(di->osh, pa,
	          di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

	W_SM(&di->rxd32[i].addr, 0xdeadbeef);

	di->rxin = NEXTRXD(i);

	return (rxp);
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void
dma32_txrotate(dma_info_t *di)
{
	uint16 ad;
	uint nactive;
	uint rot;
	uint16 old, new;
	uint32 w;
	uint16 first, last;

	ASSERT(dma32_txsuspendedidle(di));

	nactive = _dma_txactive(di);
	ad = (uint16)(B2I(((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK) >> XS_AD_SHIFT),
	                  dma32dd_t));
	rot = TXD(ad - di->txin);

	ASSERT(rot < di->ntxd);

	/* full-ring case is a lot harder - don't worry about this */
	if (rot >= (di->ntxd - nactive)) {
		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
		return;
	}

	first = di->txin;
	last = PREVTXD(di->txout);

	/* move entries starting at last and moving backwards to first */
	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
		new = TXD(old + rot);

		/*
		 * Move the tx dma descriptor.
		 * EOT is set only in the last entry in the ring.
		 */
		w = BUS_SWAP32(R_SM(&di->txd32[old].ctrl)) & ~CTRL_EOT;
		if (new == (di->ntxd - 1))
			w |= CTRL_EOT;
		W_SM(&di->txd32[new].ctrl, BUS_SWAP32(w));
		W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));

		/* zap the old tx dma descriptor address field */
		W_SM(&di->txd32[old].addr, BUS_SWAP32(0xdeadbeef));

		/* move the corresponding txp[] entry */
		ASSERT(di->txp[new] == NULL);
		di->txp[new] = di->txp[old];

		/* Move the segment map as well */
		if (DMASGLIST_ENAB) {
			bcopy(&di->txp_dmah[old], &di->txp_dmah[new], sizeof(hnddma_seg_map_t));
			bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
		}

		di->txp[old] = NULL;
	}

	/* update txin and txout */
	di->txin = ad;
	di->txout = TXD(di->txout + rot);
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	/* kick the chip */
	W_REG(di->osh, &di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
}
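
/* Worked example (illustrative only): with ntxd = 64, txin = 60 and an
 * ActiveDescriptor index ad = 2, rot = TXD(2 - 60) = (2 - 60) & 63 = 6, so
 * every live descriptor and its txp[] entry moves forward six slots, the
 * EOT bit is re-seated on the new ring-end entry, and txin/txout advance
 * by the same amount modulo the ring size.
 */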
/* 64-bit DMA functions */
static void
dma64_txinit(dma_info_t *di)
{
	uint32 control = D64_XC_XE;

	DMA_TRACE(("%s: dma_txinit\n", di->name));

	if (di->ntxd == 0)
		return;

	di->txin = di->txout = 0;
	di->hnddma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	BZERO_SM((void *)(uintptr)di->txd64, (di->ntxd * sizeof(dma64dd_t)));

	/* A DMA engine without an alignment requirement requires the descriptor
	 * table to be initialized before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);

	if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_XC_PD;
	OR_REG(di->osh, &di->d64txregs->control, control);

	/* A DMA engine with an alignment requirement requires the descriptor
	 * table to be initialized after enabling the engine
	 */
	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);
}
static bool
dma64_txenabled(dma_info_t *di)
{
	uint32 xc;

	/* If the chip is dead, it is not enabled :-) */
	xc = R_REG(di->osh, &di->d64txregs->control);
	return ((xc != 0xffffffff) && (xc & D64_XC_XE));
}
static void
dma64_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));

	if (di->ntxd == 0)
		return;

	OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
}
static void
dma64_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));

	if (di->ntxd == 0)
		return;

	AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_SE);
}
static bool
dma64_txsuspended(dma_info_t *di)
{
	return (di->ntxd == 0) ||
	       ((R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE) == D64_XC_SE);
}
#ifdef WL_MULTIQUEUE
static void
dma64_txflush(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txflush\n", di->name));

	if (di->ntxd == 0)
		return;

	OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE | D64_XC_FL);
}

static void
dma64_txflush_clear(dma_info_t *di)
{
	uint32 status;

	DMA_TRACE(("%s: dma_txflush_clear\n", di->name));

	if (di->ntxd == 0)
		return;

	SPINWAIT(((status = (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
	          D64_XS0_XS_DISABLED) &&
	         (status != D64_XS0_XS_IDLE) &&
	         (status != D64_XS0_XS_STOPPED),
	         10000);
	AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_FL);
}
#endif /* WL_MULTIQUEUE */
static void BCMFASTPATH
dma64_txreclaim(dma_info_t *di, txd_range_t range)
{
	void *p;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
	           (range == HNDDMA_RANGE_ALL) ? "all" :
	           ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : "transferred")));

	if (di->txin == di->txout)
		return;

	while ((p = dma64_getnexttxp(di, range))) {
		/* For unframed data, we don't have any packets to free */
		if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
			PKTFREE(di->osh, p, TRUE);
	}
}
static bool
dma64_txstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
	        D64_XS0_XS_STOPPED);
}

static bool
dma64_rxstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
	        D64_RS0_RS_STOPPED);
}
static bool
dma64_alloc(dma_info_t *di, uint direction)
{
	uint16 size;
	uint ddlen;
	void *va;
	uint alloced = 0;
	uint16 align;
	uint16 align_bits;

	ddlen = sizeof(dma64dd_t);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
	align_bits = di->dmadesc_align;
	align = (1 << align_bits);

	if (direction == DMA_TX) {
		if ((va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits, &alloced,
		                        &di->txdpaorig, &di->tx_dmah)) == NULL) {
			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
			           di->name));
			return FALSE;
		}
		align = (1 << align_bits);
		di->txd64 = (dma64dd_t *)ROUNDUP((uintptr)va, align);
		di->txdalign = (uint)((int8 *)(uintptr)di->txd64 - (int8 *)va);
		PHYSADDRLOSET(di->txdpa, PHYSADDRLO(di->txdpaorig) + di->txdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));

		PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));
		di->txdalloc = alloced;
		ASSERT(ISALIGNED((uintptr)di->txd64, align));
	} else {
		if ((va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits, &alloced,
		                        &di->rxdpaorig, &di->rx_dmah)) == NULL) {
			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
			           di->name));
			return FALSE;
		}
		align = (1 << align_bits);
		di->rxd64 = (dma64dd_t *)ROUNDUP((uintptr)va, align);
		di->rxdalign = (uint)((int8 *)(uintptr)di->rxd64 - (int8 *)va);
		PHYSADDRLOSET(di->rxdpa, PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));

		PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));
		di->rxdalloc = alloced;
		ASSERT(ISALIGNED((uintptr)di->rxd64, align));
	}

	return TRUE;
}
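
/* Worked example (illustrative only): with dmadesc_align = 12 the ring must
 * sit on a 4 KB boundary. If dma_ringalloc() hands back va ending in 0x1010,
 * ROUNDUP(va, 0x1000) yields the next ...0x2000 boundary, txdalign records
 * the 0xff0-byte skew, and the same skew is added to the low word of the
 * physical address so that txd64 (virtual) and txdpa (physical) both point
 * at the same first descriptor.
 */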
static bool
dma64_txreset(dma_info_t *di)
{
	uint32 status;

	if (di->ntxd == 0)
		return TRUE;

	/* suspend tx DMA first */
	W_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
	SPINWAIT(((status = (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
	          D64_XS0_XS_DISABLED) &&
	         (status != D64_XS0_XS_IDLE) &&
	         (status != D64_XS0_XS_STOPPED),
	         10000);

	W_REG(di->osh, &di->d64txregs->control, 0);
	SPINWAIT(((status = (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
	          D64_XS0_XS_DISABLED),
	         10000);

	/* wait for the last transaction to complete */
	OSL_DELAY(300);

	return (status == D64_XS0_XS_DISABLED);
}
static bool
dma64_rxidle(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxidle\n", di->name));

	if (di->nrxd == 0)
		return TRUE;

	return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
	        (R_REG(di->osh, &di->d64rxregs->ptr) & D64_RS0_CD_MASK));
}
static bool
dma64_rxreset(dma_info_t *di)
{
	uint32 status;

	if (di->nrxd == 0)
		return TRUE;

	W_REG(di->osh, &di->d64rxregs->control, 0);
	SPINWAIT(((status = (R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK)) !=
	          D64_RS0_RS_DISABLED), 10000);

	return (status == D64_RS0_RS_DISABLED);
}
static bool
dma64_rxenabled(dma_info_t *di)
{
	uint32 rc;

	rc = R_REG(di->osh, &di->d64rxregs->control);
	return ((rc != 0xffffffff) && (rc & D64_RC_RE));
}
static bool
dma64_txsuspendedidle(dma_info_t *di)
{
	if (di->ntxd == 0)
		return TRUE;

	if (!(R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE))
		return 0;

	if ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_IDLE)
		return 1;

	return 0;
}
/* Useful when sending unframed data. This allows us to get a progress report from the DMA.
 * We return a pointer to the beginning of the DATA buffer of the current descriptor.
 * If the DMA is idle, we return NULL.
 */
static void *
dma64_getpos(dma_info_t *di, bool direction)
{
	void *va;
	bool idle;
	uint16 cur_idx;

	if (direction == DMA_TX) {
		cur_idx = B2I(((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK) -
		               di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t);
		idle = !NTXDACTIVE(di->txin, di->txout);
		va = di->txp[cur_idx];
	} else {
		cur_idx = B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
		               di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);
		idle = !NRXDACTIVE(di->rxin, di->rxout);
		va = di->rxp[cur_idx];
	}

	/* If DMA is IDLE, return NULL */
	if (idle) {
		DMA_TRACE(("%s: DMA idle, return NULL\n", __FUNCTION__));
		va = NULL;
	}

	return va;
}
/* TX of unframed data
 *
 * Adds a DMA ring descriptor for the data pointed to by "buf".
 * This is for DMA of a buffer of data and is unlike other hnddma TX functions
 * that take a pointer to a "packet".
 * Each call to this results in a single descriptor being added for "len" bytes of
 * data starting at "buf"; it doesn't handle chained buffers.
 */
static int BCMFASTPATH
dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
{
	uint16 txout;
	uint32 flags = 0;
	dmaaddr_t pa; /* phys addr */

	txout = di->txout;

	/* return nonzero if out of tx descriptors */
	if (NEXTTXD(txout) == di->txin)
		goto outoftxd;

	if (len == 0)
		return 0;

	pa = DMA_MAP(di->osh, buf, len, DMA_TX, NULL, &di->txp_dmah[txout]);

	flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);

	if (txout == (di->ntxd - 1))
		flags |= D64_CTRL1_EOT;

	dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
	ASSERT(di->txp[txout] == NULL);

	/* save the buffer pointer - used by dma_getpos */
	di->txp[txout] = buf;

	txout = NEXTTXD(txout);
	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit)
		W_REG(di->osh, &di->d64txregs->ptr, di->xmtptrbase + I2B(txout, dma64dd_t));

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (0);

outoftxd:
	DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __FUNCTION__));
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
	return (-1);
}
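
/* Illustrative sketch only: a hypothetical caller streaming a raw buffer and
 * polling for progress. dma64_getpos() returns the saved buffer pointer for
 * the descriptor the engine is currently on, or NULL once the ring is idle.
 * buf/buf_len are assumed caller-provided.
 *
 *	if (dma64_txunframed(di, buf, buf_len, TRUE) == 0) {
 *		void *pos;
 *		while ((pos = dma64_getpos(di, DMA_TX)) != NULL)
 *			;	// still moving data; a real caller would sleep or poll
 *	}
 */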
/* !! tx entry routine
 * WARNING: the caller must check the return value for errors.
 * An error (tossed frames) could be fatal and cause many subsequent hard-to-debug problems.
 */
static int BCMFASTPATH
dma64_txfast(dma_info_t *di, void *p0, bool commit)
{
	void *p, *next;
	uchar *data;
	uint len;
	uint16 txout;
	uint32 flags = 0;
	dmaaddr_t pa;
	bool war;

	DMA_TRACE(("%s: dma_txfast\n", di->name));

	txout = di->txout;
	war = (di->hnddma.dmactrlflags & DMA_CTRL_DMA_AVOIDANCE_WAR) ? TRUE : FALSE;

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		uint nsegs, j, segsadd;
		hnddma_seg_map_t *map = NULL;

		data = PKTDATA(di->osh, p);
		len = PKTLEN(di->osh, p);
#ifdef BCM_DMAPAD
		len += PKTDMAPAD(di->osh, p);
#endif /* BCM_DMAPAD */
		next = PKTNEXT(di->osh, p);

		/* return nonzero if out of tx descriptors */
		if (NEXTTXD(txout) == di->txin)
			goto outoftxd;

		if (len == 0)
			continue;

		/* get physical address of buffer start */
		if (DMASGLIST_ENAB)
			bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));

		pa = DMA_MAP(di->osh, data, len, DMA_TX, p, &di->txp_dmah[txout]);

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[txout];

			/* See if all the segments can be accounted for */
			if (map->nsegs > (uint)(di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1))
				goto outoftxd;

			nsegs = map->nsegs;
		} else
			nsegs = 1;

		segsadd = 0;
		for (j = 1; j <= nsegs; j++) {
			flags = 0;
			if (p == p0 && j == 1)
				flags |= D64_CTRL1_SOF;

			/* With a DMA segment list, the descriptor table is filled
			 * using the segment list instead of looping over
			 * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when the
			 * end of the segment list is reached.
			 */
			if ((!DMASGLIST_ENAB && next == NULL) ||
			    (DMASGLIST_ENAB && j == nsegs))
				flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
			if (txout == (di->ntxd - 1))
				flags |= D64_CTRL1_EOT;

			if (DMASGLIST_ENAB) {
				len = map->segs[j - 1].length;
				pa = map->segs[j - 1].addr;
				if (len > 128 && war) {
					uint remain, new_len, align64;
					/* check for 64B alignment of pa */
					align64 = (uint)(PHYSADDRLO(pa) & 0x3f);
					align64 = (64 - align64) & 0x3f;
					new_len = len - align64;
					remain = new_len % 128;
					if (remain > 0 && remain <= 4) {
						uint32 buf_addr_lo;
						uint32 tmp_flags =
							flags & (~(D64_CTRL1_EOF | D64_CTRL1_IOC));
						flags &= ~(D64_CTRL1_SOF | D64_CTRL1_EOT);

						dma64_dd_upd(di, di->txd64, pa, txout,
						             &tmp_flags, len-remain);
						ASSERT(di->txp[txout] == NULL);
						txout = NEXTTXD(txout);
						/* return nonzero if out of tx descriptors */
						if (txout == di->txin) {
							DMA_ERROR(("%s: dma_txfast: Out-of-DMA"
							           " descriptors (txin %d txout %d"
							           " nsegs %d)\n", __FUNCTION__,
							           di->txin, di->txout, nsegs));
							goto outoftxd;
						}
						if (txout == (di->ntxd - 1))
							flags |= D64_CTRL1_EOT;
						buf_addr_lo = PHYSADDRLO(pa);
						PHYSADDRLOSET(pa, (PHYSADDRLO(pa) + (len-remain)));
						if (PHYSADDRLO(pa) < buf_addr_lo) {
							PHYSADDRHISET(pa, (PHYSADDRHI(pa) + 1));
						}
						len = remain;
						segsadd++;
						di->dma_avoidance_cnt++;
					}
				}
			}

			dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
			ASSERT(di->txp[txout] == NULL);

			txout = NEXTTXD(txout);
			/* return nonzero if out of tx descriptors */
			if (txout == di->txin) {
				DMA_ERROR(("%s: dma_txfast: Out-of-DMA descriptors"
				           " (txin %d txout %d nsegs %d)\n", __FUNCTION__,
				           di->txin, di->txout, nsegs));
				goto outoftxd;
			}
		}
		if (segsadd && DMASGLIST_ENAB)
			map->nsegs += segsadd;

		/* See above. No need to loop over individual buffers */
		if (DMASGLIST_ENAB)
			break;
	}

	/* if last txd eof not set, fix it */
	if (!(flags & D64_CTRL1_EOF))
		W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
		     BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));

	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit)
		W_REG(di->osh, &di->d64txregs->ptr, di->xmtptrbase + I2B(txout, dma64dd_t));

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (0);

outoftxd:
	DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
	PKTFREE(di->osh, p0, TRUE);
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
	return (-1);
}
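
/* Worked example of the avoidance WAR arithmetic above (illustrative only):
 * for a segment with PHYSADDRLO(pa) ending in 0x10 and len = 1500,
 * align64 = (64 - 0x10) & 0x3f = 48, new_len = 1452 and
 * remain = 1452 % 128 = 44, so no split is needed. Had remain landed in
 * 1..4, the segment would be posted as two descriptors: one of len - remain
 * bytes carrying the SOF-side flags, and a short tail of remain bytes
 * carrying EOF/IOC, with segsadd recording the extra descriptor so reclaim
 * accounting stays correct.
 */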
/*
 * Reclaim the next completed txd (txds if using chained buffers) in the range
 * specified and return the associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return the associated packet regardless of the value of the hardware pointers.
 */
static void * BCMFASTPATH
dma64_getnexttxp(dma_info_t *di, txd_range_t range)
{
	uint16 start, end, i;
	uint16 active_desc;
	void *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
	           (range == HNDDMA_RANGE_ALL) ? "all" :
	           ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : "transferred")));

	if (di->ntxd == 0)
		return (NULL);

	txp = NULL;

	start = di->txin;
	if (range == HNDDMA_RANGE_ALL)
		end = di->txout;
	else {
		dma64regs_t *dregs = di->d64txregs;

		end = (uint16)(B2I(((R_REG(di->osh, &dregs->status0) & D64_XS0_CD_MASK) -
		                    di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));

		if (range == HNDDMA_RANGE_TRANSFERED) {
			active_desc = (uint16)(R_REG(di->osh, &dregs->status1) & D64_XS1_AD_MASK);
			active_desc = (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
			active_desc = B2I(active_desc, dma64dd_t);
			if (end != active_desc)
				end = PREVTXD(active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		dmaaddr_t pa;
		hnddma_seg_map_t *map = NULL;
		uint size, j, nsegs;

		PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) - di->dataoffsetlow));
		PHYSADDRHISET(pa, (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) - di->dataoffsethigh));

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[i];
			size = map->origsize;
			nsegs = map->nsegs;
		} else {
			size = (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) & D64_CTRL2_BC_MASK);
			nsegs = 1;
		}

		for (j = nsegs; j > 0; j--) {
			W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
			W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);

			txp = di->txp[i];
			di->txp[i] = NULL;
			if (j > 1)
				i = NEXTTXD(i);
		}

		DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
	}

	di->txin = i;

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (txp);

bogus:
	DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
	          start, end, di->txout, forceall));
	return (NULL);
}
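
/* Worked example (illustrative only): the 64-bit engine reports CurrDescr as
 * a byte offset in status0, relative to the ring base programmed at init.
 * With xmtptrbase = 0x2000, sizeof(dma64dd_t) = 16 and a status0 CD field of
 * 0x2120, the completed-descriptor index is B2I(0x2120 - 0x2000, dma64dd_t)
 * = 0x120 / 16 = 18, which becomes "end" for the reclaim walk above.
 */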
static void * BCMFASTPATH
dma64_getnextrxp(dma_info_t *di, bool forceall)
{
	uint16 i, curr;
	void *rxp;
	dmaaddr_t pa;

	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma64_rxenabled(di));

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return (NULL);

	curr = B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
	            di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return (NULL);

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	ASSERT(rxp);
	di->rxp[i] = NULL;

	PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) - di->dataoffsetlow));
	PHYSADDRHISET(pa, (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) - di->dataoffsethigh));

	/* clear this packet from the descriptor ring */
	DMA_UNMAP(di->osh, pa,
	          di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

	W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
	W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);

	di->rxin = NEXTRXD(i);

	return (rxp);
}
static bool
_dma64_addrext(osl_t *osh, dma64regs_t *dma64regs)
{
	uint32 w;

	OR_REG(osh, &dma64regs->control, D64_XC_AE);
	w = R_REG(osh, &dma64regs->control);
	AND_REG(osh, &dma64regs->control, ~D64_XC_AE);
	return ((w & D64_XC_AE) == D64_XC_AE);
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void
dma64_txrotate(dma_info_t *di)
{
	uint16 ad;
	uint nactive;
	uint rot;
	uint16 old, new;
	uint32 w;
	uint16 first, last;

	ASSERT(dma64_txsuspendedidle(di));

	nactive = _dma_txactive(di);
	ad = (uint16)(B2I((((R_REG(di->osh, &di->d64txregs->status1) & D64_XS1_AD_MASK)
	                    - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t));
	rot = TXD(ad - di->txin);

	ASSERT(rot < di->ntxd);

	/* full-ring case is a lot harder - don't worry about this */
	if (rot >= (di->ntxd - nactive)) {
		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
		return;
	}

	first = di->txin;
	last = PREVTXD(di->txout);

	/* move entries starting at last and moving backwards to first */
	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
		new = TXD(old + rot);

		/*
		 * Move the tx dma descriptor.
		 * EOT is set only in the last entry in the ring.
		 */
		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
		if (new == (di->ntxd - 1))
			w |= D64_CTRL1_EOT;
		W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));

		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
		W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));

		W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
		W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));

		/* zap the old tx dma descriptor address field */
		W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
		W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));

		/* move the corresponding txp[] entry */
		ASSERT(di->txp[new] == NULL);
		di->txp[new] = di->txp[old];

		/* Move the segment map as well */
		if (DMASGLIST_ENAB) {
			bcopy(&di->txp_dmah[old], &di->txp_dmah[new], sizeof(hnddma_seg_map_t));
			bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
		}

		di->txp[old] = NULL;
	}

	/* update txin and txout */
	di->txin = ad;
	di->txout = TXD(di->txout + rot);
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	/* kick the chip */
	W_REG(di->osh, &di->d64txregs->ptr, di->xmtptrbase + I2B(di->txout, dma64dd_t));
}
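
/* Illustrative sketch only: rotation is only legal on a suspended, idle
 * engine, so a hypothetical multi-queue sequence inside this module would
 * look like
 *
 *	dma64_txsuspend(di);
 *	SPINWAIT(!dma64_txsuspendedidle(di), 10000);
 *	dma64_txrotate(di);
 *	dma64_txresume(di);
 *
 * so that the surviving descriptors line up behind the engine's
 * ActiveDescr before new work is posted.
 */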
uint
dma_addrwidth(si_t *sih, void *dmaregs)
{
	dma32regs_t *dma32regs;
	osl_t *osh;

	osh = si_osh(sih);

	/* Perform 64-bit checks only if we want to advertise 64-bit (> 32-bit) capability */
	/* DMA engine is 64-bit capable */
	if ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
		/* backplane is 64-bit capable */
		if (si_backplane64(sih))
			/* If the bus is System Backplane or PCIE then we can access 64 bits */
			if ((BUSTYPE(sih->bustype) == SI_BUS) ||
			    ((BUSTYPE(sih->bustype) == PCI_BUS) &&
			     (sih->buscoretype == PCIE_CORE_ID)))
				return (DMADDRWIDTH_64);

		/* DMA64 is always 32-bit capable, AE is always TRUE */
		ASSERT(_dma64_addrext(osh, (dma64regs_t *)dmaregs));

		return (DMADDRWIDTH_32);
	}

	/* Start checking for 32-bit / 30-bit addressing */
	dma32regs = (dma32regs_t *)dmaregs;

	/* For System Backplane, PCIE bus, or the addrext feature, 32 bits are ok */
	if ((BUSTYPE(sih->bustype) == SI_BUS) ||
	    ((BUSTYPE(sih->bustype) == PCI_BUS) && sih->buscoretype == PCIE_CORE_ID) ||
	    (_dma32_addrext(osh, dma32regs)))
		return (DMADDRWIDTH_32);

	/* Fall back to 30-bit addressing */
	return (DMADDRWIDTH_30);
}
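
/* Illustrative sketch only: a hypothetical attach path would size its
 * descriptor addressing off this probe before setting up rings:
 *
 *	if (dma_addrwidth(sih, dmaregs) == DMADDRWIDTH_64) {
 *		// 64-bit descriptors on a 64-bit capable backplane/bus
 *	} else {
 *		// fall back to the 32-bit (or 30-bit) addressing rules
 *	}
 */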