/*
 * Generic Broadcom Home Networking Division (HND) DMA module.
 * This supports the following chips: BCM42xx, 44xx, 47xx .
 *
 * Copyright (C) 2011, Broadcom Corporation. All Rights Reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $Id: hnddma.c 321146 2012-03-14 08:27:23Z $
 */

#include <bcmendian.h>
#if defined(BCMDBG)
#define	DMA_ERROR(args)	if (!(*di->msg_level & 1)); else printf args
#define	DMA_TRACE(args)	if (!(*di->msg_level & 2)); else printf args
#elif defined(BCMDBG_ERR)
#define	DMA_ERROR(args)	if (!(*di->msg_level & 1)); else printf args
#define	DMA_TRACE(args)
#else
#define	DMA_ERROR(args)
#define	DMA_TRACE(args)
#endif /* BCMDBG */

#define	DMA_NONE(args)
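/*
 * Example (illustrative, assumes a BCMDBG build): *di->msg_level is a bitmask,
 * so handing dma_attach() a message-level word of 0x3 enables both classes of
 * output, while 0x1 enables errors only:
 *
 *	DMA_ERROR(("%s: descriptor error\n", di->name));	// printed when bit 0 is set
 *	DMA_TRACE(("%s: rxfill\n", di->name));			// printed when bit 1 is set
 */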
#define	d32txregs	dregs.d32_u.txregs_32
#define	d32rxregs	dregs.d32_u.rxregs_32
#define	txd32		dregs.d32_u.txd_32
#define	rxd32		dregs.d32_u.rxd_32

#define	d64txregs	dregs.d64_u.txregs_64
#define	d64rxregs	dregs.d64_u.rxregs_64
#define	txd64		dregs.d64_u.txd_64
#define	rxd64		dregs.d64_u.rxd_64
/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level =
#ifdef BCMDBG_ERR
	1;
#else
	0;
#endif /* BCMDBG_ERR */
#define	MAXNAMEL	8	/* 8 char names */

#define	DI_INFO(dmah)	((dma_info_t *)dmah)
/* dma engine software state */
typedef struct dma_info {
	struct hnddma_pub hnddma;	/* exported structure, don't use hnddma_t,
					 * which could be const
					 */
	uint		*msg_level;	/* message level pointer */
	char		name[MAXNAMEL];	/* callers name for diag msgs */

	void		*osh;		/* os handle */
	si_t		*sih;		/* sb handle */

	bool		dma64;		/* this dma engine is operating in 64-bit mode */
	bool		addrext;	/* this dma engine supports DmaExtendedAddrChanges */

	union {
		struct {
			dma32regs_t	*txregs_32;	/* 32-bit dma tx engine registers */
			dma32regs_t	*rxregs_32;	/* 32-bit dma rx engine registers */
			dma32dd_t	*txd_32;	/* pointer to dma32 tx descriptor ring */
			dma32dd_t	*rxd_32;	/* pointer to dma32 rx descriptor ring */
		} d32_u;
		struct {
			dma64regs_t	*txregs_64;	/* 64-bit dma tx engine registers */
			dma64regs_t	*rxregs_64;	/* 64-bit dma rx engine registers */
			dma64dd_t	*txd_64;	/* pointer to dma64 tx descriptor ring */
			dma64dd_t	*rxd_64;	/* pointer to dma64 rx descriptor ring */
		} d64_u;
	} dregs;

	uint16		dmadesc_align;	/* alignment requirement for dma descriptors */

	uint16		ntxd;		/* # tx descriptors tunable */
	uint16		txin;		/* index of next descriptor to reclaim */
	uint16		txout;		/* index of next descriptor to post */
	void		**txp;		/* pointer to parallel array of pointers to packets */
	osldma_t	*tx_dmah;	/* DMA TX descriptor ring handle */
	hnddma_seg_map_t *txp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t	txdpa;		/* Aligned physical address of descriptor ring */
	dmaaddr_t	txdpaorig;	/* Original physical address of descriptor ring */
	uint16		txdalign;	/* #bytes added to alloc'd mem to align txd */
	uint32		txdalloc;	/* #bytes allocated for the ring */
	uint32		xmtptrbase;	/* When using unaligned descriptors, the ptr register
					 * is not just an index, it needs all 13 bits to be
					 * an offset from the addr register.
					 */

	uint16		nrxd;		/* # rx descriptors tunable */
	uint16		rxin;		/* index of next descriptor to reclaim */
	uint16		rxout;		/* index of next descriptor to post */
	void		**rxp;		/* pointer to parallel array of pointers to packets */
	osldma_t	*rx_dmah;	/* DMA RX descriptor ring handle */
	hnddma_seg_map_t *rxp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t	rxdpa;		/* Aligned physical address of descriptor ring */
	dmaaddr_t	rxdpaorig;	/* Original physical address of descriptor ring */
	uint16		rxdalign;	/* #bytes added to alloc'd mem to align rxd */
	uint32		rxdalloc;	/* #bytes allocated for the ring */
	uint32		rcvptrbase;	/* Base for ptr reg when using unaligned descriptors */

	uint16		rxbufsize;	/* rx buffer size in bytes,
					 * not including the extra headroom
					 */
	uint		rxextrahdrroom;	/* extra rx headroom, reserved to assist upper stack
					 * e.g. some rx pkt buffers will be bridged to tx side
					 * without byte copying. The extra headroom needs to be
					 * large enough to fit txheader needs.
					 * Some dongle driver may not need it.
					 */
	uint		nrxpost;	/* # rx buffers to keep posted */
	uint		rxoffset;	/* rxcontrol offset */
	uint		ddoffsetlow;	/* add to get dma address of descriptor ring, low 32 bits */
	uint		ddoffsethigh;	/*   high 32 bits */
	uint		dataoffsetlow;	/* add to get dma address of data buffer, low 32 bits */
	uint		dataoffsethigh;	/*   high 32 bits */
	bool		aligndesc_4k;	/* whether the descriptor base needs to be aligned or not */
	uint8		rxburstlen;	/* burstlen field for rx (for cores supporting burstlen) */
	uint8		txburstlen;	/* burstlen field for tx (for cores supporting burstlen) */
	uint8		txmultioutstdrd;	/* tx multiple outstanding reads */
	uint8		txprefetchctl;	/* prefetch control for tx */
	uint8		txprefetchthresh;	/* prefetch threshold for tx */
	uint8		rxprefetchctl;	/* prefetch control for rx */
	uint8		rxprefetchthresh;	/* prefetch threshold for rx */
	pktpool_t	*pktpool;	/* pktpool */
	uint		dma_avoidance_cnt;
} dma_info_t;
/*
 * If BCMDMA32 is defined, hnddma will support both 32-bit and 64-bit DMA engines.
 * Otherwise it will support only 64-bit.
 *
 * DMA32_ENAB indicates whether hnddma is compiled with support for 32-bit DMA engines.
 * DMA64_ENAB indicates whether hnddma is compiled with support for 64-bit DMA engines.
 *
 * DMA64_MODE indicates whether the current DMA engine is running as 64-bit.
 */
#ifdef BCMDMA32
#define	DMA32_ENAB(di)	1
#define	DMA64_ENAB(di)	1
#define	DMA64_MODE(di)	((di)->dma64)
#else /* !BCMDMA32 */
#define	DMA32_ENAB(di)	0
#define	DMA64_ENAB(di)	1
#define	DMA64_MODE(di)	1
#endif /* !BCMDMA32 */
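/*
 * Example (illustrative): in the default (!BCMDMA32) build, DMA32_ENAB(di) is
 * the constant 0, so every "else if (DMA32_ENAB(di))" arm below is dead code
 * the compiler folds away, leaving only the 64-bit paths in the object file.
 */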
/* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
#ifdef BCMDMASGLISTOSL
#define DMASGLIST_ENAB TRUE
#else
#define DMASGLIST_ENAB FALSE
#endif /* BCMDMASGLISTOSL */
/* descriptor bumping macros */
#define	XXD(x, n)	((x) & ((n) - 1))	/* faster than %, but n must be power of 2 */
#define	TXD(x)		XXD((x), di->ntxd)
#define	RXD(x)		XXD((x), di->nrxd)
#define	NEXTTXD(i)	TXD((i) + 1)
#define	PREVTXD(i)	TXD((i) - 1)
#define	NEXTRXD(i)	RXD((i) + 1)
#define	PREVRXD(i)	RXD((i) - 1)

#define	NTXDACTIVE(h, t)	TXD((t) - (h))
#define	NRXDACTIVE(h, t)	RXD((t) - (h))

/* macros to convert between byte offsets and indexes */
#define	B2I(bytes, type)	((bytes) / sizeof(type))
#define	I2B(index, type)	((index) * sizeof(type))
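/*
 * Example (illustrative): with di->ntxd == 512 the power-of-2 mask makes the
 * wrap-around cases come out right without a modulo:
 *
 *	NEXTTXD(511)		-> 0	(wraps past the end of the ring)
 *	NTXDACTIVE(510, 2)	-> 4	(tail wrapped past head: (2 - 510) & 511)
 *	B2I(64, dma64dd_t)	-> 4	(sizeof(dma64dd_t) == 16 bytes)
 */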
#define	PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define	PCI32ADDR_HIGH_SHIFT	30		/* address[31:30] */

#define	PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define	PCI64ADDR_HIGH_SHIFT	31		/* address[63] */
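/*
 * Example (illustrative): how the address-extension (AE) bits are peeled off
 * a 32-bit physical address before it is written into a descriptor:
 *
 *	uint32 pa = 0xd0001000;
 *	uint32 ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;	// 0x3
 *	pa &= ~PCI32ADDR_HIGH;						// 0x10001000
 *
 * The two high bits travel in the descriptor's AE field and the remaining 30
 * bits in the address field; see dma32_dd_upd() and dma64_dd_upd() below.
 */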
/* Common prototypes */
static bool _dma_isaddrext(dma_info_t *di);
static bool _dma_descriptor_align(dma_info_t *di);
static bool _dma_alloc(dma_info_t *di, uint direction);
static void _dma_detach(dma_info_t *di);
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
static void _dma_rxinit(dma_info_t *di);
static void *_dma_rx(dma_info_t *di);
static bool _dma_rxfill(dma_info_t *di);
static void _dma_rxreclaim(dma_info_t *di);
static void _dma_rxenable(dma_info_t *di);
static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
static void _dma_rx_param_get(dma_info_t *di, uint16 *rxoffset, uint16 *rxbufsize);

static void _dma_txblock(dma_info_t *di);
static void _dma_txunblock(dma_info_t *di);
static uint _dma_txactive(dma_info_t *di);
static uint _dma_rxactive(dma_info_t *di);
static uint _dma_txpending(dma_info_t *di);
static uint _dma_txcommitted(dma_info_t *di);

static void *_dma_peeknexttxp(dma_info_t *di);
static int _dma_peekntxp(dma_info_t *di, int *len, void *txps[], txd_range_t range);
static void *_dma_peeknextrxp(dma_info_t *di);
static uintptr _dma_getvar(dma_info_t *di, const char *name);
static void _dma_counterreset(dma_info_t *di);
static void _dma_fifoloopbackenable(dma_info_t *di);
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
static uint8 dma_align_sizetobits(uint size);
static void *dma_ringalloc(osl_t *osh, uint32 boundary, uint size, uint16 *alignbits,
	uint *alloced, dmaaddr_t *descpa, osldma_t **dmah);
static int _dma_pktpool_set(dma_info_t *di, pktpool_t *pool);
static bool _dma_rxtx_error(dma_info_t *di, bool istx);
static void _dma_burstlen_set(dma_info_t *di, uint8 rxburstlen, uint8 txburstlen);
static uint _dma_avoidancecnt(dma_info_t *di);
static void _dma_param_set(dma_info_t *di, uint16 paramid, uint16 paramval);
static bool _dma_glom_enable(dma_info_t *di, uint32 val);
/* Prototypes for 32-bit routines */
static bool dma32_alloc(dma_info_t *di, uint direction);
static bool dma32_txreset(dma_info_t *di);
static bool dma32_rxreset(dma_info_t *di);
static bool dma32_txsuspendedidle(dma_info_t *di);
static int dma32_txfast(dma_info_t *di, void *p0, bool commit);
static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma32_getnextrxp(dma_info_t *di, bool forceall);
static void dma32_txrotate(dma_info_t *di);
static bool dma32_rxidle(dma_info_t *di);
static void dma32_txinit(dma_info_t *di);
static bool dma32_txenabled(dma_info_t *di);
static void dma32_txsuspend(dma_info_t *di);
static void dma32_txresume(dma_info_t *di);
static bool dma32_txsuspended(dma_info_t *di);
#ifdef WL_MULTIQUEUE
static void dma32_txflush(dma_info_t *di);
static void dma32_txflush_clear(dma_info_t *di);
#endif /* WL_MULTIQUEUE */
static void dma32_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma32_txstopped(dma_info_t *di);
static bool dma32_rxstopped(dma_info_t *di);
static bool dma32_rxenabled(dma_info_t *di);

static void dma32_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma32dd_t *ring, uint start,
	uint end, uint max_num);
static void dma32_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma32_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma32_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);

static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs);
/* Prototypes for 64-bit routines */
static bool dma64_alloc(dma_info_t *di, uint direction);
static bool dma64_txreset(dma_info_t *di);
static bool dma64_rxreset(dma_info_t *di);
static bool dma64_txsuspendedidle(dma_info_t *di);
static int dma64_txfast(dma_info_t *di, void *p0, bool commit);
static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
static void *dma64_getpos(dma_info_t *di, bool direction);
static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
static void dma64_txrotate(dma_info_t *di);

static bool dma64_rxidle(dma_info_t *di);
static void dma64_txinit(dma_info_t *di);
static bool dma64_txenabled(dma_info_t *di);
static void dma64_txsuspend(dma_info_t *di);
static void dma64_txresume(dma_info_t *di);
static bool dma64_txsuspended(dma_info_t *di);
#ifdef WL_MULTIQUEUE
static void dma64_txflush(dma_info_t *di);
static void dma64_txflush_clear(dma_info_t *di);
#endif /* WL_MULTIQUEUE */
static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma64_txstopped(dma_info_t *di);
static bool dma64_rxstopped(dma_info_t *di);
static bool dma64_rxenabled(dma_info_t *di);
static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs);
STATIC INLINE uint32 parity32(uint32 data);
static void dma64_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma64dd_t *ring, uint start,
	uint end, uint max_num);
static void dma64_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma64_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma64_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
const di_fcn_t dma64proc = {
	(di_detach_t)_dma_detach,
	(di_txinit_t)dma64_txinit,
	(di_txreset_t)dma64_txreset,
	(di_txenabled_t)dma64_txenabled,
	(di_txsuspend_t)dma64_txsuspend,
	(di_txresume_t)dma64_txresume,
	(di_txsuspended_t)dma64_txsuspended,
	(di_txsuspendedidle_t)dma64_txsuspendedidle,
#ifdef WL_MULTIQUEUE
	(di_txflush_t)dma64_txflush,
	(di_txflush_clear_t)dma64_txflush_clear,
#endif /* WL_MULTIQUEUE */
	(di_txfast_t)dma64_txfast,
	(di_txunframed_t)dma64_txunframed,
	(di_getpos_t)dma64_getpos,
	(di_txstopped_t)dma64_txstopped,
	(di_txreclaim_t)dma64_txreclaim,
	(di_getnexttxp_t)dma64_getnexttxp,
	(di_peeknexttxp_t)_dma_peeknexttxp,
	(di_peekntxp_t)_dma_peekntxp,
	(di_txblock_t)_dma_txblock,
	(di_txunblock_t)_dma_txunblock,
	(di_txactive_t)_dma_txactive,
	(di_txrotate_t)dma64_txrotate,

	(di_rxinit_t)_dma_rxinit,
	(di_rxreset_t)dma64_rxreset,
	(di_rxidle_t)dma64_rxidle,
	(di_rxstopped_t)dma64_rxstopped,
	(di_rxenable_t)_dma_rxenable,
	(di_rxenabled_t)dma64_rxenabled,
	(di_rxfill_t)_dma_rxfill,
	(di_rxreclaim_t)_dma_rxreclaim,
	(di_getnextrxp_t)_dma_getnextrxp,
	(di_peeknextrxp_t)_dma_peeknextrxp,
	(di_rxparam_get_t)_dma_rx_param_get,

	(di_fifoloopbackenable_t)_dma_fifoloopbackenable,
	(di_getvar_t)_dma_getvar,
	(di_counterreset_t)_dma_counterreset,
	(di_ctrlflags_t)_dma_ctrlflags,

	(di_dump_t)dma64_dump,
	(di_dumptx_t)dma64_dumptx,
	(di_dumprx_t)dma64_dumprx,

	(di_rxactive_t)_dma_rxactive,
	(di_txpending_t)_dma_txpending,
	(di_txcommitted_t)_dma_txcommitted,
	(di_pktpool_set_t)_dma_pktpool_set,
	(di_rxtxerror_t)_dma_rxtx_error,
	(di_burstlen_set_t)_dma_burstlen_set,
	(di_avoidancecnt_t)_dma_avoidancecnt,
	(di_param_set_t)_dma_param_set,
	(dma_glom_enable_t)_dma_glom_enable,
};
static const di_fcn_t dma32proc = {
	(di_detach_t)_dma_detach,
	(di_txinit_t)dma32_txinit,
	(di_txreset_t)dma32_txreset,
	(di_txenabled_t)dma32_txenabled,
	(di_txsuspend_t)dma32_txsuspend,
	(di_txresume_t)dma32_txresume,
	(di_txsuspended_t)dma32_txsuspended,
	(di_txsuspendedidle_t)dma32_txsuspendedidle,
#ifdef WL_MULTIQUEUE
	(di_txflush_t)dma32_txflush,
	(di_txflush_clear_t)dma32_txflush_clear,
#endif /* WL_MULTIQUEUE */
	(di_txfast_t)dma32_txfast,
	(di_txstopped_t)dma32_txstopped,
	(di_txreclaim_t)dma32_txreclaim,
	(di_getnexttxp_t)dma32_getnexttxp,
	(di_peeknexttxp_t)_dma_peeknexttxp,
	(di_peekntxp_t)_dma_peekntxp,
	(di_txblock_t)_dma_txblock,
	(di_txunblock_t)_dma_txunblock,
	(di_txactive_t)_dma_txactive,
	(di_txrotate_t)dma32_txrotate,

	(di_rxinit_t)_dma_rxinit,
	(di_rxreset_t)dma32_rxreset,
	(di_rxidle_t)dma32_rxidle,
	(di_rxstopped_t)dma32_rxstopped,
	(di_rxenable_t)_dma_rxenable,
	(di_rxenabled_t)dma32_rxenabled,
	(di_rxfill_t)_dma_rxfill,
	(di_rxreclaim_t)_dma_rxreclaim,
	(di_getnextrxp_t)_dma_getnextrxp,
	(di_peeknextrxp_t)_dma_peeknextrxp,
	(di_rxparam_get_t)_dma_rx_param_get,

	(di_fifoloopbackenable_t)_dma_fifoloopbackenable,
	(di_getvar_t)_dma_getvar,
	(di_counterreset_t)_dma_counterreset,
	(di_ctrlflags_t)_dma_ctrlflags,

	(di_dump_t)dma32_dump,
	(di_dumptx_t)dma32_dumptx,
	(di_dumprx_t)dma32_dumprx,

	(di_rxactive_t)_dma_rxactive,
	(di_txpending_t)_dma_txpending,
	(di_txcommitted_t)_dma_txcommitted,
	(di_pktpool_set_t)_dma_pktpool_set,
	(di_rxtxerror_t)_dma_rxtx_error,
	(di_burstlen_set_t)_dma_burstlen_set,
	(di_avoidancecnt_t)_dma_avoidancecnt,
	(di_param_set_t)_dma_param_set,
};
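/*
 * Example (illustrative sketch): callers normally reach the routines above
 * through the di_fn table hung off the public handle rather than calling them
 * directly. The register pointers and ring sizes here are hypothetical; ntxd
 * and nrxd must be powers of 2.
 *
 *	hnddma_t *dmah = dma_attach(osh, "wl0", sih, tx_regs, rx_regs,
 *	                            64, 256, 2048, -1, 32, 0, &msg_level);
 *	if (dmah != NULL) {
 *		dmah->di_fn->txinit(dmah);
 *		dmah->di_fn->rxinit(dmah);
 *		dmah->di_fn->rxfill(dmah);
 *	}
 */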
hnddma_t *
dma_attach(osl_t *osh, const char *name, si_t *sih,
           volatile void *dmaregstx, volatile void *dmaregsrx,
           uint ntxd, uint nrxd, uint rxbufsize, int rxextheadroom, uint nrxpost, uint rxoffset,
           uint *msg_level)
{
	dma_info_t *di;
	uint size;

	/* allocate private info structure */
	if ((di = MALLOC(osh, sizeof (dma_info_t))) == NULL) {
		DMA_ERROR(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh)));
		return (NULL);
	}

	bzero(di, sizeof(dma_info_t));

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	/* old chips w/o sb is no longer supported */
	ASSERT(sih != NULL);

	di->dma64 = ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);

	/* check arguments */
	ASSERT(ISPOWEROF2(ntxd));
	ASSERT(ISPOWEROF2(nrxd));

	if (nrxd == 0)
		ASSERT(dmaregsrx == NULL);
	if (ntxd == 0)
		ASSERT(dmaregstx == NULL);
	/* init dma reg pointer */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		ASSERT(ntxd <= D64MAXDD);
		ASSERT(nrxd <= D64MAXDD);
		di->d64txregs = (dma64regs_t *)dmaregstx;
		di->d64rxregs = (dma64regs_t *)dmaregsrx;
		di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;
	} else if (DMA32_ENAB(di)) {
		ASSERT(ntxd <= D32MAXDD);
		ASSERT(nrxd <= D32MAXDD);
		di->d32txregs = (dma32regs_t *)dmaregstx;
		di->d32rxregs = (dma32regs_t *)dmaregsrx;
		di->hnddma.di_fn = (const di_fcn_t *)&dma32proc;
	} else {
		DMA_ERROR(("%s: driver doesn't support 32-bit DMA\n", __FUNCTION__));
		ASSERT(0);
		goto fail;
	}
	/* Default flags (which can be changed by the driver calling dma_ctrlflags
	 * before enable): For backwards compatibility both Rx Overflow Continue
	 * and Parity are DISABLED.
	 */
	di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
	/* Get the default values (POR) of the burstlen. This can be overridden by the modules
	 * if this has to be different. Otherwise this value will be used to program the control
	 * register after the reset or during the init.
	 */
	if (dmaregsrx) {
		if (DMA64_ENAB(di) && DMA64_MODE(di)) {
			di->rxburstlen = (R_REG(di->osh,
				&di->d64rxregs->control) & D64_RC_BL_MASK) >> D64_RC_BL_SHIFT;
			di->rxprefetchctl = (R_REG(di->osh,
				&di->d64rxregs->control) & D64_RC_PC_MASK) >> D64_RC_PC_SHIFT;
			di->rxprefetchthresh = (R_REG(di->osh,
				&di->d64rxregs->control) & D64_RC_PT_MASK) >> D64_RC_PT_SHIFT;
		} else if (DMA32_ENAB(di)) {
			di->rxburstlen = (R_REG(di->osh,
				&di->d32rxregs->control) & RC_BL_MASK) >> RC_BL_SHIFT;
			di->rxprefetchctl = (R_REG(di->osh,
				&di->d32rxregs->control) & RC_PC_MASK) >> RC_PC_SHIFT;
			di->rxprefetchthresh = (R_REG(di->osh,
				&di->d32rxregs->control) & RC_PT_MASK) >> RC_PT_SHIFT;
		}
	}

	if (dmaregstx) {
		if (DMA64_ENAB(di) && DMA64_MODE(di)) {
			di->txburstlen = (R_REG(di->osh,
				&di->d64txregs->control) & D64_XC_BL_MASK) >> D64_XC_BL_SHIFT;
			di->txmultioutstdrd = (R_REG(di->osh,
				&di->d64txregs->control) & D64_XC_MR_MASK) >> D64_XC_MR_SHIFT;
			di->txprefetchctl = (R_REG(di->osh,
				&di->d64txregs->control) & D64_XC_PC_MASK) >> D64_XC_PC_SHIFT;
			di->txprefetchthresh = (R_REG(di->osh,
				&di->d64txregs->control) & D64_XC_PT_MASK) >> D64_XC_PT_SHIFT;
		} else if (DMA32_ENAB(di)) {
			di->txburstlen = (R_REG(di->osh,
				&di->d32txregs->control) & XC_BL_MASK) >> XC_BL_SHIFT;
			di->txmultioutstdrd = (R_REG(di->osh,
				&di->d32txregs->control) & XC_MR_MASK) >> XC_MR_SHIFT;
			di->txprefetchctl = (R_REG(di->osh,
				&di->d32txregs->control) & XC_PC_MASK) >> XC_PC_SHIFT;
			di->txprefetchthresh = (R_REG(di->osh,
				&di->d32txregs->control) & XC_PT_MASK) >> XC_PT_SHIFT;
		}
	}
	DMA_TRACE(("%s: %s: %s osh %p flags 0x%x ntxd %d nrxd %d rxbufsize %d "
	           "rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n",
	           name, __FUNCTION__, (DMA64_MODE(di) ? "DMA64" : "DMA32"),
	           osh, di->hnddma.dmactrlflags, ntxd, nrxd,
	           rxbufsize, rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL-1] = '\0';

	di->osh = osh;
	di->sih = sih;

	di->ntxd = (uint16)ntxd;
	di->nrxd = (uint16)nrxd;

	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom = (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (uint16)(rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (uint16)rxbufsize;

	di->nrxpost = (uint16)nrxpost;
	di->rxoffset = (uint8)rxoffset;
	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map silicon backplane address to zero-based memory, need offset
	 *     Other bus: use zero
	 *     SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sih->bustype == PCI_BUS) {
		if ((sih->buscoretype == PCIE_CORE_ID ||
		     sih->buscoretype == PCIE2_CORE_ID) &&
		    DMA64_MODE(di)) {
			/* pcie with DMA64 */
			di->ddoffsetlow = 0;
			di->ddoffsethigh = SI_PCIE_DMA_H32;
		} else {
			/* pci(DMA32/DMA64) or pcie with DMA32 */
			if ((CHIPID(sih->chip) == BCM4322_CHIP_ID) ||
			    (CHIPID(sih->chip) == BCM4342_CHIP_ID) ||
			    (CHIPID(sih->chip) == BCM43221_CHIP_ID) ||
			    (CHIPID(sih->chip) == BCM43231_CHIP_ID) ||
			    (CHIPID(sih->chip) == BCM43111_CHIP_ID) ||
			    (CHIPID(sih->chip) == BCM43112_CHIP_ID) ||
			    (CHIPID(sih->chip) == BCM43222_CHIP_ID))
				di->ddoffsetlow = SI_PCI_DMA2;
			else
				di->ddoffsetlow = SI_PCI_DMA;

			di->ddoffsethigh = 0;
		}
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}
#if defined(__mips__) && defined(IL_BIGENDIAN)
	di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
#endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((si_coreid(sih) == SDIOD_CORE_ID) && ((si_corerev(sih) > 0) && (si_corerev(sih) <= 2)))
		di->addrext = 0;
	else if ((si_coreid(sih) == I2S_CORE_ID) &&
	         ((si_corerev(sih) == 0) || (si_corerev(sih) == 1)))
		di->addrext = 0;
	else
		di->addrext = _dma_isaddrext(di);
	/* do the descriptors need to be aligned and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		if (DMA64_MODE(di)) {
			di->dmadesc_align = D64RINGALIGN_BITS;
			if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
				/* for smaller dd table, HW relaxes the alignment requirement */
				di->dmadesc_align = D64RINGALIGN_BITS - 1;
			}
		} else
			di->dmadesc_align = D32RINGALIGN_BITS;
	} else {
		/* The start address of the descriptor table should be aligned to cache line size,
		 * or other structures may share a cache line with it, which can lead to memory
		 * overlapping due to cache write-back operation. In the case of MIPS 74k, the
		 * cache line size is 32 bytes.
		 */
#ifdef __mips__
		di->dmadesc_align = 5;	/* 32 byte alignment */
#else
		di->dmadesc_align = 4;	/* 16 byte alignment */
#endif
	}

	DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
	          di->aligndesc_4k, di->dmadesc_align));
	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		if ((di->txp = MALLOC(osh, size)) == NULL) {
			DMA_ERROR(("%s: %s: out of tx memory, malloced %d bytes\n",
			           di->name, __FUNCTION__, MALLOCED(osh)));
			goto fail;
		}
		bzero(di->txp, size);
	}
	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		if ((di->rxp = MALLOC(osh, size)) == NULL) {
			DMA_ERROR(("%s: %s: out of rx memory, malloced %d bytes\n",
			           di->name, __FUNCTION__, MALLOCED(osh)));
			goto fail;
		}
		bzero(di->rxp, size);
	}
	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}
	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: %s: txdpa 0x%x: addrext not supported\n",
			           di->name, __FUNCTION__, (uint32)PHYSADDRLO(di->txdpa)));
			goto fail;
		}
		if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: %s: rxdpa 0x%x: addrext not supported\n",
			           di->name, __FUNCTION__, (uint32)PHYSADDRLO(di->rxdpa)));
			goto fail;
		}
	}

	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh "
	           "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow,
	           di->dataoffsethigh, di->addrext));
	/* allocate DMA mapping vectors */
	if (DMASGLIST_ENAB) {
		if (ntxd) {
			size = ntxd * sizeof(hnddma_seg_map_t);
			if ((di->txp_dmah = (hnddma_seg_map_t *)MALLOC(osh, size)) == NULL)
				goto fail;
			bzero(di->txp_dmah, size);
		}

		if (nrxd) {
			size = nrxd * sizeof(hnddma_seg_map_t);
			if ((di->rxp_dmah = (hnddma_seg_map_t *)MALLOC(osh, size)) == NULL)
				goto fail;
			bzero(di->rxp_dmah, size);
		}
	}

	return ((hnddma_t *)di);

fail:
	_dma_detach(di);
	return (NULL);
}
/* init the tx or rx descriptor */
static INLINE void
dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, dmaaddr_t pa, uint outidx, uint32 *flags,
             uint32 bufcount)
{
	/* dma32 uses 32-bit control to fit both flags and bufcounter */
	*flags = *flags | (bufcount & CTRL_BC_MASK);

	if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
		W_SM(&ddring[outidx].addr, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	} else {
		/* address extension */
		uint32 ae;
		ASSERT(di->addrext);

		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

		*flags |= (ae << CTRL_AE_SHIFT);
		W_SM(&ddring[outidx].addr, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	}
}
/* Check for odd number of 1's */
STATIC INLINE uint32
parity32(uint32 data)
{
	data ^= data >> 16;
	data ^= data >> 8;
	data ^= data >> 4;
	data ^= data >> 2;
	data ^= data >> 1;

	return (data & 1);
}

#define DMA64_DD_PARITY(dd)  parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
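/*
 * Example (illustrative): parity32() folds the word onto itself, leaving in
 * bit 0 the XOR of all 32 input bits: parity32(0x80000001) == 0 (two bits
 * set), parity32(0x00010000) == 1. DMA64_DD_PARITY() applies this to the XOR
 * of all four descriptor words; when the result is odd, dma64_dd_upd() below
 * sets D64_CTRL2_PARITY so the descriptor as a whole carries even parity.
 */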
static INLINE void
dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx, uint32 *flags,
             uint32 bufcount)
{
	uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
#if defined(__mips__) && defined(IL_BIGENDIAN)
	if ((di->dataoffsetlow == SI_SDRAM_SWAPPED) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#else
	if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
		ASSERT((PHYSADDRHI(pa) & PCI64ADDR_HIGH) == 0);

		W_SM(&ddring[outidx].addrlow, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	} else {
		/* address extension for 32-bit PCI */
		uint32 ae;
		ASSERT(di->addrext);

		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
		ASSERT(PHYSADDRHI(pa) == 0);

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		W_SM(&ddring[outidx].addrlow, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(0 + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	}

	if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) {
		if (DMA64_DD_PARITY(&ddring[outidx])) {
			W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
		}
	}
}
static bool
_dma32_addrext(osl_t *osh, dma32regs_t *dma32regs)
{
	uint32 w;

	OR_REG(osh, &dma32regs->control, XC_AE);
	w = R_REG(osh, &dma32regs->control);
	AND_REG(osh, &dma32regs->control, ~XC_AE);
	return ((w & XC_AE) == XC_AE);
}
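/*
 * The write/read-back/restore probe above works because the AE bit sticks
 * only on engines that implement address extension; on engines that don't,
 * the bit reads back 0, so no separate capability register is needed.
 */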
static bool
_dma_alloc(dma_info_t *di, uint direction)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		return dma64_alloc(di, direction);
	} else if (DMA32_ENAB(di)) {
		return dma32_alloc(di, direction);
	} else {
		ASSERT(0);
		return FALSE;
	}
}
/* !! may be called with core in reset */
static void
_dma_detach(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_detach\n", di->name));

	/* shouldn't be here if descriptors are unreclaimed */
	ASSERT(di->txin == di->txout);
	ASSERT(di->rxin == di->rxout);

	/* free dma descriptor rings */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		if (di->txd64)
			DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->txd64 - di->txdalign),
			                    di->txdalloc, (di->txdpaorig), &di->tx_dmah);
		if (di->rxd64)
			DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->rxd64 - di->rxdalign),
			                    di->rxdalloc, (di->rxdpaorig), &di->rx_dmah);
	} else if (DMA32_ENAB(di)) {
		if (di->txd32)
			DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->txd32 - di->txdalign),
			                    di->txdalloc, (di->txdpaorig), &di->tx_dmah);
		if (di->rxd32)
			DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->rxd32 - di->rxdalign),
			                    di->rxdalloc, (di->rxdpaorig), &di->rx_dmah);
	}

	/* free packet pointer vectors */
	if (di->txp)
		MFREE(di->osh, (void *)di->txp, (di->ntxd * sizeof(void *)));
	if (di->rxp)
		MFREE(di->osh, (void *)di->rxp, (di->nrxd * sizeof(void *)));

	/* free tx packet DMA handles */
	if (di->txp_dmah)
		MFREE(di->osh, (void *)di->txp_dmah, di->ntxd * sizeof(hnddma_seg_map_t));

	/* free rx packet DMA handles */
	if (di->rxp_dmah)
		MFREE(di->osh, (void *)di->rxp_dmah, di->nrxd * sizeof(hnddma_seg_map_t));

	/* free our private info structure */
	MFREE(di->osh, (void *)di, sizeof(dma_info_t));
}
static bool
_dma_descriptor_align(dma_info_t *di)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		uint32 addrl;

		/* Check to see if the descriptors need to be aligned on 4K/8K or not */
		if (di->d64txregs != NULL) {
			W_REG(di->osh, &di->d64txregs->addrlow, 0xff0);
			addrl = R_REG(di->osh, &di->d64txregs->addrlow);
			if (addrl != 0)
				return FALSE;
		} else if (di->d64rxregs != NULL) {
			W_REG(di->osh, &di->d64rxregs->addrlow, 0xff0);
			addrl = R_REG(di->osh, &di->d64rxregs->addrlow);
			if (addrl != 0)
				return FALSE;
		}
	}
	return TRUE;
}
/* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
static bool
_dma_isaddrext(dma_info_t *di)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		/* DMA64 supports full 32- or 64-bit operation. AE is always valid */

		/* not all tx or rx channels are available */
		if (di->d64txregs != NULL) {
			if (!_dma64_addrext(di->osh, di->d64txregs)) {
				DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n",
				           di->name));
				ASSERT(0);
			}
			return TRUE;
		} else if (di->d64rxregs != NULL) {
			if (!_dma64_addrext(di->osh, di->d64rxregs)) {
				DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n",
				           di->name));
				ASSERT(0);
			}
			return TRUE;
		}

		return FALSE;
	} else if (DMA32_ENAB(di)) {
		if (di->d32txregs)
			return (_dma32_addrext(di->osh, di->d32txregs));
		else if (di->d32rxregs)
			return (_dma32_addrext(di->osh, di->d32rxregs));
	}

	return FALSE;
}
/* initialize descriptor table base address */
static void
_dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		if (!di->aligndesc_4k) {
			if (direction == DMA_TX)
				di->xmtptrbase = PHYSADDRLO(pa);
			else
				di->rcvptrbase = PHYSADDRLO(pa);
		}

		if ((di->ddoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d64txregs->addrlow, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
				W_REG(di->osh, &di->d64txregs->addrhigh, (PHYSADDRHI(pa) +
				      di->ddoffsethigh));
			} else {
				W_REG(di->osh, &di->d64rxregs->addrlow, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
				W_REG(di->osh, &di->d64rxregs->addrhigh, (PHYSADDRHI(pa) +
				      di->ddoffsethigh));
			}
		} else {
			/* DMA64 32bits address extension */
			uint32 ae;
			ASSERT(di->addrext);
			ASSERT(PHYSADDRHI(pa) == 0);

			/* shift the high bit(s) from pa to ae */
			ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
			PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d64txregs->addrlow, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
				W_REG(di->osh, &di->d64txregs->addrhigh, di->ddoffsethigh);
				SET_REG(di->osh, &di->d64txregs->control, D64_XC_AE,
				        (ae << D64_XC_AE_SHIFT));
			} else {
				W_REG(di->osh, &di->d64rxregs->addrlow, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
				W_REG(di->osh, &di->d64rxregs->addrhigh, di->ddoffsethigh);
				SET_REG(di->osh, &di->d64rxregs->control, D64_RC_AE,
				        (ae << D64_RC_AE_SHIFT));
			}
		}
	} else if (DMA32_ENAB(di)) {
		ASSERT(PHYSADDRHI(pa) == 0);
		if ((di->ddoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
			if (direction == DMA_TX)
				W_REG(di->osh, &di->d32txregs->addr, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
			else
				W_REG(di->osh, &di->d32rxregs->addr, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
		} else {
			/* dma32 address extension */
			uint32 ae;
			ASSERT(di->addrext);

			/* shift the high bit(s) from pa to ae */
			ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
			PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d32txregs->addr, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
				SET_REG(di->osh, &di->d32txregs->control, XC_AE, ae << XC_AE_SHIFT);
			} else {
				W_REG(di->osh, &di->d32rxregs->addr, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
				SET_REG(di->osh, &di->d32rxregs->control, RC_AE, ae << RC_AE_SHIFT);
			}
		}
	}
}
static void
_dma_fifoloopbackenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));

	if (DMA64_ENAB(di) && DMA64_MODE(di))
		OR_REG(di->osh, &di->d64txregs->control, D64_XC_LE);
	else if (DMA32_ENAB(di))
		OR_REG(di->osh, &di->d32txregs->control, XC_LE);
	else
		ASSERT(0);
}
static void
_dma_rxinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxinit\n", di->name));

	if (di->nrxd == 0)
		return;

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		BZERO_SM((void *)(uintptr)di->rxd64, (di->nrxd * sizeof(dma64dd_t)));

		/* DMA engine without alignment requirement requires table to be inited
		 * before enabling the engine
		 */
		if (!di->aligndesc_4k)
			_dma_ddtable_init(di, DMA_RX, di->rxdpa);

		_dma_rxenable(di);

		if (di->aligndesc_4k)
			_dma_ddtable_init(di, DMA_RX, di->rxdpa);
	} else if (DMA32_ENAB(di)) {
		BZERO_SM((void *)(uintptr)di->rxd32, (di->nrxd * sizeof(dma32dd_t)));
		_dma_rxenable(di);
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
	} else
		ASSERT(0);
}
static void
_dma_rxenable(dma_info_t *di)
{
	uint dmactrlflags = di->hnddma.dmactrlflags;

	DMA_TRACE(("%s: dma_rxenable\n", di->name));

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		uint32 control = (R_REG(di->osh, &di->d64rxregs->control) & D64_RC_AE) | D64_RC_RE;

		if ((dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= D64_RC_PD;

		if (dmactrlflags & DMA_CTRL_ROC)
			control |= D64_RC_OC;

		/* These bits 20:18 (burstLen) of control register can be written but will take
		 * effect only if these bits are valid. So this will not affect previous versions
		 * of the DMA. They will continue to have those bits set to 0.
		 */
		control &= ~D64_RC_BL_MASK;
		control |= (di->rxburstlen << D64_RC_BL_SHIFT);

		control &= ~D64_RC_PC_MASK;
		control |= (di->rxprefetchctl << D64_RC_PC_SHIFT);

		control &= ~D64_RC_PT_MASK;
		control |= (di->rxprefetchthresh << D64_RC_PT_SHIFT);

		W_REG(di->osh, &di->d64rxregs->control,
		      ((di->rxoffset << D64_RC_RO_SHIFT) | control));
	} else if (DMA32_ENAB(di)) {
		uint32 control = (R_REG(di->osh, &di->d32rxregs->control) & RC_AE) | RC_RE;

		if ((dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= RC_PD;

		if (dmactrlflags & DMA_CTRL_ROC)
			control |= RC_OC;

		/* These bits 20:18 (burstLen) of control register can be written but will take
		 * effect only if these bits are valid. So this will not affect previous versions
		 * of the DMA. They will continue to have those bits set to 0.
		 */
		control &= ~RC_BL_MASK;
		control |= (di->rxburstlen << RC_BL_SHIFT);

		control &= ~RC_PC_MASK;
		control |= (di->rxprefetchctl << RC_PC_SHIFT);

		control &= ~RC_PT_MASK;
		control |= (di->rxprefetchthresh << RC_PT_SHIFT);

		W_REG(di->osh, &di->d32rxregs->control,
		      ((di->rxoffset << RC_RO_SHIFT) | control));
	} else
		ASSERT(0);
}
static void
_dma_rx_param_get(dma_info_t *di, uint16 *rxoffset, uint16 *rxbufsize)
{
	/* the normal values fit into 16 bits */
	*rxoffset = (uint16)di->rxoffset;
	*rxbufsize = (uint16)di->rxbufsize;
}
/* !! rx entry routine
 * returns a pointer to the next frame received, or NULL if there are no more
 * if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is supported
 * otherwise, it's treated as a giant pkt and will be tossed.
 * The DMA scattering starts with a normal DMA header, followed by first buffer data.
 * After it reaches the max size of buffer, the data continues in the next DMA descriptor
 * buffer WITHOUT DMA header
 */
static void * BCMFASTPATH
_dma_rx(dma_info_t *di)
{
	void *p, *head, *tail;
	uint len;
	uint pkt_len;
	int resid = 0;
	dma64regs_t *dregs = di->d64rxregs;
	osl_t *osh = di->osh;

next_frame:
	head = _dma_getnextrxp(di, FALSE);
	if (head == NULL)
		return (NULL);

#if !defined(__mips__)
	if ((R_REG(osh, &dregs->control) & D64_RC_GE)) {
		/* In case of glommed pkt get length from hwheader */
		len = ltoh16(*((uint16 *)(PKTDATA(di->osh, head)) + di->rxoffset/2 + 2)) + 4;

		*(uint16 *)(PKTDATA(di->osh, head)) = len;
	} else {
		len = ltoh16(*(uint16 *)(PKTDATA(di->osh, head)));
	}
#else
	{
		int read_count;

		len = ltoh16(*(uint16 *)(PKTDATA(di->osh, head)));
		for (read_count = 200; (!(len = ltoh16(*(uint16 *)OSL_UNCACHED(PKTDATA(di->osh,
			head)))) && read_count); read_count--) {
			if (CHIPID(di->sih->chip) == BCM5356_CHIP_ID)
				break;
			OSL_DELAY(1);
		}

		if (!len) {
			DMA_ERROR(("%s: dma_rx: frame length (%d)\n", di->name, len));
			PKTFREE(di->osh, head, FALSE);
			goto next_frame;
		}
	}
#endif /* defined(__mips__) */
	DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));

	/* set actual length */
	pkt_len = MIN((di->rxoffset + len), di->rxbufsize);
	PKTSETLEN(di->osh, head, pkt_len);
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	if (resid > 0) {
		tail = head;
		while ((resid > 0) && (p = _dma_getnextrxp(di, FALSE))) {
			PKTSETNEXT(di->osh, tail, p);
			pkt_len = MIN(resid, (int)di->rxbufsize);
			PKTSETLEN(di->osh, p, pkt_len);

			tail = p;
			resid -= di->rxbufsize;
		}

		if (resid > 0) {
			uint cur;
			ASSERT(p == NULL);
			cur = (DMA64_ENAB(di) && DMA64_MODE(di)) ?
			      B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
			           di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t) :
			      B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK,
			          dma32dd_t);
			DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
			           di->rxin, di->rxout, cur));
		}

		if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di->name, len));
			PKTFREE(di->osh, head, FALSE);
			di->hnddma.rxgiants++;
			goto next_frame;
		}
	}

	return (head);
}
/* post receive buffers
 *  return FALSE if refill failed completely and ring is empty
 *  this will stall the rx dma and user might want to call rxfill again asap
 *  This unlikely happens on a memory-rich NIC, but often on a memory-constrained dongle
 */
static bool BCMFASTPATH
_dma_rxfill(dma_info_t *di)
{
	void *p;
	uint16 rxin, rxout;
	uint32 flags = 0;
	uint n;
	uint i;
	dmaaddr_t pa;
	uint extra_offset = 0, extra_pad;
	bool ring_empty;
	uint alignment_req = (di->hnddma.dmactrlflags & DMA_CTRL_USB_BOUNDRY4KB_WAR) ?
	                     16 : 1;	/* MUST BE POWER of 2 */

	ring_empty = FALSE;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */

	rxin = di->rxin;
	rxout = di->rxout;

	n = di->nrxpost - NRXDACTIVE(rxin, rxout);

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = di->rxextrahdrroom;

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

	for (i = 0; i < n; i++) {
		/* the di->rxbufsize doesn't include the extra headroom, we need to add it to the
		   size to be allocated
		 */
		if (POOL_ENAB(di->pktpool)) {
			ASSERT(di->pktpool);
			p = pktpool_get(di->pktpool);
#ifdef BCMDBG_POOL
			if (p)
				PKTPOOLSETSTATE(p, POOL_RXFILL);
#endif /* BCMDBG_POOL */
		} else {
			p = PKTGET(di->osh, (di->rxbufsize + extra_offset + alignment_req - 1),
			           FALSE);
		}
		if (p == NULL) {
			DMA_TRACE(("%s: dma_rxfill: out of rxbufs\n", di->name));
			if (i == 0) {
				if (DMA64_ENAB(di) && DMA64_MODE(di)) {
					if (dma64_rxidle(di)) {
						DMA_TRACE(("%s: rxfill64: ring is empty !\n",
						           di->name));
						ring_empty = TRUE;
					}
				} else if (DMA32_ENAB(di)) {
					if (dma32_rxidle(di)) {
						DMA_TRACE(("%s: rxfill32: ring is empty !\n",
						           di->name));
						ring_empty = TRUE;
					}
				} else
					ASSERT(0);
			}
			di->hnddma.rxnobuf++;
			break;
		}
		/* reserve an extra headroom, if applicable */
		if (di->hnddma.dmactrlflags & DMA_CTRL_USB_BOUNDRY4KB_WAR) {
			extra_pad = ((alignment_req - (uint)(((unsigned long)PKTDATA(di->osh, p) -
			            (unsigned long)(uchar *)0))) & (alignment_req - 1));
		} else
			extra_pad = 0;

		if (extra_offset + extra_pad)
			PKTPULL(di->osh, p, extra_offset + extra_pad);

#ifdef CTFMAP
		/* mark as ctf buffer for fast mapping */
		if (CTF_ENAB(kcih)) {
			ASSERT((((uint32)PKTDATA(di->osh, p)) & 31) == 0);
			PKTSETCTF(di->osh, p);
		}
#endif /* CTFMAP */

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(uint32 *)(PKTDATA(di->osh, p)) = 0;

		if (DMASGLIST_ENAB)
			bzero(&di->rxp_dmah[rxout], sizeof(hnddma_seg_map_t));

		pa = DMA_MAP(di->osh, PKTDATA(di->osh, p),
		             di->rxbufsize, DMA_RX, p,
		             &di->rxp_dmah[rxout]);

		ASSERT(ISALIGNED(PHYSADDRLO(pa), 4));

#ifdef __mips__
		/* Do an un-cached write now that DMA_MAP has invalidated the cache
		 */
		*(uint32 *)OSL_UNCACHED((PKTDATA(di->osh, p))) = 0;
#endif /* __mips__ */

		/* save the free packet pointer */
		ASSERT(di->rxp[rxout] == NULL);
		di->rxp[rxout] = p;

		/* reset flags for each descriptor */
		flags = 0;
		if (DMA64_ENAB(di) && DMA64_MODE(di)) {
			if (rxout == (di->nrxd - 1))
				flags = D64_CTRL1_EOT;

			dma64_dd_upd(di, di->rxd64, pa, rxout, &flags, di->rxbufsize);
		} else if (DMA32_ENAB(di)) {
			if (rxout == (di->nrxd - 1))
				flags = CTRL_EOT;

			ASSERT(PHYSADDRHI(pa) == 0);
			dma32_dd_upd(di, di->rxd32, pa, rxout, &flags, di->rxbufsize);
		} else
			ASSERT(0);

		rxout = NEXTRXD(rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		W_REG(di->osh, &di->d64rxregs->ptr, di->rcvptrbase + I2B(rxout, dma64dd_t));
	} else if (DMA32_ENAB(di)) {
		W_REG(di->osh, &di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
	} else
		ASSERT(0);

	return ring_empty;
}
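/*
 * Example (illustrative): with nrxd == 256 and nrxpost == 64, a call that
 * finds NRXDACTIVE(rxin, rxout) == 10 buffers still posted allocates and
 * posts n == 54 more, then writes rcvptrbase + I2B(rxout, dma64dd_t) to the
 * rx ptr register so the engine may consume the newly posted descriptors.
 */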
/* like getnexttxp but no reclaim */
static void *
_dma_peeknexttxp(dma_info_t *di)
{
	uint end, i;

	if (di->ntxd == 0)
		return (NULL);

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		end = B2I(((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK) -
		          di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		end = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
	} else
		ASSERT(0);

	for (i = di->txin; i != end; i = NEXTTXD(i))
		if (di->txp[i])
			return (di->txp[i]);

	return (NULL);
}
int
_dma_peekntxp(dma_info_t *di, int *len, void *txps[], txd_range_t range)
{
	uint start, end, i, act;
	void *txp = NULL;
	int k, len_max;

	DMA_TRACE(("%s: dma_peekntxp\n", di->name));

	ASSERT(len);
	ASSERT(txps);
	ASSERT(di);
	if (di->ntxd == 0) {
		*len = 0;
		return BCME_ERROR;
	}

	len_max = *len;
	*len = 0;

	start = di->txin;

	if (range == HNDDMA_RANGE_ALL)
		end = di->txout;
	else {
		if (DMA64_ENAB(di)) {
			end = B2I(((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK) -
			      di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t);

			act = (uint)(R_REG(di->osh, &di->d64txregs->status1) & D64_XS1_AD_MASK);
			act = (act - di->xmtptrbase) & D64_XS0_CD_MASK;
			act = B2I(act, dma64dd_t);
		} else {
			end = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);

			act = (uint)((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK) >>
			      XS_AD_SHIFT);
			act = (uint)B2I(act, dma32dd_t);
		}

		if (range == HNDDMA_RANGE_TRANSFERED)
			end = act;
	}

	if ((start == 0) && (end > di->txout))
		end = di->txout;

	k = 0;
	for (i = start; i != end; i = NEXTTXD(i)) {
		txp = di->txp[i];
		if (txp != NULL) {
			if (k < len_max)
				txps[k++] = txp;
			else
				break;
		}
	}
	*len = k;

	return BCME_OK;
}
/* like getnextrxp but does not take it off the ring */
static void *
_dma_peeknextrxp(dma_info_t *di)
{
	uint end, i;

	if (di->nrxd == 0)
		return (NULL);

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		end = B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
		          di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		end = B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t);
	} else
		ASSERT(0);

	for (i = di->rxin; i != end; i = NEXTRXD(i))
		if (di->rxp[i])
			return (di->rxp[i]);

	return (NULL);
}
static void
_dma_rxreclaim(dma_info_t *di)
{
	void *p;
	bool origcb = TRUE;

	/* "unused local" warning suppression for OSLs that
	 * define PKTFREE() without using the di->osh arg
	 */

	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

	if (POOL_ENAB(di->pktpool) &&
	    ((origcb = pktpool_emptycb_disabled(di->pktpool)) == FALSE))
		pktpool_emptycb_disable(di->pktpool, TRUE);

	while ((p = _dma_getnextrxp(di, TRUE)))
		PKTFREE(di->osh, p, FALSE);

	if (origcb == FALSE)
		pktpool_emptycb_disable(di->pktpool, FALSE);
}
static void * BCMFASTPATH
_dma_getnextrxp(dma_info_t *di, bool forceall)
{
	if (di->nrxd == 0)
		return (NULL);

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		return dma64_getnextrxp(di, forceall);
	} else if (DMA32_ENAB(di)) {
		return dma32_getnextrxp(di, forceall);
	} else {
		ASSERT(0);
		return (NULL);
	}
}
static void
_dma_txblock(dma_info_t *di)
{
	di->hnddma.txavail = 0;
}

static void
_dma_txunblock(dma_info_t *di)
{
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}

static uint
_dma_txactive(dma_info_t *di)
{
	return NTXDACTIVE(di->txin, di->txout);
}
static uint
_dma_txpending(dma_info_t *di)
{
	uint curr;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		curr = B2I(((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK) -
		       di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		curr = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
	} else
		ASSERT(0);

	return NTXDACTIVE(curr, di->txout);
}
static uint
_dma_txcommitted(dma_info_t *di)
{
	uint ptr;
	uint txin = di->txin;

	if (txin == di->txout)
		return 0;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		ptr = B2I(R_REG(di->osh, &di->d64txregs->ptr), dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		ptr = B2I(R_REG(di->osh, &di->d32txregs->ptr), dma32dd_t);
	} else
		ASSERT(0);

	return NTXDACTIVE(di->txin, ptr);
}
static uint
_dma_rxactive(dma_info_t *di)
{
	return NRXDACTIVE(di->rxin, di->rxout);
}
static void
_dma_counterreset(dma_info_t *di)
{
	/* reset all software counters */
	di->hnddma.rxgiants = 0;
	di->hnddma.rxnobuf = 0;
	di->hnddma.txnobuf = 0;
}
static uint
_dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
{
	uint dmactrlflags;

	if (di == NULL) {
		DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
		return (0);
	}

	dmactrlflags = di->hnddma.dmactrlflags;
	ASSERT((flags & ~mask) == 0);

	dmactrlflags &= ~mask;
	dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (dmactrlflags & DMA_CTRL_PEN) {
		uint32 control;

		if (DMA64_ENAB(di) && DMA64_MODE(di)) {
			control = R_REG(di->osh, &di->d64txregs->control);
			W_REG(di->osh, &di->d64txregs->control, control | D64_XC_PD);
			if (R_REG(di->osh, &di->d64txregs->control) & D64_XC_PD) {
				/* We *can* disable it so it is supported,
				 * restore control register
				 */
				W_REG(di->osh, &di->d64txregs->control, control);
			} else {
				/* Not supported, don't allow it to be enabled */
				dmactrlflags &= ~DMA_CTRL_PEN;
			}
		} else if (DMA32_ENAB(di)) {
			control = R_REG(di->osh, &di->d32txregs->control);
			W_REG(di->osh, &di->d32txregs->control, control | XC_PD);
			if (R_REG(di->osh, &di->d32txregs->control) & XC_PD) {
				W_REG(di->osh, &di->d32txregs->control, control);
			} else {
				/* Not supported, don't allow it to be enabled */
				dmactrlflags &= ~DMA_CTRL_PEN;
			}
		} else
			ASSERT(0);
	}

	di->hnddma.dmactrlflags = dmactrlflags;

	return (dmactrlflags);
}
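/*
 * Example (illustrative): the mask picks which flags may change and the value
 * gives their new state, so dma_attach() clears both defaults with
 * (DMA_CTRL_ROC | DMA_CTRL_PEN, 0), while a driver that wants Rx Overflow
 * Continue alone would call:
 *
 *	di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC, DMA_CTRL_ROC);
 */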
/* get the address of the var in order to change later */
static uintptr
_dma_getvar(dma_info_t *di, const char *name)
{
	if (!strcmp(name, "&txavail"))
		return ((uintptr) &(di->hnddma.txavail));
	else {
		ASSERT(0);
	}
	return (0);
}

static uint
_dma_avoidancecnt(dma_info_t *di)
{
	return (di->dma_avoidance_cnt);
}
void
dma_txpioloopback(osl_t *osh, dma32regs_t *regs)
{
	OR_REG(osh, &regs->control, XC_LE);
}
uint8
dma_align_sizetobits(uint size)
{
	uint8 bitpos = 0;

	ASSERT(size);
	ASSERT(!(size & (size-1)));
	while (size >>= 1) {
		bitpos++;
	}
	return (bitpos);
}
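/*
 * Example (illustrative): dma_align_sizetobits(4096) == 12, i.e. a 4 KB ring
 * needs a base address with the low 12 bits clear. dma_ringalloc() below uses
 * this to retry with ring-size alignment when the first allocation straddles
 * the given boundary.
 */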
/* This function ensures that the DMA descriptor ring will not get allocated
 * across a page boundary. If the allocation is done across the page boundary
 * the first time, then it is freed and the allocation is redone at a
 * descriptor-ring-size-aligned location. This ensures that the ring will
 * not cross a page boundary.
 */
static void *
dma_ringalloc(osl_t *osh, uint32 boundary, uint size, uint16 *alignbits, uint *alloced,
	dmaaddr_t *descpa, osldma_t **dmah)
{
	void *va;
	uint32 desc_strtaddr;
	uint32 alignbytes = 1 << *alignbits;

	if ((va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced, descpa, dmah)) == NULL)
		return NULL;

	desc_strtaddr = (uint32)ROUNDUP((uint)PHYSADDRLO(*descpa), alignbytes);
	if (((desc_strtaddr + size - 1) & boundary) !=
	    (desc_strtaddr & boundary)) {
		*alignbits = dma_align_sizetobits(size);
		DMA_FREE_CONSISTENT(osh, va,
		                    size, *descpa, dmah);
		va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced, descpa, dmah);
	}
	return va;
}
static void
dma32_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma32dd_t *ring, uint start, uint end,
	uint max_num)
{
	uint i;

	for (i = start; i != end; i = XXD((i + 1), max_num)) {
		/* in the format of high->low 8 bytes */
		bcm_bprintf(b, "ring index %d: 0x%x %x\n",
		            i, R_SM(&ring[i].addr), R_SM(&ring[i].ctrl));
	}
}
static void
dma32_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
	bcm_bprintf(b, "DMA32: txd32 %p txdpa 0x%lx txp %p txin %d txout %d "
	            "txavail %d txnodesc %d\n", di->txd32, PHYSADDRLO(di->txdpa), di->txp, di->txin,
	            di->txout, di->hnddma.txavail, di->hnddma.txnodesc);

	bcm_bprintf(b, "xmtcontrol 0x%x xmtaddr 0x%x xmtptr 0x%x xmtstatus 0x%x\n",
	            R_REG(di->osh, &di->d32txregs->control),
	            R_REG(di->osh, &di->d32txregs->addr),
	            R_REG(di->osh, &di->d32txregs->ptr),
	            R_REG(di->osh, &di->d32txregs->status));

	if (dumpring && di->txd32)
		dma32_dumpring(di, b, di->txd32, di->txin, di->txout, di->ntxd);
}
static void
dma32_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
	bcm_bprintf(b, "DMA32: rxd32 %p rxdpa 0x%lx rxp %p rxin %d rxout %d\n",
	            di->rxd32, PHYSADDRLO(di->rxdpa), di->rxp, di->rxin, di->rxout);

	bcm_bprintf(b, "rcvcontrol 0x%x rcvaddr 0x%x rcvptr 0x%x rcvstatus 0x%x\n",
	            R_REG(di->osh, &di->d32rxregs->control),
	            R_REG(di->osh, &di->d32rxregs->addr),
	            R_REG(di->osh, &di->d32rxregs->ptr),
	            R_REG(di->osh, &di->d32rxregs->status));
	if (di->rxd32 && dumpring)
		dma32_dumpring(di, b, di->rxd32, di->rxin, di->rxout, di->nrxd);
}
static void
dma32_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
	dma32_dumptx(di, b, dumpring);
	dma32_dumprx(di, b, dumpring);
}
static void
dma64_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma64dd_t *ring, uint start, uint end,
	uint max_num)
{
	uint i;

	for (i = start; i != end; i = XXD((i + 1), max_num)) {
		/* in the format of high->low 16 bytes */
		bcm_bprintf(b, "ring index %d: 0x%x %x %x %x\n",
		            i, R_SM(&ring[i].addrhigh), R_SM(&ring[i].addrlow),
		            R_SM(&ring[i].ctrl2), R_SM(&ring[i].ctrl1));
	}
}
static void
dma64_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
	bcm_bprintf(b, "DMA64: txd64 %p txdpa 0x%lx txdpahi 0x%lx txp %p txin %d txout %d "
	            "txavail %d txnodesc %d\n", di->txd64, PHYSADDRLO(di->txdpa),
	            PHYSADDRHI(di->txdpaorig), di->txp, di->txin, di->txout, di->hnddma.txavail,
	            di->hnddma.txnodesc);

	bcm_bprintf(b, "xmtcontrol 0x%x xmtaddrlow 0x%x xmtaddrhigh 0x%x "
	            "xmtptr 0x%x xmtstatus0 0x%x xmtstatus1 0x%x\n",
	            R_REG(di->osh, &di->d64txregs->control),
	            R_REG(di->osh, &di->d64txregs->addrlow),
	            R_REG(di->osh, &di->d64txregs->addrhigh),
	            R_REG(di->osh, &di->d64txregs->ptr),
	            R_REG(di->osh, &di->d64txregs->status0),
	            R_REG(di->osh, &di->d64txregs->status1));

	bcm_bprintf(b, "DMA64: DMA avoidance applied %d\n", di->dma_avoidance_cnt);

	if (dumpring && di->txd64) {
		dma64_dumpring(di, b, di->txd64, di->txin, di->txout, di->ntxd);
	}
}
static void
dma64_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
	bcm_bprintf(b, "DMA64: rxd64 %p rxdpa 0x%lx rxdpahi 0x%lx rxp %p rxin %d rxout %d\n",
	            di->rxd64, PHYSADDRLO(di->rxdpa), PHYSADDRHI(di->rxdpaorig), di->rxp,
	            di->rxin, di->rxout);

	bcm_bprintf(b, "rcvcontrol 0x%x rcvaddrlow 0x%x rcvaddrhigh 0x%x rcvptr "
	            "0x%x rcvstatus0 0x%x rcvstatus1 0x%x\n",
	            R_REG(di->osh, &di->d64rxregs->control),
	            R_REG(di->osh, &di->d64rxregs->addrlow),
	            R_REG(di->osh, &di->d64rxregs->addrhigh),
	            R_REG(di->osh, &di->d64rxregs->ptr),
	            R_REG(di->osh, &di->d64rxregs->status0),
	            R_REG(di->osh, &di->d64rxregs->status1));
	if (di->rxd64 && dumpring) {
		dma64_dumpring(di, b, di->rxd64, di->rxin, di->rxout, di->nrxd);
	}
}
static void
dma64_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
	dma64_dumptx(di, b, dumpring);
	dma64_dumprx(di, b, dumpring);
}
/* 32-bit DMA functions */

static void
dma32_txinit(dma_info_t *di)
{
	uint32 control = XC_XE;

	DMA_TRACE(("%s: dma_txinit\n", di->name));

	if (di->ntxd == 0)
		return;

	di->txin = di->txout = 0;
	di->hnddma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	BZERO_SM(DISCARD_QUAL(di->txd32, void), (di->ntxd * sizeof(dma32dd_t)));

	/* These bits 20:18 (burstLen) of control register can be written but will take
	 * effect only if these bits are valid. So this will not affect previous versions
	 * of the DMA. They will continue to have those bits set to 0.
	 */
	control |= (di->txburstlen << XC_BL_SHIFT);
	control |= (di->txmultioutstdrd << XC_MR_SHIFT);
	control |= (di->txprefetchctl << XC_PC_SHIFT);
	control |= (di->txprefetchthresh << XC_PT_SHIFT);

	if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= XC_PD;
	W_REG(di->osh, &di->d32txregs->control, control);
	_dma_ddtable_init(di, DMA_TX, di->txdpa);
}
static bool
dma32_txenabled(dma_info_t *di)
{
	uint32 xc;

	/* If the chip is dead, it is not enabled :-) */
	xc = R_REG(di->osh, &di->d32txregs->control);
	return ((xc != 0xffffffff) && (xc & XC_XE));
}
static void
dma32_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));

	if (di->ntxd == 0)
		return;

	OR_REG(di->osh, &di->d32txregs->control, XC_SE);
}

static void
dma32_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));

	if (di->ntxd == 0)
		return;

	AND_REG(di->osh, &di->d32txregs->control, ~XC_SE);
}

static bool
dma32_txsuspended(dma_info_t *di)
{
	return (di->ntxd == 0) || ((R_REG(di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
}
#ifdef WL_MULTIQUEUE
static void
dma32_txflush(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txflush\n", di->name));

	if (di->ntxd == 0)
		return;

	OR_REG(di->osh, &di->d32txregs->control, XC_SE | XC_FL);
}

static void
dma32_txflush_clear(dma_info_t *di)
{
	uint32 status;

	DMA_TRACE(("%s: dma_txflush_clear\n", di->name));

	if (di->ntxd == 0)
		return;

	SPINWAIT(((status = (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
	          != XS_XS_DISABLED) &&
	         (status != XS_XS_IDLE) &&
	         (status != XS_XS_STOPPED),
	         10000);
	AND_REG(di->osh, &di->d32txregs->control, ~XC_FL);
}
#endif /* WL_MULTIQUEUE */
static void
dma32_txreclaim(dma_info_t *di, txd_range_t range)
{
	void *p;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
	           (range == HNDDMA_RANGE_ALL) ? "all" :
	           ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : "transfered")));

	if (di->txin == di->txout)
		return;

	while ((p = dma32_getnexttxp(di, range)))
		PKTFREE(di->osh, p, TRUE);
}
static bool
dma32_txstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == XS_XS_STOPPED);
}

static bool
dma32_rxstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d32rxregs->status) & RS_RS_MASK) == RS_RS_STOPPED);
}
static bool
dma32_alloc(dma_info_t *di, uint direction)
{
    uint size;
    uint ddlen;
    void *va;
    uint alloced;
    uint16 align;
    uint16 align_bits;

    ddlen = sizeof(dma32dd_t);

    size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

    alloced = 0;
    align_bits = di->dmadesc_align;
    align = (1 << align_bits);

    if (direction == DMA_TX) {
        if ((va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits, &alloced,
                                &di->txdpaorig, &di->tx_dmah)) == NULL) {
            DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
                       di->name));
            return FALSE;
        }

        PHYSADDRHISET(di->txdpa, 0);
        ASSERT(PHYSADDRHI(di->txdpaorig) == 0);
        di->txd32 = (dma32dd_t *)ROUNDUP((uintptr)va, align);
        di->txdalign = (uint)((int8 *)(uintptr)di->txd32 - (int8 *)va);

        PHYSADDRLOSET(di->txdpa, PHYSADDRLO(di->txdpaorig) + di->txdalign);
        /* Make sure that alignment didn't overflow */
        ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));

        di->txdalloc = alloced;
        ASSERT(ISALIGNED(di->txd32, align));
    } else {
        if ((va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits, &alloced,
                                &di->rxdpaorig, &di->rx_dmah)) == NULL) {
            DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
                       di->name));
            return FALSE;
        }

        PHYSADDRHISET(di->rxdpa, 0);
        ASSERT(PHYSADDRHI(di->rxdpaorig) == 0);
        di->rxd32 = (dma32dd_t *)ROUNDUP((uintptr)va, align);
        di->rxdalign = (uint)((int8 *)(uintptr)di->rxd32 - (int8 *)va);

        PHYSADDRLOSET(di->rxdpa, PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
        /* Make sure that alignment didn't overflow */
        ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));

        di->rxdalloc = alloced;
        ASSERT(ISALIGNED(di->rxd32, align));
    }

    return TRUE;
}
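/* Worked example of the alignment fixup above (illustrative numbers): if
 * dma_ringalloc() returns va = 0x1010 with align = 0x1000, then
 * txd32 = ROUNDUP(0x1010, 0x1000) = 0x2000 and txdalign = 0xff0; adding the
 * same 0xff0 to the low physical address keeps va and pa in step.
 */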
static bool
dma32_txreset(dma_info_t *di)
{
    uint32 status;

    if (di->ntxd == 0)
        return TRUE;

    /* suspend tx DMA first */
    W_REG(di->osh, &di->d32txregs->control, XC_SE);
    SPINWAIT(((status = (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
              != XS_XS_DISABLED) &&
             (status != XS_XS_IDLE) &&
             (status != XS_XS_STOPPED),
             10000);

    W_REG(di->osh, &di->d32txregs->control, 0);
    SPINWAIT(((status = (R_REG(di->osh,
              &di->d32txregs->status) & XS_XS_MASK)) != XS_XS_DISABLED),
             10000);

    /* We should be disabled at this point */
    if (status != XS_XS_DISABLED) {
        DMA_ERROR(("%s: status != XS_XS_DISABLED 0x%x\n", __FUNCTION__, status));
        ASSERT(status == XS_XS_DISABLED);
        OSL_DELAY(300);
    }

    return (status == XS_XS_DISABLED);
}
static bool
dma32_rxidle(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_rxidle\n", di->name));

    if (di->nrxd == 0)
        return TRUE;

    return ((R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
            R_REG(di->osh, &di->d32rxregs->ptr));
}
static bool
dma32_rxreset(dma_info_t *di)
{
    uint32 status;

    if (di->nrxd == 0)
        return TRUE;

    W_REG(di->osh, &di->d32rxregs->control, 0);
    SPINWAIT(((status = (R_REG(di->osh,
              &di->d32rxregs->status) & RS_RS_MASK)) != RS_RS_DISABLED),
             10000);

    return (status == RS_RS_DISABLED);
}
static bool
dma32_rxenabled(dma_info_t *di)
{
    uint32 rc;

    rc = R_REG(di->osh, &di->d32rxregs->control);
    return ((rc != 0xffffffff) && (rc & RC_RE));
}
static bool
dma32_txsuspendedidle(dma_info_t *di)
{
    if (di->ntxd == 0)
        return TRUE;

    if (!(R_REG(di->osh, &di->d32txregs->control) & XC_SE))
        return 0;

    if ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
        return 0;

    OSL_DELAY(2);
    return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == XS_XS_IDLE);
}
/* !! tx entry routine
 * supports full 32bit dma engine buffer addressing so
 * dma buffers can cross 4 Kbyte page boundaries.
 *
 * WARNING: the caller must check the return value for error.
 *   The error (tossed frames) could be fatal and cause many subsequent hard-to-debug problems.
 */
static int
dma32_txfast(dma_info_t *di, void *p0, bool commit)
{
    void *p, *next;
    uchar *data;
    uint len;
    uint16 txout;
    uint32 flags = 0;
    dmaaddr_t pa;

    DMA_TRACE(("%s: dma_txfast\n", di->name));

    txout = di->txout;

    /*
     * Walk the chain of packet buffers
     * allocating and initializing transmit descriptor entries.
     */
    for (p = p0; p; p = next) {
        uint nsegs, j;
        hnddma_seg_map_t *map;

        data = PKTDATA(di->osh, p);
        len = PKTLEN(di->osh, p);
#ifdef BCM_DMAPAD
        len += PKTDMAPAD(di->osh, p);
#endif /* BCM_DMAPAD */
        next = PKTNEXT(di->osh, p);

        /* return nonzero if out of tx descriptors */
        if (NEXTTXD(txout) == di->txin)
            goto outoftxd;

        if (len == 0)
            continue;

        if (DMASGLIST_ENAB)
            bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));

        /* get physical address of buffer start */
        pa = DMA_MAP(di->osh, data, len, DMA_TX, p, &di->txp_dmah[txout]);

        if (DMASGLIST_ENAB) {
            map = &di->txp_dmah[txout];

            /* See if all the segments can be accounted for */
            if (map->nsegs > (uint)(di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1))
                goto outoftxd;

            nsegs = map->nsegs;
        } else
            nsegs = 1;

        for (j = 1; j <= nsegs; j++) {
            flags = 0;
            if (p == p0 && j == 1)
                flags |= CTRL_SOF;

            /* With a DMA segment list, Descriptor table is filled
             * using the segment list instead of looping over
             * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
             * end of segment list is reached.
             */
            if ((!DMASGLIST_ENAB && next == NULL) ||
                (DMASGLIST_ENAB && j == nsegs))
                flags |= (CTRL_IOC | CTRL_EOF);
            if (txout == (di->ntxd - 1))
                flags |= CTRL_EOT;

            if (DMASGLIST_ENAB) {
                len = map->segs[j - 1].length;
                pa = map->segs[j - 1].addr;
            }
            ASSERT(PHYSADDRHI(pa) == 0);

            dma32_dd_upd(di, di->txd32, pa, txout, &flags, len);
            ASSERT(di->txp[txout] == NULL);

            txout = NEXTTXD(txout);
        }

        /* See above. No need to loop over individual buffers */
        if (DMASGLIST_ENAB)
            break;
    }

    /* if last txd eof not set, fix it */
    if (!(flags & CTRL_EOF))
        W_SM(&di->txd32[PREVTXD(txout)].ctrl, BUS_SWAP32(flags | CTRL_IOC | CTRL_EOF));

    /* save the packet */
    di->txp[PREVTXD(txout)] = p0;

    /* bump the tx descriptor index */
    di->txout = txout;

    /* kick the chip */
    if (commit)
        W_REG(di->osh, &di->d32txregs->ptr, I2B(txout, dma32dd_t));

    /* tx flow control */
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    return (0);

outoftxd:
    DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
    PKTFREE(di->osh, p0, TRUE);
    di->hnddma.txavail = 0;
    di->hnddma.txnobuf++;
    di->hnddma.txnodesc++;
    return (-1);
}
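/* Ring accounting behind the flow-control line above: NTXDACTIVE(txin, txout)
 * is the count of posted-but-unreclaimed descriptors, effectively
 * (txout - txin) mod ntxd. E.g. with ntxd = 64, txin = 60 and txout = 4 there
 * are (4 - 60) & 63 = 8 active descriptors, so txavail = 64 - 8 - 1 = 55
 * (one slot stays unused, see dma32_txinit above).
 */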
/*
 * Reclaim next completed txd (txds if using chained buffers) in the range
 * specified and return associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transfered by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return associated packet regardless of the value of hardware pointers.
 */
static void *
dma32_getnexttxp(dma_info_t *di, txd_range_t range)
{
    uint16 start, end, i;
    uint16 active_desc;
    void *txp;

    DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
               (range == HNDDMA_RANGE_ALL) ? "all" :
               ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : "transfered")));

    if (di->ntxd == 0)
        return (NULL);

    txp = NULL;

    start = di->txin;
    if (range == HNDDMA_RANGE_ALL)
        end = di->txout;
    else {
        dma32regs_t *dregs = di->d32txregs;

        end = (uint16)B2I(R_REG(di->osh, &dregs->status) & XS_CD_MASK, dma32dd_t);

        if (range == HNDDMA_RANGE_TRANSFERED) {
            active_desc = (uint16)((R_REG(di->osh, &dregs->status) & XS_AD_MASK) >>
                                   XS_AD_SHIFT);
            active_desc = (uint16)B2I(active_desc, dma32dd_t);
            if (end != active_desc)
                end = PREVTXD(active_desc);
        }
    }

    if ((start == 0) && (end > di->txout))
        goto bogus;

    for (i = start; i != end && !txp; i = NEXTTXD(i)) {
        dmaaddr_t pa;
        hnddma_seg_map_t *map = NULL;
        uint size, j, nsegs;

        PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->txd32[i].addr)) - di->dataoffsetlow));
        PHYSADDRHISET(pa, 0);

        if (DMASGLIST_ENAB) {
            map = &di->txp_dmah[i];
            size = map->origsize;
            nsegs = map->nsegs;
        } else {
            size = (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) & CTRL_BC_MASK);
            nsegs = 1;
        }

        for (j = nsegs; j > 0; j--) {
            W_SM(&di->txd32[i].addr, 0xdeadbeef);

            txp = di->txp[i];
            di->txp[i] = NULL;
            if (j > 1)
                i = NEXTTXD(i);
        }

        DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
    }

    di->txin = i;

    /* tx flow control */
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    return (txp);

bogus:
    DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
              start, end, di->txout, forceall));
    return (NULL);
}
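/* B2I()/I2B() above convert between the byte offsets kept in the status/ptr
 * registers and descriptor indices: one dma32dd_t is two 32-bit words (ctrl
 * and addr, 8 bytes), so a CurrDescr byte offset of 0x40 is descriptor 8.
 */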
static void *
dma32_getnextrxp(dma_info_t *di, bool forceall)
{
    uint16 i, curr;
    void *rxp;
    dmaaddr_t pa;

    /* if forcing, dma engine must be disabled */
    ASSERT(!forceall || !dma32_rxenabled(di));

    i = di->rxin;

    /* return if no packets posted */
    if (i == di->rxout)
        return (NULL);

    curr = B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t);

    /* ignore curr if forceall */
    if (!forceall && (i == curr))
        return (NULL);

    /* get the packet pointer that corresponds to the rx descriptor */
    rxp = di->rxp[i];
    ASSERT(rxp);
    di->rxp[i] = NULL;

    PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) - di->dataoffsetlow));
    PHYSADDRHISET(pa, 0);

    /* clear this packet from the descriptor ring */
    DMA_UNMAP(di->osh, pa,
              di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

    W_SM(&di->rxd32[i].addr, 0xdeadbeef);

    di->rxin = NEXTRXD(i);

    return (rxp);
}
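/* The 0xdeadbeef stores above (and in the tx reclaim path) poison the address
 * field of retired descriptors, so a stale descriptor that gets re-read by the
 * engine or the driver stands out immediately in a dump or bus trace.
 */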
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void
dma32_txrotate(dma_info_t *di)
{
    uint16 ad;
    uint nactive;
    uint rot;
    uint16 old, new;
    uint32 w;
    uint16 first, last;

    ASSERT(dma32_txsuspendedidle(di));

    nactive = _dma_txactive(di);
    ad = (uint16) (B2I(((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK) >> XS_AD_SHIFT),
                       dma32dd_t));
    rot = TXD(ad - di->txin);

    ASSERT(rot < di->ntxd);

    /* full-ring case is a lot harder - don't worry about this */
    if (rot >= (di->ntxd - nactive)) {
        DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
        return;
    }

    first = di->txin;
    last = PREVTXD(di->txout);

    /* move entries starting at last and moving backwards to first */
    for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
        new = TXD(old + rot);

        /*
         * Move the tx dma descriptor.
         * EOT is set only in the last entry in the ring.
         */
        w = BUS_SWAP32(R_SM(&di->txd32[old].ctrl)) & ~CTRL_EOT;
        if (new == (di->ntxd - 1))
            w |= CTRL_EOT;
        W_SM(&di->txd32[new].ctrl, BUS_SWAP32(w));
        W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));

        /* zap the old tx dma descriptor address field */
        W_SM(&di->txd32[old].addr, BUS_SWAP32(0xdeadbeef));

        /* move the corresponding txp[] entry */
        ASSERT(di->txp[new] == NULL);
        di->txp[new] = di->txp[old];

        /* Move the segment map as well */
        if (DMASGLIST_ENAB) {
            bcopy(&di->txp_dmah[old], &di->txp_dmah[new], sizeof(hnddma_seg_map_t));
            bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
        }

        di->txp[old] = NULL;
    }

    /* update txin and txout */
    di->txin = ad;
    di->txout = TXD(di->txout + rot);
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    /* kick the chip */
    W_REG(di->osh, &di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
}
/* 64-bit DMA functions */

static void
dma64_txinit(dma_info_t *di)
{
    uint32 control;

    DMA_TRACE(("%s: dma_txinit\n", di->name));

    if (di->ntxd == 0)
        return;

    di->txin = di->txout = 0;
    di->hnddma.txavail = di->ntxd - 1;

    /* clear tx descriptor ring */
    BZERO_SM((void *)(uintptr)di->txd64, (di->ntxd * sizeof(dma64dd_t)));

    /* These bits 20:18 (burstLen) of control register can be written but will take
     * effect only if these bits are valid. So this will not affect previous versions
     * of the DMA. They will continue to have those bits set to 0.
     */
    control = R_REG(di->osh, &di->d64txregs->control);
    control = (control & ~D64_XC_BL_MASK) | (di->txburstlen << D64_XC_BL_SHIFT);
    control = (control & ~D64_XC_MR_MASK) | (di->txmultioutstdrd << D64_XC_MR_SHIFT);
    control = (control & ~D64_XC_PC_MASK) | (di->txprefetchctl << D64_XC_PC_SHIFT);
    control = (control & ~D64_XC_PT_MASK) | (di->txprefetchthresh << D64_XC_PT_SHIFT);
    W_REG(di->osh, &di->d64txregs->control, control);

    control = D64_XC_XE;
    /* DMA engine without alignment requirement requires table to be inited
     * before enabling the engine
     */
    if (!di->aligndesc_4k)
        _dma_ddtable_init(di, DMA_TX, di->txdpa);

    if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
        control |= D64_XC_PD;
    OR_REG(di->osh, &di->d64txregs->control, control);

    /* DMA engine with alignment requirement requires table to be inited
     * before enabling the engine
     */
    if (di->aligndesc_4k)
        _dma_ddtable_init(di, DMA_TX, di->txdpa);
}
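/* Unlike dma32_txinit(), which builds its control word from scratch, the
 * 64-bit init above read-modify-writes the burstlen/prefetch fields so other
 * bits the core powers up with are preserved, and it calls _dma_ddtable_init()
 * either before or after enabling the engine depending on aligndesc_4k.
 */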
static bool
dma64_txenabled(dma_info_t *di)
{
    uint32 xc;

    /* If the chip is dead, it is not enabled :-) */
    xc = R_REG(di->osh, &di->d64txregs->control);
    return ((xc != 0xffffffff) && (xc & D64_XC_XE));
}
static void
dma64_txsuspend(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_txsuspend\n", di->name));

    if (di->ntxd == 0)
        return;

    OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
}

static void
dma64_txresume(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_txresume\n", di->name));

    if (di->ntxd == 0)
        return;

    AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_SE);
}

static bool
dma64_txsuspended(dma_info_t *di)
{
    return (di->ntxd == 0) ||
           ((R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE) == D64_XC_SE);
}
#ifdef WL_MULTIQUEUE
static void
dma64_txflush(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_txflush\n", di->name));

    if (di->ntxd == 0)
        return;

    OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE | D64_XC_FL);
}

static void
dma64_txflush_clear(dma_info_t *di)
{
    uint32 status;

    DMA_TRACE(("%s: dma_txflush_clear\n", di->name));

    if (di->ntxd == 0)
        return;

    SPINWAIT(((status = (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
              D64_XS0_XS_DISABLED) &&
             (status != D64_XS0_XS_IDLE) &&
             (status != D64_XS0_XS_STOPPED),
             10000);
    AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_FL);
}
#endif /* WL_MULTIQUEUE */
static void BCMFASTPATH
dma64_txreclaim(dma_info_t *di, txd_range_t range)
{
    void *p;

    DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
               (range == HNDDMA_RANGE_ALL) ? "all" :
               ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : "transfered")));

    if (di->txin == di->txout)
        return;

    while ((p = dma64_getnexttxp(di, range))) {
        /* For unframed data, we don't have any packets to free */
        if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
            PKTFREE(di->osh, p, TRUE);
    }
}
static bool
dma64_txstopped(dma_info_t *di)
{
    return ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_STOPPED);
}

static bool
dma64_rxstopped(dma_info_t *di)
{
    return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) == D64_RS0_RS_STOPPED);
}
static bool
dma64_alloc(dma_info_t *di, uint direction)
{
    uint size;
    uint ddlen;
    void *va;
    uint alloced = 0;
    uint16 align;
    uint16 align_bits;

    ddlen = sizeof(dma64dd_t);

    size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
    align_bits = di->dmadesc_align;
    align = (1 << align_bits);

    if (direction == DMA_TX) {
        if ((va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits, &alloced,
                                &di->txdpaorig, &di->tx_dmah)) == NULL) {
            DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
                       di->name));
            return FALSE;
        }
        align = (1 << align_bits);

        /* adjust the pa by rounding up to the alignment */
        PHYSADDRLOSET(di->txdpa, ROUNDUP(PHYSADDRLO(di->txdpaorig), align));
        PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));

        /* Make sure that alignment didn't overflow */
        ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));

        /* find the alignment offset that was used */
        di->txdalign = (uint)(PHYSADDRLO(di->txdpa) - PHYSADDRLO(di->txdpaorig));

        /* adjust the va by the same offset */
        di->txd64 = (dma64dd_t *)((uintptr)va + di->txdalign);

        di->txdalloc = alloced;
        ASSERT(ISALIGNED(PHYSADDRLO(di->txdpa), align));
    } else {
        if ((va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits, &alloced,
                                &di->rxdpaorig, &di->rx_dmah)) == NULL) {
            DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
                       di->name));
            return FALSE;
        }
        align = (1 << align_bits);

        /* adjust the pa by rounding up to the alignment */
        PHYSADDRLOSET(di->rxdpa, ROUNDUP(PHYSADDRLO(di->rxdpaorig), align));
        PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));

        /* Make sure that alignment didn't overflow */
        ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));

        /* find the alignment offset that was used */
        di->rxdalign = (uint)(PHYSADDRLO(di->rxdpa) - PHYSADDRLO(di->rxdpaorig));

        /* adjust the va by the same offset */
        di->rxd64 = (dma64dd_t *)((uintptr)va + di->rxdalign);

        di->rxdalloc = alloced;
        ASSERT(ISALIGNED(PHYSADDRLO(di->rxdpa), align));
    }

    return TRUE;
}
static bool
dma64_txreset(dma_info_t *di)
{
    uint32 status;

    if (di->ntxd == 0)
        return TRUE;

    /* suspend tx DMA first */
    W_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
    SPINWAIT(((status = (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
              D64_XS0_XS_DISABLED) &&
             (status != D64_XS0_XS_IDLE) &&
             (status != D64_XS0_XS_STOPPED),
             10000);

    W_REG(di->osh, &di->d64txregs->control, 0);
    SPINWAIT(((status = (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
              D64_XS0_XS_DISABLED),
             10000);

    /* We should be disabled at this point */
    if (status != D64_XS0_XS_DISABLED) {
        DMA_ERROR(("%s: status != D64_XS0_XS_DISABLED 0x%x\n", __FUNCTION__, status));
        ASSERT(status == D64_XS0_XS_DISABLED);
        OSL_DELAY(300);
    }

    return (status == D64_XS0_XS_DISABLED);
}
static bool
dma64_rxidle(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_rxidle\n", di->name));

    if (di->nrxd == 0)
        return TRUE;

    return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
            (R_REG(di->osh, &di->d64rxregs->ptr) & D64_RS0_CD_MASK));
}
static bool
dma64_rxreset(dma_info_t *di)
{
    uint32 status;

    if (di->nrxd == 0)
        return TRUE;

    W_REG(di->osh, &di->d64rxregs->control, 0);
    SPINWAIT(((status = (R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK)) !=
              D64_RS0_RS_DISABLED), 10000);

    return (status == D64_RS0_RS_DISABLED);
}
static bool
dma64_rxenabled(dma_info_t *di)
{
    uint32 rc;

    rc = R_REG(di->osh, &di->d64rxregs->control);
    return ((rc != 0xffffffff) && (rc & D64_RC_RE));
}
static bool
dma64_txsuspendedidle(dma_info_t *di)
{
    if (di->ntxd == 0)
        return TRUE;

    if (!(R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE))
        return 0;

    if ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_IDLE)
        return 1;

    return 0;
}
/* Useful when sending unframed data. This allows us to get a progress report from the DMA.
 * We return a pointer to the beginning of the data buffer of the current descriptor.
 * If DMA is idle, we return NULL.
 */
static void *
dma64_getpos(dma_info_t *di, bool direction)
{
    void *va;
    bool idle;
    uint16 cur_idx;

    if (direction == DMA_TX) {
        cur_idx = B2I(((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK) -
                       di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t);
        idle = !NTXDACTIVE(di->txin, di->txout);
        va = di->txp[cur_idx];
    } else {
        cur_idx = B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
                       di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);
        idle = !NRXDACTIVE(di->rxin, di->rxout);
        va = di->rxp[cur_idx];
    }

    /* If DMA is IDLE, return NULL */
    if (idle) {
        DMA_TRACE(("%s: DMA idle, return NULL\n", __FUNCTION__));
        va = NULL;
    }

    return va;
}
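/* dma64_getpos() pairs with dma64_txunframed() below: unframed transfers have
 * no packet chain to walk, so the caller polls the CurrDescr-derived index to
 * learn how far the engine has progressed through the posted buffers.
 */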
/* TX of unframed data
 *
 * Adds a DMA ring descriptor for the data pointed to by "buf".
 * This is for DMA of a buffer of data and is unlike other hnddma TX functions
 * that take a pointer to a "packet".
 * Each call to this results in a single descriptor being added for "len" bytes of
 * data starting at "buf"; it doesn't handle chained buffers.
 */
static int
dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
{
    uint16 txout;
    uint32 flags = 0;
    dmaaddr_t pa;  /* phys addr */

    txout = di->txout;

    /* return nonzero if out of tx descriptors */
    if (NEXTTXD(txout) == di->txin)
        goto outoftxd;

    if (len == 0)
        return 0;

    pa = DMA_MAP(di->osh, buf, len, DMA_TX, NULL, &di->txp_dmah[txout]);

    flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);

    if (txout == (di->ntxd - 1))
        flags |= D64_CTRL1_EOT;

    dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
    ASSERT(di->txp[txout] == NULL);

    /* save the buffer pointer - used by dma_getpos */
    di->txp[txout] = buf;

    txout = NEXTTXD(txout);
    /* bump the tx descriptor index */
    di->txout = txout;

    /* kick the chip */
    if (commit) {
        W_REG(di->osh, &di->d64txregs->ptr, di->xmtptrbase + I2B(txout, dma64dd_t));
    }

    /* tx flow control */
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    return (0);

outoftxd:
    DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __FUNCTION__));
    di->hnddma.txavail = 0;
    di->hnddma.txnobuf++;
    return (-1);
}
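/* Note that the unframed path never PKTFREEs: dma64_txreclaim() above skips
 * PKTFREE when DMA_CTRL_UNFRAMED is set, so the caller keeps ownership of
 * "buf" and tracks completion through dma64_getpos() instead.
 */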
/* !! tx entry routine
 * WARNING: the caller must check the return value for error.
 *   The error (tossed frames) could be fatal and cause many subsequent hard-to-debug problems.
 */
static int BCMFASTPATH
dma64_txfast(dma_info_t *di, void *p0, bool commit)
{
    void *p, *next;
    uchar *data;
    uint len;
    uint16 txout;
    uint32 flags = 0;
    dmaaddr_t pa;
    bool war;

    DMA_TRACE(("%s: dma_txfast\n", di->name));

    txout = di->txout;
    war = (di->hnddma.dmactrlflags & DMA_CTRL_DMA_AVOIDANCE_WAR) ? TRUE : FALSE;

    /*
     * Walk the chain of packet buffers
     * allocating and initializing transmit descriptor entries.
     */
    for (p = p0; p; p = next) {
        uint nsegs, j, segsadd;
        hnddma_seg_map_t *map = NULL;

        data = PKTDATA(di->osh, p);
        len = PKTLEN(di->osh, p);
#ifdef BCM_DMAPAD
        len += PKTDMAPAD(di->osh, p);
#endif /* BCM_DMAPAD */
        next = PKTNEXT(di->osh, p);

        /* return nonzero if out of tx descriptors */
        if (NEXTTXD(txout) == di->txin)
            goto outoftxd;

        if (len == 0)
            continue;

        /* get physical address of buffer start */
        if (DMASGLIST_ENAB)
            bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));

        pa = DMA_MAP(di->osh, data, len, DMA_TX, p, &di->txp_dmah[txout]);

        if (DMASGLIST_ENAB) {
            map = &di->txp_dmah[txout];

            /* See if all the segments can be accounted for */
            if (map->nsegs > (uint)(di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1))
                goto outoftxd;

            nsegs = map->nsegs;
        } else
            nsegs = 1;

        segsadd = 0;
        for (j = 1; j <= nsegs; j++) {
            flags = 0;
            if (p == p0 && j == 1)
                flags |= D64_CTRL1_SOF;

            /* With a DMA segment list, Descriptor table is filled
             * using the segment list instead of looping over
             * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
             * end of segment list is reached.
             */
            if ((!DMASGLIST_ENAB && next == NULL) ||
                (DMASGLIST_ENAB && j == nsegs))
                flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
            if (txout == (di->ntxd - 1))
                flags |= D64_CTRL1_EOT;

            if (DMASGLIST_ENAB) {
                len = map->segs[j - 1].length;
                pa = map->segs[j - 1].addr;
                if (len > 128 && war) {
                    uint remain, new_len, align64;
                    /* check for 64B aligned of pa */
                    align64 = (uint)(PHYSADDRLO(pa) & 0x3f);
                    align64 = (64 - align64) & 0x3f;
                    new_len = len - align64;
                    remain = new_len % 128;
                    if (remain > 0 && remain <= 4) {
                        uint32 buf_addr_lo;
                        uint32 tmp_flags =
                            flags & (~(D64_CTRL1_EOF | D64_CTRL1_IOC));
                        flags &= ~(D64_CTRL1_SOF | D64_CTRL1_EOT);
                        dma64_dd_upd(di, di->txd64, pa, txout,
                                     &tmp_flags, len-remain);
                        ASSERT(di->txp[txout] == NULL);
                        txout = NEXTTXD(txout);
                        /* return nonzero if out of tx descriptors */
                        if (txout == di->txin) {
                            DMA_ERROR(("%s: dma_txfast: Out-of-DMA"
                                       " descriptors (txin %d txout %d"
                                       " nsegs %d)\n", __FUNCTION__,
                                       di->txin, di->txout, nsegs));
                            goto outoftxd;
                        }
                        if (txout == (di->ntxd - 1))
                            flags |= D64_CTRL1_EOT;
                        buf_addr_lo = PHYSADDRLO(pa);
                        PHYSADDRLOSET(pa, (PHYSADDRLO(pa) + (len-remain)));
                        if (PHYSADDRLO(pa) < buf_addr_lo) {
                            PHYSADDRHISET(pa, (PHYSADDRHI(pa) + 1));
                        }
                        len = remain;
                        segsadd++;
                        di->dma_avoidance_cnt++;
                    }
                }
            }

            dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
            ASSERT(di->txp[txout] == NULL);

            txout = NEXTTXD(txout);
            /* return nonzero if out of tx descriptors */
            if (txout == di->txin) {
                DMA_ERROR(("%s: dma_txfast: Out-of-DMA descriptors"
                           " (txin %d txout %d nsegs %d)\n", __FUNCTION__,
                           di->txin, di->txout, nsegs));
                goto outoftxd;
            }
        }
        if (segsadd && DMASGLIST_ENAB)
            map->nsegs += segsadd;

        /* See above. No need to loop over individual buffers */
        if (DMASGLIST_ENAB)
            break;
    }

    /* if last txd eof not set, fix it */
    if (!(flags & D64_CTRL1_EOF))
        W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
             BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));

    /* save the packet */
    di->txp[PREVTXD(txout)] = p0;

    /* bump the tx descriptor index */
    di->txout = txout;

    /* kick the chip */
    if (commit)
        W_REG(di->osh, &di->d64txregs->ptr, di->xmtptrbase + I2B(txout, dma64dd_t));

    /* tx flow control */
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    return (0);

outoftxd:
    DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
    PKTFREE(di->osh, p0, TRUE);
    di->hnddma.txavail = 0;
    di->hnddma.txnobuf++;
    return (-1);
}
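/* Sketch of the DMA_CTRL_DMA_AVOIDANCE_WAR arithmetic above, with illustrative
 * numbers: for a segment with PHYSADDRLO(pa) = 0x804c, align64 = (64 - 0xc) &
 * 0x3f = 52. With len = 400, new_len = 348 and remain = 348 % 128 = 92, so no
 * split. With len = 312, new_len = 260 and remain = 4, so the segment is
 * posted as one 308-byte descriptor plus a 4-byte tail and dma_avoidance_cnt
 * is bumped; the split apparently keeps the engine from ending a transfer
 * with a 1-4 byte burst.
 */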
/*
 * Reclaim next completed txd (txds if using chained buffers) in the range
 * specified and return associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transfered by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return associated packet regardless of the value of hardware pointers.
 */
static void * BCMFASTPATH
dma64_getnexttxp(dma_info_t *di, txd_range_t range)
{
    uint16 start, end, i;
    uint16 active_desc;
    void *txp;

    DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
               (range == HNDDMA_RANGE_ALL) ? "all" :
               ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : "transfered")));

    if (di->ntxd == 0)
        return (NULL);

    txp = NULL;

    start = di->txin;
    if (range == HNDDMA_RANGE_ALL)
        end = di->txout;
    else {
        dma64regs_t *dregs = di->d64txregs;

        end = (uint16)(B2I(((R_REG(di->osh, &dregs->status0) & D64_XS0_CD_MASK) -
                            di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));

        if (range == HNDDMA_RANGE_TRANSFERED) {
            active_desc = (uint16)(R_REG(di->osh, &dregs->status1) & D64_XS1_AD_MASK);
            active_desc = (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
            active_desc = B2I(active_desc, dma64dd_t);
            if (end != active_desc)
                end = PREVTXD(active_desc);
        }
    }

    if ((start == 0) && (end > di->txout))
        goto bogus;

    for (i = start; i != end && !txp; i = NEXTTXD(i)) {
        dmaaddr_t pa;
        hnddma_seg_map_t *map = NULL;
        uint size, j, nsegs;

        PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) - di->dataoffsetlow));
        PHYSADDRHISET(pa, (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) - di->dataoffsethigh));

        if (DMASGLIST_ENAB) {
            map = &di->txp_dmah[i];
            size = map->origsize;
            nsegs = map->nsegs;
        } else {
            size = (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) & D64_CTRL2_BC_MASK);
            nsegs = 1;
        }

        for (j = nsegs; j > 0; j--) {
            W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
            W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);

            txp = di->txp[i];
            di->txp[i] = NULL;
            if (j > 1)
                i = NEXTTXD(i);
        }

        DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
    }

    di->txin = i;

    /* tx flow control */
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    return (txp);

bogus:
    DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
              start, end, di->txout, forceall));
    return (NULL);
}
static void * BCMFASTPATH
dma64_getnextrxp(dma_info_t *di, bool forceall)
{
    uint16 i, curr;
    void *rxp;
    dmaaddr_t pa;

    /* if forcing, dma engine must be disabled */
    ASSERT(!forceall || !dma64_rxenabled(di));

    i = di->rxin;

    /* return if no packets posted */
    if (i == di->rxout)
        return (NULL);

    curr = B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
                di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);

    /* ignore curr if forceall */
    if (!forceall && (i == curr))
        return (NULL);

    /* get the packet pointer that corresponds to the rx descriptor */
    rxp = di->rxp[i];
    ASSERT(rxp);
    di->rxp[i] = NULL;

    PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) - di->dataoffsetlow));
    PHYSADDRHISET(pa, (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) - di->dataoffsethigh));

    /* clear this packet from the descriptor ring */
    DMA_UNMAP(di->osh, pa,
              di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

    W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
    W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);

    di->rxin = NEXTRXD(i);

    return (rxp);
}
static bool
_dma64_addrext(osl_t *osh, dma64regs_t *dma64regs)
{
    uint32 w;

    OR_REG(osh, &dma64regs->control, D64_XC_AE);
    w = R_REG(osh, &dma64regs->control);
    AND_REG(osh, &dma64regs->control, ~D64_XC_AE);
    return ((w & D64_XC_AE) == D64_XC_AE);
}
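/* _dma64_addrext() probes for the extended-address feature by setting
 * D64_XC_AE and reading it back: if the bit sticks, the engine supports
 * address extension. The bit is cleared again before returning.
 */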
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void
dma64_txrotate(dma_info_t *di)
{
    uint16 ad;
    uint nactive;
    uint rot;
    uint16 old, new;
    uint32 w;
    uint16 first, last;

    ASSERT(dma64_txsuspendedidle(di));

    nactive = _dma_txactive(di);
    ad = (uint16)(B2I((((R_REG(di->osh, &di->d64txregs->status1) & D64_XS1_AD_MASK)
                        - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t));
    rot = TXD(ad - di->txin);

    ASSERT(rot < di->ntxd);

    /* full-ring case is a lot harder - don't worry about this */
    if (rot >= (di->ntxd - nactive)) {
        DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
        return;
    }

    first = di->txin;
    last = PREVTXD(di->txout);

    /* move entries starting at last and moving backwards to first */
    for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
        new = TXD(old + rot);

        /*
         * Move the tx dma descriptor.
         * EOT is set only in the last entry in the ring.
         */
        w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
        if (new == (di->ntxd - 1))
            w |= D64_CTRL1_EOT;
        W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));

        w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
        W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));

        W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
        W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));

        /* zap the old tx dma descriptor address field */
        W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
        W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));

        /* move the corresponding txp[] entry */
        ASSERT(di->txp[new] == NULL);
        di->txp[new] = di->txp[old];

        /* Move the segment map as well */
        if (DMASGLIST_ENAB) {
            bcopy(&di->txp_dmah[old], &di->txp_dmah[new], sizeof(hnddma_seg_map_t));
            bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
        }

        di->txp[old] = NULL;
    }

    /* update txin and txout */
    di->txin = ad;
    di->txout = TXD(di->txout + rot);
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    /* kick the chip */
    W_REG(di->osh, &di->d64txregs->ptr, di->xmtptrbase + I2B(di->txout, dma64dd_t));
}
uint
dma_addrwidth(si_t *sih, void *dmaregs)
{
    dma32regs_t *dma32regs;
    osl_t *osh;

    osh = si_osh(sih);

    /* Perform 64-bit checks only if we want to advertise 64-bit (> 32-bit) capability */
    /* DMA engine is 64-bit capable */
    if ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
        /* backplane is 64-bit capable */
        if (si_backplane64(sih))
            /* If bus is System Backplane or PCIE then we can access 64-bits */
            if ((BUSTYPE(sih->bustype) == SI_BUS) ||
                ((BUSTYPE(sih->bustype) == PCI_BUS) &&
                 (sih->buscoretype == PCIE_CORE_ID)))
                return (DMADDRWIDTH_64);

        /* DMA64 is always 32-bit capable, AE is always TRUE */
        ASSERT(_dma64_addrext(osh, (dma64regs_t *)dmaregs));

        return (DMADDRWIDTH_32);
    }

    /* Start checking for 32-bit / 30-bit addressing */
    dma32regs = (dma32regs_t *)dmaregs;

    /* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
    if ((BUSTYPE(sih->bustype) == SI_BUS) ||
        ((BUSTYPE(sih->bustype) == PCI_BUS) && sih->buscoretype == PCIE_CORE_ID) ||
        (_dma32_addrext(osh, dma32regs)))
        return (DMADDRWIDTH_32);

    /* Fallback to 30-bit */
    return (DMADDRWIDTH_30);
}
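/* Decision summary for dma_addrwidth() above: a core advertising SISF_DMA64 on
 * a 64-bit backplane reached via SI_BUS or a PCIE core gets DMADDRWIDTH_64;
 * any other DMA64 core still runs 32-bit. Plain 32-bit engines get
 * DMADDRWIDTH_32 when the bus allows it or addrext is present, and otherwise
 * fall back to the legacy 30-bit width.
 */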
static void
_dma_pktpool_set(dma_info_t *di, pktpool_t *pool)
{
    ASSERT(di->pktpool == NULL);
    di->pktpool = pool;
}
static bool
_dma_rxtx_error(dma_info_t *di, bool istx)
{
    uint32 status1 = 0;

    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        if (istx) {
            status1 = R_REG(di->osh, &di->d64txregs->status1);
            if ((status1 & D64_XS1_XE_MASK) != D64_XS1_XE_NOERR)
                return TRUE;
        } else {
            status1 = R_REG(di->osh, &di->d64rxregs->status1);
            if ((status1 & D64_RS1_RE_MASK) != D64_RS1_RE_NOERR)
                return TRUE;
        }
    } else if (DMA32_ENAB(di)) {
        return FALSE;
    }

    return FALSE;
}
static void
_dma_burstlen_set(dma_info_t *di, uint8 rxburstlen, uint8 txburstlen)
{
    di->rxburstlen = rxburstlen;
    di->txburstlen = txburstlen;
}
static void
_dma_param_set(dma_info_t *di, uint16 paramid, uint16 paramval)
{
    switch (paramid) {
    case HNDDMA_PID_TX_MULTI_OUTSTD_RD:
        di->txmultioutstdrd = (uint8)paramval;
        break;

    case HNDDMA_PID_TX_PREFETCH_CTL:
        di->txprefetchctl = (uint8)paramval;
        break;

    case HNDDMA_PID_TX_PREFETCH_THRESH:
        di->txprefetchthresh = (uint8)paramval;
        break;

    case HNDDMA_PID_TX_BURSTLEN:
        di->txburstlen = (uint8)paramval;
        break;

    case HNDDMA_PID_RX_PREFETCH_CTL:
        di->rxprefetchctl = (uint8)paramval;
        break;

    case HNDDMA_PID_RX_PREFETCH_THRESH:
        di->rxprefetchthresh = (uint8)paramval;
        break;

    case HNDDMA_PID_RX_BURSTLEN:
        di->rxburstlen = (uint8)paramval;
        break;

    default:
        break;
    }
}
static uint
_dma_glom_enable(dma_info_t *di, uint32 val)
{
    dma64regs_t *dregs = di->d64rxregs;
    uint ret = BCME_OK;

    if (val) {
        OR_REG(di->osh, &dregs->control, D64_RC_GE);
        if (!(R_REG(di->osh, &dregs->control) & D64_RC_GE))
            ret = BCME_UNSUPPORTED;
    } else {
        AND_REG(di->osh, &dregs->control, ~D64_RC_GE);
    }

    return ret;
}