/*
 * Generic Broadcom Home Networking Division (HND) DMA module.
 * This supports the following chips: BCM42xx, 44xx, 47xx.
 *
 * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $Id: hnddma.c 411691 2013-07-10 04:19:32Z $
 */
#include <bcmendian.h>
#ifdef BCMDBG
#define DMA_ERROR(args) if (!(*di->msg_level & 1)); else printf args
#define DMA_TRACE(args) if (!(*di->msg_level & 2)); else printf args
#elif defined(BCMDBG_ERR)
#define DMA_ERROR(args) if (!(*di->msg_level & 1)); else printf args
#define DMA_TRACE(args)
#else
#define DMA_ERROR(args)
#define DMA_TRACE(args)
#endif /* BCMDBG */

#define DMA_NONE(args)
#define d32txregs	dregs.d32_u.txregs_32
#define d32rxregs	dregs.d32_u.rxregs_32
#define txd32		dregs.d32_u.txd_32
#define rxd32		dregs.d32_u.rxd_32

#define d64txregs	dregs.d64_u.txregs_64
#define d64rxregs	dregs.d64_u.rxregs_64
#define txd64		dregs.d64_u.txd_64
#define rxd64		dregs.d64_u.rxd_64
/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level =
#ifdef BCMDBG_ERR
    1;
#else
    0;
#endif /* BCMDBG_ERR */
#define MAXNAMEL	8	/* 8 char names */

#define DI_INFO(dmah)	((dma_info_t *)dmah)
/** dma engine software state */
typedef struct dma_info {
    struct hnddma_pub hnddma;   /* exported structure, don't use hnddma_t,
                                 * which could be const
                                 */
    uint *msg_level;            /* message level pointer */
    char name[MAXNAMEL];        /* caller's name for diag msgs */

    void *osh;                  /* os handle */
    si_t *sih;                  /* sb handle */

    bool dma64;                 /* this dma engine is operating in 64-bit mode */
    bool addrext;               /* this dma engine supports DmaExtendedAddrChanges */

    dma32regs_t *txregs_32;     /* 32-bit dma tx engine registers */
    dma32regs_t *rxregs_32;     /* 32-bit dma rx engine registers */
    dma32dd_t *txd_32;          /* pointer to dma32 tx descriptor ring */
    dma32dd_t *rxd_32;          /* pointer to dma32 rx descriptor ring */

    dma64regs_t *txregs_64;     /* 64-bit dma tx engine registers */
    dma64regs_t *rxregs_64;     /* 64-bit dma rx engine registers */
    dma64dd_t *txd_64;          /* pointer to dma64 tx descriptor ring */
    dma64dd_t *rxd_64;          /* pointer to dma64 rx descriptor ring */

    uint16 dmadesc_align;       /* alignment requirement for dma descriptors */

    uint16 ntxd;                /* # tx descriptors tunable */
    uint16 txin;                /* index of next descriptor to reclaim */
    uint16 txout;               /* index of next descriptor to post */
    void **txp;                 /* pointer to parallel array of pointers to packets */
    osldma_t *tx_dmah;          /* DMA TX descriptor ring handle */
    hnddma_seg_map_t *txp_dmah; /* DMA MAP meta-data handle */
    dmaaddr_t txdpa;            /* aligned physical address of descriptor ring */
    dmaaddr_t txdpaorig;        /* original physical address of descriptor ring */
    uint16 txdalign;            /* #bytes added to alloc'd mem to align txd */
    uint32 txdalloc;            /* #bytes allocated for the ring */
    uint32 xmtptrbase;          /* When using unaligned descriptors, the ptr register
                                 * is not just an index, it needs all 13 bits to be
                                 * an offset from the addr register.
                                 */

    uint16 nrxd;                /* # rx descriptors tunable */
    uint16 rxin;                /* index of next descriptor to reclaim */
    uint16 rxout;               /* index of next descriptor to post */
    void **rxp;                 /* pointer to parallel array of pointers to packets */
    osldma_t *rx_dmah;          /* DMA RX descriptor ring handle */
    hnddma_seg_map_t *rxp_dmah; /* DMA MAP meta-data handle */
    dmaaddr_t rxdpa;            /* aligned physical address of descriptor ring */
    dmaaddr_t rxdpaorig;        /* original physical address of descriptor ring */
    uint16 rxdalign;            /* #bytes added to alloc'd mem to align rxd */
    uint32 rxdalloc;            /* #bytes allocated for the ring */
    uint32 rcvptrbase;          /* base for ptr reg when using unaligned descriptors */

    uint16 rxbufsize;           /* rx buffer size in bytes,
                                 * not including the extra headroom
                                 */
    uint rxextrahdrroom;        /* extra rx headroom, reserved to assist the upper stack,
                                 * e.g. some rx pkt buffers will be bridged to the tx side
                                 * without byte copying. The extra headroom needs to be
                                 * large enough to fit txheader needs.
                                 * Some dongle drivers may not need it.
                                 */
    uint nrxpost;               /* # rx buffers to keep posted */
    uint rxoffset;              /* rxcontrol offset */
    uint ddoffsetlow;           /* add to get dma address of descriptor ring, low 32 bits */
    uint ddoffsethigh;          /*   high 32 bits */
    uint dataoffsetlow;         /* add to get dma address of data buffer, low 32 bits */
    uint dataoffsethigh;        /*   high 32 bits */
    bool aligndesc_4k;          /* whether the descriptor base needs to be aligned */
    uint8 rxburstlen;           /* burstlen field for rx (for cores supporting burstlen) */
    uint8 txburstlen;           /* burstlen field for tx (for cores supporting burstlen) */
    uint8 txmultioutstdrd;      /* tx multiple outstanding reads */
    uint8 txprefetchctl;        /* prefetch control for tx */
    uint8 txprefetchthresh;     /* prefetch threshold for tx */
    uint8 rxprefetchctl;        /* prefetch control for rx */
    uint8 rxprefetchthresh;     /* prefetch threshold for rx */
    pktpool_t *pktpool;         /* pktpool */
    uint dma_avoidance_cnt;

    uint32 d64_xs0_cd_mask;     /* tx current descriptor pointer mask */
    uint32 d64_xs1_ad_mask;     /* tx active descriptor mask */
    uint32 d64_rs0_cd_mask;     /* rx current descriptor pointer mask */
    uint16 rs0cd;               /* cached value of rcvstatus0 currdescr */
    uint16 xs0cd;               /* cached value of xmtstatus0 currdescr */
    uint16 xs0cd_snapshot;      /* snapshot of xmtstatus0 currdescr */
} dma_info_t;
/*
 * If BCMDMA32 is defined, hnddma will support both 32-bit and 64-bit DMA engines.
 * Otherwise it will support only 64-bit.
 *
 * DMA32_ENAB indicates whether hnddma is compiled with support for 32-bit DMA engines.
 * DMA64_ENAB indicates whether hnddma is compiled with support for 64-bit DMA engines.
 *
 * DMA64_MODE indicates whether the current DMA engine is running as 64-bit.
 */
#ifdef BCMDMA32
#define DMA32_ENAB(di)	1
#define DMA64_ENAB(di)	1
#define DMA64_MODE(di)	((di)->dma64)
#else /* !BCMDMA32 */
#define DMA32_ENAB(di)	0
#define DMA64_ENAB(di)	1
#define DMA64_MODE(di)	1
#endif /* !BCMDMA32 */
/* DMA scatter-gather lists are supported. Note this is limited to the TX direction only */
#ifdef BCMDMASGLISTOSL
#define DMASGLIST_ENAB TRUE
#else
#define DMASGLIST_ENAB FALSE
#endif /* BCMDMASGLISTOSL */
/* descriptor bumping macros */
#define XXD(x, n)	((x) & ((n) - 1))	/* faster than %, but n must be power of 2 */
#define TXD(x)		XXD((x), di->ntxd)
#define RXD(x)		XXD((x), di->nrxd)
#define NEXTTXD(i)	TXD((i) + 1)
#define PREVTXD(i)	TXD((i) - 1)
#define NEXTRXD(i)	RXD((i) + 1)
#define PREVRXD(i)	RXD((i) - 1)

#define NTXDACTIVE(h, t)	TXD((t) - (h))
#define NRXDACTIVE(h, t)	RXD((t) - (h))

/* macros to convert between byte offsets and indexes */
#define B2I(bytes, type)	((uint16)((bytes) / sizeof(type)))
#define I2B(index, type)	((index) * sizeof(type))
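
/*
 * Worked example (illustrative only, not driver code), assuming a ring of
 * ntxd = 8 descriptors. XXD() relies on n being a power of 2, so the mask
 * wraps indices exactly like a modulo, and the head/tail subtraction in
 * NTXDACTIVE() stays correct across the wrap because the mask is applied
 * after the subtraction:
 *
 *	NEXTTXD(7)        -> TXD(8)      ->  8 & 7 -> 0   (wraps to ring start)
 *	PREVTXD(0)        -> TXD(-1)     -> -1 & 7 -> 7   (wraps to ring end)
 *	NTXDACTIVE(6, 2)  -> TXD(2 - 6)  -> -4 & 7 -> 4   (4 descriptors in flight)
 */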
#define PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define PCI32ADDR_HIGH_SHIFT	30		/* address[31:30] */

#define PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define PCI64ADDR_HIGH_SHIFT	31		/* address[63] */
/* Common prototypes */
static bool _dma_isaddrext(dma_info_t *di);
static bool _dma_descriptor_align(dma_info_t *di);
static bool _dma_alloc(dma_info_t *di, uint direction);
static void _dma_detach(dma_info_t *di);
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
static void _dma_rxinit(dma_info_t *di);
static void *_dma_rx(dma_info_t *di);
static bool _dma_rxfill(dma_info_t *di);
static void _dma_rxreclaim(dma_info_t *di);
static void _dma_rxenable(dma_info_t *di);
static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
static void _dma_rx_param_get(dma_info_t *di, uint16 *rxoffset, uint16 *rxbufsize);

static void _dma_txblock(dma_info_t *di);
static void _dma_txunblock(dma_info_t *di);
static uint _dma_txactive(dma_info_t *di);
static uint _dma_rxactive(dma_info_t *di);
static uint _dma_activerxbuf(dma_info_t *di);
static uint _dma_txpending(dma_info_t *di);
static uint _dma_txcommitted(dma_info_t *di);

static void *_dma_peeknexttxp(dma_info_t *di);
static int _dma_peekntxp(dma_info_t *di, int *len, void *txps[], txd_range_t range);
static void *_dma_peeknextrxp(dma_info_t *di);
static uintptr _dma_getvar(dma_info_t *di, const char *name);
static void _dma_counterreset(dma_info_t *di);
static void _dma_fifoloopbackenable(dma_info_t *di);
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
static uint8 dma_align_sizetobits(uint size);
static void *dma_ringalloc(osl_t *osh, uint32 boundary, uint size, uint16 *alignbits,
	uint *alloced, dmaaddr_t *descpa, osldma_t **dmah);
static int _dma_pktpool_set(dma_info_t *di, pktpool_t *pool);
static bool _dma_rxtx_error(dma_info_t *di, bool istx);
static void _dma_burstlen_set(dma_info_t *di, uint8 rxburstlen, uint8 txburstlen);
static uint _dma_avoidancecnt(dma_info_t *di);
static void _dma_param_set(dma_info_t *di, uint16 paramid, uint16 paramval);
static bool _dma_glom_enable(dma_info_t *di, uint32 val);
/* Prototypes for 32-bit routines */
static bool dma32_alloc(dma_info_t *di, uint direction);
static bool dma32_txreset(dma_info_t *di);
static bool dma32_rxreset(dma_info_t *di);
static bool dma32_txsuspendedidle(dma_info_t *di);
static int dma32_txfast(dma_info_t *di, void *p0, bool commit);
static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma32_getnextrxp(dma_info_t *di, bool forceall);
static void dma32_txrotate(dma_info_t *di);
static bool dma32_rxidle(dma_info_t *di);
static void dma32_txinit(dma_info_t *di);
static bool dma32_txenabled(dma_info_t *di);
static void dma32_txsuspend(dma_info_t *di);
static void dma32_txresume(dma_info_t *di);
static bool dma32_txsuspended(dma_info_t *di);
static void dma32_txflush(dma_info_t *di);
static void dma32_txflush_clear(dma_info_t *di);
static void dma32_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma32_txstopped(dma_info_t *di);
static bool dma32_rxstopped(dma_info_t *di);
static bool dma32_rxenabled(dma_info_t *di);

static void dma32_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma32dd_t *ring, uint start,
	uint end, uint max_num);
static void dma32_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma32_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma32_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);

static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs);
/* Prototypes for 64-bit routines */
static bool dma64_alloc(dma_info_t *di, uint direction);
static bool dma64_txreset(dma_info_t *di);
static bool dma64_rxreset(dma_info_t *di);
static bool dma64_txsuspendedidle(dma_info_t *di);
static int dma64_txfast(dma_info_t *di, void *p0, bool commit);
static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
static void *dma64_getpos(dma_info_t *di, bool direction);
static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
static void dma64_txrotate(dma_info_t *di);

static bool dma64_rxidle(dma_info_t *di);
static void dma64_txinit(dma_info_t *di);
static bool dma64_txenabled(dma_info_t *di);
static void dma64_txsuspend(dma_info_t *di);
static void dma64_txresume(dma_info_t *di);
static bool dma64_txsuspended(dma_info_t *di);
static void dma64_txflush(dma_info_t *di);
static void dma64_txflush_clear(dma_info_t *di);
static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma64_txstopped(dma_info_t *di);
static bool dma64_rxstopped(dma_info_t *di);
static bool dma64_rxenabled(dma_info_t *di);
static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs);

STATIC INLINE uint32 parity32(uint32 data);

static void dma64_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma64dd_t *ring, uint start,
	uint end, uint max_num);
static void dma64_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma64_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma64_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
const di_fcn_t dma64proc = {
    (di_detach_t)_dma_detach,
    (di_txinit_t)dma64_txinit,
    (di_txreset_t)dma64_txreset,
    (di_txenabled_t)dma64_txenabled,
    (di_txsuspend_t)dma64_txsuspend,
    (di_txresume_t)dma64_txresume,
    (di_txsuspended_t)dma64_txsuspended,
    (di_txsuspendedidle_t)dma64_txsuspendedidle,
    (di_txflush_t)dma64_txflush,
    (di_txflush_clear_t)dma64_txflush_clear,
    (di_txfast_t)dma64_txfast,
    (di_txunframed_t)dma64_txunframed,
    (di_getpos_t)dma64_getpos,
    (di_txstopped_t)dma64_txstopped,
    (di_txreclaim_t)dma64_txreclaim,
    (di_getnexttxp_t)dma64_getnexttxp,
    (di_peeknexttxp_t)_dma_peeknexttxp,
    (di_peekntxp_t)_dma_peekntxp,
    (di_txblock_t)_dma_txblock,
    (di_txunblock_t)_dma_txunblock,
    (di_txactive_t)_dma_txactive,
    (di_txrotate_t)dma64_txrotate,

    (di_rxinit_t)_dma_rxinit,
    (di_rxreset_t)dma64_rxreset,
    (di_rxidle_t)dma64_rxidle,
    (di_rxstopped_t)dma64_rxstopped,
    (di_rxenable_t)_dma_rxenable,
    (di_rxenabled_t)dma64_rxenabled,
    (di_rxfill_t)_dma_rxfill,
    (di_rxreclaim_t)_dma_rxreclaim,
    (di_getnextrxp_t)_dma_getnextrxp,
    (di_peeknextrxp_t)_dma_peeknextrxp,
    (di_rxparam_get_t)_dma_rx_param_get,

    (di_fifoloopbackenable_t)_dma_fifoloopbackenable,
    (di_getvar_t)_dma_getvar,
    (di_counterreset_t)_dma_counterreset,
    (di_ctrlflags_t)_dma_ctrlflags,
    (di_dump_t)dma64_dump,
    (di_dumptx_t)dma64_dumptx,
    (di_dumprx_t)dma64_dumprx,

    (di_rxactive_t)_dma_rxactive,
    (di_txpending_t)_dma_txpending,
    (di_txcommitted_t)_dma_txcommitted,
    (di_pktpool_set_t)_dma_pktpool_set,
    (di_rxtxerror_t)_dma_rxtx_error,
    (di_burstlen_set_t)_dma_burstlen_set,
    (di_avoidancecnt_t)_dma_avoidancecnt,
    (di_param_set_t)_dma_param_set,
    (dma_glom_enable_t)_dma_glom_enable,
    (dma_active_rxbuf_t)_dma_activerxbuf,
};
static const di_fcn_t dma32proc = {
    (di_detach_t)_dma_detach,
    (di_txinit_t)dma32_txinit,
    (di_txreset_t)dma32_txreset,
    (di_txenabled_t)dma32_txenabled,
    (di_txsuspend_t)dma32_txsuspend,
    (di_txresume_t)dma32_txresume,
    (di_txsuspended_t)dma32_txsuspended,
    (di_txsuspendedidle_t)dma32_txsuspendedidle,
    (di_txflush_t)dma32_txflush,
    (di_txflush_clear_t)dma32_txflush_clear,
    (di_txfast_t)dma32_txfast,
    (di_txunframed_t)NULL,
    (di_getpos_t)NULL,
    (di_txstopped_t)dma32_txstopped,
    (di_txreclaim_t)dma32_txreclaim,
    (di_getnexttxp_t)dma32_getnexttxp,
    (di_peeknexttxp_t)_dma_peeknexttxp,
    (di_peekntxp_t)_dma_peekntxp,
    (di_txblock_t)_dma_txblock,
    (di_txunblock_t)_dma_txunblock,
    (di_txactive_t)_dma_txactive,
    (di_txrotate_t)dma32_txrotate,

    (di_rxinit_t)_dma_rxinit,
    (di_rxreset_t)dma32_rxreset,
    (di_rxidle_t)dma32_rxidle,
    (di_rxstopped_t)dma32_rxstopped,
    (di_rxenable_t)_dma_rxenable,
    (di_rxenabled_t)dma32_rxenabled,
    (di_rxfill_t)_dma_rxfill,
    (di_rxreclaim_t)_dma_rxreclaim,
    (di_getnextrxp_t)_dma_getnextrxp,
    (di_peeknextrxp_t)_dma_peeknextrxp,
    (di_rxparam_get_t)_dma_rx_param_get,

    (di_fifoloopbackenable_t)_dma_fifoloopbackenable,
    (di_getvar_t)_dma_getvar,
    (di_counterreset_t)_dma_counterreset,
    (di_ctrlflags_t)_dma_ctrlflags,
    (di_dump_t)dma32_dump,
    (di_dumptx_t)dma32_dumptx,
    (di_dumprx_t)dma32_dumprx,

    (di_rxactive_t)_dma_rxactive,
    (di_txpending_t)_dma_txpending,
    (di_txcommitted_t)_dma_txcommitted,
    (di_pktpool_set_t)_dma_pktpool_set,
    (di_rxtxerror_t)_dma_rxtx_error,
    (di_burstlen_set_t)_dma_burstlen_set,
    (di_avoidancecnt_t)_dma_avoidancecnt,
    (di_param_set_t)_dma_param_set,
};
hnddma_t *
dma_attach(osl_t *osh, const char *name, si_t *sih,
	volatile void *dmaregstx, volatile void *dmaregsrx,
	uint ntxd, uint nrxd, uint rxbufsize, int rxextheadroom, uint nrxpost, uint rxoffset,
	uint *msg_level)
{
    dma_info_t *di;
    uint size;
    uint32 mask;

    /* allocate private info structure */
    if ((di = MALLOC(osh, sizeof(dma_info_t))) == NULL) {
        DMA_ERROR(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh)));
        return (NULL);
    }

    bzero(di, sizeof(dma_info_t));

    di->msg_level = msg_level ? msg_level : &dma_msg_level;
    /* old chips without sb are no longer supported */

    di->dma64 = ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);

    /* check arguments */
    ASSERT(ISPOWEROF2(ntxd));
    ASSERT(ISPOWEROF2(nrxd));

    if (nrxd == 0)
        ASSERT(dmaregsrx == NULL);
    if (ntxd == 0)
        ASSERT(dmaregstx == NULL);
    /* init dma reg pointer */
    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        di->d64txregs = (dma64regs_t *)dmaregstx;
        di->d64rxregs = (dma64regs_t *)dmaregsrx;
        di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;
    } else if (DMA32_ENAB(di)) {
        ASSERT(ntxd <= D32MAXDD);
        ASSERT(nrxd <= D32MAXDD);
        di->d32txregs = (dma32regs_t *)dmaregstx;
        di->d32rxregs = (dma32regs_t *)dmaregsrx;
        di->hnddma.di_fn = (const di_fcn_t *)&dma32proc;
    } else {
        DMA_ERROR(("%s: driver doesn't support 32-bit DMA\n", __FUNCTION__));
        goto fail;
    }
    /* Default flags (which can be changed by the driver calling dma_ctrlflags
     * before enable): For backwards compatibility both Rx Overflow Continue
     * and Parity are DISABLED.
     */
    di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

    DMA_TRACE(("%s: %s: %s osh %p flags 0x%x ntxd %d nrxd %d rxbufsize %d "
               "rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n",
               name, __FUNCTION__, (DMA64_MODE(di) ? "DMA64" : "DMA32"),
               osh, di->hnddma.dmactrlflags, ntxd, nrxd,
               rxbufsize, rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
    /* make a private copy of our caller's name */
    strncpy(di->name, name, MAXNAMEL);
    di->name[MAXNAMEL-1] = '\0';

    di->ntxd = (uint16)ntxd;
    di->nrxd = (uint16)nrxd;
    /* the actual dma size doesn't include the extra headroom */
    di->rxextrahdrroom = (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
    if (rxbufsize > BCMEXTRAHDROOM)
        di->rxbufsize = (uint16)(rxbufsize - di->rxextrahdrroom);
    else
        di->rxbufsize = (uint16)rxbufsize;

    di->nrxpost = (uint16)nrxpost;
    di->rxoffset = (uint8)rxoffset;
    /* Get the default values (POR) of the burstlen. This can be overridden by the modules
     * if this has to be different. Otherwise this value will be used to program the control
     * register after the reset or during the init.
     */
    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        /* detect the dma descriptor address mask,
         * should be 0x1fff before 4360B0, 0xffff starting from 4360B0
         */
        W_REG(di->osh, &di->d64rxregs->addrlow, 0xffffffff);
        mask = R_REG(di->osh, &di->d64rxregs->addrlow);

        if (mask & 0xfff)
            mask = R_REG(di->osh, &di->d64rxregs->ptr) | 0xf;
        else
            mask = 0x1fff;

        DMA_TRACE(("%s: dma_rx_mask: %08x\n", di->name, mask));
        di->d64_rs0_cd_mask = mask;

        if (mask == 0x1fff)
            ASSERT(nrxd <= D64MAXDD);
        else
            ASSERT(nrxd <= D64MAXDD_LARGE);

        di->rxburstlen = (R_REG(di->osh,
            &di->d64rxregs->control) & D64_RC_BL_MASK) >> D64_RC_BL_SHIFT;
        di->rxprefetchctl = (R_REG(di->osh,
            &di->d64rxregs->control) & D64_RC_PC_MASK) >>
            D64_RC_PC_SHIFT;
        di->rxprefetchthresh = (R_REG(di->osh,
            &di->d64rxregs->control) & D64_RC_PT_MASK) >>
            D64_RC_PT_SHIFT;
    } else if (DMA32_ENAB(di)) {
        di->rxburstlen = (R_REG(di->osh,
            &di->d32rxregs->control) & RC_BL_MASK) >> RC_BL_SHIFT;
        di->rxprefetchctl = (R_REG(di->osh,
            &di->d32rxregs->control) & RC_PC_MASK) >> RC_PC_SHIFT;
        di->rxprefetchthresh = (R_REG(di->osh,
            &di->d32rxregs->control) & RC_PT_MASK) >> RC_PT_SHIFT;
    }
    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        /* detect the dma descriptor address mask,
         * should be 0x1fff before 4360B0, 0xffff starting from 4360B0
         */
        W_REG(di->osh, &di->d64txregs->addrlow, 0xffffffff);
        mask = R_REG(di->osh, &di->d64txregs->addrlow);

        if (mask & 0xfff)
            mask = R_REG(di->osh, &di->d64txregs->ptr) | 0xf;
        else
            mask = 0x1fff;

        DMA_TRACE(("%s: dma_tx_mask: %08x\n", di->name, mask));
        di->d64_xs0_cd_mask = mask;
        di->d64_xs1_ad_mask = mask;

        if (mask == 0x1fff)
            ASSERT(ntxd <= D64MAXDD);
        else
            ASSERT(ntxd <= D64MAXDD_LARGE);

        di->txburstlen = (R_REG(di->osh,
            &di->d64txregs->control) & D64_XC_BL_MASK) >> D64_XC_BL_SHIFT;
        di->txmultioutstdrd = (R_REG(di->osh,
            &di->d64txregs->control) & D64_XC_MR_MASK) >> D64_XC_MR_SHIFT;
        di->txprefetchctl = (R_REG(di->osh,
            &di->d64txregs->control) & D64_XC_PC_MASK) >> D64_XC_PC_SHIFT;
        di->txprefetchthresh = (R_REG(di->osh,
            &di->d64txregs->control) & D64_XC_PT_MASK) >> D64_XC_PT_SHIFT;
    } else if (DMA32_ENAB(di)) {
        di->txburstlen = (R_REG(di->osh,
            &di->d32txregs->control) & XC_BL_MASK) >> XC_BL_SHIFT;
        di->txmultioutstdrd = (R_REG(di->osh,
            &di->d32txregs->control) & XC_MR_MASK) >> XC_MR_SHIFT;
        di->txprefetchctl = (R_REG(di->osh,
            &di->d32txregs->control) & XC_PC_MASK) >> XC_PC_SHIFT;
        di->txprefetchthresh = (R_REG(di->osh,
            &di->d32txregs->control) & XC_PT_MASK) >> XC_PT_SHIFT;
    }
    /*
     * figure out the DMA physical address offset for dd and data
     *     PCI/PCIE: they map silicon backplane address to zero-based memory, need offset
     *     Other bus: use zero
     *     SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
     */
    di->ddoffsetlow = 0;
    di->dataoffsetlow = 0;
    /* for pci bus, add offset */
    if (sih->bustype == PCI_BUS) {
        if ((sih->buscoretype == PCIE_CORE_ID ||
             sih->buscoretype == PCIE2_CORE_ID) &&
            DMA64_MODE(di)) {
            /* pcie with DMA64 */
            di->ddoffsetlow = 0;
            di->ddoffsethigh = SI_PCIE_DMA_H32;
        } else {
            /* pci(DMA32/DMA64) or pcie with DMA32 */
            if ((CHIPID(sih->chip) == BCM4322_CHIP_ID) ||
                (CHIPID(sih->chip) == BCM4342_CHIP_ID) ||
                (CHIPID(sih->chip) == BCM43221_CHIP_ID) ||
                (CHIPID(sih->chip) == BCM43231_CHIP_ID) ||
                (CHIPID(sih->chip) == BCM43111_CHIP_ID) ||
                (CHIPID(sih->chip) == BCM43112_CHIP_ID) ||
                (CHIPID(sih->chip) == BCM43222_CHIP_ID))
                di->ddoffsetlow = SI_PCI_DMA2;
            else
                di->ddoffsetlow = SI_PCI_DMA;

            di->ddoffsethigh = 0;
        }
        di->dataoffsetlow = di->ddoffsetlow;
        di->dataoffsethigh = di->ddoffsethigh;
    }
#if defined(__mips__) && defined(IL_BIGENDIAN)
    di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
#endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
    /* WAR64450: DMACtl.Addr ext fields are not supported in SDIOD core. */
    if ((si_coreid(sih) == SDIOD_CORE_ID) && ((si_corerev(sih) > 0) && (si_corerev(sih) <= 2)))
        di->addrext = 0;
    else if ((si_coreid(sih) == I2S_CORE_ID) &&
             ((si_corerev(sih) == 0) || (si_corerev(sih) == 1)))
        di->addrext = 0;
    else
        di->addrext = _dma_isaddrext(di);
    /* do the descriptors need to be aligned, and if yes, on 4K/8K boundaries or not */
    di->aligndesc_4k = _dma_descriptor_align(di);
    if (di->aligndesc_4k) {
        if (DMA64_MODE(di)) {
            di->dmadesc_align = D64RINGALIGN_BITS;
            if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
                /* for smaller dd table, HW relaxes the alignment requirement */
                di->dmadesc_align = D64RINGALIGN_BITS - 1;
            }
        } else
            di->dmadesc_align = D32RINGALIGN_BITS;
    } else {
        /* The start address of the descriptor table should be aligned to the cache
         * line size, or another structure may share a cache line with it, which can
         * lead to memory overlapping due to cache write-back operations. In the case
         * of MIPS 74k, the cache line size is 32 bytes.
         */
#ifdef __mips__
        di->dmadesc_align = 5;	/* 32 byte alignment */
#else
        di->dmadesc_align = 4;	/* 16 byte alignment */
#endif
    }

    DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
              di->aligndesc_4k, di->dmadesc_align));
    /* allocate tx packet pointer vector */
    if (ntxd) {
        size = ntxd * sizeof(void *);
        if ((di->txp = MALLOC(osh, size)) == NULL) {
            DMA_ERROR(("%s: %s: out of tx memory, malloced %d bytes\n",
                       di->name, __FUNCTION__, MALLOCED(osh)));
            goto fail;
        }
        bzero(di->txp, size);
    }
    /* allocate rx packet pointer vector */
    if (nrxd) {
        size = nrxd * sizeof(void *);
        if ((di->rxp = MALLOC(osh, size)) == NULL) {
            DMA_ERROR(("%s: %s: out of rx memory, malloced %d bytes\n",
                       di->name, __FUNCTION__, MALLOCED(osh)));
            goto fail;
        }
        bzero(di->rxp, size);
    }
    /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
    if (ntxd) {
        if (!_dma_alloc(di, DMA_TX))
            goto fail;
    }

    /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
    if (nrxd) {
        if (!_dma_alloc(di, DMA_RX))
            goto fail;
    }
    if ((di->ddoffsetlow != 0) && !di->addrext) {
        if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
            DMA_ERROR(("%s: %s: txdpa 0x%x: addrext not supported\n",
                       di->name, __FUNCTION__, (uint32)PHYSADDRLO(di->txdpa)));
            goto fail;
        }
        if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
            DMA_ERROR(("%s: %s: rxdpa 0x%x: addrext not supported\n",
                       di->name, __FUNCTION__, (uint32)PHYSADDRLO(di->rxdpa)));
            goto fail;
        }
    }
746 DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh "
747 "0x%x addrext %d\n", di
->ddoffsetlow
, di
->ddoffsethigh
, di
->dataoffsetlow
,
748 di
->dataoffsethigh
, di
->addrext
));
    /* allocate DMA mapping vectors */
    if (DMASGLIST_ENAB) {
        if (ntxd) {
            size = ntxd * sizeof(hnddma_seg_map_t);
            if ((di->txp_dmah = (hnddma_seg_map_t *)MALLOC(osh, size)) == NULL)
                goto fail;
            bzero(di->txp_dmah, size);
        }

        if (nrxd) {
            size = nrxd * sizeof(hnddma_seg_map_t);
            if ((di->rxp_dmah = (hnddma_seg_map_t *)MALLOC(osh, size)) == NULL)
                goto fail;
            bzero(di->rxp_dmah, size);
        }
    }

    return ((hnddma_t *)di);

fail:
    _dma_detach(di);
    return (NULL);
}
/** init the tx or rx descriptor */
static INLINE void
dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, dmaaddr_t pa, uint outidx, uint32 *flags,
             uint32 bufcount)
{
    /* dma32 uses 32-bit control to fit both flags and bufcounter */
    *flags = *flags | (bufcount & CTRL_BC_MASK);

    if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
        W_SM(&ddring[outidx].addr, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
        W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
    } else {
        /* address extension */
        uint32 ae;
        ASSERT(di->addrext);
        ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
        PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

        *flags |= (ae << CTRL_AE_SHIFT);
        W_SM(&ddring[outidx].addr, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
        W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
    }
}
/** Check for odd number of 1's */
STATIC INLINE uint32
parity32(uint32 data)
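/*
 * A minimal sketch of the odd-parity fold, assuming the conventional
 * xor-reduction implementation (illustrative only, not necessarily the
 * shipped body):
 *
 *	data ^= data >> 16;
 *	data ^= data >> 8;
 *	data ^= data >> 4;
 *	data ^= data >> 2;
 *	data ^= data >> 1;
 *	return (data & 1);
 *
 * Each step folds the upper half of the remaining bits into the lower half,
 * so bit 0 ends up set iff the word contains an odd number of 1's -- which is
 * what DMA64_DD_PARITY() below feeds into the descriptor parity bit.
 */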
#define DMA64_DD_PARITY(dd) parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
static INLINE void
dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx, uint32 *flags,
             uint32 bufcount)
{
    uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

    /* PCI bus with big(>1G) physical address, use address extension */
#if defined(__mips__) && defined(IL_BIGENDIAN)
    if ((di->dataoffsetlow == SI_SDRAM_SWAPPED) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#else
    if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
        ASSERT((PHYSADDRHI(pa) & PCI64ADDR_HIGH) == 0);

        W_SM(&ddring[outidx].addrlow, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
        W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
        W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
        W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
    } else {
        /* address extension for 32-bit PCI */
        uint32 ae;
        ASSERT(di->addrext);

        ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
        PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
        ASSERT(PHYSADDRHI(pa) == 0);

        ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
        W_SM(&ddring[outidx].addrlow, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
        W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(0 + di->dataoffsethigh));
        W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
        W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
    }
    if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) {
        if (DMA64_DD_PARITY(&ddring[outidx])) {
            W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
        }
    }

#if defined(BCM47XX_CA9) && !defined(__NetBSD__)
    DMA_MAP(di->osh, (void *)(((uint)(&ddring[outidx])) & ~0x1f), 32, DMA_TX, NULL, NULL);
#endif /* BCM47XX_CA9 && !__NetBSD__ */
}
static bool
_dma32_addrext(osl_t *osh, dma32regs_t *dma32regs)
{
    uint32 w;

    OR_REG(osh, &dma32regs->control, XC_AE);
    w = R_REG(osh, &dma32regs->control);
    AND_REG(osh, &dma32regs->control, ~XC_AE);
    return ((w & XC_AE) == XC_AE);
}
static bool
_dma_alloc(dma_info_t *di, uint direction)
{
    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        return dma64_alloc(di, direction);
    } else if (DMA32_ENAB(di)) {
        return dma32_alloc(di, direction);
    }
}
880 _dma_detach(dma_info_t
*di
)
883 DMA_TRACE(("%s: dma_detach\n", di
->name
));
885 /* shouldn't be here if descriptors are unreclaimed */
886 ASSERT(di
->txin
== di
->txout
);
887 ASSERT(di
->rxin
== di
->rxout
);
889 /* free dma descriptor rings */
890 if (DMA64_ENAB(di
) && DMA64_MODE(di
)) {
892 DMA_FREE_CONSISTENT(di
->osh
, ((int8
*)(uintptr
)di
->txd64
- di
->txdalign
),
893 di
->txdalloc
, (di
->txdpaorig
), di
->tx_dmah
);
895 DMA_FREE_CONSISTENT(di
->osh
, ((int8
*)(uintptr
)di
->rxd64
- di
->rxdalign
),
896 di
->rxdalloc
, (di
->rxdpaorig
), di
->rx_dmah
);
897 } else if (DMA32_ENAB(di
)) {
899 DMA_FREE_CONSISTENT(di
->osh
, ((int8
*)(uintptr
)di
->txd32
- di
->txdalign
),
900 di
->txdalloc
, (di
->txdpaorig
), di
->tx_dmah
);
902 DMA_FREE_CONSISTENT(di
->osh
, ((int8
*)(uintptr
)di
->rxd32
- di
->rxdalign
),
903 di
->rxdalloc
, (di
->rxdpaorig
), di
->rx_dmah
);
907 /* free packet pointer vectors */
909 MFREE(di
->osh
, (void *)di
->txp
, (di
->ntxd
* sizeof(void *)));
911 MFREE(di
->osh
, (void *)di
->rxp
, (di
->nrxd
* sizeof(void *)));
913 /* free tx packet DMA handles */
915 MFREE(di
->osh
, (void *)di
->txp_dmah
, di
->ntxd
* sizeof(hnddma_seg_map_t
));
917 /* free rx packet DMA handles */
919 MFREE(di
->osh
, (void *)di
->rxp_dmah
, di
->nrxd
* sizeof(hnddma_seg_map_t
));
921 /* free our private info structure */
922 MFREE(di
->osh
, (void *)di
, sizeof(dma_info_t
));
static bool
_dma_descriptor_align(dma_info_t *di)
{
    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        uint32 addrl;

        /* Check to see if the descriptors need to be aligned on 4K/8K or not */
        if (di->d64txregs != NULL) {
            W_REG(di->osh, &di->d64txregs->addrlow, 0xff0);
            addrl = R_REG(di->osh, &di->d64txregs->addrlow);
            if (addrl != 0)
                return FALSE;
        } else if (di->d64rxregs != NULL) {
            W_REG(di->osh, &di->d64rxregs->addrlow, 0xff0);
            addrl = R_REG(di->osh, &di->d64rxregs->addrlow);
            if (addrl != 0)
                return FALSE;
        }
    }
    return TRUE;
}
/** return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
static bool
_dma_isaddrext(dma_info_t *di)
{
    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        /* DMA64 supports full 32- or 64-bit operation. AE is always valid */

        /* not all tx or rx channels are available */
        if (di->d64txregs != NULL) {
            if (!_dma64_addrext(di->osh, di->d64txregs)) {
                DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n",
                           di->name));
            }
            return TRUE;
        } else if (di->d64rxregs != NULL) {
            if (!_dma64_addrext(di->osh, di->d64rxregs)) {
                DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n",
                           di->name));
            }
            return TRUE;
        }
        return FALSE;
    } else if (DMA32_ENAB(di)) {
        if (di->d32txregs)
            return (_dma32_addrext(di->osh, di->d32txregs));
        else if (di->d32rxregs)
            return (_dma32_addrext(di->osh, di->d32rxregs));
    }
    return FALSE;
}
/** initialize descriptor table base address */
static void
_dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
{
    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        if (!di->aligndesc_4k) {
            if (direction == DMA_TX)
                di->xmtptrbase = PHYSADDRLO(pa);
            else
                di->rcvptrbase = PHYSADDRLO(pa);
        }

        if ((di->ddoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
            if (direction == DMA_TX) {
                W_REG(di->osh, &di->d64txregs->addrlow, (PHYSADDRLO(pa) +
                      di->ddoffsetlow));
                W_REG(di->osh, &di->d64txregs->addrhigh, (PHYSADDRHI(pa) +
                      di->ddoffsethigh));
            } else {
                W_REG(di->osh, &di->d64rxregs->addrlow, (PHYSADDRLO(pa) +
                      di->ddoffsetlow));
                W_REG(di->osh, &di->d64rxregs->addrhigh, (PHYSADDRHI(pa) +
                      di->ddoffsethigh));
            }
        } else {
            /* DMA64 32-bit address extension */
            uint32 ae;
            ASSERT(di->addrext);
            ASSERT(PHYSADDRHI(pa) == 0);

            /* shift the high bit(s) from pa to ae */
            ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
            PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

            if (direction == DMA_TX) {
                W_REG(di->osh, &di->d64txregs->addrlow, (PHYSADDRLO(pa) +
                      di->ddoffsetlow));
                W_REG(di->osh, &di->d64txregs->addrhigh, di->ddoffsethigh);
                SET_REG(di->osh, &di->d64txregs->control, D64_XC_AE,
                        (ae << D64_XC_AE_SHIFT));
            } else {
                W_REG(di->osh, &di->d64rxregs->addrlow, (PHYSADDRLO(pa) +
                      di->ddoffsetlow));
                W_REG(di->osh, &di->d64rxregs->addrhigh, di->ddoffsethigh);
                SET_REG(di->osh, &di->d64rxregs->control, D64_RC_AE,
                        (ae << D64_RC_AE_SHIFT));
            }
        }
    } else if (DMA32_ENAB(di)) {
        ASSERT(PHYSADDRHI(pa) == 0);
        if ((di->ddoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
            if (direction == DMA_TX)
                W_REG(di->osh, &di->d32txregs->addr, (PHYSADDRLO(pa) +
                      di->ddoffsetlow));
            else
                W_REG(di->osh, &di->d32rxregs->addr, (PHYSADDRLO(pa) +
                      di->ddoffsetlow));
        } else {
            /* dma32 address extension */
            uint32 ae;
            ASSERT(di->addrext);

            /* shift the high bit(s) from pa to ae */
            ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
            PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

            if (direction == DMA_TX) {
                W_REG(di->osh, &di->d32txregs->addr, (PHYSADDRLO(pa) +
                      di->ddoffsetlow));
                SET_REG(di->osh, &di->d32txregs->control, XC_AE, ae << XC_AE_SHIFT);
            } else {
                W_REG(di->osh, &di->d32rxregs->addr, (PHYSADDRLO(pa) +
                      di->ddoffsetlow));
                SET_REG(di->osh, &di->d32rxregs->control, RC_AE, ae << RC_AE_SHIFT);
            }
        }
    }
}
static void
_dma_fifoloopbackenable(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));

    if (DMA64_ENAB(di) && DMA64_MODE(di))
        OR_REG(di->osh, &di->d64txregs->control, D64_XC_LE);
    else if (DMA32_ENAB(di))
        OR_REG(di->osh, &di->d32txregs->control, XC_LE);
}
static void
_dma_rxinit(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_rxinit\n", di->name));

    /* During the reset procedure, the active rxd may not be zero if pktpool is
     * enabled; we need to reclaim active rxd to avoid rxd being leaked.
     */
    if ((POOL_ENAB(di->pktpool)) && (NRXDACTIVE(di->rxin, di->rxout))) {
        _dma_rxreclaim(di);
    }

    ASSERT(di->rxin == di->rxout);
    di->rxin = di->rxout = di->rs0cd = 0;

    /* clear rx descriptor ring */
    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        BZERO_SM((void *)(uintptr)di->rxd64, (di->nrxd * sizeof(dma64dd_t)));

        /* DMA engines without an alignment requirement require the table to be
         * initialized before enabling the engine
         */
        if (!di->aligndesc_4k)
            _dma_ddtable_init(di, DMA_RX, di->rxdpa);

        _dma_rxenable(di);

        if (di->aligndesc_4k)
            _dma_ddtable_init(di, DMA_RX, di->rxdpa);
    } else if (DMA32_ENAB(di)) {
        BZERO_SM((void *)(uintptr)di->rxd32, (di->nrxd * sizeof(dma32dd_t)));
        _dma_rxenable(di);
        _dma_ddtable_init(di, DMA_RX, di->rxdpa);
    }
}
static void
_dma_rxenable(dma_info_t *di)
{
    uint dmactrlflags = di->hnddma.dmactrlflags;

    DMA_TRACE(("%s: dma_rxenable\n", di->name));

    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        uint32 control = (R_REG(di->osh, &di->d64rxregs->control) & D64_RC_AE) | D64_RC_RE;

        if ((dmactrlflags & DMA_CTRL_PEN) == 0)
            control |= D64_RC_PD;

        if (dmactrlflags & DMA_CTRL_ROC)
            control |= D64_RC_OC;

        /* These bits 20:18 (burstLen) of the control register can be written but will take
         * effect only if these bits are valid. So this will not affect previous versions
         * of the DMA. They will continue to have those bits set to 0.
         */
        control &= ~D64_RC_BL_MASK;
        control |= (di->rxburstlen << D64_RC_BL_SHIFT);

        control &= ~D64_RC_PC_MASK;
        control |= (di->rxprefetchctl << D64_RC_PC_SHIFT);

        control &= ~D64_RC_PT_MASK;
        control |= (di->rxprefetchthresh << D64_RC_PT_SHIFT);

        W_REG(di->osh, &di->d64rxregs->control,
              ((di->rxoffset << D64_RC_RO_SHIFT) | control));
    } else if (DMA32_ENAB(di)) {
        uint32 control = (R_REG(di->osh, &di->d32rxregs->control) & RC_AE) | RC_RE;

        if ((dmactrlflags & DMA_CTRL_PEN) == 0)
            control |= RC_PD;

        if (dmactrlflags & DMA_CTRL_ROC)
            control |= RC_OC;

        /* These bits 20:18 (burstLen) of the control register can be written but will take
         * effect only if these bits are valid. So this will not affect previous versions
         * of the DMA. They will continue to have those bits set to 0.
         */
        control &= ~RC_BL_MASK;
        control |= (di->rxburstlen << RC_BL_SHIFT);

        control &= ~RC_PC_MASK;
        control |= (di->rxprefetchctl << RC_PC_SHIFT);

        control &= ~RC_PT_MASK;
        control |= (di->rxprefetchthresh << RC_PT_SHIFT);

        W_REG(di->osh, &di->d32rxregs->control,
              ((di->rxoffset << RC_RO_SHIFT) | control));
    }
}
static void
_dma_rx_param_get(dma_info_t *di, uint16 *rxoffset, uint16 *rxbufsize)
{
    /* the normal values fit into 16 bits */
    *rxoffset = (uint16)di->rxoffset;
    *rxbufsize = (uint16)di->rxbufsize;
}
/**
 * !! rx entry routine
 * returns a pointer to the next frame received, or NULL if there are no more.
 * If DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is supported;
 * otherwise, it's treated as a giant pkt and will be tossed.
 * The DMA scattering starts with a normal DMA header, followed by the first buffer data.
 * After it reaches the max size of a buffer, the data continues in the next DMA descriptor
 * buffer WITHOUT a DMA header.
 */
static void * BCMFASTPATH
_dma_rx(dma_info_t *di)
{
    void *p, *head, *tail;
    uint len;
    uint pkt_len;
    int resid = 0;
#if defined(BCM4335) || defined(BCM4345) || defined(BCM4350) || defined(BCM43602)
    dma64regs_t *dregs = di->d64rxregs;
#endif

next_frame:
    head = _dma_getnextrxp(di, FALSE);
    if (head == NULL)
        return (NULL);

#if (!defined(__mips__) && !defined(BCM47XX_CA9))
#if defined(BCM4335) || defined(BCM4345) || defined(BCM4350) || defined(BCM43602)
    if ((R_REG(osh, &dregs->control) & D64_RC_GE)) {
        /* In case of glommed pkt get length from hwheader */
        len = ltoh16(*((uint16 *)(PKTDATA(di->osh, head)) + di->rxoffset/2 + 2)) + 4;

        *(uint16 *)(PKTDATA(di->osh, head)) = len;
    } else {
        len = ltoh16(*(uint16 *)(PKTDATA(di->osh, head)));
    }
#else
    len = ltoh16(*(uint16 *)(PKTDATA(di->osh, head)));
#endif
#else
#if defined(__mips__) || defined(__NetBSD__)
    for (read_count = 200;
         (!(len = ltoh16(*(uint16 *)OSL_UNCACHED(PKTDATA(di->osh, head)))) &&
          read_count); read_count--) {
        if (CHIPID(di->sih->chip) == BCM5356_CHIP_ID)
            break;
    }
#else
    for (read_count = 200; read_count; read_count--) {
        len = ltoh16(*(uint16 *)PKTDATA(di->osh, head));
        if (len != 0)
            break;
        DMA_MAP(di->osh, PKTDATA(di->osh, head), 32, DMA_RX, NULL, NULL);
    }
#endif /* __mips__ */

    if (!len) {
        DMA_ERROR(("%s: dma_rx: frame length (%d)\n", di->name, len));
        PKTFREE(di->osh, head, FALSE);
        goto next_frame;
    }
#endif /* defined(__mips__) */

    DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));

    /* set actual length */
    pkt_len = MIN((di->rxoffset + len), di->rxbufsize);
    PKTSETLEN(di->osh, head, pkt_len);
    resid = len - (di->rxbufsize - di->rxoffset);

    /* check for single or multi-buffer rx */
    if (resid > 0) {
        /* get rid of compiler warning */
        p = NULL;

        tail = head;
        while ((resid > 0) && (p = _dma_getnextrxp(di, FALSE))) {
            PKTSETNEXT(di->osh, tail, p);
            pkt_len = MIN(resid, (int)di->rxbufsize);
            PKTSETLEN(di->osh, p, pkt_len);

            tail = p;
            resid -= di->rxbufsize;
        }

        if (resid > 0) {
            uint16 cur;
            cur = (DMA64_ENAB(di) && DMA64_MODE(di)) ?
                  B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
                       di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t) :
                  B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK,
                      dma32dd_t);
            DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
                       di->rxin, di->rxout, cur));
        }

        if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
            DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di->name, len));
            PKTFREE(di->osh, head, FALSE);
            di->hnddma.rxgiants++;
            goto next_frame;
        }
    }

    return (head);
}
/**
 * post receive buffers
 * returns FALSE if refill failed completely and the ring is empty;
 * this will stall the rx dma and the user might want to call rxfill again asap.
 * This unlikely happens on a memory-rich NIC, but often on memory-constrained dongles.
 */
static bool BCMFASTPATH
_dma_rxfill(dma_info_t *di)
{
    void *p;
    uint16 rxin, rxout;
    uint32 flags = 0;
    uint n;
    uint i;
    dmaaddr_t pa;
    uint extra_offset = 0, extra_pad;
    bool ring_empty = FALSE;
    uint alignment_req = (di->hnddma.dmactrlflags & DMA_CTRL_USB_BOUNDRY4KB_WAR) ?
        16 : 1;	/* MUST BE POWER of 2 */

    /*
     * Determine how many receive buffers we're lacking
     * from the full complement, allocate, initialize,
     * and post them, then update the chip rx lastdscr.
     */
    rxin = di->rxin;
    rxout = di->rxout;

    n = di->nrxpost - NRXDACTIVE(rxin, rxout);

    if (di->rxbufsize > BCMEXTRAHDROOM)
        extra_offset = di->rxextrahdrroom;

    DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

    for (i = 0; i < n; i++) {
        /* the di->rxbufsize doesn't include the extra headroom, we need to add it to the
         * size to be allocated
         */
        if (POOL_ENAB(di->pktpool)) {
            ASSERT(di->pktpool);
            p = pktpool_get(di->pktpool);
#ifdef BCMDBG_POOL
            if (p)
                PKTPOOLSETSTATE(p, POOL_RXFILL);
#endif /* BCMDBG_POOL */
        } else {
            p = PKTGET(di->osh, (di->rxbufsize + extra_offset + alignment_req - 1),
                       FALSE);
        }
        if (p == NULL) {
            DMA_TRACE(("%s: dma_rxfill: out of rxbufs\n", di->name));
            if (i == 0) {
                if (DMA64_ENAB(di) && DMA64_MODE(di)) {
                    if (dma64_rxidle(di)) {
                        DMA_TRACE(("%s: rxfill64: ring is empty !\n",
                                   di->name));
                        ring_empty = TRUE;
                    }
                } else if (DMA32_ENAB(di)) {
                    if (dma32_rxidle(di)) {
                        DMA_TRACE(("%s: rxfill32: ring is empty !\n",
                                   di->name));
                        ring_empty = TRUE;
                    }
                }
            }
            di->hnddma.rxnobuf++;
            break;
        }

        /* reserve an extra headroom, if applicable */
        if (di->hnddma.dmactrlflags & DMA_CTRL_USB_BOUNDRY4KB_WAR) {
            extra_pad = ((alignment_req - (uint)(((unsigned long)PKTDATA(di->osh, p) -
                (unsigned long)(uchar *)0))) & (alignment_req - 1));
        } else
            extra_pad = 0;

        if (extra_offset + extra_pad)
            PKTPULL(di->osh, p, extra_offset + extra_pad);
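
        /* Worked example (illustrative only): with the 4KB-boundary WAR active,
         * alignment_req is 16. If PKTDATA() returns a buffer whose address ends
         * in 0x4a, then (16 - 0x4a) & 15 == 6, so the data pointer is pulled up
         * by extra_offset + 6 bytes and lands 16-byte aligned.
         */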
        /* mark as ctf buffer for fast mapping */
        if (CTF_ENAB(kcih)) {
            ASSERT((((uint32)PKTDATA(di->osh, p)) & 31) == 0);
            PKTSETCTF(di->osh, p);
        }

        /* Do a cached write instead of uncached write since DMA_MAP
         * will flush the cache.
         */
        *(uint32 *)(PKTDATA(di->osh, p)) = 0;
#if defined(linux) && defined(BCM47XX_CA9)
        DMA_MAP(di->osh, (void *)((uint)PKTDATA(di->osh, p) & ~0x1f),
                32, DMA_TX, NULL, NULL);
#endif

        if (DMASGLIST_ENAB)
            bzero(&di->rxp_dmah[rxout], sizeof(hnddma_seg_map_t));

        pa = DMA_MAP(di->osh, PKTDATA(di->osh, p),
                     di->rxbufsize, DMA_RX, p,
                     &di->rxp_dmah[rxout]);

        ASSERT(ISALIGNED(PHYSADDRLO(pa), 4));

#ifdef __mips__
        /* Do an uncached write now that DMA_MAP has invalidated the cache
         */
        *(uint32 *)OSL_UNCACHED((PKTDATA(di->osh, p))) = 0;
#endif /* __mips__ */

        /* save the free packet pointer */
        ASSERT(di->rxp[rxout] == NULL);
        di->rxp[rxout] = p;

        /* reset flags for each descriptor */
        flags = 0;
        if (DMA64_ENAB(di) && DMA64_MODE(di)) {
            if (rxout == (di->nrxd - 1))
                flags = D64_CTRL1_EOT;

            dma64_dd_upd(di, di->rxd64, pa, rxout, &flags, di->rxbufsize);
        } else if (DMA32_ENAB(di)) {
            if (rxout == (di->nrxd - 1))
                flags = CTRL_EOT;

            ASSERT(PHYSADDRHI(pa) == 0);
            dma32_dd_upd(di, di->rxd32, pa, rxout, &flags, di->rxbufsize);
        }

        rxout = NEXTRXD(rxout);
    }

    di->rxout = rxout;

    /* update the chip lastdscr pointer */
    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        W_REG(di->osh, &di->d64rxregs->ptr, di->rcvptrbase + I2B(rxout, dma64dd_t));
    } else if (DMA32_ENAB(di)) {
        W_REG(di->osh, &di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
    }

    return ring_empty;
}
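
/*
 * Example of the ptr/index round trip used above (illustrative only): with
 * dma64dd_t being 16 bytes, posting rxout = 5 writes
 * rcvptrbase + I2B(5, dma64dd_t) = rcvptrbase + 80 into the ptr register,
 * and B2I() on the status0 CD field reverses the conversion when the driver
 * reads back how far the engine has advanced.
 */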
/** like getnexttxp but no reclaim */
static void *
_dma_peeknexttxp(dma_info_t *di)
{
    uint16 end, i;

    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        end = (uint16)B2I(((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK) -
              di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t);
    } else if (DMA32_ENAB(di)) {
        end = (uint16)B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
    }

    for (i = di->txin; i != end; i = NEXTTXD(i))
        if (di->txp[i])
            return (di->txp[i]);

    return (NULL);
}
static int
_dma_peekntxp(dma_info_t *di, int *len, void *txps[], txd_range_t range)
{
    uint16 start, end, i;
    uint act;

    DMA_TRACE(("%s: dma_peekntxp\n", di->name));

    if (di->ntxd == 0) {
        *len = 0;
        return BCME_ERROR;
    }

    start = di->txin;

    if (range == HNDDMA_RANGE_ALL)
        end = di->txout;
    else {
        if (DMA64_ENAB(di)) {
            end = B2I(((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK) -
                  di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t);

            act = (uint)(R_REG(di->osh, &di->d64txregs->status1) & D64_XS1_AD_MASK);
            act = (act - di->xmtptrbase) & D64_XS0_CD_MASK;
            act = (uint)B2I(act, dma64dd_t);
        } else {
            end = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);

            act = (uint)((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK) >>
                  XS_AD_SHIFT);
            act = (uint)B2I(act, dma32dd_t);
        }
    }

    if ((start == 0) && (end > di->txout))
        return BCME_ERROR;

    for (i = start; i != end; i = NEXTTXD(i)) {
/** like getnextrxp but does not take it off the ring */
static void *
_dma_peeknextrxp(dma_info_t *di)
{
    uint16 end, i;

    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        end = (uint16)B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
              di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);
    } else if (DMA32_ENAB(di)) {
        end = (uint16)B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t);
    }

    for (i = di->rxin; i != end; i = NEXTRXD(i))
        if (di->rxp[i])
            return (di->rxp[i]);

    return (NULL);
}
static void
_dma_rxreclaim(dma_info_t *di)
{
    void *p;
    bool origcb = TRUE;

    /* "unused local" warning suppression for OSLs that
     * define PKTFREE() without using the di->osh arg
     */

    DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

    if (POOL_ENAB(di->pktpool) &&
        ((origcb = pktpool_emptycb_disabled(di->pktpool)) == FALSE))
        pktpool_emptycb_disable(di->pktpool, TRUE);

    while ((p = _dma_getnextrxp(di, TRUE)))
        PKTFREE(di->osh, p, FALSE);

    if (origcb == FALSE)
        pktpool_emptycb_disable(di->pktpool, FALSE);
}
static void * BCMFASTPATH
_dma_getnextrxp(dma_info_t *di, bool forceall)
{
    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        return dma64_getnextrxp(di, forceall);
    } else if (DMA32_ENAB(di)) {
        return dma32_getnextrxp(di, forceall);
    }
}
static void
_dma_txblock(dma_info_t *di)
{
    di->hnddma.txavail = 0;
}

static void
_dma_txunblock(dma_info_t *di)
{
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}

static uint
_dma_txactive(dma_info_t *di)
{
    return NTXDACTIVE(di->txin, di->txout);
}
static uint
_dma_txpending(dma_info_t *di)
{
    uint16 curr;

    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        curr = B2I(((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK) -
               di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t);
    } else if (DMA32_ENAB(di)) {
        curr = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
    }

    return NTXDACTIVE(curr, di->txout);
}
static uint
_dma_txcommitted(dma_info_t *di)
{
    uint16 ptr;
    uint txin = di->txin;

    if (txin == di->txout)
        return 0;

    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        ptr = B2I(R_REG(di->osh, &di->d64txregs->ptr), dma64dd_t);
    } else if (DMA32_ENAB(di)) {
        ptr = B2I(R_REG(di->osh, &di->d32txregs->ptr), dma32dd_t);
    }

    return NTXDACTIVE(di->txin, ptr);
}
static uint
_dma_rxactive(dma_info_t *di)
{
    return NRXDACTIVE(di->rxin, di->rxout);
}
static uint
_dma_activerxbuf(dma_info_t *di)
{
    uint16 curr, ptr;
    curr = B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
           di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);
    ptr = B2I(((R_REG(di->osh, &di->d64rxregs->ptr) & D64_RS0_CD_MASK) -
          di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);
    return NRXDACTIVE(curr, ptr);
}
static void
_dma_counterreset(dma_info_t *di)
{
    /* reset all software counters */
    di->hnddma.rxgiants = 0;
    di->hnddma.rxnobuf = 0;
    di->hnddma.txnobuf = 0;
}
static uint
_dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
{
    uint dmactrlflags;

    if (di == NULL) {
        DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
        return (0);
    }

    dmactrlflags = di->hnddma.dmactrlflags;
    ASSERT((flags & ~mask) == 0);

    dmactrlflags &= ~mask;
    dmactrlflags |= flags;

    /* If trying to enable parity, check if parity is actually supported */
    if (dmactrlflags & DMA_CTRL_PEN) {
        uint32 control;

        if (DMA64_ENAB(di) && DMA64_MODE(di)) {
            control = R_REG(di->osh, &di->d64txregs->control);
            W_REG(di->osh, &di->d64txregs->control, control | D64_XC_PD);
            if (R_REG(di->osh, &di->d64txregs->control) & D64_XC_PD) {
                /* We *can* disable it so it is supported,
                 * restore control register
                 */
                W_REG(di->osh, &di->d64txregs->control, control);
            } else {
                /* Not supported, don't allow it to be enabled */
                dmactrlflags &= ~DMA_CTRL_PEN;
            }
        } else if (DMA32_ENAB(di)) {
            control = R_REG(di->osh, &di->d32txregs->control);
            W_REG(di->osh, &di->d32txregs->control, control | XC_PD);
            if (R_REG(di->osh, &di->d32txregs->control) & XC_PD) {
                W_REG(di->osh, &di->d32txregs->control, control);
            } else {
                /* Not supported, don't allow it to be enabled */
                dmactrlflags &= ~DMA_CTRL_PEN;
            }
        }
    }

    di->hnddma.dmactrlflags = dmactrlflags;

    return (dmactrlflags);
}
/** get the address of the var in order to change it later */
static uintptr
_dma_getvar(dma_info_t *di, const char *name)
{
    if (!strcmp(name, "&txavail"))
        return ((uintptr)&(di->hnddma.txavail));
    return (0);
}
static uint
_dma_avoidancecnt(dma_info_t *di)
{
    return (di->dma_avoidance_cnt);
}
void
dma_txpioloopback(osl_t *osh, dma32regs_t *regs)
{
    OR_REG(osh, &regs->control, XC_LE);
}
uint8 dma_align_sizetobits(uint size)
{
    uint8 bitpos = 0;
    ASSERT(size);
    ASSERT(!(size & (size-1)));
    while (size >>= 1) {
        bitpos++;
    }
    return (bitpos);
}
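
/* Illustrative usage: for a power-of-2 input the loop simply counts the bit
 * position, e.g. dma_align_sizetobits(4096) == 12 and
 * dma_align_sizetobits(32) == 5, suitable for passing as *alignbits below.
 */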
/**
 * This function ensures that the DMA descriptor ring does not get allocated
 * across a page boundary. If the allocation is done across the page boundary
 * the first time, then it is freed and the allocation is redone at a
 * descriptor-ring-size aligned location. This ensures that the ring does not
 * cross a page boundary.
 */
static void *
dma_ringalloc(osl_t *osh, uint32 boundary, uint size, uint16 *alignbits, uint *alloced,
	dmaaddr_t *descpa, osldma_t **dmah)
{
    void *va;
    uint32 desc_strtaddr;
    uint32 alignbytes = 1 << *alignbits;

    if ((va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced, descpa, dmah)) == NULL)
        return NULL;

    desc_strtaddr = (uint32)ROUNDUP((uint)PHYSADDRLO(*descpa), alignbytes);
    if (((desc_strtaddr + size - 1) & boundary) !=
        (desc_strtaddr & boundary)) {
        *alignbits = dma_align_sizetobits(size);
        DMA_FREE_CONSISTENT(osh, va,
                            size, *descpa, *dmah);
        va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced, descpa, dmah);
    }
    return va;
}
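
/*
 * Hypothetical call sequence (illustrative only; the names follow those used
 * elsewhere in this file): allocating a tx ring that must not straddle a
 * boundary, assuming align_bits was seeded from di->dmadesc_align:
 *
 *	alloced = 0;
 *	if ((va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits,
 *	                        &alloced, &di->txdpaorig, &di->tx_dmah)) == NULL)
 *		return FALSE;
 *
 * On the retry path inside dma_ringalloc(), the alignment is bumped to the
 * ring size itself, which by construction cannot cross the boundary.
 */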
static void
dma32_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma32dd_t *ring, uint start, uint end,
	uint max_num)
{
    uint i;

    for (i = start; i != end; i = XXD((i + 1), max_num)) {
        /* in the format of high->low 8 bytes */
        bcm_bprintf(b, "ring index %d: 0x%x %x\n",
                    i, R_SM(&ring[i].addr), R_SM(&ring[i].ctrl));
    }
}
static void
dma32_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
    bcm_bprintf(b, "DMA32: txd32 %p txdpa 0x%lx txp %p txin %d txout %d "
                "txavail %d txnodesc %d\n", di->txd32, PHYSADDRLO(di->txdpa), di->txp, di->txin,
                di->txout, di->hnddma.txavail, di->hnddma.txnodesc);

    bcm_bprintf(b, "xmtcontrol 0x%x xmtaddr 0x%x xmtptr 0x%x xmtstatus 0x%x\n",
                R_REG(di->osh, &di->d32txregs->control),
                R_REG(di->osh, &di->d32txregs->addr),
                R_REG(di->osh, &di->d32txregs->ptr),
                R_REG(di->osh, &di->d32txregs->status));

    if (dumpring && di->txd32)
        dma32_dumpring(di, b, di->txd32, di->txin, di->txout, di->ntxd);
}
static void
dma32_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
    bcm_bprintf(b, "DMA32: rxd32 %p rxdpa 0x%lx rxp %p rxin %d rxout %d\n",
                di->rxd32, PHYSADDRLO(di->rxdpa), di->rxp, di->rxin, di->rxout);

    bcm_bprintf(b, "rcvcontrol 0x%x rcvaddr 0x%x rcvptr 0x%x rcvstatus 0x%x\n",
                R_REG(di->osh, &di->d32rxregs->control),
                R_REG(di->osh, &di->d32rxregs->addr),
                R_REG(di->osh, &di->d32rxregs->ptr),
                R_REG(di->osh, &di->d32rxregs->status));
    if (di->rxd32 && dumpring)
        dma32_dumpring(di, b, di->rxd32, di->rxin, di->rxout, di->nrxd);
}
static void
dma32_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
    dma32_dumptx(di, b, dumpring);
    dma32_dumprx(di, b, dumpring);
}
static void
dma64_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma64dd_t *ring, uint start, uint end,
	uint max_num)
{
    uint i;

    for (i = start; i != end; i = XXD((i + 1), max_num)) {
        /* in the format of high->low 16 bytes */
        bcm_bprintf(b, "ring index %d: 0x%x %x %x %x\n",
                    i, R_SM(&ring[i].addrhigh), R_SM(&ring[i].addrlow),
                    R_SM(&ring[i].ctrl2), R_SM(&ring[i].ctrl1));
    }
}
static void
dma64_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
    bcm_bprintf(b, "DMA64: txd64 %p txdpa 0x%lx txdpahi 0x%lx txp %p txin %d txout %d "
                "txavail %d txnodesc %d\n", di->txd64, PHYSADDRLO(di->txdpa),
                PHYSADDRHI(di->txdpaorig), di->txp, di->txin, di->txout, di->hnddma.txavail,
                di->hnddma.txnodesc);

    bcm_bprintf(b, "xmtcontrol 0x%x xmtaddrlow 0x%x xmtaddrhigh 0x%x "
                "xmtptr 0x%x xmtstatus0 0x%x xmtstatus1 0x%x\n",
                R_REG(di->osh, &di->d64txregs->control),
                R_REG(di->osh, &di->d64txregs->addrlow),
                R_REG(di->osh, &di->d64txregs->addrhigh),
                R_REG(di->osh, &di->d64txregs->ptr),
                R_REG(di->osh, &di->d64txregs->status0),
                R_REG(di->osh, &di->d64txregs->status1));

    bcm_bprintf(b, "DMA64: DMA avoidance applied %d\n", di->dma_avoidance_cnt);

    if (dumpring && di->txd64) {
        dma64_dumpring(di, b, di->txd64, di->txin, di->txout, di->ntxd);
    }
}
static void
dma64_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
    bcm_bprintf(b, "DMA64: rxd64 %p rxdpa 0x%lx rxdpahi 0x%lx rxp %p rxin %d rxout %d\n",
                di->rxd64, PHYSADDRLO(di->rxdpa), PHYSADDRHI(di->rxdpaorig), di->rxp,
                di->rxin, di->rxout);

    bcm_bprintf(b, "rcvcontrol 0x%x rcvaddrlow 0x%x rcvaddrhigh 0x%x rcvptr "
                "0x%x rcvstatus0 0x%x rcvstatus1 0x%x\n",
                R_REG(di->osh, &di->d64rxregs->control),
                R_REG(di->osh, &di->d64rxregs->addrlow),
                R_REG(di->osh, &di->d64rxregs->addrhigh),
                R_REG(di->osh, &di->d64rxregs->ptr),
                R_REG(di->osh, &di->d64rxregs->status0),
                R_REG(di->osh, &di->d64rxregs->status1));
    if (di->rxd64 && dumpring) {
        dma64_dumpring(di, b, di->rxd64, di->rxin, di->rxout, di->nrxd);
    }
}
static void
dma64_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
    dma64_dumptx(di, b, dumpring);
    dma64_dumprx(di, b, dumpring);
}


/* 32-bit DMA functions */
1941 dma32_txinit(dma_info_t
*di
)
1943 uint32 control
= XC_XE
;
1945 DMA_TRACE(("%s: dma_txinit\n", di
->name
));
1950 di
->txin
= di
->txout
= di
->xs0cd
= 0;
1951 di
->hnddma
.txavail
= di
->ntxd
- 1;
1953 /* clear tx descriptor ring */
1954 BZERO_SM(DISCARD_QUAL(di
->txd32
, void), (di
->ntxd
* sizeof(dma32dd_t
)));
1956 /* These bits 20:18 (burstLen) of control register can be written but will take
1957 * effect only if these bits are valid. So this will not affect previous versions
1958 * of the DMA. They will continue to have those bits set to 0.
1960 control
|= (di
->txburstlen
<< XC_BL_SHIFT
);
1961 control
|= (di
->txmultioutstdrd
<< XC_MR_SHIFT
);
1962 control
|= (di
->txprefetchctl
<< XC_PC_SHIFT
);
1963 control
|= (di
->txprefetchthresh
<< XC_PT_SHIFT
);
1965 if ((di
->hnddma
.dmactrlflags
& DMA_CTRL_PEN
) == 0)
1967 W_REG(di
->osh
, &di
->d32txregs
->control
, control
);
1968 _dma_ddtable_init(di
, DMA_TX
, di
->txdpa
);
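/*
 * Illustrative sketch (not part of the driver, hence kept under #if 0): the tx
 * control word built above packs independent hardware fields by shifting each
 * value to its bit position and OR-ing it into a word that starts as the
 * enable bit. The parameter values a caller would pass are hypothetical; only
 * the XC_* macros come from this file.
 */
#if 0
static uint32
example_pack_txcontrol(uint8 burstlen, uint8 prefetchthresh)
{
    uint32 control = XC_XE;                      /* transmit enable */
    control |= (burstlen << XC_BL_SHIFT);        /* burst length field */
    control |= (prefetchthresh << XC_PT_SHIFT);  /* prefetch threshold field */
    return control;
}
#endif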
static bool
dma32_txenabled(dma_info_t *di)
{
    uint32 xc;

    /* If the chip is dead, it is not enabled :-) */
    xc = R_REG(di->osh, &di->d32txregs->control);
    return ((xc != 0xffffffff) && (xc & XC_XE));
}

static void
dma32_txsuspend(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_txsuspend\n", di->name));

    if (di->ntxd == 0)
        return;

    OR_REG(di->osh, &di->d32txregs->control, XC_SE);
}

static void
dma32_txresume(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_txresume\n", di->name));

    if (di->ntxd == 0)
        return;

    AND_REG(di->osh, &di->d32txregs->control, ~XC_SE);
}

static bool
dma32_txsuspended(dma_info_t *di)
{
    return (di->ntxd == 0) || ((R_REG(di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
}

static void
dma32_txflush(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_txflush\n", di->name));

    if (di->ntxd == 0)
        return;

    OR_REG(di->osh, &di->d32txregs->control, XC_SE | XC_FL);
}

static void
dma32_txflush_clear(dma_info_t *di)
{
    uint32 status;

    DMA_TRACE(("%s: dma_txflush_clear\n", di->name));

    if (di->ntxd == 0)
        return;

    SPINWAIT(((status = (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
              != XS_XS_DISABLED) &&
             (status != XS_XS_IDLE) &&
             (status != XS_XS_STOPPED),
             10000);
    AND_REG(di->osh, &di->d32txregs->control, ~XC_FL);
}

static void
dma32_txreclaim(dma_info_t *di, txd_range_t range)
{
    void *p;

    DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
               (range == HNDDMA_RANGE_ALL) ? "all" :
               ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : "transfered")));

    if (di->txin == di->txout)
        return;

    while ((p = dma32_getnexttxp(di, range)))
        PKTFREE(di->osh, p, TRUE);
}

static bool
dma32_txstopped(dma_info_t *di)
{
    return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == XS_XS_STOPPED);
}

static bool
dma32_rxstopped(dma_info_t *di)
{
    return ((R_REG(di->osh, &di->d32rxregs->status) & RS_RS_MASK) == RS_RS_STOPPED);
}
static bool
dma32_alloc(dma_info_t *di, uint direction)
{
    uint size;
    uint ddlen;
    void *va;
    uint alloced = 0;
    uint16 align;
    uint16 align_bits;

    ddlen = sizeof(dma32dd_t);

    size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

    align_bits = di->dmadesc_align;
    align = (1 << align_bits);

    if (direction == DMA_TX) {
        if ((va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits, &alloced,
                                &di->txdpaorig, &di->tx_dmah)) == NULL) {
            DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
                       di->name));
            return FALSE;
        }

        PHYSADDRHISET(di->txdpa, 0);
        ASSERT(PHYSADDRHI(di->txdpaorig) == 0);
        di->txd32 = (dma32dd_t *)ROUNDUP((uintptr)va, align);
        di->txdalign = (uint)((int8 *)(uintptr)di->txd32 - (int8 *)va);

        PHYSADDRLOSET(di->txdpa, PHYSADDRLO(di->txdpaorig) + di->txdalign);
        /* Make sure that alignment didn't overflow */
        ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));

        di->txdalloc = alloced;
        ASSERT(ISALIGNED(di->txd32, align));
    } else {
        if ((va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits, &alloced,
                                &di->rxdpaorig, &di->rx_dmah)) == NULL) {
            DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
                       di->name));
            return FALSE;
        }

        PHYSADDRHISET(di->rxdpa, 0);
        ASSERT(PHYSADDRHI(di->rxdpaorig) == 0);
        di->rxd32 = (dma32dd_t *)ROUNDUP((uintptr)va, align);
        di->rxdalign = (uint)((int8 *)(uintptr)di->rxd32 - (int8 *)va);

        PHYSADDRLOSET(di->rxdpa, PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
        /* Make sure that alignment didn't overflow */
        ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));

        di->rxdalloc = alloced;
        ASSERT(ISALIGNED(di->rxd32, align));
    }

    return TRUE;
}
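/*
 * Worked example of the alignment fixup above, with made-up numbers: if
 * dma_ringalloc() returns va = 0x10000c10 and align = 0x1000, then
 * ROUNDUP(va, align) = 0x10001000 and txdalign = 0x3f0; the same 0x3f0 is
 * added to the physical address so the virtual and physical views of the
 * descriptor ring stay in step. A minimal sketch of that computation:
 */
#if 0
static void
example_align_fixup(void *va, uint align, uintptr *aligned_va, uint *pad)
{
    *aligned_va = ROUNDUP((uintptr)va, align);  /* round va up to alignment */
    *pad = (uint)(*aligned_va - (uintptr)va);   /* bytes skipped; also added to pa */
}
#endif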
static bool
dma32_txreset(dma_info_t *di)
{
    uint32 status;

    if (di->ntxd == 0)
        return TRUE;

    /* suspend tx DMA first */
    W_REG(di->osh, &di->d32txregs->control, XC_SE);
    SPINWAIT(((status = (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
              != XS_XS_DISABLED) &&
             (status != XS_XS_IDLE) &&
             (status != XS_XS_STOPPED),
             10000);

    W_REG(di->osh, &di->d32txregs->control, 0);
    SPINWAIT(((status = (R_REG(di->osh,
                               &di->d32txregs->status) & XS_XS_MASK)) != XS_XS_DISABLED),
             10000);

    /* We should be disabled at this point */
    if (status != XS_XS_DISABLED) {
        DMA_ERROR(("%s: status != D64_XS0_XS_DISABLED 0x%x\n", __FUNCTION__, status));
        ASSERT(status == XS_XS_DISABLED);
    }

    return (status == XS_XS_DISABLED);
}

static bool
dma32_rxidle(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_rxidle\n", di->name));

    if (di->nrxd == 0)
        return TRUE;

    return ((R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
            R_REG(di->osh, &di->d32rxregs->ptr));
}

static bool
dma32_rxreset(dma_info_t *di)
{
    uint32 status;

    if (di->nrxd == 0)
        return TRUE;

    W_REG(di->osh, &di->d32rxregs->control, 0);
    SPINWAIT(((status = (R_REG(di->osh,
                               &di->d32rxregs->status) & RS_RS_MASK)) != RS_RS_DISABLED),
             10000);

    return (status == RS_RS_DISABLED);
}

static bool
dma32_rxenabled(dma_info_t *di)
{
    uint32 rc;

    rc = R_REG(di->osh, &di->d32rxregs->control);
    return ((rc != 0xffffffff) && (rc & RC_RE));
}

static bool
dma32_txsuspendedidle(dma_info_t *di)
{
    if (di->ntxd == 0)
        return TRUE;

    if (!(R_REG(di->osh, &di->d32txregs->control) & XC_SE))
        return 0;

    if ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
        return 0;

    OSL_DELAY(2);
    return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == XS_XS_IDLE);
}
/*
 * !! tx entry routine
 * supports full 32bit dma engine buffer addressing so
 * dma buffers can cross 4 Kbyte page boundaries.
 *
 * WARNING: the caller must check the return value for error.
 *   the error (tossed frames) could be fatal and cause many subsequent hard-to-debug problems
 */
static int
dma32_txfast(dma_info_t *di, void *p0, bool commit)
{
    void *p, *next;
    uchar *data;
    uint len;
    uint16 txout;
    uint32 flags = 0;
    dmaaddr_t pa;

    DMA_TRACE(("%s: dma_txfast\n", di->name));

    txout = di->txout;

    /*
     * Walk the chain of packet buffers
     * allocating and initializing transmit descriptor entries.
     */
    for (p = p0; p; p = next) {
        uint nsegs, j;
        hnddma_seg_map_t *map;

        data = PKTDATA(di->osh, p);
        len = PKTLEN(di->osh, p);
#ifdef BCM_DMAPAD
        len += PKTDMAPAD(di->osh, p);
#endif
        next = PKTNEXT(di->osh, p);

        /* return nonzero if out of tx descriptors */
        if (NEXTTXD(txout) == di->txin)
            goto outoftxd;

        if (len == 0)
            continue;

        if (DMASGLIST_ENAB)
            bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));

        /* get physical address of buffer start */
        pa = DMA_MAP(di->osh, data, len, DMA_TX, p, &di->txp_dmah[txout]);

        if (DMASGLIST_ENAB) {
            map = &di->txp_dmah[txout];

            /* See if all the segments can be accounted for */
            if (map->nsegs > (uint)(di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1))
                goto outoftxd;

            nsegs = map->nsegs;
        } else
            nsegs = 1;

        for (j = 1; j <= nsegs; j++) {
            flags = 0;
            if (p == p0 && j == 1)
                flags |= CTRL_SOF;

            /* With a DMA segment list, the descriptor table is filled
             * using the segment list instead of looping over
             * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
             * the end of the segment list is reached.
             */
            if ((!DMASGLIST_ENAB && next == NULL) ||
                (DMASGLIST_ENAB && j == nsegs))
                flags |= (CTRL_IOC | CTRL_EOF);
            if (txout == (di->ntxd - 1))
                flags |= CTRL_EOT;

            if (DMASGLIST_ENAB) {
                len = map->segs[j - 1].length;
                pa = map->segs[j - 1].addr;
            }
            ASSERT(PHYSADDRHI(pa) == 0);

            dma32_dd_upd(di, di->txd32, pa, txout, &flags, len);
            ASSERT(di->txp[txout] == NULL);

            txout = NEXTTXD(txout);
        }

        /* See above. No need to loop over individual buffers */
        if (DMASGLIST_ENAB)
            break;
    }

    /* if last txd eof not set, fix it */
    if (!(flags & CTRL_EOF))
        W_SM(&di->txd32[PREVTXD(txout)].ctrl, BUS_SWAP32(flags | CTRL_IOC | CTRL_EOF));

    /* save the packet */
    di->txp[PREVTXD(txout)] = p0;

    /* bump the tx descriptor index */
    di->txout = txout;

    /* kick the chip */
    if (commit)
        W_REG(di->osh, &di->d32txregs->ptr, I2B(txout, dma32dd_t));

    /* tx flow control */
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    return (0);

outoftxd:
    DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
    PKTFREE(di->osh, p0, TRUE);
    di->hnddma.txavail = 0;
    di->hnddma.txnobuf++;
    di->hnddma.txnodesc++;
    return (-1);
}
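/*
 * Hedged caller sketch (hypothetical, not part of this file): the warning
 * above says the return value must be checked. On failure dma32_txfast() has
 * already freed the chain p0, so the caller must not touch the packet again
 * and should back off until di->hnddma.txavail is nonzero.
 */
#if 0
static int
example_send(dma_info_t *di, void *pkt)
{
    if (dma32_txfast(di, pkt, TRUE) != 0) {
        /* pkt was freed inside dma32_txfast; do not reuse it */
        return -1;  /* propagate so the caller can throttle */
    }
    return 0;
}
#endif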
/*
 * Reclaim the next completed txd (txds if using chained buffers) in the range
 * specified and return the associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transfered by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return the associated packet regardless of the value of hardware pointers.
 */
static void * BCMFASTPATH
dma32_getnexttxp(dma_info_t *di, txd_range_t range)
{
    uint16 start, end, i;
    uint16 active_desc;
    void *txp;

    DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
               (range == HNDDMA_RANGE_ALL) ? "all" :
               ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : "transfered")));

    if (di->ntxd == 0)
        return (NULL);

    txp = NULL;

    start = di->txin;
    if (range == HNDDMA_RANGE_ALL)
        end = di->txout;
    else {
        dma32regs_t *dregs = di->d32txregs;

        if (di->txin == di->xs0cd) {
            end = (uint16)B2I(R_REG(di->osh, &dregs->status) & XS_CD_MASK, dma32dd_t);
            di->xs0cd = end;
        } else
            end = di->xs0cd;

        if (range == HNDDMA_RANGE_TRANSFERED) {
            active_desc = (uint16)((R_REG(di->osh, &dregs->status) & XS_AD_MASK) >>
                                   XS_AD_SHIFT);
            active_desc = (uint16)B2I(active_desc, dma32dd_t);
            if (end != active_desc)
                end = PREVTXD(active_desc);
        }
    }

    if ((start == 0) && (end > di->txout))
        goto bogus;

    for (i = start; i != end && !txp; i = NEXTTXD(i)) {
        dmaaddr_t pa;
        hnddma_seg_map_t *map = NULL;
        uint size, j, nsegs;

        PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->txd32[i].addr)) - di->dataoffsetlow));
        PHYSADDRHISET(pa, 0);

        if (DMASGLIST_ENAB) {
            map = &di->txp_dmah[i];
            size = map->origsize;
            nsegs = map->nsegs;
        } else {
            size = (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) & CTRL_BC_MASK);
            nsegs = 1;
        }

        for (j = nsegs; j > 0; j--) {
            W_SM(&di->txd32[i].addr, 0xdeadbeef);

            txp = di->txp[i];
            di->txp[i] = NULL;
            if (j > 1)
                i = NEXTTXD(i);
        }

        DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
    }

    di->txin = i;

    /* tx flow control */
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    return (txp);

bogus:
    DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
              start, end, di->txout, forceall));
    return (NULL);
}
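/*
 * Sketch of a reclaim loop over the ranges described above (hypothetical, not
 * part of the driver): a tx-completion path would normally pass
 * HNDDMA_RANGE_TRANSMITTED, while teardown passes HNDDMA_RANGE_ALL after the
 * engine has been stopped.
 */
#if 0
static void
example_reclaim(dma_info_t *di, bool teardown)
{
    void *p;
    txd_range_t r = teardown ? HNDDMA_RANGE_ALL : HNDDMA_RANGE_TRANSMITTED;

    while ((p = dma32_getnexttxp(di, r)) != NULL)
        PKTFREE(di->osh, p, TRUE);
}
#endif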
static void * BCMFASTPATH
dma32_getnextrxp(dma_info_t *di, bool forceall)
{
    uint16 i, curr;
    void *rxp;
    dmaaddr_t pa;

    /* if forcing, dma engine must be disabled */
    ASSERT(!forceall || !dma32_rxenabled(di));

    i = di->rxin;

    /* return if no packets posted */
    if (i == di->rxout)
        return (NULL);

    if (di->rxin == di->rs0cd) {
        curr = (uint16)B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t);
        di->rs0cd = curr;
    } else
        curr = di->rs0cd;

    /* ignore curr if forceall */
    if (!forceall && (i == curr))
        return (NULL);

    /* get the packet pointer that corresponds to the rx descriptor */
    rxp = di->rxp[i];
    ASSERT(rxp);
    di->rxp[i] = NULL;

    PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) - di->dataoffsetlow));
    PHYSADDRHISET(pa, 0);

    /* clear this packet from the descriptor ring */
    DMA_UNMAP(di->osh, pa,
              di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

    W_SM(&di->rxd32[i].addr, 0xdeadbeef);

    di->rxin = NEXTRXD(i);

    return (rxp);
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void
dma32_txrotate(dma_info_t *di)
{
    uint16 ad;
    uint nactive;
    uint rot;
    uint16 old, new;
    uint32 w;
    uint16 first, last;

    ASSERT(dma32_txsuspendedidle(di));

    nactive = _dma_txactive(di);
    ad = B2I(((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK) >> XS_AD_SHIFT), dma32dd_t);
    rot = TXD(ad - di->txin);

    ASSERT(rot < di->ntxd);

    /* full-ring case is a lot harder - don't worry about this */
    if (rot >= (di->ntxd - nactive)) {
        DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
        return;
    }

    first = di->txin;
    last = PREVTXD(di->txout);

    /* move entries starting at last and moving backwards to first */
    for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
        new = TXD(old + rot);

        /*
         * Move the tx dma descriptor.
         * EOT is set only in the last entry in the ring.
         */
        w = BUS_SWAP32(R_SM(&di->txd32[old].ctrl)) & ~CTRL_EOT;
        if (new == (di->ntxd - 1))
            w |= CTRL_EOT;
        W_SM(&di->txd32[new].ctrl, BUS_SWAP32(w));
        W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));

        /* zap the old tx dma descriptor address field */
        W_SM(&di->txd32[old].addr, BUS_SWAP32(0xdeadbeef));

        /* move the corresponding txp[] entry */
        ASSERT(di->txp[new] == NULL);
        di->txp[new] = di->txp[old];

        /* Move the segment map as well */
        if (DMASGLIST_ENAB) {
            bcopy(&di->txp_dmah[old], &di->txp_dmah[new], sizeof(hnddma_seg_map_t));
            bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
        }

        di->txp[old] = NULL;
    }

    /* update txin and txout */
    di->txin = ad;
    di->txout = TXD(di->txout + rot);
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    /* kick the chip */
    W_REG(di->osh, &di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
}
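/*
 * Worked example of the rotation arithmetic above, with made-up numbers and
 * assuming the ring size is a power of two as the TXD() masking requires:
 * with ntxd = 64, txin = 60 and ActiveDescriptor ad = 4, rot = TXD(4 - 60) =
 * (4 - 60) & 63 = 8, so the entry at old = 62 moves to new = TXD(62 + 8) = 6,
 * wrapping around the ring while preserving the order of posted frames.
 */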
/* 64-bit DMA functions */

static void
dma64_txinit(dma_info_t *di)
{
    uint32 control;

    DMA_TRACE(("%s: dma_txinit\n", di->name));

    if (di->ntxd == 0)
        return;

    di->txin = di->txout = di->xs0cd = di->xs0cd_snapshot = 0;
    di->hnddma.txavail = di->ntxd - 1;

    /* clear tx descriptor ring */
    BZERO_SM((void *)(uintptr)di->txd64, (di->ntxd * sizeof(dma64dd_t)));

    /* These bits 20:18 (burstLen) of the control register can be written but take
     * effect only if these bits are valid. So this will not affect previous versions
     * of the DMA. They will continue to have those bits set to 0.
     */
    control = R_REG(di->osh, &di->d64txregs->control);
    control = (control & ~D64_XC_BL_MASK) | (di->txburstlen << D64_XC_BL_SHIFT);
    control = (control & ~D64_XC_MR_MASK) | (di->txmultioutstdrd << D64_XC_MR_SHIFT);
    control = (control & ~D64_XC_PC_MASK) | (di->txprefetchctl << D64_XC_PC_SHIFT);
    control = (control & ~D64_XC_PT_MASK) | (di->txprefetchthresh << D64_XC_PT_SHIFT);
    W_REG(di->osh, &di->d64txregs->control, control);

    control = D64_XC_XE;
    /* A DMA engine without an alignment requirement requires the table to be inited
     * before enabling the engine
     */
    if (!di->aligndesc_4k)
        _dma_ddtable_init(di, DMA_TX, di->txdpa);

    if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
        control |= D64_XC_PD;
    OR_REG(di->osh, &di->d64txregs->control, control);

    /* A DMA engine with an alignment requirement requires the table to be inited
     * before enabling the engine
     */
    if (di->aligndesc_4k)
        _dma_ddtable_init(di, DMA_TX, di->txdpa);
}
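/*
 * Unlike the 32-bit init earlier, the 64-bit init updates each control field
 * with a read-modify-write so any stale field contents are cleared before the
 * new value is OR-ed in. A minimal sketch of that masked-update pattern
 * (illustrative helper, not part of the driver):
 */
#if 0
static uint32
example_masked_update(uint32 control, uint32 mask, uint shift, uint32 val)
{
    return (control & ~mask) | (val << shift);  /* clear the field, then set it */
}
#endif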
static bool
dma64_txenabled(dma_info_t *di)
{
    uint32 xc;

    /* If the chip is dead, it is not enabled :-) */
    xc = R_REG(di->osh, &di->d64txregs->control);
    return ((xc != 0xffffffff) && (xc & D64_XC_XE));
}

static void
dma64_txsuspend(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_txsuspend\n", di->name));

    if (di->ntxd == 0)
        return;

    OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
}

static void
dma64_txresume(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_txresume\n", di->name));

    if (di->ntxd == 0)
        return;

    AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_SE);
}

static bool
dma64_txsuspended(dma_info_t *di)
{
    return (di->ntxd == 0) ||
           ((R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE) == D64_XC_SE);
}

static void
dma64_txflush(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_txflush\n", di->name));

    if (di->ntxd == 0)
        return;

    OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE | D64_XC_FL);
}

static void
dma64_txflush_clear(dma_info_t *di)
{
    uint32 status;

    DMA_TRACE(("%s: dma_txflush_clear\n", di->name));

    if (di->ntxd == 0)
        return;

    SPINWAIT(((status = (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
              D64_XS0_XS_DISABLED) &&
             (status != D64_XS0_XS_IDLE) &&
             (status != D64_XS0_XS_STOPPED),
             10000);
    AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_FL);
}

static void BCMFASTPATH
dma64_txreclaim(dma_info_t *di, txd_range_t range)
{
    void *p;

    DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
               (range == HNDDMA_RANGE_ALL) ? "all" :
               ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : "transfered")));

    if (di->txin == di->txout)
        return;

    while ((p = dma64_getnexttxp(di, range))) {
        /* For unframed data, we don't have any packets to free */
        if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
            PKTFREE(di->osh, p, TRUE);
    }
}

static bool
dma64_txstopped(dma_info_t *di)
{
    return ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_STOPPED);
}

static bool
dma64_rxstopped(dma_info_t *di)
{
    return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) == D64_RS0_RS_STOPPED);
}
static bool
dma64_alloc(dma_info_t *di, uint direction)
{
    uint size;
    uint ddlen;
    void *va;
    uint alloced = 0;
    uint16 align;
    uint16 align_bits;

    ddlen = sizeof(dma64dd_t);

    size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
    align_bits = di->dmadesc_align;
    align = (1 << align_bits);

    if (direction == DMA_TX) {
        if ((va = dma_ringalloc(di->osh,
                                (di->d64_xs0_cd_mask == 0x1fff) ? D64RINGBOUNDARY : D64RINGBOUNDARY_LARGE,
                                size, &align_bits, &alloced,
                                &di->txdpaorig, &di->tx_dmah)) == NULL) {
            DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
                       di->name));
            return FALSE;
        }
        align = (1 << align_bits);

        /* adjust the pa by rounding up to the alignment */
        PHYSADDRLOSET(di->txdpa, ROUNDUP(PHYSADDRLO(di->txdpaorig), align));
        PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));

        /* Make sure that alignment didn't overflow */
        ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));

        /* find the alignment offset that was used */
        di->txdalign = (uint)(PHYSADDRLO(di->txdpa) - PHYSADDRLO(di->txdpaorig));

        /* adjust the va by the same offset */
        di->txd64 = (dma64dd_t *)((uintptr)va + di->txdalign);

        di->txdalloc = alloced;
        ASSERT(ISALIGNED(PHYSADDRLO(di->txdpa), align));
    } else {
        if ((va = dma_ringalloc(di->osh,
                                (di->d64_rs0_cd_mask == 0x1fff) ? D64RINGBOUNDARY : D64RINGBOUNDARY_LARGE,
                                size, &align_bits, &alloced,
                                &di->rxdpaorig, &di->rx_dmah)) == NULL) {
            DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
                       di->name));
            return FALSE;
        }
        align = (1 << align_bits);

        /* adjust the pa by rounding up to the alignment */
        PHYSADDRLOSET(di->rxdpa, ROUNDUP(PHYSADDRLO(di->rxdpaorig), align));
        PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));

        /* Make sure that alignment didn't overflow */
        ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));

        /* find the alignment offset that was used */
        di->rxdalign = (uint)(PHYSADDRLO(di->rxdpa) - PHYSADDRLO(di->rxdpaorig));

        /* adjust the va by the same offset */
        di->rxd64 = (dma64dd_t *)((uintptr)va + di->rxdalign);

        di->rxdalloc = alloced;
        ASSERT(ISALIGNED(PHYSADDRLO(di->rxdpa), align));
    }

    return TRUE;
}
static bool
dma64_txreset(dma_info_t *di)
{
    uint32 status;

    if (di->ntxd == 0)
        return TRUE;

    /* suspend tx DMA first */
    W_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
    SPINWAIT(((status = (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
              D64_XS0_XS_DISABLED) &&
             (status != D64_XS0_XS_IDLE) &&
             (status != D64_XS0_XS_STOPPED),
             10000);

    W_REG(di->osh, &di->d64txregs->control, 0);
    SPINWAIT(((status = (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
              D64_XS0_XS_DISABLED),
             10000);

    /* We should be disabled at this point */
    if (status != D64_XS0_XS_DISABLED) {
        DMA_ERROR(("%s: status != D64_XS0_XS_DISABLED 0x%x\n", __FUNCTION__, status));
        ASSERT(status == D64_XS0_XS_DISABLED);
    }

    return (status == D64_XS0_XS_DISABLED);
}

static bool
dma64_rxidle(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_rxidle\n", di->name));

    if (di->nrxd == 0)
        return TRUE;

    return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
            (R_REG(di->osh, &di->d64rxregs->ptr) & D64_RS0_CD_MASK));
}

static bool
dma64_rxreset(dma_info_t *di)
{
    uint32 status;

    if (di->nrxd == 0)
        return TRUE;

    W_REG(di->osh, &di->d64rxregs->control, 0);
    SPINWAIT(((status = (R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK)) !=
              D64_RS0_RS_DISABLED), 10000);

    return (status == D64_RS0_RS_DISABLED);
}

static bool
dma64_rxenabled(dma_info_t *di)
{
    uint32 rc;

    rc = R_REG(di->osh, &di->d64rxregs->control);
    return ((rc != 0xffffffff) && (rc & D64_RC_RE));
}

static bool
dma64_txsuspendedidle(dma_info_t *di)
{
    if (di->ntxd == 0)
        return TRUE;

    if (!(R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE))
        return 0;

    if ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_IDLE)
        return 1;

    return 0;
}
/*
 * Useful when sending unframed data. This allows us to get a progress report from the DMA.
 * We return a pointer to the beginning of the data buffer of the current descriptor.
 * If DMA is idle, we return NULL.
 */
static void *
dma64_getpos(dma_info_t *di, bool direction)
{
    void *va;
    bool idle;
    uint16 cur_idx;

    if (direction == DMA_TX) {
        cur_idx = B2I(((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK) -
                       di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t);
        idle = !NTXDACTIVE(di->txin, di->txout);
        va = di->txp[cur_idx];
    } else {
        cur_idx = B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
                       di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);
        idle = !NRXDACTIVE(di->rxin, di->rxout);
        va = di->rxp[cur_idx];
    }

    /* If DMA is IDLE, return NULL */
    if (idle) {
        DMA_TRACE(("%s: DMA idle, return NULL\n", __FUNCTION__));
        va = NULL;
    }

    return va;
}
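/*
 * Hypothetical polling sketch (not in the driver): when streaming unframed
 * data, a caller can poll dma64_getpos() to learn which buffer the engine is
 * currently working on, treating NULL as "no active descriptors".
 */
#if 0
static bool
example_tx_in_progress(dma_info_t *di)
{
    return (dma64_getpos(di, DMA_TX) != NULL);
}
#endif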
/*
 * TX of unframed data
 *
 * Adds a DMA ring descriptor for the data pointed to by "buf".
 * This is for DMA of a buffer of data and is unlike other hnddma TX functions
 * that take a pointer to a "packet".
 * Each call results in a single descriptor being added for "len" bytes of
 * data starting at "buf"; it doesn't handle chained buffers.
 */
static int
dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
{
    uint16 txout;
    uint32 flags = 0;
    dmaaddr_t pa;     /* phys addr */

    txout = di->txout;

    /* return nonzero if out of tx descriptors */
    if (NEXTTXD(txout) == di->txin)
        goto outoftxd;

    if (len == 0)
        return 0;

    pa = DMA_MAP(di->osh, buf, len, DMA_TX, NULL, &di->txp_dmah[txout]);

    flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);

    if (txout == (di->ntxd - 1))
        flags |= D64_CTRL1_EOT;

    dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
    ASSERT(di->txp[txout] == NULL);

    /* save the buffer pointer - used by dma_getpos */
    di->txp[txout] = buf;

    txout = NEXTTXD(txout);
    /* bump the tx descriptor index */
    di->txout = txout;

    /* kick the chip */
    if (commit)
        W_REG(di->osh, &di->d64txregs->ptr, di->xmtptrbase + I2B(txout, dma64dd_t));

    /* tx flow control */
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    return (0);

outoftxd:
    DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __FUNCTION__));
    di->hnddma.txavail = 0;
    di->hnddma.txnobuf++;
    return (-1);
}
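/*
 * Usage sketch (hypothetical): push a raw buffer, one descriptor per call as
 * described above, and commit it in the same step. Unlike the packet-based TX
 * paths, the buffer is not freed on failure; the caller keeps ownership and
 * can retry once descriptors are reclaimed.
 */
#if 0
static int
example_send_unframed(dma_info_t *di, void *buf, uint len)
{
    if (dma64_txunframed(di, buf, len, TRUE) != 0)
        return -1;  /* ring full: di->hnddma.txavail is now 0 */
    return 0;
}
#endif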
/*
 * !! tx entry routine
 * WARNING: the caller must check the return value for error.
 *   the error (tossed frames) could be fatal and cause many subsequent hard-to-debug problems
 */
static int BCMFASTPATH
dma64_txfast(dma_info_t *di, void *p0, bool commit)
{
    void *p, *next;
    uchar *data;
    uint len;
    uint16 txout;
    uint32 flags = 0;
    dmaaddr_t pa;
    bool war;

    DMA_TRACE(("%s: dma_txfast\n", di->name));

    war = (di->hnddma.dmactrlflags & DMA_CTRL_DMA_AVOIDANCE_WAR) ? TRUE : FALSE;
    txout = di->txout;

    /*
     * Walk the chain of packet buffers
     * allocating and initializing transmit descriptor entries.
     */
    for (p = p0; p; p = next) {
        uint nsegs, j, segsadd;
        hnddma_seg_map_t *map = NULL;

        data = PKTDATA(di->osh, p);
        len = PKTLEN(di->osh, p);
#ifdef BCM_DMAPAD
        len += PKTDMAPAD(di->osh, p);
#endif /* BCM_DMAPAD */
        next = PKTNEXT(di->osh, p);

        /* return nonzero if out of tx descriptors */
        if (NEXTTXD(txout) == di->txin)
            goto outoftxd;

        if (len == 0)
            continue;

        /* get physical address of buffer start */
        if (DMASGLIST_ENAB)
            bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));

        pa = DMA_MAP(di->osh, data, len, DMA_TX, p, &di->txp_dmah[txout]);

        if (DMASGLIST_ENAB) {
            map = &di->txp_dmah[txout];

            /* See if all the segments can be accounted for */
            if (map->nsegs > (uint)(di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1))
                goto outoftxd;

            nsegs = map->nsegs;
        } else
            nsegs = 1;

        segsadd = 0;
        for (j = 1; j <= nsegs; j++) {
            flags = 0;
            if (p == p0 && j == 1)
                flags |= D64_CTRL1_SOF;

            /* With a DMA segment list, the descriptor table is filled
             * using the segment list instead of looping over
             * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
             * the end of the segment list is reached.
             */
            if ((!DMASGLIST_ENAB && next == NULL) ||
                (DMASGLIST_ENAB && j == nsegs))
                flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
            if (txout == (di->ntxd - 1))
                flags |= D64_CTRL1_EOT;

            if (DMASGLIST_ENAB) {
                len = map->segs[j - 1].length;
                pa = map->segs[j - 1].addr;
                if (len > 128 && war) {
                    uint remain, new_len, align64;
                    /* check for 64B alignment of pa */
                    align64 = (uint)(PHYSADDRLO(pa) & 0x3f);
                    align64 = (64 - align64) & 0x3f;
                    new_len = len - align64;
                    remain = new_len % 128;
                    if (remain > 0 && remain <= 4) {
                        uint32 buf_addr_lo;
                        uint32 tmp_flags =
                            flags & (~(D64_CTRL1_EOF | D64_CTRL1_IOC));
                        flags &= ~(D64_CTRL1_SOF | D64_CTRL1_EOT);
                        remain += 64;
                        dma64_dd_upd(di, di->txd64, pa, txout,
                                     &tmp_flags, len-remain);
                        ASSERT(di->txp[txout] == NULL);
                        txout = NEXTTXD(txout);
                        /* return nonzero if out of tx descriptors */
                        if (txout == di->txin) {
                            DMA_ERROR(("%s: dma_txfast: Out-of-DMA"
                                       " descriptors (txin %d txout %d"
                                       " nsegs %d)\n", __FUNCTION__,
                                       di->txin, di->txout, nsegs));
                            goto outoftxd;
                        }
                        if (txout == (di->ntxd - 1))
                            flags |= D64_CTRL1_EOT;
                        buf_addr_lo = PHYSADDRLO(pa);
                        PHYSADDRLOSET(pa, (PHYSADDRLO(pa) + (len-remain)));
                        if (PHYSADDRLO(pa) < buf_addr_lo) {
                            PHYSADDRHISET(pa, (PHYSADDRHI(pa) + 1));
                        }
                        len = remain;
                        segsadd++;
                        di->dma_avoidance_cnt++;
                    }
                }
            }
            dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
            ASSERT(di->txp[txout] == NULL);

            txout = NEXTTXD(txout);
            /* return nonzero if out of tx descriptors */
            if (txout == di->txin) {
                DMA_ERROR(("%s: dma_txfast: Out-of-DMA descriptors"
                           " (txin %d txout %d nsegs %d)\n", __FUNCTION__,
                           di->txin, di->txout, nsegs));
                goto outoftxd;
            }
        }
        if (segsadd && DMASGLIST_ENAB)
            map->nsegs += segsadd;

        /* See above. No need to loop over individual buffers */
        if (DMASGLIST_ENAB)
            break;
    }

    /* if last txd eof not set, fix it */
    if (!(flags & D64_CTRL1_EOF))
        W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
             BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));

    /* save the packet */
    di->txp[PREVTXD(txout)] = p0;

    /* bump the tx descriptor index */
    di->txout = txout;

    /* kick the chip */
    if (commit)
        W_REG(di->osh, &di->d64txregs->ptr, di->xmtptrbase + I2B(txout, dma64dd_t));

    /* tx flow control */
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    return (0);

outoftxd:
    DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
    PKTFREE(di->osh, p0, TRUE);
    di->hnddma.txavail = 0;
    di->hnddma.txnobuf++;
    return (-1);
}
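/*
 * Worked example of the DMA-avoidance split above, with made-up numbers: for
 * a segment with PHYSADDRLO(pa) ending in 0x10 and len = 518, align64 =
 * (64 - 0x10) & 0x3f = 48, new_len = 470, and 470 % 128 = 86, so no split
 * happens. If instead len = 436, new_len = 388 and 388 % 128 = 4, which falls
 * in the 1..4 trigger window: remain becomes 4 + 64 = 68 and the segment is
 * posted as two descriptors of 368 and 68 bytes, avoiding the problematic
 * short trailing burst.
 */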
/*
 * Reclaim the next completed txd (txds if using chained buffers) in the range
 * specified and return the associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transfered by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return the associated packet regardless of the value of hardware pointers.
 */
static void * BCMFASTPATH
dma64_getnexttxp(dma_info_t *di, txd_range_t range)
{
    uint16 start, end, i;
    uint16 active_desc;
    void *txp;

    DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
               (range == HNDDMA_RANGE_ALL) ? "all" :
               ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : "transfered")));

    if (di->ntxd == 0)
        return (NULL);

    txp = NULL;

    start = di->txin;
    if (range == HNDDMA_RANGE_ALL)
        end = di->txout;
    else {
        dma64regs_t *dregs = di->d64txregs;

        if (di->txin == di->xs0cd) {
            end = (uint16)(B2I(((R_REG(di->osh, &dregs->status0) & D64_XS0_CD_MASK) -
                                di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));
            di->xs0cd = end;
        } else
            end = di->xs0cd;

        if (range == HNDDMA_RANGE_TRANSFERED) {
            active_desc = (uint16)(R_REG(di->osh, &dregs->status1) & D64_XS1_AD_MASK);
            active_desc = (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
            active_desc = B2I(active_desc, dma64dd_t);
            if (end != active_desc)
                end = PREVTXD(active_desc);
        }
    }

    if ((start == 0) && (end > di->txout))
        goto bogus;

    for (i = start; i != end && !txp; i = NEXTTXD(i)) {
        dmaaddr_t pa;
        hnddma_seg_map_t *map = NULL;
        uint size, j, nsegs;

#if ((!defined(__mips__) && !defined(BCM47XX_CA9)) || defined(__NetBSD__))
        PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) - di->dataoffsetlow));
        PHYSADDRHISET(pa, (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) - di->dataoffsethigh));
#endif

        if (DMASGLIST_ENAB) {
            map = &di->txp_dmah[i];
            size = map->origsize;
            nsegs = map->nsegs;
            if (nsegs > (uint)NTXDACTIVE(i, end)) {
                break;
            }
        } else {
#if ((!defined(__mips__) && !defined(BCM47XX_CA9)) || defined(__NetBSD__))
            size = (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) & D64_CTRL2_BC_MASK);
#endif
            nsegs = 1;
        }

        for (j = nsegs; j > 0; j--) {
#if ((!defined(__mips__) && !defined(BCM47XX_CA9)) || defined(__NetBSD__))
            W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
            W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);
#endif

            txp = di->txp[i];
            di->txp[i] = NULL;
            if (j > 1)
                i = NEXTTXD(i);
        }

#if ((!defined(__mips__) && !defined(BCM47XX_CA9)) || defined(__NetBSD__))
        DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
#endif
    }

    di->txin = i;

    /* tx flow control */
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    return (txp);

bogus:
    DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
              start, end, di->txout, forceall));
    return (NULL);
}
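/*
 * Note on the index math above: the hardware status registers report byte
 * offsets relative to xmtptrbase, and B2I()/I2B() convert between byte
 * offsets and descriptor indices. A sketch of the byte-to-index direction for
 * dma64dd_t (four uint32 fields, 16 bytes per descriptor); illustrative only:
 */
#if 0
static uint16
example_b2i(uint32 byte_offset)
{
    return (uint16)(byte_offset / sizeof(dma64dd_t));  /* e.g. 0x60 -> index 6 */
}
#endif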
static void * BCMFASTPATH
dma64_getnextrxp(dma_info_t *di, bool forceall)
{
    uint16 i, curr;
    void *rxp;
#if ((!defined(__mips__) && !defined(BCM47XX_CA9)) || defined(__NetBSD__))
    dmaaddr_t pa;
#endif

    /* if forcing, dma engine must be disabled */
    ASSERT(!forceall || !dma64_rxenabled(di));

    i = di->rxin;

    /* return if no packets posted */
    if (i == di->rxout)
        return (NULL);

    if (di->rxin == di->rs0cd) {
        curr = (uint16)B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
                            di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);
        di->rs0cd = curr;
    } else
        curr = di->rs0cd;

    /* ignore curr if forceall */
    if (!forceall && (i == curr))
        return (NULL);

    /* get the packet pointer that corresponds to the rx descriptor */
    rxp = di->rxp[i];
    ASSERT(rxp);
    di->rxp[i] = NULL;

#if ((!defined(__mips__) && !defined(BCM47XX_CA9)) || defined(__NetBSD__))
    PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) - di->dataoffsetlow));
    PHYSADDRHISET(pa, (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) - di->dataoffsethigh));

    /* clear this packet from the descriptor ring */
    DMA_UNMAP(di->osh, pa,
              di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

    W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
    W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);
#endif /* ((!defined(__mips__) && !defined(BCM47XX_CA9)) || defined(__NetBSD__)) */

    di->rxin = NEXTRXD(i);

    return (rxp);
}
static bool
_dma64_addrext(osl_t *osh, dma64regs_t *dma64regs)
{
    uint32 w;

    OR_REG(osh, &dma64regs->control, D64_XC_AE);
    w = R_REG(osh, &dma64regs->control);
    AND_REG(osh, &dma64regs->control, ~D64_XC_AE);
    return ((w & D64_XC_AE) == D64_XC_AE);
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void
dma64_txrotate(dma_info_t *di)
{
    uint16 ad;
    uint nactive;
    uint rot;
    uint16 old, new;
    uint32 w;
    uint16 first, last;

    ASSERT(dma64_txsuspendedidle(di));

    nactive = _dma_txactive(di);
    ad = B2I((((R_REG(di->osh, &di->d64txregs->status1) & D64_XS1_AD_MASK)
               - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t);
    rot = TXD(ad - di->txin);

    ASSERT(rot < di->ntxd);

    /* full-ring case is a lot harder - don't worry about this */
    if (rot >= (di->ntxd - nactive)) {
        DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
        return;
    }

    first = di->txin;
    last = PREVTXD(di->txout);

    /* move entries starting at last and moving backwards to first */
    for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
        new = TXD(old + rot);

        /*
         * Move the tx dma descriptor.
         * EOT is set only in the last entry in the ring.
         */
        w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
        if (new == (di->ntxd - 1))
            w |= D64_CTRL1_EOT;
        W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));

        w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
        W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));

        W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
        W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));

        /* zap the old tx dma descriptor address field */
        W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
        W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));

        /* move the corresponding txp[] entry */
        ASSERT(di->txp[new] == NULL);
        di->txp[new] = di->txp[old];

        /* Move the segment map as well */
        if (DMASGLIST_ENAB) {
            bcopy(&di->txp_dmah[old], &di->txp_dmah[new], sizeof(hnddma_seg_map_t));
            bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
        }

        di->txp[old] = NULL;
    }

    /* update txin and txout */
    di->txin = ad;
    di->txout = TXD(di->txout + rot);
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    /* kick the chip */
    W_REG(di->osh, &di->d64txregs->ptr, di->xmtptrbase + I2B(di->txout, dma64dd_t));
}
uint
BCMATTACHFN(dma_addrwidth)(si_t *sih, void *dmaregs)
{
    dma32regs_t *dma32regs;
    osl_t *osh;

    osh = si_osh(sih);

    /* Perform 64-bit checks only if we want to advertise 64-bit (> 32bit) capability */
    /* DMA engine is 64-bit capable */
    if ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
        /* backplane is 64-bit capable */
        if (si_backplane64(sih))
            /* If bus is System Backplane or PCIE then we can access 64-bits */
            if ((BUSTYPE(sih->bustype) == SI_BUS) ||
                ((BUSTYPE(sih->bustype) == PCI_BUS) &&
                 ((sih->buscoretype == PCIE_CORE_ID) ||
                  (sih->buscoretype == PCIE2_CORE_ID))))
                return (DMADDRWIDTH_64);

        /* DMA64 is always 32-bit capable, AE is always TRUE */
        ASSERT(_dma64_addrext(osh, (dma64regs_t *)dmaregs));

        return (DMADDRWIDTH_32);
    }

    /* Start checking for 32-bit / 30-bit addressing */
    dma32regs = (dma32regs_t *)dmaregs;

    /* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
    if ((BUSTYPE(sih->bustype) == SI_BUS) ||
        ((BUSTYPE(sih->bustype) == PCI_BUS) &&
         ((sih->buscoretype == PCIE_CORE_ID) ||
          (sih->buscoretype == PCIE2_CORE_ID))) ||
        (_dma32_addrext(osh, dma32regs)))
        return (DMADDRWIDTH_32);

    /* Fallthru */
    return (DMADDRWIDTH_30);
}
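/*
 * Summary of the decision above: DMADDRWIDTH_64 requires a 64-bit DMA core,
 * a 64-bit backplane, and a bus that can carry 64-bit addresses (SI or PCIe);
 * a 64-bit core on a narrower path still reports DMADDRWIDTH_32, relying on
 * the always-present address-extension support; a 32-bit engine reports
 * DMADDRWIDTH_32 only on a capable bus or with addrext, and DMADDRWIDTH_30
 * otherwise.
 */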
static void
_dma_pktpool_set(dma_info_t *di, pktpool_t *pool)
{
    ASSERT(di);
    ASSERT(di->pktpool == NULL);
    di->pktpool = pool;
}

static bool
_dma_rxtx_error(dma_info_t *di, bool istx)
{
    uint32 status1 = 0;
    uint16 curr;

    if (DMA64_ENAB(di) && DMA64_MODE(di)) {
        if (istx) {
            status1 = R_REG(di->osh, &di->d64txregs->status1);

            if ((status1 & D64_XS1_XE_MASK) != D64_XS1_XE_NOERR)
                return TRUE;
            else if (si_coreid(di->sih) == GMAC_CORE_ID && si_corerev(di->sih) >= 4) {
                curr = (uint16)(B2I(((R_REG(di->osh, &di->d64txregs->status0) &
                                      D64_XS0_CD_MASK) - di->xmtptrbase) &
                                    D64_XS0_CD_MASK, dma64dd_t));

                if (NTXDACTIVE(di->txin, di->txout) != 0 &&
                    curr == di->xs0cd_snapshot) {
                    /* tx engine is stuck: consumer index has not moved */
                    return TRUE;
                }

                /* snapshot the consumer index for the next check */
                di->xs0cd_snapshot = di->xs0cd = curr;
            }
        } else {
            status1 = R_REG(di->osh, &di->d64rxregs->status1);

            if ((status1 & D64_RS1_RE_MASK) != D64_RS1_RE_NOERR)
                return TRUE;
        }
    } else if (DMA32_ENAB(di)) {
        return FALSE;
    }

    return FALSE;
}
static void
_dma_burstlen_set(dma_info_t *di, uint8 rxburstlen, uint8 txburstlen)
{
    di->rxburstlen = rxburstlen;
    di->txburstlen = txburstlen;
}

static void
_dma_param_set(dma_info_t *di, uint16 paramid, uint16 paramval)
{
    switch (paramid) {
    case HNDDMA_PID_TX_MULTI_OUTSTD_RD:
        di->txmultioutstdrd = (uint8)paramval;
        break;

    case HNDDMA_PID_TX_PREFETCH_CTL:
        di->txprefetchctl = (uint8)paramval;
        break;

    case HNDDMA_PID_TX_PREFETCH_THRESH:
        di->txprefetchthresh = (uint8)paramval;
        break;

    case HNDDMA_PID_TX_BURSTLEN:
        di->txburstlen = (uint8)paramval;
        break;

    case HNDDMA_PID_RX_PREFETCH_CTL:
        di->rxprefetchctl = (uint8)paramval;
        break;

    case HNDDMA_PID_RX_PREFETCH_THRESH:
        di->rxprefetchthresh = (uint8)paramval;
        break;

    case HNDDMA_PID_RX_BURSTLEN:
        di->rxburstlen = (uint8)paramval;
        break;

    default:
        break;
    }
}

static bool
_dma_glom_enable(dma_info_t *di, uint32 val)
{
    dma64regs_t *dregs = di->d64rxregs;
    bool ret = TRUE;

    if (val) {
        OR_REG(di->osh, &dregs->control, D64_RC_GE);
        if (!(R_REG(di->osh, &dregs->control) & D64_RC_GE))
            ret = FALSE;
    } else {
        AND_REG(di->osh, &dregs->control, ~D64_RC_GE);
    }
    return ret;
}
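/*
 * The read-back after OR_REG above acts as a capability probe: on cores
 * without RX glomming support the GE bit does not stick, so the function
 * reports failure instead of silently running without glom. The same
 * write/read-back probe pattern is used by _dma64_addrext() earlier in this
 * file to detect address-extension support.
 */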