staging: brcm80211: fix 'ERROR: "foo * bar" should be "foo *bar"'
[linux-2.6/libata-dev.git] drivers/staging/brcm80211/util/hnddma.c
blob 4b642d66239a7d3a6a039d2dad18e35a44da0aab
1 /*
2 * Copyright (c) 2010 Broadcom Corporation
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
17 #include <typedefs.h>
18 #include <bcmdefs.h>
19 #include <bcmdevs.h>
20 #include <osl.h>
21 #include <bcmendian.h>
22 #include <hndsoc.h>
23 #include <bcmutils.h>
24 #include <siutils.h>
26 #include <sbhnddma.h>
27 #include <hnddma.h>
29 /* debug/trace */
30 #ifdef BCMDBG
31 #define DMA_ERROR(args) if (!(*di->msg_level & 1)); else printf args
32 #define DMA_TRACE(args) if (!(*di->msg_level & 2)); else printf args
33 #else
34 #define DMA_ERROR(args)
35 #define DMA_TRACE(args)
36 #endif /* BCMDBG */
38 #define DMA_NONE(args)
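/*
 * Usage note (illustrative, based on how the macros are invoked below):
 * DMA_ERROR()/DMA_TRACE() take a double-parenthesised argument list, e.g.
 * DMA_TRACE(("%s: dma_rxinit\n", di->name)), so the whole printf argument
 * list travels through a single macro parameter.  The "if (!cond); else
 * printf args" form keeps the expansion safe inside an unbraced if/else in
 * the caller.
 */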
40 #define d32txregs dregs.d32_u.txregs_32
41 #define d32rxregs dregs.d32_u.rxregs_32
42 #define txd32 dregs.d32_u.txd_32
43 #define rxd32 dregs.d32_u.rxd_32
45 #define d64txregs dregs.d64_u.txregs_64
46 #define d64rxregs dregs.d64_u.rxregs_64
47 #define txd64 dregs.d64_u.txd_64
48 #define rxd64 dregs.d64_u.rxd_64
50 /* default dma message level (if input msg_level pointer is null in dma_attach()) */
51 static uint dma_msg_level = 0;
53 #define MAXNAMEL 8 /* 8 char names */
55 #define DI_INFO(dmah) ((dma_info_t *)dmah)
57 /* dma engine software state */
58 typedef struct dma_info {
59 struct hnddma_pub hnddma; /* exported structure, don't use hnddma_t,
60 * which could be const
61 */
62 uint *msg_level; /* message level pointer */
63 char name[MAXNAMEL]; /* callers name for diag msgs */
65 void *osh; /* os handle */
66 si_t *sih; /* sb handle */
68 bool dma64; /* this dma engine is operating in 64-bit mode */
69 bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
71 union {
72 struct {
73 dma32regs_t *txregs_32; /* 32-bit dma tx engine registers */
74 dma32regs_t *rxregs_32; /* 32-bit dma rx engine registers */
75 dma32dd_t *txd_32; /* pointer to dma32 tx descriptor ring */
76 dma32dd_t *rxd_32; /* pointer to dma32 rx descriptor ring */
77 } d32_u;
78 struct {
79 dma64regs_t *txregs_64; /* 64-bit dma tx engine registers */
80 dma64regs_t *rxregs_64; /* 64-bit dma rx engine registers */
81 dma64dd_t *txd_64; /* pointer to dma64 tx descriptor ring */
82 dma64dd_t *rxd_64; /* pointer to dma64 rx descriptor ring */
83 } d64_u;
84 } dregs;
86 uint16 dmadesc_align; /* alignment requirement for dma descriptors */
88 uint16 ntxd; /* # tx descriptors tunable */
89 uint16 txin; /* index of next descriptor to reclaim */
90 uint16 txout; /* index of next descriptor to post */
91 void **txp; /* pointer to parallel array of pointers to packets */
92 osldma_t *tx_dmah; /* DMA TX descriptor ring handle */
93 hnddma_seg_map_t *txp_dmah; /* DMA MAP meta-data handle */
94 dmaaddr_t txdpa; /* Aligned physical address of descriptor ring */
95 dmaaddr_t txdpaorig; /* Original physical address of descriptor ring */
96 uint16 txdalign; /* #bytes added to alloc'd mem to align txd */
97 uint32 txdalloc; /* #bytes allocated for the ring */
98 uint32 xmtptrbase; /* When using unaligned descriptors, the ptr register
99 * is not just an index, it needs all 13 bits to be
100 * an offset from the addr register.
101 */
103 uint16 nrxd; /* # rx descriptors tunable */
104 uint16 rxin; /* index of next descriptor to reclaim */
105 uint16 rxout; /* index of next descriptor to post */
106 void **rxp; /* pointer to parallel array of pointers to packets */
107 osldma_t *rx_dmah; /* DMA RX descriptor ring handle */
108 hnddma_seg_map_t *rxp_dmah; /* DMA MAP meta-data handle */
109 dmaaddr_t rxdpa; /* Aligned physical address of descriptor ring */
110 dmaaddr_t rxdpaorig; /* Original physical address of descriptor ring */
111 uint16 rxdalign; /* #bytes added to alloc'd mem to align rxd */
112 uint32 rxdalloc; /* #bytes allocated for the ring */
113 uint32 rcvptrbase; /* Base for ptr reg when using unaligned descriptors */
115 /* tunables */
116 uint16 rxbufsize; /* rx buffer size in bytes,
117 * not including the extra headroom
118 */
119 uint rxextrahdrroom; /* extra rx headroom, reserved to assist upper stack
120 * e.g. some rx pkt buffers will be bridged to tx side
121 * without byte copying. The extra headroom needs to be
122 * large enough to fit txheader needs.
123 * Some dongle drivers may not need it.
124 */
125 uint nrxpost; /* # rx buffers to keep posted */
126 uint rxoffset; /* rxcontrol offset */
127 uint ddoffsetlow; /* add to get dma address of descriptor ring, low 32 bits */
128 uint ddoffsethigh; /* high 32 bits */
129 uint dataoffsetlow; /* add to get dma address of data buffer, low 32 bits */
130 uint dataoffsethigh; /* high 32 bits */
131 bool aligndesc_4k; /* descriptor base need to be aligned or not */
132 } dma_info_t;
134 /*
135 * If BCMDMA32 is defined, hnddma will support both 32-bit and 64-bit DMA engines.
136 * Otherwise it will support only 64-bit.
138 * DMA32_ENAB indicates whether hnddma is compiled with support for 32-bit DMA engines.
139 * DMA64_ENAB indicates whether hnddma is compiled with support for 64-bit DMA engines.
141 * DMA64_MODE indicates whether the current DMA engine is running as 64-bit.
142 */
143 #ifdef BCMDMA32
144 #define DMA32_ENAB(di) 1
145 #define DMA64_ENAB(di) 1
146 #define DMA64_MODE(di) ((di)->dma64)
147 #else /* !BCMDMA32 */
148 #define DMA32_ENAB(di) 0
149 #define DMA64_ENAB(di) 1
150 #define DMA64_MODE(di) 1
151 #endif /* !BCMDMA32 */
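/*
 * Example of how the guards fold away: in the default (!BCMDMA32) build,
 * DMA32_ENAB(di) is the constant 0 and DMA64_MODE(di) is the constant 1, so
 * every "else if (DMA32_ENAB(di))" branch below is dead code the compiler can
 * drop; with BCMDMA32 defined both engines are compiled in and the choice is
 * made at run time from di->dma64.
 */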
153 /* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
154 #ifdef BCMDMASGLISTOSL
155 #define DMASGLIST_ENAB TRUE
156 #else
157 #define DMASGLIST_ENAB FALSE
158 #endif /* BCMDMASGLISTOSL */
160 /* descriptor bumping macros */
161 #define XXD(x, n) ((x) & ((n) - 1)) /* faster than %, but n must be power of 2 */
162 #define TXD(x) XXD((x), di->ntxd)
163 #define RXD(x) XXD((x), di->nrxd)
164 #define NEXTTXD(i) TXD((i) + 1)
165 #define PREVTXD(i) TXD((i) - 1)
166 #define NEXTRXD(i) RXD((i) + 1)
167 #define PREVRXD(i) RXD((i) - 1)
169 #define NTXDACTIVE(h, t) TXD((t) - (h))
170 #define NRXDACTIVE(h, t) RXD((t) - (h))
172 /* macros to convert between byte offsets and indexes */
173 #define B2I(bytes, type) ((bytes) / sizeof(type))
174 #define I2B(index, type) ((index) * sizeof(type))
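/*
 * Worked example (illustrative values): with di->ntxd == 64, TXD(63 + 1) == 0
 * and NTXDACTIVE(62, 1) == 3, i.e. the masks implement wrap-around ring
 * arithmetic without a modulo (hence the power-of-2 requirement).  B2I()/I2B()
 * convert between a byte offset into the descriptor table and a descriptor
 * index, e.g. I2B(4, dma64dd_t) is 4 * sizeof(dma64dd_t) bytes.
 */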
176 #define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */
177 #define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */
179 #define PCI64ADDR_HIGH 0x80000000 /* address[63] */
180 #define PCI64ADDR_HIGH_SHIFT 31 /* address[63] */
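/*
 * Address-extension example using the masks above (illustrative address): a
 * 32-bit PCI buffer address such as 0xC0001000 has bits [31:30] set, so the
 * low address written to the descriptor becomes 0x00001000 and ae == 0x3 is
 * carried in the AE field instead; see dma32_dd_upd()/dma64_dd_upd() below.
 */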
182 /* Common prototypes */
183 static bool _dma_isaddrext(dma_info_t *di);
184 static bool _dma_descriptor_align(dma_info_t *di);
185 static bool _dma_alloc(dma_info_t *di, uint direction);
186 static void _dma_detach(dma_info_t *di);
187 static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
188 static void _dma_rxinit(dma_info_t *di);
189 static void *_dma_rx(dma_info_t *di);
190 static bool _dma_rxfill(dma_info_t *di);
191 static void _dma_rxreclaim(dma_info_t *di);
192 static void _dma_rxenable(dma_info_t *di);
193 static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
194 static void _dma_rx_param_get(dma_info_t *di, uint16 *rxoffset,
195 uint16 *rxbufsize);
197 static void _dma_txblock(dma_info_t *di);
198 static void _dma_txunblock(dma_info_t *di);
199 static uint _dma_txactive(dma_info_t *di);
200 static uint _dma_rxactive(dma_info_t *di);
201 static uint _dma_txpending(dma_info_t *di);
202 static uint _dma_txcommitted(dma_info_t *di);
204 static void *_dma_peeknexttxp(dma_info_t *di);
205 static void *_dma_peeknextrxp(dma_info_t *di);
206 static uintptr _dma_getvar(dma_info_t *di, const char *name);
207 static void _dma_counterreset(dma_info_t *di);
208 static void _dma_fifoloopbackenable(dma_info_t *di);
209 static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
210 static uint8 dma_align_sizetobits(uint size);
211 static void *dma_ringalloc(osl_t *osh, uint32 boundary, uint size,
212 uint16 *alignbits, uint *alloced,
213 dmaaddr_t *descpa, osldma_t **dmah);
215 /* Prototypes for 32-bit routines */
216 static bool dma32_alloc(dma_info_t *di, uint direction);
217 static bool dma32_txreset(dma_info_t *di);
218 static bool dma32_rxreset(dma_info_t *di);
219 static bool dma32_txsuspendedidle(dma_info_t *di);
220 static int dma32_txfast(dma_info_t *di, void *p0, bool commit);
221 static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range);
222 static void *dma32_getnextrxp(dma_info_t *di, bool forceall);
223 static void dma32_txrotate(dma_info_t *di);
224 static bool dma32_rxidle(dma_info_t *di);
225 static void dma32_txinit(dma_info_t *di);
226 static bool dma32_txenabled(dma_info_t *di);
227 static void dma32_txsuspend(dma_info_t *di);
228 static void dma32_txresume(dma_info_t *di);
229 static bool dma32_txsuspended(dma_info_t *di);
230 static void dma32_txreclaim(dma_info_t *di, txd_range_t range);
231 static bool dma32_txstopped(dma_info_t *di);
232 static bool dma32_rxstopped(dma_info_t *di);
233 static bool dma32_rxenabled(dma_info_t *di);
235 static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs);
237 /* Prototypes for 64-bit routines */
238 static bool dma64_alloc(dma_info_t *di, uint direction);
239 static bool dma64_txreset(dma_info_t *di);
240 static bool dma64_rxreset(dma_info_t *di);
241 static bool dma64_txsuspendedidle(dma_info_t *di);
242 static int dma64_txfast(dma_info_t *di, void *p0, bool commit);
243 static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
244 static void *dma64_getpos(dma_info_t *di, bool direction);
245 static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
246 static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
247 static void dma64_txrotate(dma_info_t *di);
249 static bool dma64_rxidle(dma_info_t *di);
250 static void dma64_txinit(dma_info_t *di);
251 static bool dma64_txenabled(dma_info_t *di);
252 static void dma64_txsuspend(dma_info_t *di);
253 static void dma64_txresume(dma_info_t *di);
254 static bool dma64_txsuspended(dma_info_t *di);
255 static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
256 static bool dma64_txstopped(dma_info_t *di);
257 static bool dma64_rxstopped(dma_info_t *di);
258 static bool dma64_rxenabled(dma_info_t *di);
259 static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs);
261 STATIC INLINE uint32 parity32(uint32 data);
263 const di_fcn_t dma64proc = {
264 (di_detach_t) _dma_detach,
265 (di_txinit_t) dma64_txinit,
266 (di_txreset_t) dma64_txreset,
267 (di_txenabled_t) dma64_txenabled,
268 (di_txsuspend_t) dma64_txsuspend,
269 (di_txresume_t) dma64_txresume,
270 (di_txsuspended_t) dma64_txsuspended,
271 (di_txsuspendedidle_t) dma64_txsuspendedidle,
272 (di_txfast_t) dma64_txfast,
273 (di_txunframed_t) dma64_txunframed,
274 (di_getpos_t) dma64_getpos,
275 (di_txstopped_t) dma64_txstopped,
276 (di_txreclaim_t) dma64_txreclaim,
277 (di_getnexttxp_t) dma64_getnexttxp,
278 (di_peeknexttxp_t) _dma_peeknexttxp,
279 (di_txblock_t) _dma_txblock,
280 (di_txunblock_t) _dma_txunblock,
281 (di_txactive_t) _dma_txactive,
282 (di_txrotate_t) dma64_txrotate,
284 (di_rxinit_t) _dma_rxinit,
285 (di_rxreset_t) dma64_rxreset,
286 (di_rxidle_t) dma64_rxidle,
287 (di_rxstopped_t) dma64_rxstopped,
288 (di_rxenable_t) _dma_rxenable,
289 (di_rxenabled_t) dma64_rxenabled,
290 (di_rx_t) _dma_rx,
291 (di_rxfill_t) _dma_rxfill,
292 (di_rxreclaim_t) _dma_rxreclaim,
293 (di_getnextrxp_t) _dma_getnextrxp,
294 (di_peeknextrxp_t) _dma_peeknextrxp,
295 (di_rxparam_get_t) _dma_rx_param_get,
297 (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
298 (di_getvar_t) _dma_getvar,
299 (di_counterreset_t) _dma_counterreset,
300 (di_ctrlflags_t) _dma_ctrlflags,
301 NULL,
302 NULL,
303 NULL,
304 (di_rxactive_t) _dma_rxactive,
305 (di_txpending_t) _dma_txpending,
306 (di_txcommitted_t) _dma_txcommitted,
310 static const di_fcn_t dma32proc = {
311 (di_detach_t) _dma_detach,
312 (di_txinit_t) dma32_txinit,
313 (di_txreset_t) dma32_txreset,
314 (di_txenabled_t) dma32_txenabled,
315 (di_txsuspend_t) dma32_txsuspend,
316 (di_txresume_t) dma32_txresume,
317 (di_txsuspended_t) dma32_txsuspended,
318 (di_txsuspendedidle_t) dma32_txsuspendedidle,
319 (di_txfast_t) dma32_txfast,
320 NULL,
321 NULL,
322 (di_txstopped_t) dma32_txstopped,
323 (di_txreclaim_t) dma32_txreclaim,
324 (di_getnexttxp_t) dma32_getnexttxp,
325 (di_peeknexttxp_t) _dma_peeknexttxp,
326 (di_txblock_t) _dma_txblock,
327 (di_txunblock_t) _dma_txunblock,
328 (di_txactive_t) _dma_txactive,
329 (di_txrotate_t) dma32_txrotate,
331 (di_rxinit_t) _dma_rxinit,
332 (di_rxreset_t) dma32_rxreset,
333 (di_rxidle_t) dma32_rxidle,
334 (di_rxstopped_t) dma32_rxstopped,
335 (di_rxenable_t) _dma_rxenable,
336 (di_rxenabled_t) dma32_rxenabled,
337 (di_rx_t) _dma_rx,
338 (di_rxfill_t) _dma_rxfill,
339 (di_rxreclaim_t) _dma_rxreclaim,
340 (di_getnextrxp_t) _dma_getnextrxp,
341 (di_peeknextrxp_t) _dma_peeknextrxp,
342 (di_rxparam_get_t) _dma_rx_param_get,
344 (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
345 (di_getvar_t) _dma_getvar,
346 (di_counterreset_t) _dma_counterreset,
347 (di_ctrlflags_t) _dma_ctrlflags,
348 NULL,
349 NULL,
350 NULL,
351 (di_rxactive_t) _dma_rxactive,
352 (di_txpending_t) _dma_txpending,
353 (di_txcommitted_t) _dma_txcommitted,
357 hnddma_t *dma_attach(osl_t *osh, char *name, si_t *sih, void *dmaregstx,
358 void *dmaregsrx, uint ntxd, uint nrxd, uint rxbufsize,
359 int rxextheadroom, uint nrxpost, uint rxoffset,
360 uint *msg_level)
362 dma_info_t *di;
363 uint size;
365 /* allocate private info structure */
366 if ((di = MALLOC(osh, sizeof(dma_info_t))) == NULL) {
367 #ifdef BCMDBG
368 printf("dma_attach: out of memory, malloced %d bytes\n",
369 MALLOCED(osh));
370 #endif
371 return (NULL);
374 bzero((char *)di, sizeof(dma_info_t));
376 di->msg_level = msg_level ? msg_level : &dma_msg_level;
378 /* old chips w/o sb are no longer supported */
379 ASSERT(sih != NULL);
381 if (DMA64_ENAB(di))
382 di->dma64 =
383 ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
384 else
385 di->dma64 = 0;
387 /* check arguments */
388 ASSERT(ISPOWEROF2(ntxd));
389 ASSERT(ISPOWEROF2(nrxd));
391 if (nrxd == 0)
392 ASSERT(dmaregsrx == NULL);
393 if (ntxd == 0)
394 ASSERT(dmaregstx == NULL);
396 /* init dma reg pointer */
397 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
398 ASSERT(ntxd <= D64MAXDD);
399 ASSERT(nrxd <= D64MAXDD);
400 di->d64txregs = (dma64regs_t *) dmaregstx;
401 di->d64rxregs = (dma64regs_t *) dmaregsrx;
402 di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;
403 } else if (DMA32_ENAB(di)) {
404 ASSERT(ntxd <= D32MAXDD);
405 ASSERT(nrxd <= D32MAXDD);
406 di->d32txregs = (dma32regs_t *) dmaregstx;
407 di->d32rxregs = (dma32regs_t *) dmaregsrx;
408 di->hnddma.di_fn = (const di_fcn_t *)&dma32proc;
409 } else {
410 DMA_ERROR(("dma_attach: driver doesn't support 32-bit DMA\n"));
411 ASSERT(0);
412 goto fail;
415 /* Default flags (which can be changed by the driver calling dma_ctrlflags
416 * before enable): For backwards compatibility both Rx Overflow Continue
417 * and Parity are DISABLED.
418 * supports it.
419 */
420 di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN,
421 0);
423 DMA_TRACE(("%s: dma_attach: %s osh %p flags 0x%x ntxd %d nrxd %d rxbufsize %d " "rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n", name, (DMA64_MODE(di) ? "DMA64" : "DMA32"), osh, di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize, rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
425 /* make a private copy of our callers name */
426 strncpy(di->name, name, MAXNAMEL);
427 di->name[MAXNAMEL - 1] = '\0';
429 di->osh = osh;
430 di->sih = sih;
432 /* save tunables */
433 di->ntxd = (uint16) ntxd;
434 di->nrxd = (uint16) nrxd;
436 /* the actual dma size doesn't include the extra headroom */
437 di->rxextrahdrroom =
438 (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
439 if (rxbufsize > BCMEXTRAHDROOM)
440 di->rxbufsize = (uint16) (rxbufsize - di->rxextrahdrroom);
441 else
442 di->rxbufsize = (uint16) rxbufsize;
444 di->nrxpost = (uint16) nrxpost;
445 di->rxoffset = (uint8) rxoffset;
447 /*
448 * figure out the DMA physical address offset for dd and data
449 * PCI/PCIE: they map silicon backplane address to zero based memory, need offset
450 * Other bus: use zero
451 * SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
452 */
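/*
 * Example of the offsets set up below (sketch of the PCI case): with a 32-bit
 * engine on PCI, ddoffsetlow becomes SI_PCI_DMA, so a host physical address
 * pa is written into descriptors/registers as pa + SI_PCI_DMA; on PCIe with a
 * 64-bit engine only the high 32 bits are offset (SI_PCIE_DMA_H32).  Non-PCI
 * buses leave both offsets at zero.
 */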
453 di->ddoffsetlow = 0;
454 di->dataoffsetlow = 0;
455 /* for pci bus, add offset */
456 if (sih->bustype == PCI_BUS) {
457 if ((sih->buscoretype == PCIE_CORE_ID) && DMA64_MODE(di)) {
458 /* pcie with DMA64 */
459 di->ddoffsetlow = 0;
460 di->ddoffsethigh = SI_PCIE_DMA_H32;
461 } else {
462 /* pci(DMA32/DMA64) or pcie with DMA32 */
463 di->ddoffsetlow = SI_PCI_DMA;
464 di->ddoffsethigh = 0;
466 di->dataoffsetlow = di->ddoffsetlow;
467 di->dataoffsethigh = di->ddoffsethigh;
469 #if defined(__mips__) && defined(IL_BIGENDIAN)
470 di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
471 #endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
472 /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
473 if ((si_coreid(sih) == SDIOD_CORE_ID)
474 && ((si_corerev(sih) > 0) && (si_corerev(sih) <= 2)))
475 di->addrext = 0;
476 else if ((si_coreid(sih) == I2S_CORE_ID) &&
477 ((si_corerev(sih) == 0) || (si_corerev(sih) == 1)))
478 di->addrext = 0;
479 else
480 di->addrext = _dma_isaddrext(di);
482 /* do the descriptors need to be aligned, and if so, on 4K/8K or not */
483 di->aligndesc_4k = _dma_descriptor_align(di);
484 if (di->aligndesc_4k) {
485 if (DMA64_MODE(di)) {
486 di->dmadesc_align = D64RINGALIGN_BITS;
487 if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
488 /* for smaller dd table, HW relax the alignment requirement */
489 di->dmadesc_align = D64RINGALIGN_BITS - 1;
491 } else
492 di->dmadesc_align = D32RINGALIGN_BITS;
493 } else
494 di->dmadesc_align = 4; /* 16 byte alignment */
496 DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
497 di->aligndesc_4k, di->dmadesc_align));
499 /* allocate tx packet pointer vector */
500 if (ntxd) {
501 size = ntxd * sizeof(void *);
502 if ((di->txp = MALLOC(osh, size)) == NULL) {
503 DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n", di->name, MALLOCED(osh)));
504 goto fail;
506 bzero((char *)di->txp, size);
509 /* allocate rx packet pointer vector */
510 if (nrxd) {
511 size = nrxd * sizeof(void *);
512 if ((di->rxp = MALLOC(osh, size)) == NULL) {
513 DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n", di->name, MALLOCED(osh)));
514 goto fail;
516 bzero((char *)di->rxp, size);
519 /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
520 if (ntxd) {
521 if (!_dma_alloc(di, DMA_TX))
522 goto fail;
525 /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
526 if (nrxd) {
527 if (!_dma_alloc(di, DMA_RX))
528 goto fail;
531 if ((di->ddoffsetlow != 0) && !di->addrext) {
532 if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
533 DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n", di->name, (uint32) PHYSADDRLO(di->txdpa)));
534 goto fail;
536 if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
537 DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n", di->name, (uint32) PHYSADDRLO(di->rxdpa)));
538 goto fail;
542 DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh, di->addrext));
544 /* allocate DMA mapping vectors */
545 if (DMASGLIST_ENAB) {
546 if (ntxd) {
547 size = ntxd * sizeof(hnddma_seg_map_t);
548 if ((di->txp_dmah =
549 (hnddma_seg_map_t *) MALLOC(osh, size)) == NULL)
550 goto fail;
551 bzero((char *)di->txp_dmah, size);
554 if (nrxd) {
555 size = nrxd * sizeof(hnddma_seg_map_t);
556 if ((di->rxp_dmah =
557 (hnddma_seg_map_t *) MALLOC(osh, size)) == NULL)
558 goto fail;
559 bzero((char *)di->rxp_dmah, size);
563 return ((hnddma_t *) di);
565 fail:
566 _dma_detach(di);
567 return (NULL);
570 /* init the tx or rx descriptor */
571 static INLINE void
572 dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, dmaaddr_t pa, uint outidx,
573 uint32 *flags, uint32 bufcount)
575 /* dma32 uses 32-bit control to fit both flags and bufcounter */
576 *flags = *flags | (bufcount & CTRL_BC_MASK);
578 if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
579 W_SM(&ddring[outidx].addr,
580 BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
581 W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
582 } else {
583 /* address extension */
584 uint32 ae;
585 ASSERT(di->addrext);
586 ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
587 PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
589 *flags |= (ae << CTRL_AE_SHIFT);
590 W_SM(&ddring[outidx].addr,
591 BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
592 W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
596 /* Check for odd number of 1's */
597 STATIC INLINE uint32 parity32(uint32 data)
599 data ^= data >> 16;
600 data ^= data >> 8;
601 data ^= data >> 4;
602 data ^= data >> 2;
603 data ^= data >> 1;
605 return (data & 1);
608 #define DMA64_DD_PARITY(dd) parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
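/*
 * Example: parity32() folds the word down to one bit, e.g. parity32(0x3) == 0
 * (two bits set) and parity32(0x7) == 1 (three bits set).  DMA64_DD_PARITY()
 * XORs the four descriptor words first, so setting D64_CTRL2_PARITY when the
 * result is 1 leaves the descriptor as a whole with even parity.
 */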
610 static INLINE void
611 dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
612 uint32 *flags, uint32 bufcount)
614 uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;
616 /* PCI bus with big(>1G) physical address, use address extension */
617 #if defined(__mips__) && defined(IL_BIGENDIAN)
618 if ((di->dataoffsetlow == SI_SDRAM_SWAPPED)
619 || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
620 #else
621 if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
622 #endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
623 ASSERT((PHYSADDRHI(pa) & PCI64ADDR_HIGH) == 0);
625 W_SM(&ddring[outidx].addrlow,
626 BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
627 W_SM(&ddring[outidx].addrhigh,
628 BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
629 W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
630 W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
631 } else {
632 /* address extension for 32-bit PCI */
633 uint32 ae;
634 ASSERT(di->addrext);
636 ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
637 PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
638 ASSERT(PHYSADDRHI(pa) == 0);
640 ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
641 W_SM(&ddring[outidx].addrlow,
642 BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
643 W_SM(&ddring[outidx].addrhigh,
644 BUS_SWAP32(0 + di->dataoffsethigh));
645 W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
646 W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
648 if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) {
649 if (DMA64_DD_PARITY(&ddring[outidx])) {
650 W_SM(&ddring[outidx].ctrl2,
651 BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
656 static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs)
658 uint32 w;
660 OR_REG(osh, &dma32regs->control, XC_AE);
661 w = R_REG(osh, &dma32regs->control);
662 AND_REG(osh, &dma32regs->control, ~XC_AE);
663 return ((w & XC_AE) == XC_AE);
666 static bool _dma_alloc(dma_info_t *di, uint direction)
668 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
669 return dma64_alloc(di, direction);
670 } else if (DMA32_ENAB(di)) {
671 return dma32_alloc(di, direction);
672 } else
673 ASSERT(0);
676 /* !! may be called with core in reset */
677 static void _dma_detach(dma_info_t *di)
680 DMA_TRACE(("%s: dma_detach\n", di->name));
682 /* shouldn't be here if descriptors are unreclaimed */
683 ASSERT(di->txin == di->txout);
684 ASSERT(di->rxin == di->rxout);
686 /* free dma descriptor rings */
687 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
688 if (di->txd64)
689 DMA_FREE_CONSISTENT(di->osh,
690 ((int8 *) (uintptr) di->txd64 -
691 di->txdalign), di->txdalloc,
692 (di->txdpaorig), &di->tx_dmah);
693 if (di->rxd64)
694 DMA_FREE_CONSISTENT(di->osh,
695 ((int8 *) (uintptr) di->rxd64 -
696 di->rxdalign), di->rxdalloc,
697 (di->rxdpaorig), &di->rx_dmah);
698 } else if (DMA32_ENAB(di)) {
699 if (di->txd32)
700 DMA_FREE_CONSISTENT(di->osh,
701 ((int8 *) (uintptr) di->txd32 -
702 di->txdalign), di->txdalloc,
703 (di->txdpaorig), &di->tx_dmah);
704 if (di->rxd32)
705 DMA_FREE_CONSISTENT(di->osh,
706 ((int8 *) (uintptr) di->rxd32 -
707 di->rxdalign), di->rxdalloc,
708 (di->rxdpaorig), &di->rx_dmah);
709 } else
710 ASSERT(0);
712 /* free packet pointer vectors */
713 if (di->txp)
714 MFREE(di->osh, (void *)di->txp, (di->ntxd * sizeof(void *)));
715 if (di->rxp)
716 MFREE(di->osh, (void *)di->rxp, (di->nrxd * sizeof(void *)));
718 /* free tx packet DMA handles */
719 if (di->txp_dmah)
720 MFREE(di->osh, (void *)di->txp_dmah,
721 di->ntxd * sizeof(hnddma_seg_map_t));
723 /* free rx packet DMA handles */
724 if (di->rxp_dmah)
725 MFREE(di->osh, (void *)di->rxp_dmah,
726 di->nrxd * sizeof(hnddma_seg_map_t));
728 /* free our private info structure */
729 MFREE(di->osh, (void *)di, sizeof(dma_info_t));
733 static bool _dma_descriptor_align(dma_info_t *di)
735 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
736 uint32 addrl;
738 /* Check to see if the descriptors need to be aligned on 4K/8K or not */
739 if (di->d64txregs != NULL) {
740 W_REG(di->osh, &di->d64txregs->addrlow, 0xff0);
741 addrl = R_REG(di->osh, &di->d64txregs->addrlow);
742 if (addrl != 0)
743 return FALSE;
744 } else if (di->d64rxregs != NULL) {
745 W_REG(di->osh, &di->d64rxregs->addrlow, 0xff0);
746 addrl = R_REG(di->osh, &di->d64rxregs->addrlow);
747 if (addrl != 0)
748 return FALSE;
751 return TRUE;
754 /* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
755 static bool _dma_isaddrext(dma_info_t *di)
757 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
758 /* DMA64 supports full 32- or 64-bit operation. AE is always valid */
760 /* not all tx or rx channel are available */
761 if (di->d64txregs != NULL) {
762 if (!_dma64_addrext(di->osh, di->d64txregs)) {
763 DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n", di->name));
764 ASSERT(0);
766 return TRUE;
767 } else if (di->d64rxregs != NULL) {
768 if (!_dma64_addrext(di->osh, di->d64rxregs)) {
769 DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n", di->name));
770 ASSERT(0);
772 return TRUE;
774 return FALSE;
775 } else if (DMA32_ENAB(di)) {
776 if (di->d32txregs)
777 return (_dma32_addrext(di->osh, di->d32txregs));
778 else if (di->d32rxregs)
779 return (_dma32_addrext(di->osh, di->d32rxregs));
780 } else
781 ASSERT(0);
783 return FALSE;
786 /* initialize descriptor table base address */
787 static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
789 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
790 if (!di->aligndesc_4k) {
791 if (direction == DMA_TX)
792 di->xmtptrbase = PHYSADDRLO(pa);
793 else
794 di->rcvptrbase = PHYSADDRLO(pa);
797 if ((di->ddoffsetlow == 0)
798 || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
799 if (direction == DMA_TX) {
800 W_REG(di->osh, &di->d64txregs->addrlow,
801 (PHYSADDRLO(pa) + di->ddoffsetlow));
802 W_REG(di->osh, &di->d64txregs->addrhigh,
803 (PHYSADDRHI(pa) + di->ddoffsethigh));
804 } else {
805 W_REG(di->osh, &di->d64rxregs->addrlow,
806 (PHYSADDRLO(pa) + di->ddoffsetlow));
807 W_REG(di->osh, &di->d64rxregs->addrhigh,
808 (PHYSADDRHI(pa) + di->ddoffsethigh));
810 } else {
811 /* DMA64 32bits address extension */
812 uint32 ae;
813 ASSERT(di->addrext);
814 ASSERT(PHYSADDRHI(pa) == 0);
816 /* shift the high bit(s) from pa to ae */
817 ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
818 PCI32ADDR_HIGH_SHIFT;
819 PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
821 if (direction == DMA_TX) {
822 W_REG(di->osh, &di->d64txregs->addrlow,
823 (PHYSADDRLO(pa) + di->ddoffsetlow));
824 W_REG(di->osh, &di->d64txregs->addrhigh,
825 di->ddoffsethigh);
826 SET_REG(di->osh, &di->d64txregs->control,
827 D64_XC_AE, (ae << D64_XC_AE_SHIFT));
828 } else {
829 W_REG(di->osh, &di->d64rxregs->addrlow,
830 (PHYSADDRLO(pa) + di->ddoffsetlow));
831 W_REG(di->osh, &di->d64rxregs->addrhigh,
832 di->ddoffsethigh);
833 SET_REG(di->osh, &di->d64rxregs->control,
834 D64_RC_AE, (ae << D64_RC_AE_SHIFT));
838 } else if (DMA32_ENAB(di)) {
839 ASSERT(PHYSADDRHI(pa) == 0);
840 if ((di->ddoffsetlow == 0)
841 || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
842 if (direction == DMA_TX)
843 W_REG(di->osh, &di->d32txregs->addr,
844 (PHYSADDRLO(pa) + di->ddoffsetlow));
845 else
846 W_REG(di->osh, &di->d32rxregs->addr,
847 (PHYSADDRLO(pa) + di->ddoffsetlow));
848 } else {
849 /* dma32 address extension */
850 uint32 ae;
851 ASSERT(di->addrext);
853 /* shift the high bit(s) from pa to ae */
854 ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
855 PCI32ADDR_HIGH_SHIFT;
856 PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
858 if (direction == DMA_TX) {
859 W_REG(di->osh, &di->d32txregs->addr,
860 (PHYSADDRLO(pa) + di->ddoffsetlow));
861 SET_REG(di->osh, &di->d32txregs->control, XC_AE,
862 ae << XC_AE_SHIFT);
863 } else {
864 W_REG(di->osh, &di->d32rxregs->addr,
865 (PHYSADDRLO(pa) + di->ddoffsetlow));
866 SET_REG(di->osh, &di->d32rxregs->control, RC_AE,
867 ae << RC_AE_SHIFT);
870 } else
871 ASSERT(0);
874 static void _dma_fifoloopbackenable(dma_info_t *di)
876 DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
878 if (DMA64_ENAB(di) && DMA64_MODE(di))
879 OR_REG(di->osh, &di->d64txregs->control, D64_XC_LE);
880 else if (DMA32_ENAB(di))
881 OR_REG(di->osh, &di->d32txregs->control, XC_LE);
882 else
883 ASSERT(0);
886 static void _dma_rxinit(dma_info_t *di)
888 DMA_TRACE(("%s: dma_rxinit\n", di->name));
890 if (di->nrxd == 0)
891 return;
893 di->rxin = di->rxout = 0;
895 /* clear rx descriptor ring */
896 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
897 BZERO_SM((void *)(uintptr) di->rxd64,
898 (di->nrxd * sizeof(dma64dd_t)));
900 /* DMA engine without alignment requirement requires table to be inited
901 * before enabling the engine
902 */
903 if (!di->aligndesc_4k)
904 _dma_ddtable_init(di, DMA_RX, di->rxdpa);
906 _dma_rxenable(di);
908 if (di->aligndesc_4k)
909 _dma_ddtable_init(di, DMA_RX, di->rxdpa);
910 } else if (DMA32_ENAB(di)) {
911 BZERO_SM((void *)(uintptr) di->rxd32,
912 (di->nrxd * sizeof(dma32dd_t)));
913 _dma_rxenable(di);
914 _dma_ddtable_init(di, DMA_RX, di->rxdpa);
915 } else
916 ASSERT(0);
919 static void _dma_rxenable(dma_info_t *di)
921 uint dmactrlflags = di->hnddma.dmactrlflags;
923 DMA_TRACE(("%s: dma_rxenable\n", di->name));
925 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
926 uint32 control =
927 (R_REG(di->osh, &di->d64rxregs->control) & D64_RC_AE) |
928 D64_RC_RE;
930 if ((dmactrlflags & DMA_CTRL_PEN) == 0)
931 control |= D64_RC_PD;
933 if (dmactrlflags & DMA_CTRL_ROC)
934 control |= D64_RC_OC;
936 W_REG(di->osh, &di->d64rxregs->control,
937 ((di->rxoffset << D64_RC_RO_SHIFT) | control));
938 } else if (DMA32_ENAB(di)) {
939 uint32 control =
940 (R_REG(di->osh, &di->d32rxregs->control) & RC_AE) | RC_RE;
942 if ((dmactrlflags & DMA_CTRL_PEN) == 0)
943 control |= RC_PD;
945 if (dmactrlflags & DMA_CTRL_ROC)
946 control |= RC_OC;
948 W_REG(di->osh, &di->d32rxregs->control,
949 ((di->rxoffset << RC_RO_SHIFT) | control));
950 } else
951 ASSERT(0);
954 static void
955 _dma_rx_param_get(dma_info_t *di, uint16 *rxoffset, uint16 *rxbufsize)
957 /* the normal values fit into 16 bits */
958 *rxoffset = (uint16) di->rxoffset;
959 *rxbufsize = (uint16) di->rxbufsize;
962 /* !! rx entry routine
963 * returns a pointer to the next frame received, or NULL if there are no more
964 * if DMA_CTRL_RXMULTI is defined, DMA scattering(multiple buffers) is supported
965 * with pkts chain
966 * otherwise, it's treated as giant pkt and will be tossed.
967 * The DMA scattering starts with normal DMA header, followed by first buffer data.
968 * After it reaches the max size of buffer, the data continues in next DMA descriptor
969 buffer WITHOUT DMA header
970 */
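/*
 * Example of the length math in _dma_rx() (illustrative numbers): with
 * rxbufsize 2048, rxoffset 30 and a hardware-reported frame length of 5000,
 * the head buffer is trimmed to min(30 + 5000, 2048) = 2048 bytes and
 * resid = 5000 - (2048 - 30) = 2982, so two more 2048-byte buffers are
 * chained onto the head before resid drops to <= 0.
 */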
971 static void *BCMFASTPATH _dma_rx(dma_info_t *di)
973 void *p, *head, *tail;
974 uint len;
975 uint pkt_len;
976 int resid = 0;
978 next_frame:
979 head = _dma_getnextrxp(di, FALSE);
980 if (head == NULL)
981 return (NULL);
983 len = ltoh16(*(uint16 *) (PKTDATA(head)));
984 DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
986 #if defined(__mips__)
987 if (!len) {
988 while (!(len = *(uint16 *) OSL_UNCACHED(PKTDATA(head))))
989 OSL_DELAY(1);
991 *(uint16 *) PKTDATA(head) = htol16((uint16) len);
993 #endif /* defined(__mips__) */
995 /* set actual length */
996 pkt_len = MIN((di->rxoffset + len), di->rxbufsize);
997 PKTSETLEN(head, pkt_len);
998 resid = len - (di->rxbufsize - di->rxoffset);
1000 /* check for single or multi-buffer rx */
1001 if (resid > 0) {
1002 tail = head;
1003 while ((resid > 0) && (p = _dma_getnextrxp(di, FALSE))) {
1004 PKTSETNEXT(tail, p);
1005 pkt_len = MIN(resid, (int)di->rxbufsize);
1006 PKTSETLEN(p, pkt_len);
1008 tail = p;
1009 resid -= di->rxbufsize;
1012 #ifdef BCMDBG
1013 if (resid > 0) {
1014 uint cur;
1015 ASSERT(p == NULL);
1016 cur = (DMA64_ENAB(di) && DMA64_MODE(di)) ?
1017 B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
1018 D64_RS0_CD_MASK) -
1019 di->rcvptrbase) & D64_RS0_CD_MASK,
1020 dma64dd_t) : B2I(R_REG(di->osh,
1021 &di->d32rxregs->
1022 status) & RS_CD_MASK,
1023 dma32dd_t);
1024 DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
1025 di->rxin, di->rxout, cur));
1027 #endif /* BCMDBG */
1029 if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
1030 DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
1031 di->name, len));
1032 PKTFREE(di->osh, head, FALSE);
1033 di->hnddma.rxgiants++;
1034 goto next_frame;
1038 return (head);
1041 /* post receive buffers
1042 * returns TRUE if refill failed completely and the ring is empty;
1043 * this will stall the rx dma and user might want to call rxfill again asap
1044 * This unlikely happens on memory-rich NIC, but often on memory-constrained dongle
1045 */
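/*
 * Example: with nrxpost 32 and NRXDACTIVE(rxin, rxout) == 20, _dma_rxfill()
 * below tries to allocate and post n = 12 fresh receive buffers, then writes
 * the updated rxout back to the chip's lastdscr pointer.
 */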
1046 static bool BCMFASTPATH _dma_rxfill(dma_info_t *di)
1048 void *p;
1049 uint16 rxin, rxout;
1050 uint32 flags = 0;
1051 uint n;
1052 uint i;
1053 dmaaddr_t pa;
1054 uint extra_offset = 0;
1055 bool ring_empty;
1057 ring_empty = FALSE;
1059 /*
1060 * Determine how many receive buffers we're lacking
1061 * from the full complement, allocate, initialize,
1062 * and post them, then update the chip rx lastdscr.
1063 */
1065 rxin = di->rxin;
1066 rxout = di->rxout;
1068 n = di->nrxpost - NRXDACTIVE(rxin, rxout);
1070 DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
1072 if (di->rxbufsize > BCMEXTRAHDROOM)
1073 extra_offset = di->rxextrahdrroom;
1075 for (i = 0; i < n; i++) {
1076 /* the di->rxbufsize doesn't include the extra headroom, we need to add it to the
1077 size to be allocated
1078 */
1080 p = osl_pktget(di->osh, di->rxbufsize + extra_offset);
1082 if (p == NULL) {
1083 DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
1084 di->name));
1085 if (i == 0) {
1086 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1087 if (dma64_rxidle(di)) {
1088 DMA_ERROR(("%s: rxfill64: ring is empty !\n", di->name));
1089 ring_empty = TRUE;
1091 } else if (DMA32_ENAB(di)) {
1092 if (dma32_rxidle(di)) {
1093 DMA_ERROR(("%s: rxfill32: ring is empty !\n", di->name));
1094 ring_empty = TRUE;
1096 } else
1097 ASSERT(0);
1099 di->hnddma.rxnobuf++;
1100 break;
1102 /* reserve an extra headroom, if applicable */
1103 if (extra_offset)
1104 PKTPULL(p, extra_offset);
1106 /* Do a cached write instead of uncached write since DMA_MAP
1107 * will flush the cache.
1108 */
1109 *(uint32 *) (PKTDATA(p)) = 0;
1111 if (DMASGLIST_ENAB)
1112 bzero(&di->rxp_dmah[rxout], sizeof(hnddma_seg_map_t));
1114 pa = DMA_MAP(di->osh, PKTDATA(p),
1115 di->rxbufsize, DMA_RX, p, &di->rxp_dmah[rxout]);
1117 ASSERT(ISALIGNED(PHYSADDRLO(pa), 4));
1119 /* save the free packet pointer */
1120 ASSERT(di->rxp[rxout] == NULL);
1121 di->rxp[rxout] = p;
1123 /* reset flags for each descriptor */
1124 flags = 0;
1125 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1126 if (rxout == (di->nrxd - 1))
1127 flags = D64_CTRL1_EOT;
1129 dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
1130 di->rxbufsize);
1131 } else if (DMA32_ENAB(di)) {
1132 if (rxout == (di->nrxd - 1))
1133 flags = CTRL_EOT;
1135 ASSERT(PHYSADDRHI(pa) == 0);
1136 dma32_dd_upd(di, di->rxd32, pa, rxout, &flags,
1137 di->rxbufsize);
1138 } else
1139 ASSERT(0);
1140 rxout = NEXTRXD(rxout);
1143 di->rxout = rxout;
1145 /* update the chip lastdscr pointer */
1146 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1147 W_REG(di->osh, &di->d64rxregs->ptr,
1148 di->rcvptrbase + I2B(rxout, dma64dd_t));
1149 } else if (DMA32_ENAB(di)) {
1150 W_REG(di->osh, &di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
1151 } else
1152 ASSERT(0);
1154 return ring_empty;
1157 /* like getnexttxp but no reclaim */
1158 static void *_dma_peeknexttxp(dma_info_t *di)
1160 uint end, i;
1162 if (di->ntxd == 0)
1163 return (NULL);
1165 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1166 end =
1167 B2I(((R_REG(di->osh, &di->d64txregs->status0) &
1168 D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
1169 dma64dd_t);
1170 } else if (DMA32_ENAB(di)) {
1171 end =
1172 B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK,
1173 dma32dd_t);
1174 } else
1175 ASSERT(0);
1177 for (i = di->txin; i != end; i = NEXTTXD(i))
1178 if (di->txp[i])
1179 return (di->txp[i]);
1181 return (NULL);
1184 /* like getnextrxp but not take off the ring */
1185 static void *_dma_peeknextrxp(dma_info_t *di)
1187 uint end, i;
1189 if (di->nrxd == 0)
1190 return (NULL);
1192 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1193 end =
1194 B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
1195 D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK,
1196 dma64dd_t);
1197 } else if (DMA32_ENAB(di)) {
1198 end =
1199 B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK,
1200 dma32dd_t);
1201 } else
1202 ASSERT(0);
1204 for (i = di->rxin; i != end; i = NEXTRXD(i))
1205 if (di->rxp[i])
1206 return (di->rxp[i]);
1208 return (NULL);
1211 static void _dma_rxreclaim(dma_info_t *di)
1213 void *p;
1215 /* "unused local" warning suppression for OSLs that
1216 * define PKTFREE() without using the di->osh arg
1217 */
1218 di = di;
1220 DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
1222 while ((p = _dma_getnextrxp(di, TRUE)))
1223 PKTFREE(di->osh, p, FALSE);
1226 static void *BCMFASTPATH _dma_getnextrxp(dma_info_t *di, bool forceall)
1228 if (di->nrxd == 0)
1229 return (NULL);
1231 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1232 return dma64_getnextrxp(di, forceall);
1233 } else if (DMA32_ENAB(di)) {
1234 return dma32_getnextrxp(di, forceall);
1235 } else
1236 ASSERT(0);
1239 static void _dma_txblock(dma_info_t *di)
1241 di->hnddma.txavail = 0;
1244 static void _dma_txunblock(dma_info_t *di)
1246 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1249 static uint _dma_txactive(dma_info_t *di)
1251 return NTXDACTIVE(di->txin, di->txout);
1254 static uint _dma_txpending(dma_info_t *di)
1256 uint curr;
1258 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1259 curr =
1260 B2I(((R_REG(di->osh, &di->d64txregs->status0) &
1261 D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
1262 dma64dd_t);
1263 } else if (DMA32_ENAB(di)) {
1264 curr =
1265 B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK,
1266 dma32dd_t);
1267 } else
1268 ASSERT(0);
1270 return NTXDACTIVE(curr, di->txout);
1273 static uint _dma_txcommitted(dma_info_t *di)
1275 uint ptr;
1276 uint txin = di->txin;
1278 if (txin == di->txout)
1279 return 0;
1281 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1282 ptr = B2I(R_REG(di->osh, &di->d64txregs->ptr), dma64dd_t);
1283 } else if (DMA32_ENAB(di)) {
1284 ptr = B2I(R_REG(di->osh, &di->d32txregs->ptr), dma32dd_t);
1285 } else
1286 ASSERT(0);
1288 return NTXDACTIVE(di->txin, ptr);
1291 static uint _dma_rxactive(dma_info_t *di)
1293 return NRXDACTIVE(di->rxin, di->rxout);
1296 static void _dma_counterreset(dma_info_t *di)
1298 /* reset all software counter */
1299 di->hnddma.rxgiants = 0;
1300 di->hnddma.rxnobuf = 0;
1301 di->hnddma.txnobuf = 0;
1304 static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
1306 uint dmactrlflags;
1308 /* don't dereference di (including its msg_level) before the NULL check */
1309 if (di == NULL)
1310 return (0);
1312 dmactrlflags = di->hnddma.dmactrlflags;
1313 ASSERT((flags & ~mask) == 0);
1315 dmactrlflags &= ~mask;
1316 dmactrlflags |= flags;
1318 /* If trying to enable parity, check if parity is actually supported */
1319 if (dmactrlflags & DMA_CTRL_PEN) {
1320 uint32 control;
1322 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1323 control = R_REG(di->osh, &di->d64txregs->control);
1324 W_REG(di->osh, &di->d64txregs->control,
1325 control | D64_XC_PD);
1326 if (R_REG(di->osh, &di->d64txregs->control) & D64_XC_PD) {
1327 /* We *can* disable it so it is supported,
1328 * restore control register
1329 */
1330 W_REG(di->osh, &di->d64txregs->control,
1331 control);
1332 } else {
1333 /* Not supported, don't allow it to be enabled */
1334 dmactrlflags &= ~DMA_CTRL_PEN;
1336 } else if (DMA32_ENAB(di)) {
1337 control = R_REG(di->osh, &di->d32txregs->control);
1338 W_REG(di->osh, &di->d32txregs->control,
1339 control | XC_PD);
1340 if (R_REG(di->osh, &di->d32txregs->control) & XC_PD) {
1341 W_REG(di->osh, &di->d32txregs->control,
1342 control);
1343 } else {
1344 /* Not supported, don't allow it to be enabled */
1345 dmactrlflags &= ~DMA_CTRL_PEN;
1347 } else
1348 ASSERT(0);
1351 di->hnddma.dmactrlflags = dmactrlflags;
1353 return (dmactrlflags);
1356 /* get the address of the var in order to change later */
1357 static uintptr _dma_getvar(dma_info_t *di, const char *name)
1359 if (!strcmp(name, "&txavail"))
1360 return ((uintptr) & (di->hnddma.txavail));
1361 else {
1362 ASSERT(0);
1364 return (0);
1367 void dma_txpioloopback(osl_t *osh, dma32regs_t *regs)
1369 OR_REG(osh, &regs->control, XC_LE);
1372 static
1373 uint8 dma_align_sizetobits(uint size)
1375 uint8 bitpos = 0;
1376 ASSERT(size);
1377 ASSERT(!(size & (size - 1)));
1378 while (size >>= 1) {
1379 bitpos++;
1381 return (bitpos);
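/*
 * Example: dma_align_sizetobits(4096) returns 12 and
 * dma_align_sizetobits(8192) returns 13; the ASSERTs require a non-zero
 * power-of-2 size, so the result is simply log2(size).
 */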
1384 /* This function ensures that the DMA descriptor ring will not get allocated
1385 * across a page boundary. If the first allocation does cross a page
1386 * boundary, it is freed and the allocation is retried at a location
1387 * aligned to the descriptor ring size, which guarantees the ring will
1388 * not cross a page boundary.
1389 */
1390 static void *dma_ringalloc(osl_t *osh, uint32 boundary, uint size,
1391 uint16 *alignbits, uint *alloced,
1392 dmaaddr_t *descpa, osldma_t **dmah)
1394 void *va;
1395 uint32 desc_strtaddr;
1396 uint32 alignbytes = 1 << *alignbits;
1398 if (NULL ==
1399 (va =
1400 DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced, descpa,
1401 dmah)))
1402 return NULL;
1404 desc_strtaddr = (uint32) ROUNDUP((uintptr) va, alignbytes);
1405 if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
1406 & boundary)) {
1407 *alignbits = dma_align_sizetobits(size);
1408 DMA_FREE_CONSISTENT(osh, va, size, *descpa, dmah);
1409 va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced,
1410 descpa, dmah);
1412 return va;
1415 /* 32-bit DMA functions */
1417 static void dma32_txinit(dma_info_t *di)
1419 uint32 control = XC_XE;
1421 DMA_TRACE(("%s: dma_txinit\n", di->name));
1423 if (di->ntxd == 0)
1424 return;
1426 di->txin = di->txout = 0;
1427 di->hnddma.txavail = di->ntxd - 1;
1429 /* clear tx descriptor ring */
1430 BZERO_SM((void *)(uintptr) di->txd32, (di->ntxd * sizeof(dma32dd_t)));
1432 if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
1433 control |= XC_PD;
1434 W_REG(di->osh, &di->d32txregs->control, control);
1435 _dma_ddtable_init(di, DMA_TX, di->txdpa);
1438 static bool dma32_txenabled(dma_info_t *di)
1440 uint32 xc;
1442 /* If the chip is dead, it is not enabled :-) */
1443 xc = R_REG(di->osh, &di->d32txregs->control);
1444 return ((xc != 0xffffffff) && (xc & XC_XE));
1447 static void dma32_txsuspend(dma_info_t *di)
1449 DMA_TRACE(("%s: dma_txsuspend\n", di->name));
1451 if (di->ntxd == 0)
1452 return;
1454 OR_REG(di->osh, &di->d32txregs->control, XC_SE);
1457 static void dma32_txresume(dma_info_t *di)
1459 DMA_TRACE(("%s: dma_txresume\n", di->name));
1461 if (di->ntxd == 0)
1462 return;
1464 AND_REG(di->osh, &di->d32txregs->control, ~XC_SE);
1467 static bool dma32_txsuspended(dma_info_t *di)
1469 return (di->ntxd == 0)
1470 || ((R_REG(di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
1473 static void dma32_txreclaim(dma_info_t *di, txd_range_t range)
1475 void *p;
1477 DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
1478 (range == HNDDMA_RANGE_ALL) ? "all" :
1479 ((range ==
1480 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
1481 "transfered")));
1483 if (di->txin == di->txout)
1484 return;
1486 while ((p = dma32_getnexttxp(di, range)))
1487 PKTFREE(di->osh, p, TRUE);
1490 static bool dma32_txstopped(dma_info_t *di)
1492 return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
1493 XS_XS_STOPPED);
1496 static bool dma32_rxstopped(dma_info_t *di)
1498 return ((R_REG(di->osh, &di->d32rxregs->status) & RS_RS_MASK) ==
1499 RS_RS_STOPPED);
1502 static bool dma32_alloc(dma_info_t *di, uint direction)
1504 uint size;
1505 uint ddlen;
1506 void *va;
1507 uint alloced;
1508 uint16 align;
1509 uint16 align_bits;
1511 ddlen = sizeof(dma32dd_t);
1513 size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
1515 alloced = 0;
1516 align_bits = di->dmadesc_align;
1517 align = (1 << align_bits);
1519 if (direction == DMA_TX) {
1520 if ((va =
1521 dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits,
1522 &alloced, &di->txdpaorig,
1523 &di->tx_dmah)) == NULL) {
1524 DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
1525 return FALSE;
1528 PHYSADDRHISET(di->txdpa, 0);
1529 ASSERT(PHYSADDRHI(di->txdpaorig) == 0);
1530 di->txd32 = (dma32dd_t *) ROUNDUP((uintptr) va, align);
1531 di->txdalign =
1532 (uint) ((int8 *) (uintptr) di->txd32 - (int8 *) va);
1534 PHYSADDRLOSET(di->txdpa,
1535 PHYSADDRLO(di->txdpaorig) + di->txdalign);
1536 /* Make sure that alignment didn't overflow */
1537 ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));
1539 di->txdalloc = alloced;
1540 ASSERT(ISALIGNED((uintptr) di->txd32, align));
1541 } else {
1542 if ((va =
1543 dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits,
1544 &alloced, &di->rxdpaorig,
1545 &di->rx_dmah)) == NULL) {
1546 DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
1547 return FALSE;
1550 PHYSADDRHISET(di->rxdpa, 0);
1551 ASSERT(PHYSADDRHI(di->rxdpaorig) == 0);
1552 di->rxd32 = (dma32dd_t *) ROUNDUP((uintptr) va, align);
1553 di->rxdalign =
1554 (uint) ((int8 *) (uintptr) di->rxd32 - (int8 *) va);
1556 PHYSADDRLOSET(di->rxdpa,
1557 PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
1558 /* Make sure that alignment didn't overflow */
1559 ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));
1560 di->rxdalloc = alloced;
1561 ASSERT(ISALIGNED((uintptr) di->rxd32, align));
1564 return TRUE;
1567 static bool dma32_txreset(dma_info_t *di)
1569 uint32 status;
1571 if (di->ntxd == 0)
1572 return TRUE;
1574 /* suspend tx DMA first */
1575 W_REG(di->osh, &di->d32txregs->control, XC_SE);
1576 SPINWAIT(((status =
1577 (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
1578 != XS_XS_DISABLED) && (status != XS_XS_IDLE)
1579 && (status != XS_XS_STOPPED), (10000));
1581 W_REG(di->osh, &di->d32txregs->control, 0);
1582 SPINWAIT(((status = (R_REG(di->osh,
1583 &di->d32txregs->status) & XS_XS_MASK)) !=
1584 XS_XS_DISABLED), 10000);
1586 /* wait for the last transaction to complete */
1587 OSL_DELAY(300);
1589 return (status == XS_XS_DISABLED);
1592 static bool dma32_rxidle(dma_info_t *di)
1594 DMA_TRACE(("%s: dma_rxidle\n", di->name));
1596 if (di->nrxd == 0)
1597 return TRUE;
1599 return ((R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
1600 R_REG(di->osh, &di->d32rxregs->ptr));
1603 static bool dma32_rxreset(dma_info_t *di)
1605 uint32 status;
1607 if (di->nrxd == 0)
1608 return TRUE;
1610 W_REG(di->osh, &di->d32rxregs->control, 0);
1611 SPINWAIT(((status = (R_REG(di->osh,
1612 &di->d32rxregs->status) & RS_RS_MASK)) !=
1613 RS_RS_DISABLED), 10000);
1615 return (status == RS_RS_DISABLED);
1618 static bool dma32_rxenabled(dma_info_t *di)
1620 uint32 rc;
1622 rc = R_REG(di->osh, &di->d32rxregs->control);
1623 return ((rc != 0xffffffff) && (rc & RC_RE));
1626 static bool dma32_txsuspendedidle(dma_info_t *di)
1628 if (di->ntxd == 0)
1629 return TRUE;
1631 if (!(R_REG(di->osh, &di->d32txregs->control) & XC_SE))
1632 return 0;
1634 if ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
1635 return 0;
1637 OSL_DELAY(2);
1638 return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
1639 XS_XS_IDLE);
1642 /* !! tx entry routine
1643 * supports full 32bit dma engine buffer addressing so
1644 * dma buffers can cross 4 Kbyte page boundaries.
1646 * WARNING: call must check the return value for error.
1647 * the error(toss frames) could be fatal and cause many subsequent hard to debug problems
1648 */
1649 static int dma32_txfast(dma_info_t *di, void *p0, bool commit)
1651 void *p, *next;
1652 uchar *data;
1653 uint len;
1654 uint16 txout;
1655 uint32 flags = 0;
1656 dmaaddr_t pa;
1658 DMA_TRACE(("%s: dma_txfast\n", di->name));
1660 txout = di->txout;
1662 /*
1663 * Walk the chain of packet buffers
1664 * allocating and initializing transmit descriptor entries.
1665 */
1666 for (p = p0; p; p = next) {
1667 uint nsegs, j;
1668 hnddma_seg_map_t *map;
1670 data = PKTDATA(p);
1671 len = PKTLEN(p);
1672 #ifdef BCM_DMAPAD
1673 len += PKTDMAPAD(di->osh, p);
1674 #endif
1675 next = PKTNEXT(p);
1677 /* return nonzero if out of tx descriptors */
1678 if (NEXTTXD(txout) == di->txin)
1679 goto outoftxd;
1681 if (len == 0)
1682 continue;
1684 if (DMASGLIST_ENAB)
1685 bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));
1687 /* get physical address of buffer start */
1688 pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
1689 &di->txp_dmah[txout]);
1691 if (DMASGLIST_ENAB) {
1692 map = &di->txp_dmah[txout];
1694 /* See if all the segments can be accounted for */
1695 if (map->nsegs >
1696 (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
1697 1))
1698 goto outoftxd;
1700 nsegs = map->nsegs;
1701 } else
1702 nsegs = 1;
1704 for (j = 1; j <= nsegs; j++) {
1705 flags = 0;
1706 if (p == p0 && j == 1)
1707 flags |= CTRL_SOF;
1709 /* With a DMA segment list, Descriptor table is filled
1710 * using the segment list instead of looping over
1711 * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
1712 * end of segment list is reached.
1713 */
1714 if ((!DMASGLIST_ENAB && next == NULL) ||
1715 (DMASGLIST_ENAB && j == nsegs))
1716 flags |= (CTRL_IOC | CTRL_EOF);
1717 if (txout == (di->ntxd - 1))
1718 flags |= CTRL_EOT;
1720 if (DMASGLIST_ENAB) {
1721 len = map->segs[j - 1].length;
1722 pa = map->segs[j - 1].addr;
1724 ASSERT(PHYSADDRHI(pa) == 0);
1726 dma32_dd_upd(di, di->txd32, pa, txout, &flags, len);
1727 ASSERT(di->txp[txout] == NULL);
1729 txout = NEXTTXD(txout);
1732 /* See above. No need to loop over individual buffers */
1733 if (DMASGLIST_ENAB)
1734 break;
1737 /* if last txd eof not set, fix it */
1738 if (!(flags & CTRL_EOF))
1739 W_SM(&di->txd32[PREVTXD(txout)].ctrl,
1740 BUS_SWAP32(flags | CTRL_IOC | CTRL_EOF));
1742 /* save the packet */
1743 di->txp[PREVTXD(txout)] = p0;
1745 /* bump the tx descriptor index */
1746 di->txout = txout;
1748 /* kick the chip */
1749 if (commit)
1750 W_REG(di->osh, &di->d32txregs->ptr, I2B(txout, dma32dd_t));
1752 /* tx flow control */
1753 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1755 return (0);
1757 outoftxd:
1758 DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
1759 PKTFREE(di->osh, p0, TRUE);
1760 di->hnddma.txavail = 0;
1761 di->hnddma.txnobuf++;
1762 return (-1);
1765 /*
1766 * Reclaim next completed txd (txds if using chained buffers) in the range
1767 * specified and return associated packet.
1768 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
1769 * transmitted as noted by the hardware "CurrDescr" pointer.
1770 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
1771 * transfered by the DMA as noted by the hardware "ActiveDescr" pointer.
1772 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
1773 * return associated packet regardless of the value of hardware pointers.
1774 */
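/*
 * Example (illustrative indices): with txin == 2, the hardware CurrDescr at
 * index 5, ActiveDescr at index 7 and txout == 9, repeated calls with
 * HNDDMA_RANGE_TRANSMITTED reclaim descriptors 2-4, HNDDMA_RANGE_TRANSFERED
 * reclaims 2-5, and HNDDMA_RANGE_ALL reclaims 2-8 regardless of the hardware
 * pointers.
 */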
1775 static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range)
1777 uint16 start, end, i;
1778 uint16 active_desc;
1779 void *txp;
1781 DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
1782 (range == HNDDMA_RANGE_ALL) ? "all" :
1783 ((range ==
1784 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
1785 "transfered")));
1787 if (di->ntxd == 0)
1788 return (NULL);
1790 txp = NULL;
1792 start = di->txin;
1793 if (range == HNDDMA_RANGE_ALL)
1794 end = di->txout;
1795 else {
1796 dma32regs_t *dregs = di->d32txregs;
1798 end =
1799 (uint16) B2I(R_REG(di->osh, &dregs->status) & XS_CD_MASK,
1800 dma32dd_t);
1802 if (range == HNDDMA_RANGE_TRANSFERED) {
1803 active_desc =
1804 (uint16) ((R_REG(di->osh, &dregs->status) &
1805 XS_AD_MASK) >> XS_AD_SHIFT);
1806 active_desc = (uint16) B2I(active_desc, dma32dd_t);
1807 if (end != active_desc)
1808 end = PREVTXD(active_desc);
1812 if ((start == 0) && (end > di->txout))
1813 goto bogus;
1815 for (i = start; i != end && !txp; i = NEXTTXD(i)) {
1816 dmaaddr_t pa;
1817 hnddma_seg_map_t *map = NULL;
1818 uint size, j, nsegs;
1820 PHYSADDRLOSET(pa,
1821 (BUS_SWAP32(R_SM(&di->txd32[i].addr)) -
1822 di->dataoffsetlow));
1823 PHYSADDRHISET(pa, 0);
1825 if (DMASGLIST_ENAB) {
1826 map = &di->txp_dmah[i];
1827 size = map->origsize;
1828 nsegs = map->nsegs;
1829 } else {
1830 size =
1831 (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) &
1832 CTRL_BC_MASK);
1833 nsegs = 1;
1836 for (j = nsegs; j > 0; j--) {
1837 W_SM(&di->txd32[i].addr, 0xdeadbeef);
1839 txp = di->txp[i];
1840 di->txp[i] = NULL;
1841 if (j > 1)
1842 i = NEXTTXD(i);
1845 DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
1848 di->txin = i;
1850 /* tx flow control */
1851 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1853 return (txp);
1855 bogus:
1856 DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n", start, end, di->txout, forceall));
1857 return (NULL);
1860 static void *dma32_getnextrxp(dma_info_t *di, bool forceall)
1862 uint i, curr;
1863 void *rxp;
1864 dmaaddr_t pa;
1865 /* if forcing, dma engine must be disabled */
1866 ASSERT(!forceall || !dma32_rxenabled(di));
1868 i = di->rxin;
1870 /* return if no packets posted */
1871 if (i == di->rxout)
1872 return (NULL);
1874 curr =
1875 B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t);
1877 /* ignore curr if forceall */
1878 if (!forceall && (i == curr))
1879 return (NULL);
1881 /* get the packet pointer that corresponds to the rx descriptor */
1882 rxp = di->rxp[i];
1883 ASSERT(rxp);
1884 di->rxp[i] = NULL;
1886 PHYSADDRLOSET(pa,
1887 (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) -
1888 di->dataoffsetlow));
1889 PHYSADDRHISET(pa, 0);
1891 /* clear this packet from the descriptor ring */
1892 DMA_UNMAP(di->osh, pa, di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);
1894 W_SM(&di->rxd32[i].addr, 0xdeadbeef);
1896 di->rxin = NEXTRXD(i);
1898 return (rxp);
1901 /*
1902 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
1903 */
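/*
 * Example: if txin == 10 and the hardware ActiveDescr index is 13, rot == 3
 * and every still-posted descriptor (and its txp[] entry) is copied from
 * index old to index TXD(old + 3); txin and txout are then advanced by the
 * same amount so the ring contents line up with where the engine restarts.
 */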
1904 static void dma32_txrotate(dma_info_t *di)
1906 uint16 ad;
1907 uint nactive;
1908 uint rot;
1909 uint16 old, new;
1910 uint32 w;
1911 uint16 first, last;
1913 ASSERT(dma32_txsuspendedidle(di));
1915 nactive = _dma_txactive(di);
1916 ad = (uint16) (B2I
1917 (((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK)
1918 >> XS_AD_SHIFT), dma32dd_t));
1919 rot = TXD(ad - di->txin);
1921 ASSERT(rot < di->ntxd);
1923 /* full-ring case is a lot harder - don't worry about this */
1924 if (rot >= (di->ntxd - nactive)) {
1925 DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
1926 return;
1929 first = di->txin;
1930 last = PREVTXD(di->txout);
1932 /* move entries starting at last and moving backwards to first */
1933 for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
1934 new = TXD(old + rot);
1936 /*
1937 * Move the tx dma descriptor.
1938 * EOT is set only in the last entry in the ring.
1939 */
1940 w = BUS_SWAP32(R_SM(&di->txd32[old].ctrl)) & ~CTRL_EOT;
1941 if (new == (di->ntxd - 1))
1942 w |= CTRL_EOT;
1943 W_SM(&di->txd32[new].ctrl, BUS_SWAP32(w));
1944 W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));
1946 /* zap the old tx dma descriptor address field */
1947 W_SM(&di->txd32[old].addr, BUS_SWAP32(0xdeadbeef));
1949 /* move the corresponding txp[] entry */
1950 ASSERT(di->txp[new] == NULL);
1951 di->txp[new] = di->txp[old];
1953 /* Move the segment map as well */
1954 if (DMASGLIST_ENAB) {
1955 bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
1956 sizeof(hnddma_seg_map_t));
1957 bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
1960 di->txp[old] = NULL;
1963 /* update txin and txout */
1964 di->txin = ad;
1965 di->txout = TXD(di->txout + rot);
1966 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1968 /* kick the chip */
1969 W_REG(di->osh, &di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
1972 /* 64-bit DMA functions */
1974 static void dma64_txinit(dma_info_t *di)
1976 uint32 control = D64_XC_XE;
1978 DMA_TRACE(("%s: dma_txinit\n", di->name));
1980 if (di->ntxd == 0)
1981 return;
1983 di->txin = di->txout = 0;
1984 di->hnddma.txavail = di->ntxd - 1;
1986 /* clear tx descriptor ring */
1987 BZERO_SM((void *)(uintptr) di->txd64, (di->ntxd * sizeof(dma64dd_t)));
1989 /* DMA engine without an alignment requirement requires the table to be initialized
1990 * before enabling the engine
1992 if (!di->aligndesc_4k)
1993 _dma_ddtable_init(di, DMA_TX, di->txdpa);
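/* DMA_CTRL_PEN presumably enables descriptor parity checking; when it is
 * not set, D64_XC_PD appears to disable the parity check in hardware.
 */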
1995 if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
1996 control |= D64_XC_PD;
1997 OR_REG(di->osh, &di->d64txregs->control, control);
1999 /* DMA engine with an alignment requirement requires the table to be initialized
2000 * after enabling the engine
2002 if (di->aligndesc_4k)
2003 _dma_ddtable_init(di, DMA_TX, di->txdpa);
2006 static bool dma64_txenabled(dma_info_t *di)
2008 uint32 xc;
2010 /* If the chip is dead, it is not enabled :-) */
2011 xc = R_REG(di->osh, &di->d64txregs->control);
2012 return ((xc != 0xffffffff) && (xc & D64_XC_XE));
2015 static void dma64_txsuspend(dma_info_t *di)
2017 DMA_TRACE(("%s: dma_txsuspend\n", di->name));
2019 if (di->ntxd == 0)
2020 return;
2022 OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
2025 static void dma64_txresume(dma_info_t *di)
2027 DMA_TRACE(("%s: dma_txresume\n", di->name));
2029 if (di->ntxd == 0)
2030 return;
2032 AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_SE);
2035 static bool dma64_txsuspended(dma_info_t *di)
2037 return (di->ntxd == 0) ||
2038 ((R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE) ==
2039 D64_XC_SE);
2042 static void BCMFASTPATH dma64_txreclaim(dma_info_t *di, txd_range_t range)
2044 void *p;
2046 DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
2047 (range == HNDDMA_RANGE_ALL) ? "all" :
2048 ((range ==
2049 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
2050 "transfered")));
2052 if (di->txin == di->txout)
2053 return;
2055 while ((p = dma64_getnexttxp(di, range))) {
2056 /* For unframed data, we don't have any packets to free */
2057 if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
2058 PKTFREE(di->osh, p, TRUE);
2062 static bool dma64_txstopped(dma_info_t *di)
2064 return ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
2065 D64_XS0_XS_STOPPED);
2068 static bool dma64_rxstopped(dma_info_t *di)
2070 return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
2071 D64_RS0_RS_STOPPED);
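/* Allocate the tx or rx descriptor ring. The ring is over-allocated and the
 * descriptor base rounded up to the required alignment; the alignment offset
 * (txdalign/rxdalign) is recorded, presumably so the original, unaligned
 * allocation can be freed later.
 */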
2074 static bool dma64_alloc(dma_info_t *di, uint direction)
2076 uint16 size;
2077 uint ddlen;
2078 void *va;
2079 uint alloced = 0;
2080 uint16 align;
2081 uint16 align_bits;
2083 ddlen = sizeof(dma64dd_t);
2085 size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
2086 align_bits = di->dmadesc_align;
2087 align = (1 << align_bits);
2089 if (direction == DMA_TX) {
2090 if ((va =
2091 dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
2092 &alloced, &di->txdpaorig,
2093 &di->tx_dmah)) == NULL) {
2094 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
2095 return FALSE;
2097 align = (1 << align_bits);
2098 di->txd64 = (dma64dd_t *) ROUNDUP((uintptr) va, align);
2099 di->txdalign =
2100 (uint) ((int8 *) (uintptr) di->txd64 - (int8 *) va);
2101 PHYSADDRLOSET(di->txdpa,
2102 PHYSADDRLO(di->txdpaorig) + di->txdalign);
2103 /* Make sure that alignment didn't overflow */
2104 ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));
2106 PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));
2107 di->txdalloc = alloced;
2108 ASSERT(ISALIGNED((uintptr) di->txd64, align));
2109 } else {
2110 if ((va =
2111 dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
2112 &alloced, &di->rxdpaorig,
2113 &di->rx_dmah)) == NULL) {
2114 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
2115 return FALSE;
2117 align = (1 << align_bits);
2118 di->rxd64 = (dma64dd_t *) ROUNDUP((uintptr) va, align);
2119 di->rxdalign =
2120 (uint) ((int8 *) (uintptr) di->rxd64 - (int8 *) va);
2121 PHYSADDRLOSET(di->rxdpa,
2122 PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
2123 /* Make sure that alignment didn't overflow */
2124 ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));
2126 PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));
2127 di->rxdalloc = alloced;
2128 ASSERT(ISALIGNED((uintptr) di->rxd64, align));
2131 return TRUE;
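/* Reset the tx engine: suspend it first and wait for it to drain to a
 * disabled/idle/stopped state, then clear the control register and wait for
 * the engine to report disabled.
 */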
2134 static bool dma64_txreset(dma_info_t *di)
2136 uint32 status;
2138 if (di->ntxd == 0)
2139 return TRUE;
2141 /* suspend tx DMA first */
2142 W_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
2143 SPINWAIT(((status =
2144 (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK))
2145 != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
2146 && (status != D64_XS0_XS_STOPPED), 10000);
2148 W_REG(di->osh, &di->d64txregs->control, 0);
2149 SPINWAIT(((status =
2150 (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK))
2151 != D64_XS0_XS_DISABLED), 10000);
2153 /* wait for the last transaction to complete */
2154 OSL_DELAY(300);
2156 return (status == D64_XS0_XS_DISABLED);
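/* The rx engine is considered idle when the hardware current-descriptor
 * offset has caught up with the last descriptor pointer written to it.
 */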
2159 static bool dma64_rxidle(dma_info_t *di)
2161 DMA_TRACE(("%s: dma_rxidle\n", di->name));
2163 if (di->nrxd == 0)
2164 return TRUE;
2166 return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
2167 (R_REG(di->osh, &di->d64rxregs->ptr) & D64_RS0_CD_MASK));
2170 static bool dma64_rxreset(dma_info_t *di)
2172 uint32 status;
2174 if (di->nrxd == 0)
2175 return TRUE;
2177 W_REG(di->osh, &di->d64rxregs->control, 0);
2178 SPINWAIT(((status =
2179 (R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK))
2180 != D64_RS0_RS_DISABLED), 10000);
2182 return (status == D64_RS0_RS_DISABLED);
2185 static bool dma64_rxenabled(dma_info_t *di)
2187 uint32 rc;
2189 rc = R_REG(di->osh, &di->d64rxregs->control);
2190 return ((rc != 0xffffffff) && (rc & D64_RC_RE));
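/* Return nonzero only when a tx suspend has been requested (D64_XC_SE set)
 * and the engine reports the idle state.
 */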
2193 static bool dma64_txsuspendedidle(dma_info_t *di)
2196 if (di->ntxd == 0)
2197 return TRUE;
2199 if (!(R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE))
2200 return FALSE;
2202 if ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
2203 D64_XS0_XS_IDLE)
2204 return TRUE;
2206 return FALSE;
2209 /* Useful when sending unframed data. This allows us to get a progress report from the DMA.
2210 * We return a pointer to the beginning of the DATA buffer of the current descriptor.
2211 * If DMA is idle, we return NULL.
2213 static void *dma64_getpos(dma_info_t *di, bool direction)
2215 void *va;
2216 bool idle;
2217 uint32 cd_offset;
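/* "Idle" is judged from the software ring indices rather than the hardware
 * state; otherwise the buffer at the hardware current-descriptor offset is
 * returned.
 */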
2219 if (direction == DMA_TX) {
2220 cd_offset =
2221 R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK;
2222 idle = !NTXDACTIVE(di->txin, di->txout);
2223 va = di->txp[B2I(cd_offset, dma64dd_t)];
2224 } else {
2225 cd_offset =
2226 R_REG(di->osh, &di->d64rxregs->status0) & D64_XS0_CD_MASK;
2227 idle = !NRXDACTIVE(di->rxin, di->rxout);
2228 va = di->rxp[B2I(cd_offset, dma64dd_t)];
2231 /* If DMA is IDLE, return NULL */
2232 if (idle) {
2233 DMA_TRACE(("%s: DMA idle, return NULL\n", __func__));
2234 va = NULL;
2237 return va;
2240 /* TX of unframed data
2242 * Adds a DMA ring descriptor for the data pointed to by "buf".
2243 * This is for DMA of a buffer of data and is unlike other hnddma TX functions
2244 * that take a pointer to a "packet".
2245 * Each call results in a single descriptor being added for "len" bytes of
2246 * data starting at "buf"; it does not handle chained buffers.
2248 static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
2250 uint16 txout;
2251 uint32 flags = 0;
2252 dmaaddr_t pa; /* phys addr */
2254 txout = di->txout;
2256 /* return nonzero if out of tx descriptors */
2257 if (NEXTTXD(txout) == di->txin)
2258 goto outoftxd;
2260 if (len == 0)
2261 return 0;
2263 pa = DMA_MAP(di->osh, buf, len, DMA_TX, NULL, &di->txp_dmah[txout]);
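/* The whole frame fits in this single buffer, so the one descriptor is both
 * start- and end-of-frame; IOC presumably requests an interrupt on
 * completion.
 */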
2265 flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);
2267 if (txout == (di->ntxd - 1))
2268 flags |= D64_CTRL1_EOT;
2270 dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
2271 ASSERT(di->txp[txout] == NULL);
2273 /* save the buffer pointer - used by dma_getpos */
2274 di->txp[txout] = buf;
2276 txout = NEXTTXD(txout);
2277 /* bump the tx descriptor index */
2278 di->txout = txout;
2280 /* kick the chip */
2281 if (commit) {
2282 W_REG(di->osh, &di->d64txregs->ptr,
2283 di->xmtptrbase + I2B(txout, dma64dd_t));
2286 /* tx flow control */
2287 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
2289 return (0);
2291 outoftxd:
2292 DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __func__));
2293 di->hnddma.txavail = 0;
2294 di->hnddma.txnobuf++;
2295 return (-1);
2298 /* !! tx entry routine
2299 * WARNING: the caller must check the return value for errors;
2300 * an error (tossed frames) could be fatal and cause many subsequent hard-to-debug problems
2302 static int BCMFASTPATH dma64_txfast(dma_info_t *di, void *p0, bool commit)
2304 void *p, *next;
2305 uchar *data;
2306 uint len;
2307 uint16 txout;
2308 uint32 flags = 0;
2309 dmaaddr_t pa;
2311 DMA_TRACE(("%s: dma_txfast\n", di->name));
2313 txout = di->txout;
2316 * Walk the chain of packet buffers
2317 * allocating and initializing transmit descriptor entries.
2319 for (p = p0; p; p = next) {
2320 uint nsegs, j;
2321 hnddma_seg_map_t *map;
2323 data = PKTDATA(p);
2324 len = PKTLEN(p);
2325 #ifdef BCM_DMAPAD
2326 len += PKTDMAPAD(di->osh, p);
2327 #endif /* BCM_DMAPAD */
2328 next = PKTNEXT(p);
2330 /* return nonzero if out of tx descriptors */
2331 if (NEXTTXD(txout) == di->txin)
2332 goto outoftxd;
2334 if (len == 0)
2335 continue;
2337 /* get physical address of buffer start */
2338 if (DMASGLIST_ENAB)
2339 bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));
2341 pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
2342 &di->txp_dmah[txout]);
2344 if (DMASGLIST_ENAB) {
2345 map = &di->txp_dmah[txout];
2347 /* See if all the segments can be accounted for */
2348 if (map->nsegs >
2349 (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
2351 goto outoftxd;
2353 nsegs = map->nsegs;
2354 } else
2355 nsegs = 1;
2357 for (j = 1; j <= nsegs; j++) {
2358 flags = 0;
2359 if (p == p0 && j == 1)
2360 flags |= D64_CTRL1_SOF;
2362 /* With a DMA segment list, the descriptor table is filled
2363 * from the segment list instead of by looping over
2364 * the buffers of a multi-buffer chain. Therefore, with SGLIST,
2365 * EOF is set when the end of the segment list is reached.
2367 if ((!DMASGLIST_ENAB && next == NULL) ||
2368 (DMASGLIST_ENAB && j == nsegs))
2369 flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
2370 if (txout == (di->ntxd - 1))
2371 flags |= D64_CTRL1_EOT;
2373 if (DMASGLIST_ENAB) {
2374 len = map->segs[j - 1].length;
2375 pa = map->segs[j - 1].addr;
2377 dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
2378 ASSERT(di->txp[txout] == NULL);
2380 txout = NEXTTXD(txout);
2383 /* See above. No need to loop over individual buffers */
2384 if (DMASGLIST_ENAB)
2385 break;
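/* A trailing zero-length buffer in the chain is skipped above, which can
 * leave the last posted descriptor without EOF set; fix that up below.
 */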
2388 /* if last txd eof not set, fix it */
2389 if (!(flags & D64_CTRL1_EOF))
2390 W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
2391 BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));
2393 /* save the packet */
2394 di->txp[PREVTXD(txout)] = p0;
2396 /* bump the tx descriptor index */
2397 di->txout = txout;
2399 /* kick the chip */
2400 if (commit)
2401 W_REG(di->osh, &di->d64txregs->ptr,
2402 di->xmtptrbase + I2B(txout, dma64dd_t));
2404 /* tx flow control */
2405 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
2407 return (0);
2409 outoftxd:
2410 DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
2411 PKTFREE(di->osh, p0, TRUE);
2412 di->hnddma.txavail = 0;
2413 di->hnddma.txnobuf++;
2414 return (-1);
2418 * Reclaim next completed txd (txds if using chained buffers) in the range
2419 * specified and return associated packet.
2420 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
2421 * transmitted as noted by the hardware "CurrDescr" pointer.
2422 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
2423 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
2424 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
2425 * return associated packet regardless of the value of hardware pointers.
2427 static void *BCMFASTPATH dma64_getnexttxp(dma_info_t *di, txd_range_t range)
2429 uint16 start, end, i;
2430 uint16 active_desc;
2431 void *txp;
2433 DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
2434 (range == HNDDMA_RANGE_ALL) ? "all" :
2435 ((range ==
2436 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
2437 "transfered")));
2439 if (di->ntxd == 0)
2440 return (NULL);
2442 txp = NULL;
2444 start = di->txin;
2445 if (range == HNDDMA_RANGE_ALL)
2446 end = di->txout;
2447 else {
2448 dma64regs_t *dregs = di->d64txregs;
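/* The hardware reports the current descriptor as a byte offset; subtract
 * the programmed ring base (xmtptrbase), mask, and convert to an index.
 */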
2450 end =
2451 (uint16) (B2I
2452 (((R_REG(di->osh, &dregs->status0) &
2453 D64_XS0_CD_MASK) -
2454 di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));
2456 if (range == HNDDMA_RANGE_TRANSFERED) {
2457 active_desc =
2458 (uint16) (R_REG(di->osh, &dregs->status1) &
2459 D64_XS1_AD_MASK);
2460 active_desc =
2461 (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
2462 active_desc = B2I(active_desc, dma64dd_t);
2463 if (end != active_desc)
2464 end = PREVTXD(active_desc);
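/* With txin at 0, an "end" beyond txout would point past the posted
 * descriptors and can only come from a bogus hardware value, so bail out.
 */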
2468 if ((start == 0) && (end > di->txout))
2469 goto bogus;
2471 for (i = start; i != end && !txp; i = NEXTTXD(i)) {
2472 dmaaddr_t pa;
2473 hnddma_seg_map_t *map = NULL;
2474 uint size, j, nsegs;
2476 PHYSADDRLOSET(pa,
2477 (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) -
2478 di->dataoffsetlow));
2479 PHYSADDRHISET(pa,
2480 (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) -
2481 di->dataoffsethigh));
2483 if (DMASGLIST_ENAB) {
2484 map = &di->txp_dmah[i];
2485 size = map->origsize;
2486 nsegs = map->nsegs;
2487 } else {
2488 size =
2489 (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) &
2490 D64_CTRL2_BC_MASK);
2491 nsegs = 1;
2494 for (j = nsegs; j > 0; j--) {
2495 W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
2496 W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);
2498 txp = di->txp[i];
2499 di->txp[i] = NULL;
2500 if (j > 1)
2501 i = NEXTTXD(i);
2504 DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
2507 di->txin = i;
2509 /* tx flow control */
2510 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
2512 return (txp);
2514 bogus:
2515 DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d range %d\n", start, end, di->txout, range));
2516 return (NULL);
2519 static void *BCMFASTPATH dma64_getnextrxp(dma_info_t *di, bool forceall)
2521 uint i, curr;
2522 void *rxp;
2523 dmaaddr_t pa;
2525 /* if forcing, dma engine must be disabled */
2526 ASSERT(!forceall || !dma64_rxenabled(di));
2528 i = di->rxin;
2530 /* return if no packets posted */
2531 if (i == di->rxout)
2532 return (NULL);
2534 curr =
2535 B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
2536 di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);
2538 /* ignore curr if forceall */
2539 if (!forceall && (i == curr))
2540 return (NULL);
2542 /* get the packet pointer that corresponds to the rx descriptor */
2543 rxp = di->rxp[i];
2544 ASSERT(rxp);
2545 di->rxp[i] = NULL;
2547 PHYSADDRLOSET(pa,
2548 (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) -
2549 di->dataoffsetlow));
2550 PHYSADDRHISET(pa,
2551 (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) -
2552 di->dataoffsethigh));
2554 /* clear this packet from the descriptor ring */
2555 DMA_UNMAP(di->osh, pa, di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);
2557 W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
2558 W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);
2560 di->rxin = NEXTRXD(i);
2562 return (rxp);
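/* Probe for address-extension support: try to set the AE bit, read it back,
 * then clear it again; the bit can be set only if the engine supports
 * address extension.
 */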
2565 static bool _dma64_addrext(osl_t *osh, dma64regs_t * dma64regs)
2567 uint32 w;
2568 OR_REG(osh, &dma64regs->control, D64_XC_AE);
2569 w = R_REG(osh, &dma64regs->control);
2570 AND_REG(osh, &dma64regs->control, ~D64_XC_AE);
2571 return ((w & D64_XC_AE) == D64_XC_AE);
2575 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
2577 static void dma64_txrotate(dma_info_t *di)
2579 uint16 ad;
2580 uint nactive;
2581 uint rot;
2582 uint16 old, new;
2583 uint32 w;
2584 uint16 first, last;
2586 ASSERT(dma64_txsuspendedidle(di));
2588 nactive = _dma_txactive(di);
2589 ad = (uint16) (B2I
2590 ((((R_REG(di->osh, &di->d64txregs->status1) &
2591 D64_XS1_AD_MASK)
2592 - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t));
2593 rot = TXD(ad - di->txin);
2595 ASSERT(rot < di->ntxd);
2597 /* full-ring case is a lot harder - don't worry about this */
2598 if (rot >= (di->ntxd - nactive)) {
2599 DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
2600 return;
2603 first = di->txin;
2604 last = PREVTXD(di->txout);
2606 /* move entries starting at last and moving backwards to first */
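/* Each entry moves forward by "rot"; copying from the newest entry backwards
 * guarantees no source descriptor is overwritten before it has been moved.
 */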
2607 for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
2608 new = TXD(old + rot);
2611 * Move the tx dma descriptor.
2612 * EOT is set only in the last entry in the ring.
2614 w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
2615 if (new == (di->ntxd - 1))
2616 w |= D64_CTRL1_EOT;
2617 W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));
2619 w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
2620 W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));
2622 W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
2623 W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));
2625 /* zap the old tx dma descriptor address field */
2626 W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
2627 W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));
2629 /* move the corresponding txp[] entry */
2630 ASSERT(di->txp[new] == NULL);
2631 di->txp[new] = di->txp[old];
2633 /* Move the map */
2634 if (DMASGLIST_ENAB) {
2635 bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
2636 sizeof(hnddma_seg_map_t));
2637 bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
2640 di->txp[old] = NULL;
2643 /* update txin and txout */
2644 di->txin = ad;
2645 di->txout = TXD(di->txout + rot);
2646 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
2648 /* kick the chip */
2649 W_REG(di->osh, &di->d64txregs->ptr,
2650 di->xmtptrbase + I2B(di->txout, dma64dd_t));
2653 uint dma_addrwidth(si_t *sih, void *dmaregs)
2655 dma32regs_t *dma32regs;
2656 osl_t *osh;
2658 osh = si_osh(sih);
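/* Decide the usable DMA address width: 64-bit needs a DMA64-capable core, a
 * 64-bit backplane, and a bus that can carry 64-bit addresses; otherwise fall
 * back to 32-bit, or to 30-bit when even 32-bit addrext is unavailable.
 */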
2660 /* Perform 64-bit checks only if we want to advertise 64-bit (> 32-bit) capability */
2661 /* DMA engine is 64-bit capable */
2662 if ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
2663 /* backplane is 64-bit capable */
2664 if (si_backplane64(sih))
2665 /* If bus is System Backplane or PCIE then we can access 64-bits */
2666 if ((BUSTYPE(sih->bustype) == SI_BUS) ||
2667 ((BUSTYPE(sih->bustype) == PCI_BUS) &&
2668 (sih->buscoretype == PCIE_CORE_ID)))
2669 return (DMADDRWIDTH_64);
2671 /* DMA64 is always 32-bit capable, AE is always TRUE */
2672 ASSERT(_dma64_addrext(osh, (dma64regs_t *) dmaregs));
2674 return (DMADDRWIDTH_32);
2677 /* Start checking for 32-bit / 30-bit addressing */
2678 dma32regs = (dma32regs_t *) dmaregs;
2680 /* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
2681 if ((BUSTYPE(sih->bustype) == SI_BUS) ||
2682 ((BUSTYPE(sih->bustype) == PCI_BUS)
2683 && sih->buscoretype == PCIE_CORE_ID)
2684 || (_dma32_addrext(osh, dma32regs)))
2685 return (DMADDRWIDTH_32);
2687 /* Fallthru */
2688 return (DMADDRWIDTH_30);