/*
 * Generic Broadcom Home Networking Division (HND) DMA module.
 * This supports the following chips: BCM42xx, 44xx, 47xx.
 *
 * Copyright (C) 2009, Broadcom Corporation
 *
 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
 *
 * $Id: hnddma.c,v 1.167.2.34 2010/01/07 01:55:15 Exp $
 */

#include <bcmendian.h>
#ifdef BCMDBG
#define	DMA_ERROR(args)	if (!(*di->msg_level & 1)); else printf args
#define	DMA_TRACE(args)	if (!(*di->msg_level & 2)); else printf args
#elif defined(BCMDBG_ERR)
#define	DMA_ERROR(args)	if (!(*di->msg_level & 1)); else printf args
#define	DMA_TRACE(args)
#else
#define	DMA_ERROR(args)
#define	DMA_TRACE(args)
#endif

#define	DMA_NONE(args)
/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level =
	0;
#define	MAXNAMEL	8	/* 8 char names */

#define	DI_INFO(dmah)	((dma_info_t *)dmah)
/* dma engine software state */
typedef struct dma_info {
	struct hnddma_pub hnddma;	/* exported structure, don't use hnddma_t,
					 * which could be const
					 */
	uint *msg_level;	/* message level pointer */
	char name[MAXNAMEL];	/* caller's name for diag msgs */

	void *osh;		/* os handle */
	si_t *sih;		/* sb handle */

	bool dma64;		/* dma64 enabled */
	bool addrext;		/* this dma engine supports DmaExtendedAddrChanges */

	dma32regs_t *d32txregs;	/* 32 bits dma tx engine registers */
	dma32regs_t *d32rxregs;	/* 32 bits dma rx engine registers */
	dma64regs_t *d64txregs;	/* 64 bits dma tx engine registers */
	dma64regs_t *d64rxregs;	/* 64 bits dma rx engine registers */

	uint32 dma64align;	/* either 8k or 4k depends on number of dd */
	dma32dd_t *txd32;	/* pointer to dma32 tx descriptor ring */
	dma64dd_t *txd64;	/* pointer to dma64 tx descriptor ring */
	uint ntxd;		/* # tx descriptors tunable */
	uint txin;		/* index of next descriptor to reclaim */
	uint txout;		/* index of next descriptor to post */
	void **txp;		/* pointer to parallel array of pointers to packets */
	osldma_t *tx_dmah;	/* DMA TX descriptor ring handle */
	hnddma_seg_map_t *txp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t txdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t txdpaorig;	/* Original physical address of descriptor ring */
	uint txdalign;		/* #bytes added to alloc'd mem to align txd */
	uint txdalloc;		/* #bytes allocated for the ring */
	uint32 xmtptrbase;	/* When using unaligned descriptors, the ptr register
				 * is not just an index, it needs all 13 bits to be
				 * an offset from the addr register.
				 */

	dma32dd_t *rxd32;	/* pointer to dma32 rx descriptor ring */
	dma64dd_t *rxd64;	/* pointer to dma64 rx descriptor ring */
	uint nrxd;		/* # rx descriptors tunable */
	uint rxin;		/* index of next descriptor to reclaim */
	uint rxout;		/* index of next descriptor to post */
	void **rxp;		/* pointer to parallel array of pointers to packets */
	osldma_t *rx_dmah;	/* DMA RX descriptor ring handle */
	hnddma_seg_map_t *rxp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t rxdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t rxdpaorig;	/* Original physical address of descriptor ring */
	uint rxdalign;		/* #bytes added to alloc'd mem to align rxd */
	uint rxdalloc;		/* #bytes allocated for the ring */
	uint32 rcvptrbase;	/* Base for ptr reg when using unaligned descriptors */

	uint rxbufsize;		/* rx buffer size in bytes,
				 * not including the extra headroom
				 */
	uint rxextrahdrroom;	/* extra rx headroom, reserved to assist upper stack
				 * e.g. some rx pkt buffers will be bridged to tx side
				 * without byte copying. The extra headroom needs to be
				 * large enough to fit txheader needs.
				 * Some dongle driver may not need it.
				 */
	uint nrxpost;		/* # rx buffers to keep posted */
	uint rxoffset;		/* rxcontrol offset */
	uint ddoffsetlow;	/* add to get dma address of descriptor ring, low 32 bits */
	uint ddoffsethigh;	/* high 32 bits */
	uint dataoffsetlow;	/* add to get dma address of data buffer, low 32 bits */
	uint dataoffsethigh;	/* high 32 bits */
} dma_info_t;
#ifdef BCMDMA64
#define	DMA64_ENAB(di)	((di)->dma64)
#define	DMA64_CAP	TRUE
#else
#define	DMA64_ENAB(di)	(0)
#define	DMA64_CAP	FALSE
#endif
/* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
#ifdef BCMDMASGLISTOSL
#define DMASGLIST_ENAB TRUE
#else
#define DMASGLIST_ENAB FALSE
#endif
/* descriptor bumping macros */
#define	XXD(x, n)	((x) & ((n) - 1))	/* faster than %, but n must be power of 2 */
#define	TXD(x)		XXD((x), di->ntxd)
#define	RXD(x)		XXD((x), di->nrxd)
#define	NEXTTXD(i)	TXD((i) + 1)
#define	PREVTXD(i)	TXD((i) - 1)
#define	NEXTRXD(i)	RXD((i) + 1)
#define	PREVRXD(i)	RXD((i) - 1)
#define	NTXDACTIVE(h, t)	TXD((t) - (h))
#define	NRXDACTIVE(h, t)	RXD((t) - (h))

/* macros to convert between byte offsets and indexes */
#define	B2I(bytes, type)	((bytes) / sizeof(type))
#define	I2B(index, type)	((index) * sizeof(type))

#define	PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define	PCI32ADDR_HIGH_SHIFT	30		/* address[31:30] */

#define	PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define	PCI64ADDR_HIGH_SHIFT	31		/* address[63] */
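/*
 * Illustrative sketch (not part of the driver): because ntxd/nrxd are
 * powers of 2, XXD() can mask instead of using '%', and the active count
 * stays correct across ring wrap. E.g. with ntxd = 8, txin = 6, txout = 2:
 *
 *	NTXDACTIVE(6, 2) == ((2 - 6) & 7) == 4	(four descriptors in flight)
 *	NEXTTXD(7)       == ((7 + 1) & 7) == 0	(wraps to ring start)
 *
 * B2I()/I2B() convert between those indexes and the byte offsets that the
 * hardware ptr/status registers traffic in.
 */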
/* common prototypes */
static bool _dma_isaddrext(dma_info_t *di);
static bool _dma_alloc(dma_info_t *di, uint direction);
static void _dma_detach(dma_info_t *di);
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
static void _dma_rxinit(dma_info_t *di);
static void *_dma_rx(dma_info_t *di);
static bool _dma_rxfill(dma_info_t *di);
static void _dma_rxreclaim(dma_info_t *di);
static void _dma_rxenable(dma_info_t *di);
static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
static void _dma_rx_param_get(dma_info_t *di, uint16 *rxoffset, uint16 *rxbufsize);

static void _dma_txblock(dma_info_t *di);
static void _dma_txunblock(dma_info_t *di);
static uint _dma_txactive(dma_info_t *di);
static uint _dma_rxactive(dma_info_t *di);
static uint _dma_txpending(dma_info_t *di);
static uint _dma_txcommitted(dma_info_t *di);

static void *_dma_peeknexttxp(dma_info_t *di);
static void *_dma_peeknextrxp(dma_info_t *di);
static uintptr _dma_getvar(dma_info_t *di, const char *name);
static void _dma_counterreset(dma_info_t *di);
static void _dma_fifoloopbackenable(dma_info_t *di);
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
/* ** 32 bit DMA prototypes */
static bool dma32_alloc(dma_info_t *di, uint direction);
static bool dma32_txreset(dma_info_t *di);
static bool dma32_rxreset(dma_info_t *di);
static bool dma32_txsuspendedidle(dma_info_t *di);
static int dma32_txfast(dma_info_t *di, void *p0, bool commit);
static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma32_getnextrxp(dma_info_t *di, bool forceall);
static void dma32_txrotate(dma_info_t *di);
static bool dma32_rxidle(dma_info_t *di);
static void dma32_txinit(dma_info_t *di);
static bool dma32_txenabled(dma_info_t *di);
static void dma32_txsuspend(dma_info_t *di);
static void dma32_txresume(dma_info_t *di);
static bool dma32_txsuspended(dma_info_t *di);
static void dma32_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma32_txstopped(dma_info_t *di);
static bool dma32_rxstopped(dma_info_t *di);
static bool dma32_rxenabled(dma_info_t *di);
static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs);
/* ** 64 bit DMA prototypes and stubs */
#ifdef BCMDMA64
static bool dma64_alloc(dma_info_t *di, uint direction);
static bool dma64_txreset(dma_info_t *di);
static bool dma64_rxreset(dma_info_t *di);
static bool dma64_txsuspendedidle(dma_info_t *di);
static int dma64_txfast(dma_info_t *di, void *p0, bool commit);
static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
static void dma64_txrotate(dma_info_t *di);

static bool dma64_rxidle(dma_info_t *di);
static void dma64_txinit(dma_info_t *di);
static bool dma64_txenabled(dma_info_t *di);
static void dma64_txsuspend(dma_info_t *di);
static void dma64_txresume(dma_info_t *di);
static bool dma64_txsuspended(dma_info_t *di);
static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma64_txstopped(dma_info_t *di);
static bool dma64_rxstopped(dma_info_t *di);
static bool dma64_rxenabled(dma_info_t *di);
static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs);

#else	/* !BCMDMA64: the 64-bit entry points are stubbed out */
static bool dma64_alloc(dma_info_t *di, uint direction) { return FALSE; }
static bool dma64_txreset(dma_info_t *di) { return FALSE; }
static bool dma64_rxreset(dma_info_t *di) { return FALSE; }
static bool dma64_txsuspendedidle(dma_info_t *di) { return FALSE; }
static int dma64_txfast(dma_info_t *di, void *p0, bool commit) { return 0; }
static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range) { return NULL; }
static void *dma64_getnextrxp(dma_info_t *di, bool forceall) { return NULL; }
static void dma64_txrotate(dma_info_t *di) { return; }

static bool dma64_rxidle(dma_info_t *di) { return FALSE; }
static void dma64_txinit(dma_info_t *di) { return; }
static bool dma64_txenabled(dma_info_t *di) { return FALSE; }
static void dma64_txsuspend(dma_info_t *di) { return; }
static void dma64_txresume(dma_info_t *di) { return; }
static bool dma64_txsuspended(dma_info_t *di) { return FALSE; }
static void dma64_txreclaim(dma_info_t *di, txd_range_t range) { return; }
static bool dma64_txstopped(dma_info_t *di) { return FALSE; }
static bool dma64_rxstopped(dma_info_t *di) { return FALSE; }
static bool dma64_rxenabled(dma_info_t *di) { return FALSE; }
static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs) { return FALSE; }
#endif	/* BCMDMA64 */
#if defined(BCMDBG) || defined(BCMDBG_DUMP)
static void dma32_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma32dd_t *ring, uint start,
	uint end, uint max_num);
static void dma32_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma32_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma32_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);

static void dma64_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma64dd_t *ring, uint start,
	uint end, uint max_num);
static void dma64_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma64_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma64_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
#endif	/* BCMDBG || BCMDBG_DUMP */
static di_fcn_t dma64proc = {
	(di_detach_t)_dma_detach,
	(di_txinit_t)dma64_txinit,
	(di_txreset_t)dma64_txreset,
	(di_txenabled_t)dma64_txenabled,
	(di_txsuspend_t)dma64_txsuspend,
	(di_txresume_t)dma64_txresume,
	(di_txsuspended_t)dma64_txsuspended,
	(di_txsuspendedidle_t)dma64_txsuspendedidle,
	(di_txfast_t)dma64_txfast,
	(di_txstopped_t)dma64_txstopped,
	(di_txreclaim_t)dma64_txreclaim,
	(di_getnexttxp_t)dma64_getnexttxp,
	(di_peeknexttxp_t)_dma_peeknexttxp,
	(di_txblock_t)_dma_txblock,
	(di_txunblock_t)_dma_txunblock,
	(di_txactive_t)_dma_txactive,
	(di_txrotate_t)dma64_txrotate,

	(di_rxinit_t)_dma_rxinit,
	(di_rxreset_t)dma64_rxreset,
	(di_rxidle_t)dma64_rxidle,
	(di_rxstopped_t)dma64_rxstopped,
	(di_rxenable_t)_dma_rxenable,
	(di_rxenabled_t)dma64_rxenabled,
	(di_rxfill_t)_dma_rxfill,
	(di_rxreclaim_t)_dma_rxreclaim,
	(di_getnextrxp_t)_dma_getnextrxp,
	(di_peeknextrxp_t)_dma_peeknextrxp,
	(di_rxparam_get_t)_dma_rx_param_get,

	(di_fifoloopbackenable_t)_dma_fifoloopbackenable,
	(di_getvar_t)_dma_getvar,
	(di_counterreset_t)_dma_counterreset,
	(di_ctrlflags_t)_dma_ctrlflags,

#if defined(BCMDBG) || defined(BCMDBG_DUMP)
	(di_dump_t)dma64_dump,
	(di_dumptx_t)dma64_dumptx,
	(di_dumprx_t)dma64_dumprx,
#else
	NULL,
	NULL,
	NULL,
#endif
	(di_rxactive_t)_dma_rxactive,
	(di_txpending_t)_dma_txpending,
	(di_txcommitted_t)_dma_txcommitted,
};
static di_fcn_t dma32proc = {
	(di_detach_t)_dma_detach,
	(di_txinit_t)dma32_txinit,
	(di_txreset_t)dma32_txreset,
	(di_txenabled_t)dma32_txenabled,
	(di_txsuspend_t)dma32_txsuspend,
	(di_txresume_t)dma32_txresume,
	(di_txsuspended_t)dma32_txsuspended,
	(di_txsuspendedidle_t)dma32_txsuspendedidle,
	(di_txfast_t)dma32_txfast,
	(di_txstopped_t)dma32_txstopped,
	(di_txreclaim_t)dma32_txreclaim,
	(di_getnexttxp_t)dma32_getnexttxp,
	(di_peeknexttxp_t)_dma_peeknexttxp,
	(di_txblock_t)_dma_txblock,
	(di_txunblock_t)_dma_txunblock,
	(di_txactive_t)_dma_txactive,
	(di_txrotate_t)dma32_txrotate,

	(di_rxinit_t)_dma_rxinit,
	(di_rxreset_t)dma32_rxreset,
	(di_rxidle_t)dma32_rxidle,
	(di_rxstopped_t)dma32_rxstopped,
	(di_rxenable_t)_dma_rxenable,
	(di_rxenabled_t)dma32_rxenabled,
	(di_rxfill_t)_dma_rxfill,
	(di_rxreclaim_t)_dma_rxreclaim,
	(di_getnextrxp_t)_dma_getnextrxp,
	(di_peeknextrxp_t)_dma_peeknextrxp,
	(di_rxparam_get_t)_dma_rx_param_get,

	(di_fifoloopbackenable_t)_dma_fifoloopbackenable,
	(di_getvar_t)_dma_getvar,
	(di_counterreset_t)_dma_counterreset,
	(di_ctrlflags_t)_dma_ctrlflags,

#if defined(BCMDBG) || defined(BCMDBG_DUMP)
	(di_dump_t)dma32_dump,
	(di_dumptx_t)dma32_dumptx,
	(di_dumprx_t)dma32_dumprx,
#else
	NULL,
	NULL,
	NULL,
#endif
	(di_rxactive_t)_dma_rxactive,
	(di_txpending_t)_dma_txpending,
	(di_txcommitted_t)_dma_txcommitted,
};
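/*
 * Illustrative sketch (not part of the driver): consumers are expected to
 * dispatch through the opsvec rather than calling the dma32_xxx/dma64_xxx
 * routines directly, so one hnddma_t handle works for either engine width.
 * Assuming the di_fcn_t members carry the names suggested by the casts
 * above (txinit, rxfill, detach):
 *
 *	hnddma_t *dmah = ...;		// from dma_attach()
 *	dmah->di_fn.txinit(dmah);	// lands in dma32_txinit or dma64_txinit
 *	dmah->di_fn.rxfill(dmah);
 */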
hnddma_t *
dma_attach(osl_t *osh, char *name, si_t *sih, void *dmaregstx, void *dmaregsrx,
	uint ntxd, uint nrxd, uint rxbufsize, int rxextheadroom, uint nrxpost, uint rxoffset,
	uint *msg_level)
{
	dma_info_t *di;
	uint size;

	/* allocate private info structure */
	if ((di = MALLOC(osh, sizeof(dma_info_t))) == NULL) {
		printf("dma_attach: out of memory, malloced %d bytes\n", MALLOCED(osh));
		return (NULL);
	}
	bzero((char *)di, sizeof(dma_info_t));

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	/* old chips w/o sb is no longer supported */
	ASSERT(sih != NULL);

	di->dma64 = ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);

#ifndef BCMDMA64
	if (di->dma64) {
		DMA_ERROR(("dma_attach: driver doesn't have the capability to support "
		           "64 bits DMA\n"));
		goto fail;
	}
#endif
	/* check arguments */
	ASSERT(ISPOWEROF2(ntxd));
	ASSERT(ISPOWEROF2(nrxd));
	if (nrxd == 0)
		ASSERT(dmaregsrx == NULL);
	if (ntxd == 0)
		ASSERT(dmaregstx == NULL);

	/* init dma reg pointer */
	if (DMA64_ENAB(di)) {
		ASSERT(ntxd <= D64MAXDD);
		ASSERT(nrxd <= D64MAXDD);
		di->d64txregs = (dma64regs_t *)dmaregstx;
		di->d64rxregs = (dma64regs_t *)dmaregsrx;

		di->dma64align = D64RINGALIGN;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
			/* for smaller dd table, HW relax the alignment requirement */
			di->dma64align = D64RINGALIGN / 2;
		}

		/* initialize opsvec of function pointers */
		di->hnddma.di_fn = dma64proc;
	} else {
		ASSERT(ntxd <= D32MAXDD);
		ASSERT(nrxd <= D32MAXDD);
		di->d32txregs = (dma32regs_t *)dmaregstx;
		di->d32rxregs = (dma32regs_t *)dmaregsrx;

		/* initialize opsvec of function pointers */
		di->hnddma.di_fn = dma32proc;
	}
	/* Default flags (which can be changed by the driver calling dma_ctrlflags
	 * before enable): Rx Overflow Continue DISABLED, Parity DISABLED.
	 */
	di->hnddma.di_fn.ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

	DMA_TRACE(("%s: dma_attach: %s osh %p flags 0x%x ntxd %d nrxd %d rxbufsize %d "
	           "rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n",
	           name, (di->dma64 ? "DMA64" : "DMA32"), osh, di->hnddma.dmactrlflags,
	           ntxd, nrxd, rxbufsize, rxextheadroom, nrxpost, rxoffset, dmaregstx,
	           dmaregsrx));

	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';
	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom = (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = rxbufsize - di->rxextrahdrroom;
	else
		di->rxbufsize = rxbufsize;

	di->nrxpost = nrxpost;
	di->rxoffset = rxoffset;
	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map silicon backplace address to zero based memory, need offset
	 *     Other bus: use zero
	 *     SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sih->bustype == PCI_BUS) {
		if ((sih->buscoretype == PCIE_CORE_ID) && di->dma64) {
			/* pcie with DMA64 */
			di->ddoffsetlow = 0;
			di->ddoffsethigh = SI_PCIE_DMA_H32;
		} else {
			/* pci(DMA32/DMA64) or pcie with DMA32 */
			if ((sih->chip == BCM4322_CHIP_ID) ||
			    (sih->chip == BCM43221_CHIP_ID) ||
			    (sih->chip == BCM43231_CHIP_ID) ||
			    (sih->chip == BCM4342_CHIP_ID) ||
			    (sih->chip == BCM43111_CHIP_ID) ||
			    (sih->chip == BCM43112_CHIP_ID) ||
			    (sih->chip == BCM43222_CHIP_ID))
				di->ddoffsetlow = SI_PCI_DMA2;
			else
				di->ddoffsetlow = SI_PCI_DMA;

			di->ddoffsethigh = 0;
		}
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}

#if defined(__mips__) && defined(IL_BIGENDIAN)
	di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
#endif /* defined(__mips__) && defined(IL_BIGENDIAN) */

	di->addrext = _dma_isaddrext(di);
	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		if ((di->txp = MALLOC(osh, size)) == NULL) {
			DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n",
			           di->name, MALLOCED(osh)));
			goto fail;
		}
		bzero((char *)di->txp, size);
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		if ((di->rxp = MALLOC(osh, size)) == NULL) {
			DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n",
			           di->name, MALLOCED(osh)));
			goto fail;
		}
		bzero((char *)di->rxp, size);
	}
	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}
	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n",
			           di->name, (uint32)PHYSADDRLO(di->txdpa)));
			goto fail;
		}
		if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n",
			           di->name, (uint32)PHYSADDRLO(di->rxdpa)));
			goto fail;
		}
	}

	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh "
	           "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow,
	           di->dataoffsethigh, di->addrext));
	/* allocate DMA mapping vectors */
	if (DMASGLIST_ENAB) {
		if (ntxd) {
			size = ntxd * sizeof(hnddma_seg_map_t);
			if ((di->txp_dmah = (hnddma_seg_map_t *)MALLOC(osh, size)) == NULL)
				goto fail;
			bzero((char *)di->txp_dmah, size);
		}

		if (nrxd) {
			size = nrxd * sizeof(hnddma_seg_map_t);
			if ((di->rxp_dmah = (hnddma_seg_map_t *)MALLOC(osh, size)) == NULL)
				goto fail;
			bzero((char *)di->rxp_dmah, size);
		}
	}

	return ((hnddma_t *)di);

fail:
	_dma_detach(di);
	return (NULL);
}
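/*
 * Illustrative sketch (not part of the driver): a plausible attach/teardown
 * sequence. The tunables (NTXD, NRXD, RXBUFSZ, NRXBUFPOST, HWRXOFF) are
 * hypothetical names, not definitions from this file; -1 for rxextheadroom
 * selects the BCMEXTRAHDROOM default per the code above.
 *
 *	hnddma_t *dmah;
 *	dmah = dma_attach(osh, "wl0", sih, dmaregstx, dmaregsrx,
 *	                  NTXD, NRXD, RXBUFSZ, -1, NRXBUFPOST, HWRXOFF,
 *	                  &msg_level);
 *	if (dmah == NULL)
 *	        return NULL;
 *	...
 *	dmah->di_fn.detach(dmah);
 */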
/* init the tx or rx descriptor */
static INLINE void
dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, dmaaddr_t pa, uint outidx, uint32 *flags,
             uint32 bufcount)
{
	/* dma32 uses 32 bits control to fit both flags and bufcounter */
	*flags = *flags | (bufcount & CTRL_BC_MASK);

	if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
		W_SM(&ddring[outidx].addr, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	} else {
		/* address extension */
		uint32 ae;
		ASSERT(di->addrext);
		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

		*flags |= (ae << CTRL_AE_SHIFT);
		W_SM(&ddring[outidx].addr, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	}
}
static INLINE void
dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx, uint32 *flags,
             uint32 bufcount)
{
	uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
	if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
		ASSERT((PHYSADDRHI(pa) & PCI64ADDR_HIGH) == 0);

		W_SM(&ddring[outidx].addrlow, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	} else {
		/* address extension for 32-bit PCI */
		uint32 ae;
		ASSERT(di->addrext);

		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
		ASSERT(PHYSADDRHI(pa) == 0);

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		W_SM(&ddring[outidx].addrlow, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(0 + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	}
}
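/*
 * Illustrative sketch (not part of the driver): for a buffer address like
 * 0xd1234568 on 32-bit PCI, the extension path above computes
 *
 *	ae             = (0xd1234568 & 0xc0000000) >> 30 == 0x3
 *	PHYSADDRLO(pa) = 0xd1234568 & ~0xc0000000        == 0x11234568
 *
 * so bits [31:30] ride in the descriptor's AE field while the low 30 bits
 * (plus dataoffsetlow) land in the address word.
 */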
static bool
_dma32_addrext(osl_t *osh, dma32regs_t *dma32regs)
{
	uint32 w;

	OR_REG(osh, &dma32regs->control, XC_AE);
	w = R_REG(osh, &dma32regs->control);
	AND_REG(osh, &dma32regs->control, ~XC_AE);
	return ((w & XC_AE) == XC_AE);
}
static bool
_dma_alloc(dma_info_t *di, uint direction)
{
	if (DMA64_ENAB(di)) {
		return dma64_alloc(di, direction);
	} else {
		return dma32_alloc(di, direction);
	}
}
/* !! may be called with core in reset */
static void
_dma_detach(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_detach\n", di->name));

	/* shouldn't be here if descriptors are unreclaimed */
	ASSERT(di->txin == di->txout);
	ASSERT(di->rxin == di->rxout);

	/* free dma descriptor rings */
	if (DMA64_ENAB(di)) {
		if (di->txd64)
			DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->txd64 - di->txdalign),
			                    di->txdalloc, (di->txdpaorig), &di->tx_dmah);
		if (di->rxd64)
			DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->rxd64 - di->rxdalign),
			                    di->rxdalloc, (di->rxdpaorig), &di->rx_dmah);
	} else {
		if (di->txd32)
			DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->txd32 - di->txdalign),
			                    di->txdalloc, (di->txdpaorig), &di->tx_dmah);
		if (di->rxd32)
			DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->rxd32 - di->rxdalign),
			                    di->rxdalloc, (di->rxdpaorig), &di->rx_dmah);
	}

	/* free packet pointer vectors */
	if (di->txp)
		MFREE(di->osh, (void *)di->txp, (di->ntxd * sizeof(void *)));
	if (di->rxp)
		MFREE(di->osh, (void *)di->rxp, (di->nrxd * sizeof(void *)));

	/* free tx packet DMA handles */
	if (di->txp_dmah)
		MFREE(di->osh, (void *)di->txp_dmah, di->ntxd * sizeof(hnddma_seg_map_t));

	/* free rx packet DMA handles */
	if (di->rxp_dmah)
		MFREE(di->osh, (void *)di->rxp_dmah, di->nrxd * sizeof(hnddma_seg_map_t));

	/* free our private info structure */
	MFREE(di->osh, (void *)di, sizeof(dma_info_t));
}
/* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
static bool
_dma_isaddrext(dma_info_t *di)
{
	if (DMA64_ENAB(di)) {
		/* DMA64 supports full 32 bits or 64 bits. AE is always valid */

		/* not all tx or rx channel are available */
		if (di->d64txregs != NULL) {
			if (!_dma64_addrext(di->osh, di->d64txregs)) {
				DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n",
				           di->name));
				ASSERT(0);
			}
			return TRUE;
		} else if (di->d64rxregs != NULL) {
			if (!_dma64_addrext(di->osh, di->d64rxregs)) {
				DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n",
				           di->name));
				ASSERT(0);
			}
			return TRUE;
		}
		return FALSE;
	} else if (di->d32txregs)
		return (_dma32_addrext(di->osh, di->d32txregs));
	else if (di->d32rxregs)
		return (_dma32_addrext(di->osh, di->d32rxregs));
	return FALSE;
}
/* initialize descriptor table base address */
static void
_dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
{
	if (DMA64_ENAB(di)) {
		uint32 addrl;

		/* Figure out if this engine requires aligned descriptors */
		if (direction == DMA_TX) {
			W_REG(di->osh, &di->d64txregs->addrlow, 0xff0);
			addrl = R_REG(di->osh, &di->d64txregs->addrlow);
		} else {
			W_REG(di->osh, &di->d64rxregs->addrlow, 0xff0);
			addrl = R_REG(di->osh, &di->d64rxregs->addrlow);
		}
		if (addrl == 0) {
			DMA_NONE(("dd_table_init: DMA engine requires aligned descriptors\n"));
		} else {
			DMA_NONE(("dd_table_init: DMA engine accepts unaligned descriptors\n"));
			if (direction == DMA_TX) {
				di->xmtptrbase = PHYSADDRLO(pa);
			} else {
				di->rcvptrbase = PHYSADDRLO(pa);
			}
		}

		if ((di->ddoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d64txregs->addrlow, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
				W_REG(di->osh, &di->d64txregs->addrhigh, (PHYSADDRHI(pa) +
				      di->ddoffsethigh));
			} else {
				W_REG(di->osh, &di->d64rxregs->addrlow, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
				W_REG(di->osh, &di->d64rxregs->addrhigh, (PHYSADDRHI(pa) +
				      di->ddoffsethigh));
			}
		} else {
			/* DMA64 32bits address extension */
			uint32 ae;
			ASSERT(di->addrext);
			ASSERT(PHYSADDRHI(pa) == 0);

			/* shift the high bit(s) from pa to ae */
			ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
			PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d64txregs->addrlow, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
				W_REG(di->osh, &di->d64txregs->addrhigh, di->ddoffsethigh);
				SET_REG(di->osh, &di->d64txregs->control, D64_XC_AE,
				        (ae << D64_XC_AE_SHIFT));
			} else {
				W_REG(di->osh, &di->d64rxregs->addrlow, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
				W_REG(di->osh, &di->d64rxregs->addrhigh, di->ddoffsethigh);
				SET_REG(di->osh, &di->d64rxregs->control, D64_RC_AE,
				        (ae << D64_RC_AE_SHIFT));
			}
		}
	} else {
		ASSERT(PHYSADDRHI(pa) == 0);
		if ((di->ddoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
			if (direction == DMA_TX)
				W_REG(di->osh, &di->d32txregs->addr, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
			else
				W_REG(di->osh, &di->d32rxregs->addr, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
		} else {
			/* dma32 address extension */
			uint32 ae;
			ASSERT(di->addrext);

			/* shift the high bit(s) from pa to ae */
			ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
			PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d32txregs->addr, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
				SET_REG(di->osh, &di->d32txregs->control, XC_AE,
				        ae << XC_AE_SHIFT);
			} else {
				W_REG(di->osh, &di->d32rxregs->addr, (PHYSADDRLO(pa) +
				      di->ddoffsetlow));
				SET_REG(di->osh, &di->d32rxregs->control, RC_AE,
				        ae << RC_AE_SHIFT);
			}
		}
	}
}
static void
_dma_fifoloopbackenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
	if (DMA64_ENAB(di))
		OR_REG(di->osh, &di->d64txregs->control, D64_XC_LE);
	else
		OR_REG(di->osh, &di->d32txregs->control, XC_LE);
}
static void
_dma_rxinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxinit\n", di->name));

	if (di->nrxd == 0)
		return;

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	if (DMA64_ENAB(di)) {
		BZERO_SM((void *)(uintptr)di->rxd64, (di->nrxd * sizeof(dma64dd_t)));
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
	} else {
		BZERO_SM((void *)(uintptr)di->rxd32, (di->nrxd * sizeof(dma32dd_t)));
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
	}
}
static void
_dma_rxenable(dma_info_t *di)
{
	uint dmactrlflags = di->hnddma.dmactrlflags;

	DMA_TRACE(("%s: dma_rxenable\n", di->name));

	if (DMA64_ENAB(di)) {
		uint32 control = (R_REG(di->osh, &di->d64rxregs->control) & D64_RC_AE) |
		                 D64_RC_RE;

		/* Unconditional for now */
		/* if ((dmactrlflags & DMA_CTRL_PEN) == 0) */
		control |= D64_RC_PD;

		if (dmactrlflags & DMA_CTRL_ROC)
			control |= D64_RC_OC;

		W_REG(di->osh, &di->d64rxregs->control,
		      ((di->rxoffset << D64_RC_RO_SHIFT) | control));
	} else {
		uint32 control = (R_REG(di->osh, &di->d32rxregs->control) & RC_AE) | RC_RE;

		/* Unconditional for now */
		/* if ((dmactrlflags & DMA_CTRL_PEN) == 0) */
		control |= RC_PD;

		if (dmactrlflags & DMA_CTRL_ROC)
			control |= RC_OC;

		W_REG(di->osh, &di->d32rxregs->control,
		      ((di->rxoffset << RC_RO_SHIFT) | control));
	}
}
static void
_dma_rx_param_get(dma_info_t *di, uint16 *rxoffset, uint16 *rxbufsize)
{
	/* the normal values fit into 16 bits */
	*rxoffset = (uint16)di->rxoffset;
	*rxbufsize = (uint16)di->rxbufsize;
}
/* !! rx entry routine
 * returns a pointer to the next frame received, or NULL if there are no more
 * if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is supported
 *      with the pkts chained together;
 * otherwise, it's treated as a giant pkt and will be tossed.
 * The DMA scattering starts with normal DMA header, followed by first buffer data.
 * After it reaches the max size of buffer, the data continues in next DMA descriptor
 * buffer WITHOUT DMA header
 */
static void * BCMFASTPATH
_dma_rx(dma_info_t *di)
{
	void *p, *head, *tail;
	uint len;
	uint pkt_len;
	int resid = 0;

next_frame:
	head = _dma_getnextrxp(di, FALSE);
	if (head == NULL)
		return (NULL);

	len = ltoh16(*(uint16 *)(PKTDATA(di->osh, head)));
	DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));

#if defined(__mips__)
	if (!len) {
		while (!(len = *(uint16 *)OSL_UNCACHED(PKTDATA(di->osh, head))))
			OSL_DELAY(1);

		*(uint16 *)PKTDATA(di->osh, head) = htol16((uint16)len);
	}
#endif /* defined(__mips__) */

	/* set actual length */
	pkt_len = MIN((di->rxoffset + len), di->rxbufsize);
	PKTSETLEN(di->osh, head, pkt_len);
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	if (resid > 0) {
		tail = head;
		while ((resid > 0) && (p = _dma_getnextrxp(di, FALSE))) {
			PKTSETNEXT(di->osh, tail, p);
			pkt_len = MIN(resid, (int)di->rxbufsize);
			PKTSETLEN(di->osh, p, pkt_len);

			tail = p;
			resid -= di->rxbufsize;
		}

#ifdef BCMDBG
		if (resid > 0) {
			uint cur;
			ASSERT(p == NULL);
			cur = (DMA64_ENAB(di)) ?
			      B2I(R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK,
			          dma64dd_t) :
			      B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK,
			          dma32dd_t);
			DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
			           di->rxin, di->rxout, cur));
		}
#endif /* BCMDBG */

		if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di->name, len));
			PKTFREE(di->osh, head, FALSE);
			di->hnddma.rxgiants++;
			goto next_frame;
		}
	}

	return (head);
}
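/*
 * Illustrative sketch (not part of the driver): with rxbufsize = 2048,
 * rxoffset = 30 and a reported frame length len = 5000, the head buffer
 * carries 2048 bytes (30 of offset + 2018 of payload), so
 *
 *	resid = 5000 - (2048 - 30) = 2982
 *
 * and two more posted buffers (2048 and then 934 payload bytes) get chained
 * onto the head via PKTSETNEXT() -- but only if DMA_CTRL_RXMULTI is set;
 * otherwise the chain is freed and counted in rxgiants.
 */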
/* post receive buffers
 * return FALSE if refill failed completely and ring is empty
 * this will stall the rx dma and user might want to call rxfill again asap
 * This rarely happens on a memory-rich NIC, but often on a memory-constrained dongle
 */
static bool BCMFASTPATH
_dma_rxfill(dma_info_t *di)
{
	void *p;
	uint rxin, rxout;
	uint32 flags = 0;
	uint n;
	uint i;
	dmaaddr_t pa;
	uint extra_offset = 0;
	bool ring_empty;

	ring_empty = FALSE;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */

	rxin = di->rxin;
	rxout = di->rxout;

	n = di->nrxpost - NRXDACTIVE(rxin, rxout);

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = di->rxextrahdrroom;

	for (i = 0; i < n; i++) {
		/* the di->rxbufsize doesn't include the extra headroom, we need to add it to the
		 * size to be allocated
		 */
		if ((p = PKTGET(di->osh, di->rxbufsize + extra_offset, FALSE)) == NULL) {
			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", di->name));
			if (i == 0) {
				if (DMA64_ENAB(di)) {
					if (dma64_rxidle(di)) {
						DMA_ERROR(("%s: rxfill64: ring is empty !\n",
						           di->name));
						ring_empty = TRUE;
					}
				} else {
					if (dma32_rxidle(di)) {
						DMA_ERROR(("%s: rxfill32: ring is empty !\n",
						           di->name));
						ring_empty = TRUE;
					}
				}
			}
			di->hnddma.rxnobuf++;
			break;
		}
		/* reserve an extra headroom, if applicable */
		if (extra_offset)
			PKTPULL(di->osh, p, extra_offset);

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(uint32 *)(PKTDATA(di->osh, p)) = 0;

		if (DMASGLIST_ENAB)
			bzero(&di->rxp_dmah[rxout], sizeof(hnddma_seg_map_t));

		pa = DMA_MAP(di->osh, PKTDATA(di->osh, p),
		             di->rxbufsize, DMA_RX, p,
		             &di->rxp_dmah[rxout]);

		ASSERT(ISALIGNED(PHYSADDRLO(pa), 4));

		/* save the free packet pointer */
		ASSERT(di->rxp[rxout] == NULL);
		di->rxp[rxout] = p;

		/* reset flags for each descriptor */
		flags = 0;
		if (DMA64_ENAB(di)) {
			if (rxout == (di->nrxd - 1))
				flags = D64_CTRL1_EOT;

			dma64_dd_upd(di, di->rxd64, pa, rxout, &flags, di->rxbufsize);
		} else {
			if (rxout == (di->nrxd - 1))
				flags = CTRL_EOT;

			ASSERT(PHYSADDRHI(pa) == 0);
			dma32_dd_upd(di, di->rxd32, pa, rxout, &flags, di->rxbufsize);
		}
		rxout = NEXTRXD(rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	if (DMA64_ENAB(di)) {
		W_REG(di->osh, &di->d64rxregs->ptr, di->rcvptrbase + I2B(rxout, dma64dd_t));
	} else {
		W_REG(di->osh, &di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
	}

	return ring_empty;
}
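/*
 * Illustrative sketch (not part of the driver): rxfill tops the ring up to
 * nrxpost buffers. With nrxpost = 32, rxin = 10 and rxout = 30,
 * NRXDACTIVE(10, 30) = 20 buffers are still posted, so n = 32 - 20 = 12
 * fresh buffers get allocated, DMA_MAP'd and written into descriptors, and
 * the chip's ptr register is advanced once at the end.
 */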
/* like getnexttxp but no reclaim */
static void *
_dma_peeknexttxp(dma_info_t *di)
{
	uint end, i;

	if (di->ntxd == 0)
		return (NULL);

	if (DMA64_ENAB(di)) {
		end = B2I(((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK) -
		           di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t);
	} else {
		end = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
	}

	for (i = di->txin; i != end; i = NEXTTXD(i))
		if (di->txp[i])
			return (di->txp[i]);

	return (NULL);
}
/* like getnextrxp but does not take it off the ring */
static void *
_dma_peeknextrxp(dma_info_t *di)
{
	uint end, i;

	if (di->nrxd == 0)
		return (NULL);

	if (DMA64_ENAB(di)) {
		end = B2I(R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK, dma64dd_t);
	} else {
		end = B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t);
	}

	for (i = di->rxin; i != end; i = NEXTRXD(i))
		if (di->rxp[i])
			return (di->rxp[i]);

	return (NULL);
}
static void
_dma_rxreclaim(dma_info_t *di)
{
	void *p;

	/* "unused local" warning suppression for OSLs that
	 * define PKTFREE() without using the di->osh arg
	 */
	di = di;

	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

	while ((p = _dma_getnextrxp(di, TRUE)))
		PKTFREE(di->osh, p, FALSE);
}
static void * BCMFASTPATH
_dma_getnextrxp(dma_info_t *di, bool forceall)
{
	if (di->nrxd == 0)
		return (NULL);

	if (DMA64_ENAB(di)) {
		return dma64_getnextrxp(di, forceall);
	} else {
		return dma32_getnextrxp(di, forceall);
	}
}
static void
_dma_txblock(dma_info_t *di)
{
	di->hnddma.txavail = 0;
}

static void
_dma_txunblock(dma_info_t *di)
{
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}

static uint
_dma_txactive(dma_info_t *di)
{
	return (NTXDACTIVE(di->txin, di->txout));
}
static uint
_dma_txpending(dma_info_t *di)
{
	uint curr;

	if (DMA64_ENAB(di)) {
		curr = B2I(((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK) -
		            di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t);
	} else {
		curr = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
	}
	return (NTXDACTIVE(curr, di->txout));
}
static uint
_dma_txcommitted(dma_info_t *di)
{
	uint ptr;
	uint txin = di->txin;

	if (txin == di->txout)
		return 0;

	if (DMA64_ENAB(di)) {
		ptr = B2I(R_REG(di->osh, &di->d64txregs->ptr), dma64dd_t);
	} else {
		ptr = B2I(R_REG(di->osh, &di->d32txregs->ptr), dma32dd_t);
	}
	return (NTXDACTIVE(di->txin, ptr));
}
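/*
 * Illustrative sketch (not part of the driver): with ntxd = 8, txin = 2,
 * txout = 6 and the chip's consumed-descriptor index at 4,
 *
 *	_dma_txactive()  == NTXDACTIVE(2, 6) == 4	(posted, unreclaimed)
 *	_dma_txpending() == NTXDACTIVE(4, 6) == 2	(posted, not yet consumed)
 *
 * while _dma_txcommitted() counts from txin up to the last ptr value
 * written to the chip, i.e. how many active descriptors hardware has
 * actually been told about.
 */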
static uint
_dma_rxactive(dma_info_t *di)
{
	return (NRXDACTIVE(di->rxin, di->rxout));
}
static void
_dma_counterreset(dma_info_t *di)
{
	/* reset all software counters */
	di->hnddma.rxgiants = 0;
	di->hnddma.rxnobuf = 0;
	di->hnddma.txnobuf = 0;
}
static uint
_dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
{
	uint dmactrlflags;

	if (di == NULL) {
		DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
		return (0);
	}

	dmactrlflags = di->hnddma.dmactrlflags;

	ASSERT((flags & ~mask) == 0);

	dmactrlflags &= ~mask;
	dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (dmactrlflags & DMA_CTRL_PEN) {
		uint32 control;

		if (DMA64_ENAB(di)) {
			control = R_REG(di->osh, &di->d64txregs->control);
			W_REG(di->osh, &di->d64txregs->control, control | D64_XC_PD);
			if (R_REG(di->osh, &di->d64txregs->control) & D64_XC_PD) {
				/* We *can* disable it so it is supported,
				 * restore control register
				 */
				W_REG(di->osh, &di->d64txregs->control, control);
			} else {
				/* Not supported, don't allow it to be enabled */
				dmactrlflags &= ~DMA_CTRL_PEN;
			}
		} else {
			control = R_REG(di->osh, &di->d32txregs->control);
			W_REG(di->osh, &di->d32txregs->control, control | XC_PD);
			if (R_REG(di->osh, &di->d32txregs->control) & XC_PD) {
				W_REG(di->osh, &di->d32txregs->control, control);
			} else {
				/* Not supported, don't allow it to be enabled */
				dmactrlflags &= ~DMA_CTRL_PEN;
			}
		}
	}

	di->hnddma.dmactrlflags = dmactrlflags;

	return (dmactrlflags);
}
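/*
 * Illustrative sketch (not part of the driver): mask picks which control
 * flag bits to modify and flags supplies their new values, so
 *
 *	_dma_ctrlflags(di, DMA_CTRL_PEN, DMA_CTRL_PEN);	// enable parity
 *	_dma_ctrlflags(di, DMA_CTRL_PEN, 0);		// disable parity
 *	_dma_ctrlflags(di, 0, 0);			// read current flags
 *
 * The returned value is the resulting flag set; DMA_CTRL_PEN is silently
 * dropped when the probe above finds parity unsupported.
 */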
/* get the address of the var in order to change later */
static uintptr
_dma_getvar(dma_info_t *di, const char *name)
{
	if (!strcmp(name, "&txavail"))
		return ((uintptr)&(di->hnddma.txavail));
	else {
		ASSERT(0);
	}
	return (0);
}
void
dma_txpioloopback(osl_t *osh, dma32regs_t *regs)
{
	OR_REG(osh, &regs->control, XC_LE);
}
#if defined(BCMDBG) || defined(BCMDBG_DUMP)
static void
dma32_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma32dd_t *ring, uint start, uint end,
               uint max_num)
{
	uint i;

	for (i = start; i != end; i = XXD((i + 1), max_num)) {
		/* in the format of high->low 8 bytes */
		bcm_bprintf(b, "ring index %d: 0x%x %x\n", i, ring[i].addr, ring[i].ctrl);
	}
}
static void
dma32_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
	if (di->ntxd == 0)
		return;

	bcm_bprintf(b, "DMA32: txd32 %p txdpa 0x%lx txp %p txin %d txout %d "
	            "txavail %d\n", di->txd32, PHYSADDRLO(di->txdpa), di->txp, di->txin,
	            di->txout, di->hnddma.txavail);

	bcm_bprintf(b, "xmtcontrol 0x%x xmtaddr 0x%x xmtptr 0x%x xmtstatus 0x%x\n",
	            R_REG(di->osh, &di->d32txregs->control),
	            R_REG(di->osh, &di->d32txregs->addr),
	            R_REG(di->osh, &di->d32txregs->ptr),
	            R_REG(di->osh, &di->d32txregs->status));

	if (dumpring && di->txd32)
		dma32_dumpring(di, b, di->txd32, di->txin, di->txout, di->ntxd);
}
static void
dma32_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
	if (di->nrxd == 0)
		return;

	bcm_bprintf(b, "DMA32: rxd32 %p rxdpa 0x%lx rxp %p rxin %d rxout %d\n",
	            di->rxd32, PHYSADDRLO(di->rxdpa), di->rxp, di->rxin, di->rxout);

	bcm_bprintf(b, "rcvcontrol 0x%x rcvaddr 0x%x rcvptr 0x%x rcvstatus 0x%x\n",
	            R_REG(di->osh, &di->d32rxregs->control),
	            R_REG(di->osh, &di->d32rxregs->addr),
	            R_REG(di->osh, &di->d32rxregs->ptr),
	            R_REG(di->osh, &di->d32rxregs->status));
	if (di->rxd32 && dumpring)
		dma32_dumpring(di, b, di->rxd32, di->rxin, di->rxout, di->nrxd);
}
static void
dma32_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
	dma32_dumptx(di, b, dumpring);
	dma32_dumprx(di, b, dumpring);
}
static void
dma64_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma64dd_t *ring, uint start, uint end,
               uint max_num)
{
	uint i;

	for (i = start; i != end; i = XXD((i + 1), max_num)) {
		/* in the format of high->low 16 bytes */
		bcm_bprintf(b, "ring index %d: 0x%x %x %x %x\n",
		            i, ring[i].addrhigh, ring[i].addrlow, ring[i].ctrl2, ring[i].ctrl1);
	}
}
static void
dma64_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
	if (di->ntxd == 0)
		return;

	bcm_bprintf(b, "DMA64: txd64 %p txdpa 0x%lx txdpahi 0x%lx txp %p txin %d txout %d "
	            "txavail %d\n", di->txd64, PHYSADDRLO(di->txdpa), PHYSADDRHI(di->txdpaorig),
	            di->txp, di->txin, di->txout, di->hnddma.txavail);

	bcm_bprintf(b, "xmtcontrol 0x%x xmtaddrlow 0x%x xmtaddrhigh 0x%x "
	            "xmtptr 0x%x xmtstatus0 0x%x xmtstatus1 0x%x\n",
	            R_REG(di->osh, &di->d64txregs->control),
	            R_REG(di->osh, &di->d64txregs->addrlow),
	            R_REG(di->osh, &di->d64txregs->addrhigh),
	            R_REG(di->osh, &di->d64txregs->ptr),
	            R_REG(di->osh, &di->d64txregs->status0),
	            R_REG(di->osh, &di->d64txregs->status1));

	if (dumpring && di->txd64) {
		dma64_dumpring(di, b, di->txd64, di->txin, di->txout, di->ntxd);
	}
}
static void
dma64_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
	if (di->nrxd == 0)
		return;

	bcm_bprintf(b, "DMA64: rxd64 %p rxdpa 0x%lx rxdpahi 0x%lx rxp %p rxin %d rxout %d\n",
	            di->rxd64, PHYSADDRLO(di->rxdpa), PHYSADDRHI(di->rxdpaorig), di->rxp,
	            di->rxin, di->rxout);

	bcm_bprintf(b, "rcvcontrol 0x%x rcvaddrlow 0x%x rcvaddrhigh 0x%x rcvptr "
	            "0x%x rcvstatus0 0x%x rcvstatus1 0x%x\n",
	            R_REG(di->osh, &di->d64rxregs->control),
	            R_REG(di->osh, &di->d64rxregs->addrlow),
	            R_REG(di->osh, &di->d64rxregs->addrhigh),
	            R_REG(di->osh, &di->d64rxregs->ptr),
	            R_REG(di->osh, &di->d64rxregs->status0),
	            R_REG(di->osh, &di->d64rxregs->status1));
	if (di->rxd64 && dumpring) {
		dma64_dumpring(di, b, di->rxd64, di->rxin, di->rxout, di->nrxd);
	}
}
static void
dma64_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
	dma64_dumptx(di, b, dumpring);
	dma64_dumprx(di, b, dumpring);
}

#endif	/* BCMDBG || BCMDBG_DUMP */
/* 32 bits DMA functions */
static void
dma32_txinit(dma_info_t *di)
{
	uint32 control = XC_XE;

	DMA_TRACE(("%s: dma_txinit\n", di->name));

	if (di->ntxd == 0)
		return;

	di->txin = di->txout = 0;
	di->hnddma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	BZERO_SM((void *)(uintptr)di->txd32, (di->ntxd * sizeof(dma32dd_t)));

	/* Unconditional for now */
	/* if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0) */
	control |= XC_PD;
	W_REG(di->osh, &di->d32txregs->control, control);
	_dma_ddtable_init(di, DMA_TX, di->txdpa);
}
static bool
dma32_txenabled(dma_info_t *di)
{
	uint32 xc;

	/* If the chip is dead, it is not enabled :-) */
	xc = R_REG(di->osh, &di->d32txregs->control);
	return ((xc != 0xffffffff) && (xc & XC_XE));
}
static void
dma32_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));

	if (di->ntxd == 0)
		return;

	OR_REG(di->osh, &di->d32txregs->control, XC_SE);
}

static void
dma32_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));

	if (di->ntxd == 0)
		return;

	AND_REG(di->osh, &di->d32txregs->control, ~XC_SE);
}
static bool
dma32_txsuspended(dma_info_t *di)
{
	return (di->ntxd == 0) ||
	       ((R_REG(di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
}
static void
dma32_txreclaim(dma_info_t *di, txd_range_t range)
{
	void *p;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
	           (range == HNDDMA_RANGE_ALL) ? "all" :
	           ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : "transfered")));

	if (di->txin == di->txout)
		return;

	while ((p = dma32_getnexttxp(di, range)))
		PKTFREE(di->osh, p, TRUE);
}
static bool
dma32_txstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == XS_XS_STOPPED);
}

static bool
dma32_rxstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d32rxregs->status) & RS_RS_MASK) == RS_RS_STOPPED);
}
static bool
dma32_alloc(dma_info_t *di, uint direction)
{
	uint size;
	uint ddlen;
	void *va;

	ddlen = sizeof(dma32dd_t);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, D32RINGALIGN))
		size += D32RINGALIGN;

	if (direction == DMA_TX) {
		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size,
		                               &di->txdpaorig, &di->tx_dmah)) == NULL) {
			DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
			           di->name));
			return FALSE;
		}

		PHYSADDRHISET(di->txdpa, 0);
		ASSERT(PHYSADDRHI(di->txdpaorig) == 0);
		di->txd32 = (dma32dd_t *)ROUNDUP((uintptr)va, D32RINGALIGN);
		di->txdalign = (uint)((int8 *)(uintptr)di->txd32 - (int8 *)va);

		PHYSADDRLOSET(di->txdpa, PHYSADDRLO(di->txdpaorig) + di->txdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));

		di->txdalloc = size;
		ASSERT(ISALIGNED((uintptr)di->txd32, D32RINGALIGN));
	} else {
		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpaorig,
		                               &di->rx_dmah)) == NULL) {
			DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
			           di->name));
			return FALSE;
		}

		PHYSADDRHISET(di->rxdpa, 0);
		ASSERT(PHYSADDRHI(di->rxdpaorig) == 0);
		di->rxd32 = (dma32dd_t *)ROUNDUP((uintptr)va, D32RINGALIGN);
		di->rxdalign = (uint)((int8 *)(uintptr)di->rxd32 - (int8 *)va);

		PHYSADDRLOSET(di->rxdpa, PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));
		di->rxdalloc = size;
		ASSERT(ISALIGNED((uintptr)di->rxd32, D32RINGALIGN));
	}

	return TRUE;
}
static bool
dma32_txreset(dma_info_t *di)
{
	uint32 status;

	if (di->ntxd == 0)
		return TRUE;

	/* suspend tx DMA first */
	W_REG(di->osh, &di->d32txregs->control, XC_SE);
	SPINWAIT(((status = (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
	          != XS_XS_DISABLED) &&
	         (status != XS_XS_IDLE) &&
	         (status != XS_XS_STOPPED),
	         10000);

	W_REG(di->osh, &di->d32txregs->control, 0);
	SPINWAIT(((status = (R_REG(di->osh,
	          &di->d32txregs->status) & XS_XS_MASK)) != XS_XS_DISABLED),
	         10000);

	/* wait for the last transaction to complete */
	OSL_DELAY(300);

	return (status == XS_XS_DISABLED);
}
static bool
dma32_rxidle(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxidle\n", di->name));

	if (di->nrxd == 0)
		return TRUE;

	return ((R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
	        R_REG(di->osh, &di->d32rxregs->ptr));
}
static bool
dma32_rxreset(dma_info_t *di)
{
	uint32 status;

	if (di->nrxd == 0)
		return TRUE;

	W_REG(di->osh, &di->d32rxregs->control, 0);
	SPINWAIT(((status = (R_REG(di->osh,
	          &di->d32rxregs->status) & RS_RS_MASK)) != RS_RS_DISABLED),
	         10000);

	return (status == RS_RS_DISABLED);
}
static bool
dma32_rxenabled(dma_info_t *di)
{
	uint32 rc;

	rc = R_REG(di->osh, &di->d32rxregs->control);
	return ((rc != 0xffffffff) && (rc & RC_RE));
}
static bool
dma32_txsuspendedidle(dma_info_t *di)
{
	if (di->ntxd == 0)
		return TRUE;

	if (!(R_REG(di->osh, &di->d32txregs->control) & XC_SE))
		return 0;

	if ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
		return 0;

	OSL_DELAY(2);
	return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == XS_XS_IDLE);
}
/* !! tx entry routine
 * supports full 32bit dma engine buffer addressing so
 * dma buffers can cross 4 Kbyte page boundaries.
 *
 * WARNING: the caller must check the return value for error.
 * the error (tossed frames) could be fatal and cause many subsequent hard-to-debug problems
 */
static int BCMFASTPATH
dma32_txfast(dma_info_t *di, void *p0, bool commit)
{
	void *p, *next;
	uchar *data;
	uint len;
	uint txout;
	uint32 flags = 0;
	dmaaddr_t pa;

	DMA_TRACE(("%s: dma_txfast\n", di->name));

	txout = di->txout;

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		uint nsegs, j;
		hnddma_seg_map_t *map;

		data = PKTDATA(di->osh, p);
		len = PKTLEN(di->osh, p);
		len += PKTDMAPAD(di->osh, p);
		next = PKTNEXT(di->osh, p);

		/* return nonzero if out of tx descriptors */
		if (NEXTTXD(txout) == di->txin)
			goto outoftxd;

		if (len == 0)
			continue;

		if (DMASGLIST_ENAB)
			bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));

		/* get physical address of buffer start */
		pa = DMA_MAP(di->osh, data, len, DMA_TX, p, &di->txp_dmah[txout]);

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[txout];

			/* See if all the segments can be accounted for */
			if (map->nsegs > (di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1))
				goto outoftxd;

			nsegs = map->nsegs;
		} else
			nsegs = 1;

		for (j = 1; j <= nsegs; j++) {
			flags = 0;
			if (p == p0 && j == 1)
				flags |= CTRL_SOF;

			/* With a DMA segment list, Descriptor table is filled
			 * using the segment list instead of looping over
			 * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
			 * end of segment list is reached.
			 */
			if ((!DMASGLIST_ENAB && next == NULL) ||
			    (DMASGLIST_ENAB && j == nsegs))
				flags |= (CTRL_IOC | CTRL_EOF);
			if (txout == (di->ntxd - 1))
				flags |= CTRL_EOT;

			if (DMASGLIST_ENAB) {
				len = map->segs[j - 1].length;
				pa = map->segs[j - 1].addr;
			}
			ASSERT(PHYSADDRHI(pa) == 0);

			dma32_dd_upd(di, di->txd32, pa, txout, &flags, len);
			ASSERT(di->txp[txout] == NULL);

			txout = NEXTTXD(txout);
		}

		/* See above. No need to loop over individual buffers */
		if (DMASGLIST_ENAB)
			break;
	}

	/* if last txd eof not set, fix it */
	if (!(flags & CTRL_EOF))
		W_SM(&di->txd32[PREVTXD(txout)].ctrl, BUS_SWAP32(flags | CTRL_IOC | CTRL_EOF));

	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit)
		W_REG(di->osh, &di->d32txregs->ptr, I2B(txout, dma32dd_t));

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (0);

outoftxd:
	DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
	PKTFREE(di->osh, p0, TRUE);
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
	return (-1);
}
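/*
 * Illustrative sketch (not part of the driver): per the WARNING above, the
 * caller must check the return value. On failure the frame has already
 * been freed here, so a hypothetical caller would only back off:
 *
 *	if (dma32_txfast(di, p, TRUE) != 0) {
 *		// p is gone; wait until di->hnddma.txavail > 0, then
 *		// retry with a fresh packet
 *	}
 */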
/*
 * Reclaim next completed txd (txds if using chained buffers) in the range
 * specified and return associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transfered by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return associated packet regardless of the value of hardware pointers.
 */
static void * BCMFASTPATH
dma32_getnexttxp(dma_info_t *di, txd_range_t range)
{
	uint start, end, i;
	uint active_desc;
	void *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
	           (range == HNDDMA_RANGE_ALL) ? "all" :
	           ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : "transfered")));

	if (di->ntxd == 0)
		return (NULL);

	txp = NULL;

	start = di->txin;
	if (range == HNDDMA_RANGE_ALL)
		end = di->txout;
	else {
		end = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);

		if (range == HNDDMA_RANGE_TRANSFERED) {
			active_desc = ((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK) >>
			               XS_AD_SHIFT);
			active_desc = B2I(active_desc, dma32dd_t);
			if (end != active_desc)
				end = PREVTXD(active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		dmaaddr_t pa;
		hnddma_seg_map_t *map = NULL;
		uint size, j, nsegs;

		PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->txd32[i].addr)) - di->dataoffsetlow));
		PHYSADDRHISET(pa, 0);

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[i];
			size = map->origsize;
			nsegs = map->nsegs;
		} else {
			size = (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) & CTRL_BC_MASK);
			nsegs = 1;
		}

		for (j = nsegs; j > 0; j--) {
			W_SM(&di->txd32[i].addr, 0xdeadbeef);

			txp = di->txp[i];
			di->txp[i] = NULL;
			if (j > 1)
				i = NEXTTXD(i);
		}

		DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
	}

	di->txin = i;

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (txp);

bogus:
	DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d range %d\n",
	           start, end, di->txout, range));
	return (NULL);
}
static void * BCMFASTPATH
dma32_getnextrxp(dma_info_t *di, bool forceall)
{
	uint i, curr;
	void *rxp;
	dmaaddr_t pa;

	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma32_rxenabled(di));

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return (NULL);

	curr = B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return (NULL);

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	ASSERT(rxp);
	di->rxp[i] = NULL;

	PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) - di->dataoffsetlow));
	PHYSADDRHISET(pa, 0);

	/* clear this packet from the descriptor ring */
	DMA_UNMAP(di->osh, pa,
	          di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

	W_SM(&di->rxd32[i].addr, 0xdeadbeef);

	di->rxin = NEXTRXD(i);

	return (rxp);
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void
dma32_txrotate(dma_info_t *di)
{
	uint ad;
	uint nactive;
	uint rot;
	uint old, new;
	uint32 w;
	uint first, last;

	ASSERT(dma32_txsuspendedidle(di));

	nactive = _dma_txactive(di);
	ad = B2I(((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK) >> XS_AD_SHIFT),
	         dma32dd_t);
	rot = TXD(ad - di->txin);

	ASSERT(rot < di->ntxd);

	/* full-ring case is a lot harder - don't worry about this */
	if (rot >= (di->ntxd - nactive)) {
		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
		return;
	}

	first = di->txin;
	last = PREVTXD(di->txout);

	/* move entries starting at last and moving backwards to first */
	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
		new = TXD(old + rot);

		/*
		 * Move the tx dma descriptor.
		 * EOT is set only in the last entry in the ring.
		 */
		w = BUS_SWAP32(R_SM(&di->txd32[old].ctrl)) & ~CTRL_EOT;
		if (new == (di->ntxd - 1))
			w |= CTRL_EOT;
		W_SM(&di->txd32[new].ctrl, BUS_SWAP32(w));
		W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));

		/* zap the old tx dma descriptor address field */
		W_SM(&di->txd32[old].addr, BUS_SWAP32(0xdeadbeef));

		/* move the corresponding txp[] entry */
		ASSERT(di->txp[new] == NULL);
		di->txp[new] = di->txp[old];

		/* Move the segment map as well */
		if (DMASGLIST_ENAB) {
			bcopy(&di->txp_dmah[old], &di->txp_dmah[new], sizeof(hnddma_seg_map_t));
			bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
		}

		di->txp[old] = NULL;
	}

	/* update txin and txout */
	di->txin = ad;
	di->txout = TXD(di->txout + rot);
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	/* kick the chip */
	W_REG(di->osh, &di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
}
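/*
 * Illustrative sketch (not part of the driver): with ntxd = 8, txin = 5 and
 * the hardware ActiveDescriptor at 7, rot = TXD(7 - 5) = 2, so the entry at
 * index 6 moves to TXD(6 + 2) = 0, wrapping around the ring while keeping
 * the suspended engine's view of descriptor order intact.
 */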
/* 64 bits DMA functions */

#ifdef BCMDMA64
static void
dma64_txinit(dma_info_t *di)
{
	uint32 control = D64_XC_XE;

	DMA_TRACE(("%s: dma_txinit\n", di->name));

	if (di->ntxd == 0)
		return;

	di->txin = di->txout = 0;
	di->hnddma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	BZERO_SM((void *)(uintptr)di->txd64, (di->ntxd * sizeof(dma64dd_t)));

	_dma_ddtable_init(di, DMA_TX, di->txdpa);

	/* Unconditional for now */
	/* if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0) */
	control |= D64_XC_PD;
	OR_REG(di->osh, &di->d64txregs->control, control);
}
static bool
dma64_txenabled(dma_info_t *di)
{
	uint32 xc;

	/* If the chip is dead, it is not enabled :-) */
	xc = R_REG(di->osh, &di->d64txregs->control);
	return ((xc != 0xffffffff) && (xc & D64_XC_XE));
}
static void
dma64_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));

	if (di->ntxd == 0)
		return;

	OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
}

static void
dma64_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));

	if (di->ntxd == 0)
		return;

	AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_SE);
}
static bool
dma64_txsuspended(dma_info_t *di)
{
	return (di->ntxd == 0) || ((R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE)
	                           == D64_XC_SE);
}
static void BCMFASTPATH
dma64_txreclaim(dma_info_t *di, txd_range_t range)
{
	void *p;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
	           (range == HNDDMA_RANGE_ALL) ? "all" :
	           ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : "transfered")));

	if (di->txin == di->txout)
		return;

	while ((p = dma64_getnexttxp(di, range)))
		PKTFREE(di->osh, p, TRUE);
}
static bool
dma64_txstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
	        D64_XS0_XS_STOPPED);
}

static bool
dma64_rxstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
	        D64_RS0_RS_STOPPED);
}
static bool
dma64_alloc(dma_info_t *di, uint direction)
{
	uint size;
	uint ddlen;
	void *va;
	uint alignbytes;

	ddlen = sizeof(dma64dd_t);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

	alignbytes = di->dma64align;

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, alignbytes))
		size += alignbytes;

	if (direction == DMA_TX) {
		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpaorig,
		                               &di->tx_dmah)) == NULL) {
			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
			           di->name));
			return FALSE;
		}

		di->txd64 = (dma64dd_t *)ROUNDUP((uintptr)va, alignbytes);
		di->txdalign = (uint)((int8 *)(uintptr)di->txd64 - (int8 *)va);
		PHYSADDRLOSET(di->txdpa, PHYSADDRLO(di->txdpaorig) + di->txdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));

		PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));
		di->txdalloc = size;
		ASSERT(ISALIGNED((uintptr)di->txd64, alignbytes));
	} else {
		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpaorig,
		                               &di->rx_dmah)) == NULL) {
			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
			           di->name));
			return FALSE;
		}
		di->rxd64 = (dma64dd_t *)ROUNDUP((uintptr)va, alignbytes);
		di->rxdalign = (uint)((int8 *)(uintptr)di->rxd64 - (int8 *)va);
		PHYSADDRLOSET(di->rxdpa, PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));

		PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));
		di->rxdalloc = size;
		ASSERT(ISALIGNED((uintptr)di->rxd64, alignbytes));
	}

	return TRUE;
}
static bool
dma64_txreset(dma_info_t *di)
{
	uint32 status;

	if (di->ntxd == 0)
		return TRUE;

	/* suspend tx DMA first */
	W_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
	SPINWAIT(((status = (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
	          D64_XS0_XS_DISABLED) &&
	         (status != D64_XS0_XS_IDLE) &&
	         (status != D64_XS0_XS_STOPPED),
	         10000);

	W_REG(di->osh, &di->d64txregs->control, 0);
	SPINWAIT(((status = (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
	          D64_XS0_XS_DISABLED),
	         10000);

	/* wait for the last transaction to complete */
	OSL_DELAY(300);

	return (status == D64_XS0_XS_DISABLED);
}
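/*
 * The reset is two-staged: suspend first (D64_XC_SE) so the engine can
 * finish the descriptor in flight, then clear the control register
 * entirely. Return-value check sketch (illustrative):
 */
#if 0
	if (!dma64_txreset(di))
		DMA_ERROR(("%s: tx engine stuck, status never went DISABLED\n", di->name));
#endif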
static bool
dma64_rxidle(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxidle\n", di->name));

	if (di->nrxd == 0)
		return TRUE;

	return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
	        (R_REG(di->osh, &di->d64rxregs->ptr) & D64_RS0_CD_MASK));
}
static bool
dma64_rxreset(dma_info_t *di)
{
	uint32 status;

	if (di->nrxd == 0)
		return TRUE;

	W_REG(di->osh, &di->d64rxregs->control, 0);
	SPINWAIT(((status = (R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK)) !=
	          D64_RS0_RS_DISABLED),
	         10000);

	return (status == D64_RS0_RS_DISABLED);
}
static bool
dma64_rxenabled(dma_info_t *di)
{
	uint32 rc;

	rc = R_REG(di->osh, &di->d64rxregs->control);
	return ((rc != 0xffffffff) && (rc & D64_RC_RE));
}
static bool
dma64_txsuspendedidle(dma_info_t *di)
{
	if (di->ntxd == 0)
		return TRUE;

	if (!(R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE))
		return 0;

	if ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_IDLE)
		return 1;

	return 0;
}
/* !! tx entry routine
 * WARNING: the caller must check the return value for errors.
 *   An error (tossed frames) can be fatal and cause many subsequent
 *   hard-to-debug problems.
 */
static int BCMFASTPATH
dma64_txfast(dma_info_t *di, void *p0, bool commit)
{
	void *p, *next;
	uchar *data;
	uint len;
	uint16 txout;
	uint32 flags = 0;
	dmaaddr_t pa;

	DMA_TRACE(("%s: dma_txfast\n", di->name));

	txout = di->txout;

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		uint nsegs, j;
		hnddma_seg_map_t *map;

		data = PKTDATA(di->osh, p);
		len = PKTLEN(di->osh, p);
		len += PKTDMAPAD(di->osh, p);
		next = PKTNEXT(di->osh, p);

		/* return nonzero if out of tx descriptors */
		if (NEXTTXD(txout) == di->txin)
			goto outoftxd;

		if (len == 0)
			continue;

		/* get physical address of buffer start */
		if (DMASGLIST_ENAB)
			bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));

		pa = DMA_MAP(di->osh, data, len, DMA_TX, p, &di->txp_dmah[txout]);

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[txout];

			/* See if all the segments can be accounted for */
			if (map->nsegs > (di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1))
				goto outoftxd;

			nsegs = map->nsegs;
		} else
			nsegs = 1;

		for (j = 1; j <= nsegs; j++) {
			flags = 0;
			if (p == p0 && j == 1)
				flags |= D64_CTRL1_SOF;

			/* With a DMA segment list, the descriptor table is filled
			 * using the segment list instead of looping over
			 * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
			 * the end of the segment list is reached.
			 */
			if ((!DMASGLIST_ENAB && next == NULL) ||
			    (DMASGLIST_ENAB && j == nsegs))
				flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
			if (txout == (di->ntxd - 1))
				flags |= D64_CTRL1_EOT;

			if (DMASGLIST_ENAB) {
				len = map->segs[j - 1].length;
				pa = map->segs[j - 1].addr;
			}

			dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
			ASSERT(di->txp[txout] == NULL);

			txout = NEXTTXD(txout);
		}

		/* See above. No need to loop over individual buffers */
		if (DMASGLIST_ENAB)
			break;
	}

	/* if last txd eof not set, fix it */
	if (!(flags & D64_CTRL1_EOF))
		W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
		     BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));

	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit)
		W_REG(di->osh, &di->d64txregs->ptr, di->xmtptrbase + I2B(txout, dma64dd_t));

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (0);

outoftxd:
	DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
	PKTFREE(di->osh, p0, TRUE);
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
	return (-1);
}
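/*
 * Per the WARNING above, the caller owns the error case. Call-site sketch
 * (illustrative; on failure the packet has already been freed inside
 * dma64_txfast and must not be touched again):
 */
#if 0
	if (dma64_txfast(di, p0, TRUE) != 0) {
		/* p0 is gone; account for the drop and stop queuing */
	}
#endif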
/*
 * Reclaim the next completed txd (txds if using chained buffers) in the
 * range specified and return the associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return the associated packet regardless of the value of the hardware
 * pointers.
 */
static void * BCMFASTPATH
dma64_getnexttxp(dma_info_t *di, txd_range_t range)
{
	uint16 start, end, i;
	uint16 active_desc;
	void *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
	           (range == HNDDMA_RANGE_ALL) ? "all" :
	           ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : "transfered")));

	if (di->ntxd == 0)
		return (NULL);

	txp = NULL;

	start = (uint16)di->txin;
	if (range == HNDDMA_RANGE_ALL)
		end = (uint16)di->txout;
	else {
		dma64regs_t *dregs = di->d64txregs;

		end = (uint16)(B2I(((R_REG(di->osh, &dregs->status0) & D64_XS0_CD_MASK) -
		                    di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));

		if (range == HNDDMA_RANGE_TRANSFERED) {
			active_desc = (uint16)(R_REG(di->osh, &dregs->status1) & D64_XS1_AD_MASK);
			active_desc = (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
			active_desc = B2I(active_desc, dma64dd_t);
			if (end != active_desc)
				end = PREVTXD(active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		dmaaddr_t pa;
		hnddma_seg_map_t *map = NULL;
		uint size, j, nsegs;

		PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) - di->dataoffsetlow));
		PHYSADDRHISET(pa, (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) - di->dataoffsethigh));

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[i];
			size = map->origsize;
			nsegs = map->nsegs;
		} else {
			size = (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) & D64_CTRL2_BC_MASK);
			nsegs = 1;
		}

		for (j = nsegs; j > 0; j--) {
			W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
			W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);

			txp = di->txp[i];
			di->txp[i] = NULL;
			if (j > 1)
				i = NEXTTXD(i);
		}

		DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
	}

	di->txin = i;

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (txp);

bogus:
	DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
	           start, end, di->txout, (range == HNDDMA_RANGE_ALL)));
	return (NULL);
}
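/*
 * Index/byte conversion example (illustrative): dma64 descriptors are 16
 * bytes, so B2I(x, dma64dd_t) is x / 16 and I2B(i, dma64dd_t) is i * 16.
 * If status0 reports a CurrDescr byte offset of 0x120 while xmtptrbase is
 * 0x100, then end = B2I((0x120 - 0x100) & D64_XS0_CD_MASK, dma64dd_t) = 2,
 * i.e. descriptors 0 and 1 are complete and eligible for reclaim.
 */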
static void * BCMFASTPATH
dma64_getnextrxp(dma_info_t *di, bool forceall)
{
	uint i, curr;
	void *rxp;
	dmaaddr_t pa;

	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma64_rxenabled(di));

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return (NULL);

	curr = B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
	            di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return (NULL);

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	ASSERT(rxp);
	di->rxp[i] = NULL;

	PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) - di->dataoffsetlow));
	PHYSADDRHISET(pa, (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) - di->dataoffsethigh));

	/* clear this packet from the descriptor ring */
	DMA_UNMAP(di->osh, pa,
	          di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

	W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
	W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);

	di->rxin = NEXTRXD(i);

	return (rxp);
}
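/*
 * Rx path sketch (illustrative; what happens to each completed buffer and
 * how fresh buffers are reposted is up to the bound driver):
 */
#if 0
	while ((rxp = dma64_getnextrxp(di, FALSE))) {
		/* hand rxp up the stack, then repost fresh rx buffers */
	}
#endif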
static bool
_dma64_addrext(osl_t *osh, dma64regs_t *dma64regs)
{
	uint32 w;

	OR_REG(osh, &dma64regs->control, D64_XC_AE);
	w = R_REG(osh, &dma64regs->control);
	AND_REG(osh, &dma64regs->control, ~D64_XC_AE);
	return ((w & D64_XC_AE) == D64_XC_AE);
}
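/*
 * The set/read-back/clear sequence above is a feature probe: D64_XC_AE only
 * sticks on engines that implement DmaExtendedAddrChanges, so reading the
 * bit back as set proves addrext support without leaving the engine
 * reconfigured.
 */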
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void
dma64_txrotate(dma_info_t *di)
{
	uint16 ad;
	uint nactive;
	uint rot;
	uint16 old, new;
	uint32 w;
	uint16 first, last;

	ASSERT(dma64_txsuspendedidle(di));

	nactive = _dma_txactive(di);
	ad = B2I((R_REG(di->osh, &di->d64txregs->status1) & D64_XS1_AD_MASK), dma64dd_t);
	rot = TXD(ad - di->txin);

	ASSERT(rot < di->ntxd);

	/* full-ring case is a lot harder - don't worry about this */
	if (rot >= (di->ntxd - nactive)) {
		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
		return;
	}

	first = di->txin;
	last = PREVTXD(di->txout);

	/* move entries starting at last and moving backwards to first */
	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
		new = TXD(old + rot);

		/*
		 * Move the tx dma descriptor.
		 * EOT is set only in the last entry in the ring.
		 */
		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
		if (new == (di->ntxd - 1))
			w |= D64_CTRL1_EOT;
		W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));

		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
		W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));

		W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
		W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));

		/* zap the old tx dma descriptor address field */
		W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
		W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));

		/* move the corresponding txp[] entry */
		ASSERT(di->txp[new] == NULL);
		di->txp[new] = di->txp[old];

		/* Move the map */
		if (DMASGLIST_ENAB) {
			bcopy(&di->txp_dmah[old], &di->txp_dmah[new], sizeof(hnddma_seg_map_t));
			bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
		}

		di->txp[old] = NULL;
	}

	/* update txin and txout */
	di->txin = ad;
	di->txout = TXD(di->txout + rot);
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	/* kick the chip */
	W_REG(di->osh, &di->d64txregs->ptr, di->xmtptrbase + I2B(di->txout, dma64dd_t));
}
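/*
 * Worked rotation example (illustrative): with ntxd = 64, txin = 60 and the
 * hardware ActiveDescriptor at index 2, rot = TXD(2 - 60) = 6. An entry at
 * old = 62 therefore moves to new = TXD(62 + 6) = 4, and EOT is rewritten
 * so it lands only on descriptor ntxd - 1.
 */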
#endif /* BCMDMA64 */
uint
dma_addrwidth(si_t *sih, void *dmaregs)
{
	dma32regs_t *dma32regs;
	osl_t *osh;

	osh = si_osh(sih);

	/* Perform 64-bit checks only if we want to advertise 64-bit (> 32-bit) capability */
	if (DMA64_CAP) {
		/* DMA engine is 64-bit capable */
		if ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
			/* backplane is 64 bits capable */
			if (si_backplane64(sih))
				/* If bus is System Backplane or PCIE then we can access 64-bits */
				if ((BUSTYPE(sih->bustype) == SI_BUS) ||
				    ((BUSTYPE(sih->bustype) == PCI_BUS) &&
				     (sih->buscoretype == PCIE_CORE_ID)))
					return (DMADDRWIDTH_64);

			/* DMA64 is always 32 bits capable, AE is always TRUE */
			ASSERT(_dma64_addrext(osh, (dma64regs_t *)dmaregs));

			return (DMADDRWIDTH_32);
		}
	}

	/* Start checking for 32-bit / 30-bit addressing */
	dma32regs = (dma32regs_t *)dmaregs;

	/* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
	if ((BUSTYPE(sih->bustype) == SI_BUS) ||
	    ((BUSTYPE(sih->bustype) == PCI_BUS) && sih->buscoretype == PCIE_CORE_ID) ||
	    (_dma32_addrext(osh, dma32regs)))
		return (DMADDRWIDTH_32);

	/* Fallthru */
	return (DMADDRWIDTH_30);
}
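/*
 * Attach-time usage sketch (illustrative; "regs" is a hypothetical
 * chip-register pointer from the bound driver):
 */
#if 0
	if (dma_addrwidth(sih, (void *)&regs->dmaregs) == DMADDRWIDTH_64) {
		/* safe to hand the engine 64-bit descriptor/buffer addresses */
	}
#endif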