/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

#include <asm/addrspace.h>
#if defined(BRCM_FULLMAC)
#error "hnddma.c shouldn't be needed for FULLMAC"
#endif
#ifdef BCMDBG
#define	DMA_ERROR(args) \
	do { \
		if (!(*di->msg_level & 1)) \
			; \
		else \
			printk args; \
	} while (0)
#define	DMA_TRACE(args) \
	do { \
		if (!(*di->msg_level & 2)) \
			; \
		else \
			printk args; \
	} while (0)
#else
#define	DMA_ERROR(args)
#define	DMA_TRACE(args)
#endif				/* BCMDBG */

#define	DMA_NONE(args)
#define d64txregs	dregs.d64_u.txregs_64
#define d64rxregs	dregs.d64_u.rxregs_64
#define txd64		dregs.d64_u.txd_64
#define rxd64		dregs.d64_u.rxd_64

/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level;
#define	MAXNAMEL	8	/* 8 char names */

#define	DI_INFO(dmah)	((dma_info_t *)dmah)

#define R_SM(r)		(*(r))
#define W_SM(r, v)	(*(r) = (v))
/* dma engine software state */
typedef struct dma_info {
	struct hnddma_pub hnddma;	/* exported structure */
	uint *msg_level;	/* message level pointer */
	char name[MAXNAMEL];	/* callers name for diag msgs */

	void *pbus;		/* bus handle */

	bool dma64;		/* this dma engine is operating in 64-bit mode */
	bool addrext;		/* this dma engine supports DmaExtendedAddrChanges */

	union {
		struct {
			dma64regs_t *txregs_64;	/* 64-bit dma tx engine registers */
			dma64regs_t *rxregs_64;	/* 64-bit dma rx engine registers */
			dma64dd_t *txd_64;	/* pointer to dma64 tx descriptor ring */
			dma64dd_t *rxd_64;	/* pointer to dma64 rx descriptor ring */
		} d64_u;
	} dregs;

	u16 dmadesc_align;	/* alignment requirement for dma descriptors */

	u16 ntxd;		/* # tx descriptors tunable */
	u16 txin;		/* index of next descriptor to reclaim */
	u16 txout;		/* index of next descriptor to post */
	void **txp;		/* pointer to parallel array of pointers to packets */
	hnddma_seg_map_t *txp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t txdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t txdpaorig;	/* Original physical address of descriptor ring */
	u16 txdalign;		/* #bytes added to alloc'd mem to align txd */
	u32 txdalloc;		/* #bytes allocated for the ring */
	u32 xmtptrbase;		/* When using unaligned descriptors, the ptr register
				 * is not just an index, it needs all 13 bits to be
				 * an offset from the addr register.
				 */

	u16 nrxd;		/* # rx descriptors tunable */
	u16 rxin;		/* index of next descriptor to reclaim */
	u16 rxout;		/* index of next descriptor to post */
	void **rxp;		/* pointer to parallel array of pointers to packets */
	hnddma_seg_map_t *rxp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t rxdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t rxdpaorig;	/* Original physical address of descriptor ring */
	u16 rxdalign;		/* #bytes added to alloc'd mem to align rxd */
	u32 rxdalloc;		/* #bytes allocated for the ring */
	u32 rcvptrbase;		/* Base for ptr reg when using unaligned descriptors */

	unsigned int rxbufsize;	/* rx buffer size in bytes, not including
				 * the extra headroom
				 */
	uint rxextrahdrroom;	/* extra rx headroom, reserved to assist upper stack
				 * e.g. some rx pkt buffers will be bridged to tx side
				 * without byte copying. The extra headroom needs to be
				 * large enough to fit txheader needs.
				 * Some dongle driver may not need it.
				 */
	uint nrxpost;		/* # rx buffers to keep posted */
	unsigned int rxoffset;	/* rxcontrol offset */
	uint ddoffsetlow;	/* add to get dma address of descriptor ring, low 32 bits */
	uint ddoffsethigh;	/* high 32 bits */
	uint dataoffsetlow;	/* add to get dma address of data buffer, low 32 bits */
	uint dataoffsethigh;	/* high 32 bits */
	bool aligndesc_4k;	/* descriptor base needs to be aligned or not */
} dma_info_t;
/* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
#ifdef BCMDMASGLISTOSL
#define DMASGLIST_ENAB true
#else
#define DMASGLIST_ENAB false
#endif				/* BCMDMASGLISTOSL */
/* descriptor bumping macros */
#define	XXD(x, n)	((x) & ((n) - 1))	/* faster than %, but n must be power of 2 */
#define	TXD(x)		XXD((x), di->ntxd)
#define	RXD(x)		XXD((x), di->nrxd)
#define	NEXTTXD(i)	TXD((i) + 1)
#define	PREVTXD(i)	TXD((i) - 1)
#define	NEXTRXD(i)	RXD((i) + 1)
#define	PREVRXD(i)	RXD((i) - 1)

#define	NTXDACTIVE(h, t)	TXD((t) - (h))
#define	NRXDACTIVE(h, t)	RXD((t) - (h))
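/*
 * Illustrative example (not from the original source): with ntxd == 64,
 * TXD(63 + 1) == 0, so NEXTTXD() wraps the ring index for free, and
 * NTXDACTIVE(60, 2) == 6 because the subtraction underflows and is then
 * masked by (ntxd - 1). This is why dma_attach() asserts that ntxd and
 * nrxd are powers of 2.
 */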
/* macros to convert between byte offsets and indexes */
#define	B2I(bytes, type)	((bytes) / sizeof(type))
#define	I2B(index, type)	((index) * sizeof(type))
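/*
 * Note (illustrative): a dma64 descriptor (dma64dd_t: ctrl1, ctrl2,
 * addrlow, addrhigh) is 16 bytes, so B2I()/I2B() convert between the byte
 * offsets kept in the chip's ptr/status registers and ring indexes,
 * e.g. B2I(0x40, dma64dd_t) == 4.
 */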
#define	PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define	PCI32ADDR_HIGH_SHIFT	30	/* address[31:30] */

#define	PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define	PCI64ADDR_HIGH_SHIFT	31	/* address[63] */
/* Common prototypes */
static bool _dma_isaddrext(dma_info_t *di);
static bool _dma_descriptor_align(dma_info_t *di);
static bool _dma_alloc(dma_info_t *di, uint direction);
static void _dma_detach(dma_info_t *di);
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
static void _dma_rxinit(dma_info_t *di);
static void *_dma_rx(dma_info_t *di);
static bool _dma_rxfill(dma_info_t *di);
static void _dma_rxreclaim(dma_info_t *di);
static void _dma_rxenable(dma_info_t *di);
static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
static void _dma_rx_param_get(dma_info_t *di, u16 *rxoffset,
			      u16 *rxbufsize);

static void _dma_txblock(dma_info_t *di);
static void _dma_txunblock(dma_info_t *di);
static uint _dma_txactive(dma_info_t *di);
static uint _dma_rxactive(dma_info_t *di);
static uint _dma_txpending(dma_info_t *di);
static uint _dma_txcommitted(dma_info_t *di);

static void *_dma_peeknexttxp(dma_info_t *di);
static void *_dma_peeknextrxp(dma_info_t *di);
static unsigned long _dma_getvar(dma_info_t *di, const char *name);
static void _dma_counterreset(dma_info_t *di);
static void _dma_fifoloopbackenable(dma_info_t *di);
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
static u8 dma_align_sizetobits(uint size);
static void *dma_ringalloc(dma_info_t *di, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dmaaddr_t *descpa);
/* Prototypes for 64-bit routines */
static bool dma64_alloc(dma_info_t *di, uint direction);
static bool dma64_txreset(dma_info_t *di);
static bool dma64_rxreset(dma_info_t *di);
static bool dma64_txsuspendedidle(dma_info_t *di);
static int dma64_txfast(dma_info_t *di, struct sk_buff *p0, bool commit);
static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
static void *dma64_getpos(dma_info_t *di, bool direction);
static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
static void dma64_txrotate(dma_info_t *di);

static bool dma64_rxidle(dma_info_t *di);
static void dma64_txinit(dma_info_t *di);
static bool dma64_txenabled(dma_info_t *di);
static void dma64_txsuspend(dma_info_t *di);
static void dma64_txresume(dma_info_t *di);
static bool dma64_txsuspended(dma_info_t *di);
static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma64_txstopped(dma_info_t *di);
static bool dma64_rxstopped(dma_info_t *di);
static bool dma64_rxenabled(dma_info_t *di);
static bool _dma64_addrext(dma64regs_t *dma64regs);

static inline u32 parity32(u32 data);
const di_fcn_t dma64proc = {
	(di_detach_t) _dma_detach,
	(di_txinit_t) dma64_txinit,
	(di_txreset_t) dma64_txreset,
	(di_txenabled_t) dma64_txenabled,
	(di_txsuspend_t) dma64_txsuspend,
	(di_txresume_t) dma64_txresume,
	(di_txsuspended_t) dma64_txsuspended,
	(di_txsuspendedidle_t) dma64_txsuspendedidle,
	(di_txfast_t) dma64_txfast,
	(di_txunframed_t) dma64_txunframed,
	(di_getpos_t) dma64_getpos,
	(di_txstopped_t) dma64_txstopped,
	(di_txreclaim_t) dma64_txreclaim,
	(di_getnexttxp_t) dma64_getnexttxp,
	(di_peeknexttxp_t) _dma_peeknexttxp,
	(di_txblock_t) _dma_txblock,
	(di_txunblock_t) _dma_txunblock,
	(di_txactive_t) _dma_txactive,
	(di_txrotate_t) dma64_txrotate,

	(di_rxinit_t) _dma_rxinit,
	(di_rxreset_t) dma64_rxreset,
	(di_rxidle_t) dma64_rxidle,
	(di_rxstopped_t) dma64_rxstopped,
	(di_rxenable_t) _dma_rxenable,
	(di_rxenabled_t) dma64_rxenabled,
	(di_rx_t) _dma_rx,
	(di_rxfill_t) _dma_rxfill,
	(di_rxreclaim_t) _dma_rxreclaim,
	(di_getnextrxp_t) _dma_getnextrxp,
	(di_peeknextrxp_t) _dma_peeknextrxp,
	(di_rxparam_get_t) _dma_rx_param_get,

	(di_fifoloopbackenable_t) _dma_fifoloopbackenable,
	(di_getvar_t) _dma_getvar,
	(di_counterreset_t) _dma_counterreset,
	(di_ctrlflags_t) _dma_ctrlflags,

	(di_rxactive_t) _dma_rxactive,
	(di_txpending_t) _dma_txpending,
	(di_txcommitted_t) _dma_txcommitted,
};
struct hnddma_pub *dma_attach(char *name, si_t *sih,
			      void *dmaregstx, void *dmaregsrx, uint ntxd,
			      uint nrxd, uint rxbufsize, int rxextheadroom,
			      uint nrxpost, uint rxoffset, uint *msg_level)
{
	dma_info_t *di;
	uint size;
	/* allocate private info structure */
	di = kzalloc(sizeof(dma_info_t), GFP_ATOMIC);
	if (di == NULL) {
		printk(KERN_ERR "dma_attach: out of memory\n");
		return NULL;
	}

	di->msg_level = msg_level ? msg_level : &dma_msg_level;
	/* old chips w/o sb are no longer supported */
	di->dma64 = ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
	/* check arguments */
	ASSERT(ISPOWEROF2(ntxd));
	ASSERT(ISPOWEROF2(nrxd));

	if (nrxd == 0)
		ASSERT(dmaregsrx == NULL);
	if (ntxd == 0)
		ASSERT(dmaregstx == NULL);
	/* init dma reg pointer */
	ASSERT(ntxd <= D64MAXDD);
	ASSERT(nrxd <= D64MAXDD);
	di->d64txregs = (dma64regs_t *) dmaregstx;
	di->d64rxregs = (dma64regs_t *) dmaregsrx;
	di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;
	/* Default flags (which can be changed by the driver calling dma_ctrlflags
	 * before enable): For backwards compatibility both Rx Overflow Continue
	 * and Parity are DISABLED.
	 */
	di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN,
				    0);
	DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
		   "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
		   "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
		   di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize,
		   rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';
	di->pbus = ((struct si_info *)sih)->pbus;
	di->ntxd = (u16) ntxd;
	di->nrxd = (u16) nrxd;
	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom =
	    (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (u16) rxbufsize;
	di->nrxpost = (u16) nrxpost;
	di->rxoffset = (u8) rxoffset;
	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map silicon backplane address to zero-based memory, need offset
	 *     Other bus: use zero
	 *     SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sih->bustype == PCI_BUS) {
		/* pcie with DMA64 */
		di->ddoffsetlow = 0;
		di->ddoffsethigh = SI_PCIE_DMA_H32;
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}
#if defined(__mips__) && defined(IL_BIGENDIAN)
	di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((si_coreid(sih) == SDIOD_CORE_ID)
	    && ((si_corerev(sih) > 0) && (si_corerev(sih) <= 2)))
		di->addrext = 0;
	else if ((si_coreid(sih) == I2S_CORE_ID) &&
		 ((si_corerev(sih) == 0) || (si_corerev(sih) == 1)))
		di->addrext = 0;
	else
		di->addrext = _dma_isaddrext(di);
	/* do the descriptors need to be aligned and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		di->dmadesc_align = D64RINGALIGN_BITS;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
			/* for smaller dd table, HW relax alignment reqmnt */
			di->dmadesc_align = D64RINGALIGN_BITS - 1;
		}
	} else
		di->dmadesc_align = 4;	/* 16 byte alignment */

	DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
		  di->aligndesc_4k, di->dmadesc_align));
	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		di->txp = kzalloc(size, GFP_ATOMIC);
		if (di->txp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of tx memory\n",
				   di->name));
			goto fail;
		}
	}
	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		di->rxp = kzalloc(size, GFP_ATOMIC);
		if (di->rxp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of rx memory\n",
				   di->name));
			goto fail;
		}
	}
	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}
	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not "
				   "supported\n", di->name,
				   (u32) PHYSADDRLO(di->txdpa)));
			goto fail;
		}
		if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not "
				   "supported\n", di->name,
				   (u32) PHYSADDRLO(di->rxdpa)));
			goto fail;
		}
	}
	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
		   "dataoffsethigh 0x%x addrext %d\n", di->ddoffsetlow,
		   di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
		   di->addrext));
	/* allocate DMA mapping vectors */
	if (DMASGLIST_ENAB) {
		size = ntxd * sizeof(hnddma_seg_map_t);
		di->txp_dmah = kzalloc(size, GFP_ATOMIC);
		if (di->txp_dmah == NULL)
			goto fail;

		size = nrxd * sizeof(hnddma_seg_map_t);
		di->rxp_dmah = kzalloc(size, GFP_ATOMIC);
		if (di->rxp_dmah == NULL)
			goto fail;
	}
	return (struct hnddma_pub *) di;

 fail:
	_dma_detach(di);
	return NULL;
}
/* Check for odd number of 1's */
static inline u32 parity32(u32 data)
{
	data ^= data >> 16;
	data ^= data >> 8;
	data ^= data >> 4;
	data ^= data >> 2;
	data ^= data >> 1;

	return data & 1;
}
#define DMA64_DD_PARITY(dd) \
	parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
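/*
 * Note on the descriptor parity scheme: when DMA_CTRL_PEN is enabled,
 * dma64_dd_upd() below computes the XOR parity of the four descriptor
 * words and, if it is odd, sets D64_CTRL2_PARITY in ctrl2 so the
 * descriptor as a whole carries even parity for the hardware check.
 */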
static void
dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
	     u32 *flags, u32 bufcount)
{
	u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
#if defined(__mips__) && defined(IL_BIGENDIAN)
	if ((di->dataoffsetlow == SI_SDRAM_SWAPPED)
	    || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#else
	if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
		ASSERT((PHYSADDRHI(pa) & PCI64ADDR_HIGH) == 0);

		W_SM(&ddring[outidx].addrlow,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh,
		     BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	} else {
		/* address extension for 32-bit PCI */
		u32 ae;

		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
		ASSERT(PHYSADDRHI(pa) == 0);

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		W_SM(&ddring[outidx].addrlow,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh,
		     BUS_SWAP32(0 + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	}
	if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) {
		if (DMA64_DD_PARITY(&ddring[outidx])) {
			W_SM(&ddring[outidx].ctrl2,
			     BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
		}
	}
}
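/*
 * Worked example (illustrative, not from the original source): with
 * address extension on a 32-bit PCI bus, a physical address of 0xC0001000
 * has bits [31:30] == 3, so ae == 3 is folded into ctrl2 via D64_CTRL2_AE
 * and the descriptor's addrlow becomes 0x00001000 (plus dataoffsetlow).
 */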
static bool _dma_alloc(dma_info_t *di, uint direction)
{
	return dma64_alloc(di, direction);
}
void *dma_alloc_consistent(struct pci_dev *pdev, uint size, u16 align_bits,
			   uint *alloced, unsigned long *pap)
{
	if (align_bits) {
		u16 align = (1 << align_bits);
		if (!IS_ALIGNED(PAGE_SIZE, align))
			size += align;
		*alloced = size;
	}
	return pci_alloc_consistent(pdev, size, (dma_addr_t *) pap);
}
/* !! may be called with core in reset */
static void _dma_detach(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_detach\n", di->name));

	/* shouldn't be here if descriptors are unreclaimed */
	ASSERT(di->txin == di->txout);
	ASSERT(di->rxin == di->rxout);

	/* free dma descriptor rings */
	if (di->txd64)
		pci_free_consistent(di->pbus, di->txdalloc,
				    ((s8 *)di->txd64 - di->txdalign),
				    (di->txdpaorig));
	if (di->rxd64)
		pci_free_consistent(di->pbus, di->rxdalloc,
				    ((s8 *)di->rxd64 - di->rxdalign),
				    (di->rxdpaorig));

	/* free packet pointer vectors */
	kfree(di->txp);
	kfree(di->rxp);

	/* free tx packet DMA handles */
	kfree(di->txp_dmah);

	/* free rx packet DMA handles */
	kfree(di->rxp_dmah);

	/* free our private info structure */
	kfree(di);
}
static bool _dma_descriptor_align(dma_info_t *di)
{
	u32 addrl;

	/* Check to see if the descriptors need to be aligned on 4K/8K or not */
	if (di->d64txregs != NULL) {
		W_REG(&di->d64txregs->addrlow, 0xff0);
		addrl = R_REG(&di->d64txregs->addrlow);
		if (addrl != 0)
			return false;
	} else if (di->d64rxregs != NULL) {
		W_REG(&di->d64rxregs->addrlow, 0xff0);
		addrl = R_REG(&di->d64rxregs->addrlow);
		if (addrl != 0)
			return false;
	}
	return true;
}
/* return true if this dma engine supports DmaExtendedAddrChanges, otherwise false */
static bool _dma_isaddrext(dma_info_t *di)
{
	/* DMA64 supports full 32- or 64-bit operation. AE is always valid */

	/* not all tx or rx channels are available */
	if (di->d64txregs != NULL) {
		if (!_dma64_addrext(di->d64txregs)) {
			DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
				   "AE set\n", di->name));
			ASSERT(0);
		}
		return true;
	} else if (di->d64rxregs != NULL) {
		if (!_dma64_addrext(di->d64rxregs)) {
			DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
				   "AE set\n", di->name));
			ASSERT(0);
		}
		return true;
	}
	return false;
}
/* initialize descriptor table base address */
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
{
	if (!di->aligndesc_4k) {
		if (direction == DMA_TX)
			di->xmtptrbase = PHYSADDRLO(pa);
		else
			di->rcvptrbase = PHYSADDRLO(pa);
	}

	if ((di->ddoffsetlow == 0)
	    || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
		if (direction == DMA_TX) {
			W_REG(&di->d64txregs->addrlow,
			      (PHYSADDRLO(pa) + di->ddoffsetlow));
			W_REG(&di->d64txregs->addrhigh,
			      (PHYSADDRHI(pa) + di->ddoffsethigh));
		} else {
			W_REG(&di->d64rxregs->addrlow,
			      (PHYSADDRLO(pa) + di->ddoffsetlow));
			W_REG(&di->d64rxregs->addrhigh,
			      (PHYSADDRHI(pa) + di->ddoffsethigh));
		}
	} else {
		/* DMA64 32-bit address extension */
		u32 ae;

		ASSERT(PHYSADDRHI(pa) == 0);

		/* shift the high bit(s) from pa to ae */
		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
		    PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

		if (direction == DMA_TX) {
			W_REG(&di->d64txregs->addrlow,
			      (PHYSADDRLO(pa) + di->ddoffsetlow));
			W_REG(&di->d64txregs->addrhigh,
			      di->ddoffsethigh);
			SET_REG(&di->d64txregs->control,
				D64_XC_AE, (ae << D64_XC_AE_SHIFT));
		} else {
			W_REG(&di->d64rxregs->addrlow,
			      (PHYSADDRLO(pa) + di->ddoffsetlow));
			W_REG(&di->d64rxregs->addrhigh,
			      di->ddoffsethigh);
			SET_REG(&di->d64rxregs->control,
				D64_RC_AE, (ae << D64_RC_AE_SHIFT));
		}
	}
}
static void _dma_fifoloopbackenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));

	OR_REG(&di->d64txregs->control, D64_XC_LE);
}
static void _dma_rxinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxinit\n", di->name));

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	memset((void *)di->rxd64, '\0',
	       (di->nrxd * sizeof(dma64dd_t)));

	/* DMA engine without alignment requirement requires table to be inited
	 * before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);

	_dma_rxenable(di);

	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
}
static void _dma_rxenable(dma_info_t *di)
{
	uint dmactrlflags = di->hnddma.dmactrlflags;
	u32 control;

	DMA_TRACE(("%s: dma_rxenable\n", di->name));

	control =
	    (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
	    D64_RC_RE;

	if ((dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_RC_PD;

	if (dmactrlflags & DMA_CTRL_ROC)
		control |= D64_RC_OC;

	W_REG(&di->d64rxregs->control,
	      ((di->rxoffset << D64_RC_RO_SHIFT) | control));
}
static void
_dma_rx_param_get(dma_info_t *di, u16 *rxoffset, u16 *rxbufsize)
{
	/* the normal values fit into 16 bits */
	*rxoffset = (u16) di->rxoffset;
	*rxbufsize = (u16) di->rxbufsize;
}
/* !! rx entry routine
 * returns a pointer to the next frame received, or NULL if there are no more
 * if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is supported;
 * otherwise, it's treated as a giant pkt and will be tossed.
 * The DMA scattering starts with a normal DMA header, followed by first buffer data.
 * After it reaches the max size of buffer, the data continues in the next DMA
 * descriptor buffer WITHOUT DMA header
 */
static void *BCMFASTPATH _dma_rx(dma_info_t *di)
{
	struct sk_buff *p, *head, *tail;
	uint len;
	uint pkt_len;
	int resid = 0;

 next_frame:
	head = _dma_getnextrxp(di, false);
	if (head == NULL)
		return NULL;

	len = le16_to_cpu(*(u16 *) (head->data));
	DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));

#if defined(__mips__)
#define OSL_UNCACHED(va)	((void *)KSEG1ADDR((va)))
	if (!len) {
		while (!(len = *(u16 *) OSL_UNCACHED(head->data)))
			udelay(1);

		*(u16 *) (head->data) = cpu_to_le16((u16) len);
	}
#endif				/* defined(__mips__) */

	/* set actual length */
	pkt_len = min((di->rxoffset + len), di->rxbufsize);
	__skb_trim(head, pkt_len);
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	if (resid > 0) {
		tail = head;
		while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
			tail->next = p;
			pkt_len = min(resid, (int)di->rxbufsize);
			__skb_trim(p, pkt_len);

			tail = p;
			resid -= di->rxbufsize;
		}

#ifdef BCMDBG
		if (resid > 0) {
			uint cur;
			ASSERT(p == NULL);
			cur =
			    B2I(((R_REG(&di->d64rxregs->status0) &
				  D64_RS0_CD_MASK) -
				 di->rcvptrbase) & D64_RS0_CD_MASK,
				dma64dd_t);
			DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
				   di->rxin, di->rxout, cur));
		}
#endif				/* BCMDBG */

		if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
				   di->name, len));
			pkt_buf_free_skb(head);
			di->hnddma.rxgiants++;
			goto next_frame;
		}
	}

	return head;
}
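/*
 * Note (illustrative): the chip deposits the frame length in the first two
 * bytes of the rx buffer (part of the receive header that di->rxoffset
 * skips), which is why _dma_rx() reads len from head->data before trimming
 * the skb to the actual packet length.
 */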
/* post receive buffers
 * return false if refill failed completely and ring is empty
 * this will stall the rx dma and user might want to call rxfill again asap
 * This unlikely happens on memory-rich NIC, but often on memory-constrained dongle
 */
static bool BCMFASTPATH _dma_rxfill(dma_info_t *di)
{
	struct sk_buff *p;
	u16 rxin, rxout;
	u32 flags = 0;
	uint n;
	uint i;
	dmaaddr_t pa;
	uint extra_offset = 0;
	bool ring_empty = false;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */

	rxin = di->rxin;
	rxout = di->rxout;

	n = di->nrxpost - NRXDACTIVE(rxin, rxout);

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = di->rxextrahdrroom;

	for (i = 0; i < n; i++) {
		/* the di->rxbufsize doesn't include the extra headroom, we need to add it to the
		 * size to be allocated
		 */
		p = pkt_buf_get_skb(di->rxbufsize + extra_offset);
		if (p == NULL) {
			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
				   di->name));
			if (i == 0 && dma64_rxidle(di)) {
				DMA_ERROR(("%s: rxfill64: ring is empty !\n",
					   di->name));
				ring_empty = true;
			}
			di->hnddma.rxnobuf++;
			break;
		}
		/* reserve an extra headroom, if applicable */
		if (extra_offset)
			skb_pull(p, extra_offset);

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(u32 *) (p->data) = 0;

		if (DMASGLIST_ENAB)
			memset(&di->rxp_dmah[rxout], 0,
			       sizeof(hnddma_seg_map_t));

		pa = pci_map_single(di->pbus, p->data,
				    di->rxbufsize, PCI_DMA_FROMDEVICE);

		ASSERT(IS_ALIGNED(PHYSADDRLO(pa), 4));

		/* save the free packet pointer */
		ASSERT(di->rxp[rxout] == NULL);
		di->rxp[rxout] = p;

		/* reset flags for each descriptor */
		flags = 0;
		if (rxout == (di->nrxd - 1))
			flags = D64_CTRL1_EOT;

		dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
			     di->rxbufsize);
		rxout = NEXTRXD(rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	W_REG(&di->d64rxregs->ptr,
	      di->rcvptrbase + I2B(rxout, dma64dd_t));

	return ring_empty;
}
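/*
 * Note: the rx/tx "ptr" registers hold byte offsets from the descriptor
 * base (rcvptrbase/xmtptrbase), not ring indexes, which is why the
 * doorbell write above converts with I2B(rxout, dma64dd_t).
 */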
/* like getnexttxp but no reclaim */
static void *_dma_peeknexttxp(dma_info_t *di)
{
	uint end, i;

	end =
	    B2I(((R_REG(&di->d64txregs->status0) &
		  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
		dma64dd_t);

	for (i = di->txin; i != end; i = NEXTTXD(i))
		if (di->txp[i])
			return di->txp[i];

	return NULL;
}
/* like getnextrxp but does not take it off the ring */
static void *_dma_peeknextrxp(dma_info_t *di)
{
	uint end, i;

	end =
	    B2I(((R_REG(&di->d64rxregs->status0) &
		  D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK,
		dma64dd_t);

	for (i = di->rxin; i != end; i = NEXTRXD(i))
		if (di->rxp[i])
			return di->rxp[i];

	return NULL;
}
static void _dma_rxreclaim(dma_info_t *di)
{
	void *p;

	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

	while ((p = _dma_getnextrxp(di, true)))
		pkt_buf_free_skb(p);
}
static void *BCMFASTPATH _dma_getnextrxp(dma_info_t *di, bool forceall)
{
	if (di->nrxd == 0)
		return NULL;

	return dma64_getnextrxp(di, forceall);
}
static void _dma_txblock(dma_info_t *di)
{
	di->hnddma.txavail = 0;
}
static void _dma_txunblock(dma_info_t *di)
{
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}
static uint _dma_txactive(dma_info_t *di)
{
	return NTXDACTIVE(di->txin, di->txout);
}
static uint _dma_txpending(dma_info_t *di)
{
	uint curr;

	curr =
	    B2I(((R_REG(&di->d64txregs->status0) &
		  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
		dma64dd_t);

	return NTXDACTIVE(curr, di->txout);
}
static uint _dma_txcommitted(dma_info_t *di)
{
	uint ptr;
	uint txin = di->txin;

	if (txin == di->txout)
		return 0;

	ptr = B2I(R_REG(&di->d64txregs->ptr), dma64dd_t);

	return NTXDACTIVE(di->txin, ptr);
}
static uint _dma_rxactive(dma_info_t *di)
{
	return NRXDACTIVE(di->rxin, di->rxout);
}
static void _dma_counterreset(dma_info_t *di)
{
	/* reset all software counters */
	di->hnddma.rxgiants = 0;
	di->hnddma.rxnobuf = 0;
	di->hnddma.txnobuf = 0;
}
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
{
	uint dmactrlflags;

	/* check the handle before dereferencing it */
	if (di == NULL) {
		DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
		return 0;
	}

	dmactrlflags = di->hnddma.dmactrlflags;

	ASSERT((flags & ~mask) == 0);

	dmactrlflags &= ~mask;
	dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (dmactrlflags & DMA_CTRL_PEN) {
		u32 control;

		control = R_REG(&di->d64txregs->control);
		W_REG(&di->d64txregs->control,
		      control | D64_XC_PD);
		if (R_REG(&di->d64txregs->control) & D64_XC_PD) {
			/* We *can* disable it so it is supported,
			 * restore control register
			 */
			W_REG(&di->d64txregs->control,
			      control);
		} else {
			/* Not supported, don't allow it to be enabled */
			dmactrlflags &= ~DMA_CTRL_PEN;
		}
	}

	di->hnddma.dmactrlflags = dmactrlflags;

	return dmactrlflags;
}
/* get the address of the var in order to change later */
static unsigned long _dma_getvar(dma_info_t *di, const char *name)
{
	if (!strcmp(name, "&txavail"))
		return (unsigned long)&(di->hnddma.txavail);

	ASSERT(0);
	return 0;
}
u8 dma_align_sizetobits(uint size)
{
	u8 bitpos = 0;

	ASSERT(size);
	ASSERT(!(size & (size - 1)));
	while (size >>= 1)
		bitpos++;

	return bitpos;
}
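/*
 * e.g. dma_align_sizetobits(4096) == 12 (illustrative); the result feeds
 * the alignbits argument of dma_alloc_consistent() below.
 */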
/* This function ensures that the DMA descriptor ring will not get allocated
 * across a page boundary. If the allocation is done across the page boundary
 * the first time, then it is freed and the allocation is redone at a
 * descriptor-ring-size-aligned location. This ensures that the ring will
 * not cross a page boundary.
 */
static void *dma_ringalloc(dma_info_t *di, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dmaaddr_t *descpa)
{
	void *va;
	u32 desc_strtaddr;
	u32 alignbytes = 1 << *alignbits;

	va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
	if (va == NULL)
		return NULL;

	desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
	if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
							& boundary)) {
		*alignbits = dma_align_sizetobits(size);
		pci_free_consistent(di->pbus, size, va, *descpa);
		va = dma_alloc_consistent(di->pbus, size, *alignbits,
					  alloced, descpa);
	}
	return va;
}
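/*
 * Worked example (illustrative): a ring allocated with an alignment
 * smaller than its size can straddle the boundary; the retry then uses
 * alignbits = dma_align_sizetobits(size) (e.g. 13 for an 8192-byte ring),
 * making the allocation ring-size-aligned so it cannot cross the boundary.
 */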
/* 64-bit DMA functions */

static void dma64_txinit(dma_info_t *di)
{
	u32 control = D64_XC_XE;

	DMA_TRACE(("%s: dma_txinit\n", di->name));

	di->txin = di->txout = 0;
	di->hnddma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	memset((void *)di->txd64, '\0', (di->ntxd * sizeof(dma64dd_t)));

	/* DMA engine without alignment requirement requires table to be inited
	 * before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);

	if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_XC_PD;
	OR_REG(&di->d64txregs->control, control);

	/* DMA engine with alignment requirement requires table to be inited
	 * before enabling the engine
	 */
	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);
}
static bool dma64_txenabled(dma_info_t *di)
{
	u32 xc;

	/* If the chip is dead, it is not enabled :-) */
	xc = R_REG(&di->d64txregs->control);
	return (xc != 0xffffffff) && (xc & D64_XC_XE);
}
static void dma64_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));

	if (di->ntxd == 0)
		return;

	OR_REG(&di->d64txregs->control, D64_XC_SE);
}
static void dma64_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));

	if (di->ntxd == 0)
		return;

	AND_REG(&di->d64txregs->control, ~D64_XC_SE);
}
static bool dma64_txsuspended(dma_info_t *di)
{
	return (di->ntxd == 0) ||
	    ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
	     D64_XC_SE);
}
static void BCMFASTPATH dma64_txreclaim(dma_info_t *di, txd_range_t range)
{
	void *p;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
		   (range == HNDDMA_RANGE_ALL) ? "all" :
		   ((range ==
		     HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
		    "transferred")));

	if (di->txin == di->txout)
		return;

	while ((p = dma64_getnexttxp(di, range))) {
		/* For unframed data, we don't have any packets to free */
		if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
			pkt_buf_free_skb(p);
	}
}
static bool dma64_txstopped(dma_info_t *di)
{
	return ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) ==
		D64_XS0_XS_STOPPED);
}

static bool dma64_rxstopped(dma_info_t *di)
{
	return ((R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
		D64_RS0_RS_STOPPED);
}
static bool dma64_alloc(dma_info_t *di, uint direction)
{
	uint size;
	uint ddlen;
	void *va;
	uint alloced = 0;
	u16 align;
	u16 align_bits;

	ddlen = sizeof(dma64dd_t);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
	align_bits = di->dmadesc_align;
	align = (1 << align_bits);

	if (direction == DMA_TX) {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->txdpaorig);
		if (va == NULL) {
			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
			return false;
		}
		align = (1 << align_bits);
		di->txd64 = (dma64dd_t *) roundup((unsigned long)va, align);
		di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
		PHYSADDRLOSET(di->txdpa,
			      PHYSADDRLO(di->txdpaorig) + di->txdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));

		PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));
		di->txdalloc = alloced;
		ASSERT(IS_ALIGNED((unsigned long)di->txd64, align));
	} else {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->rxdpaorig);
		if (va == NULL) {
			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
			return false;
		}
		align = (1 << align_bits);
		di->rxd64 = (dma64dd_t *) roundup((unsigned long)va, align);
		di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
		PHYSADDRLOSET(di->rxdpa,
			      PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));

		PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));
		di->rxdalloc = alloced;
		ASSERT(IS_ALIGNED((unsigned long)di->rxd64, align));
	}

	return true;
}
static bool dma64_txreset(dma_info_t *di)
{
	u32 status;

	if (di->ntxd == 0)
		return true;

	/* suspend tx DMA first */
	W_REG(&di->d64txregs->control, D64_XC_SE);
	SPINWAIT(((status =
		   (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
		 && (status != D64_XS0_XS_STOPPED), 10000);

	W_REG(&di->d64txregs->control, 0);
	SPINWAIT(((status =
		   (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED), 10000);

	/* wait for the last transaction to complete */
	udelay(300);

	return status == D64_XS0_XS_DISABLED;
}
static bool dma64_rxidle(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxidle\n", di->name));

	if (di->nrxd == 0)
		return true;

	return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
		(R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
}
static bool dma64_rxreset(dma_info_t *di)
{
	u32 status;

	if (di->nrxd == 0)
		return true;

	W_REG(&di->d64rxregs->control, 0);
	SPINWAIT(((status =
		   (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
		  != D64_RS0_RS_DISABLED), 10000);

	return status == D64_RS0_RS_DISABLED;
}
static bool dma64_rxenabled(dma_info_t *di)
{
	u32 rc;

	rc = R_REG(&di->d64rxregs->control);
	return (rc != 0xffffffff) && (rc & D64_RC_RE);
}
static bool dma64_txsuspendedidle(dma_info_t *di)
{
	if (di->ntxd == 0)
		return true;

	if (!(R_REG(&di->d64txregs->control) & D64_XC_SE))
		return 0;

	if ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) ==
	    D64_XS0_XS_IDLE)
		return 1;

	return 0;
}
/* Useful when sending unframed data. This allows us to get a progress report from the DMA.
 * We return a pointer to the beginning of the DATA buffer of the current descriptor.
 * If DMA is idle, we return NULL.
 */
static void *dma64_getpos(dma_info_t *di, bool direction)
{
	void *va;
	bool idle;
	u32 cd_offset;

	if (direction == DMA_TX) {
		cd_offset =
		    R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK;
		idle = !NTXDACTIVE(di->txin, di->txout);
		va = di->txp[B2I(cd_offset, dma64dd_t)];
	} else {
		cd_offset =
		    R_REG(&di->d64rxregs->status0) & D64_XS0_CD_MASK;
		idle = !NRXDACTIVE(di->rxin, di->rxout);
		va = di->rxp[B2I(cd_offset, dma64dd_t)];
	}

	/* If DMA is IDLE, return NULL */
	if (idle) {
		DMA_TRACE(("%s: DMA idle, return NULL\n", __func__));
		va = NULL;
	}

	return va;
}
/* TX of unframed data
 *
 * Adds a DMA ring descriptor for the data pointed to by "buf".
 * This is for DMA of a buffer of data and is unlike other hnddma TX functions
 * that take a pointer to a "packet".
 * Each call to this results in a single descriptor being added for "len" bytes of
 * data starting at "buf"; it doesn't handle chained buffers.
 */
static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
{
	u16 txout;
	u32 flags = 0;
	dmaaddr_t pa;		/* phys addr */

	txout = di->txout;

	/* return nonzero if out of tx descriptors */
	if (NEXTTXD(txout) == di->txin)
		goto outoftxd;

	if (len == 0)
		return 0;

	pa = pci_map_single(di->pbus, buf, len, PCI_DMA_TODEVICE);

	flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);

	if (txout == (di->ntxd - 1))
		flags |= D64_CTRL1_EOT;

	dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
	ASSERT(di->txp[txout] == NULL);

	/* save the buffer pointer - used by dma_getpos */
	di->txp[txout] = buf;

	txout = NEXTTXD(txout);
	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit) {
		W_REG(&di->d64txregs->ptr,
		      di->xmtptrbase + I2B(txout, dma64dd_t));
	}

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return 0;

 outoftxd:
	DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __func__));
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
	return -1;
}
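/*
 * Note on the flow-control formula: txavail is ntxd - active - 1; one
 * descriptor is deliberately kept unused so that a full ring can be
 * distinguished from an empty one (txin == txout would otherwise mean
 * both).
 */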
/* !! tx entry routine
 * WARNING: caller must check the return value for error.
 *   the error (tossed frames) could be fatal and cause many subsequent
 *   hard-to-debug problems
 */
static int BCMFASTPATH dma64_txfast(dma_info_t *di, struct sk_buff *p0,
				    bool commit)
{
	struct sk_buff *p, *next;
	unsigned char *data;
	uint len;
	u16 txout;
	u32 flags = 0;
	dmaaddr_t pa;

	DMA_TRACE(("%s: dma_txfast\n", di->name));

	txout = di->txout;

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		uint nsegs, j;
		hnddma_seg_map_t *map;

		data = p->data;
		len = p->len;
		next = p->next;

		/* return nonzero if out of tx descriptors */
		if (NEXTTXD(txout) == di->txin)
			goto outoftxd;

		if (len == 0)
			continue;

		/* get physical address of buffer start */
		if (DMASGLIST_ENAB)
			memset(&di->txp_dmah[txout], 0,
			       sizeof(hnddma_seg_map_t));

		pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[txout];

			/* See if all the segments can be accounted for */
			if (map->nsegs >
			    (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
				    1))
				goto outoftxd;

			nsegs = map->nsegs;
		} else
			nsegs = 1;

		for (j = 1; j <= nsegs; j++) {
			flags = 0;
			if (p == p0 && j == 1)
				flags |= D64_CTRL1_SOF;

			/* With a DMA segment list, Descriptor table is filled
			 * using the segment list instead of looping over
			 * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
			 * end of segment list is reached.
			 */
			if ((!DMASGLIST_ENAB && next == NULL) ||
			    (DMASGLIST_ENAB && j == nsegs))
				flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
			if (txout == (di->ntxd - 1))
				flags |= D64_CTRL1_EOT;

			if (DMASGLIST_ENAB) {
				len = map->segs[j - 1].length;
				pa = map->segs[j - 1].addr;
			}

			dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
			ASSERT(di->txp[txout] == NULL);

			txout = NEXTTXD(txout);
		}

		/* See above. No need to loop over individual buffers */
		if (DMASGLIST_ENAB)
			break;
	}

	/* if last txd eof not set, fix it */
	if (!(flags & D64_CTRL1_EOF))
		W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
		     BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));

	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit)
		W_REG(&di->d64txregs->ptr,
		      di->xmtptrbase + I2B(txout, dma64dd_t));

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return 0;

 outoftxd:
	DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
	pkt_buf_free_skb(p0);
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
	return -1;
}
/*
 * Reclaim next completed txd (txds if using chained buffers) in the range
 * specified and return the associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return the associated packet regardless of the value of hardware pointers.
 */
static void *BCMFASTPATH dma64_getnexttxp(dma_info_t *di, txd_range_t range)
{
	u16 start, end, i;
	u16 active_desc;
	void *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
		   (range == HNDDMA_RANGE_ALL) ? "all" :
		   ((range ==
		     HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
		    "transferred")));

	if (di->ntxd == 0)
		return NULL;

	txp = NULL;

	start = di->txin;
	if (range == HNDDMA_RANGE_ALL)
		end = di->txout;
	else {
		dma64regs_t *dregs = di->d64txregs;

		end =
		    (u16) (B2I
			   (((R_REG(&dregs->status0) &
			      D64_XS0_CD_MASK) -
			     di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));

		if (range == HNDDMA_RANGE_TRANSFERED) {
			active_desc =
			    (u16) (R_REG(&dregs->status1) &
				   D64_XS1_AD_MASK);
			active_desc =
			    (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
			active_desc = B2I(active_desc, dma64dd_t);
			if (end != active_desc)
				end = PREVTXD(active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		dmaaddr_t pa;
		hnddma_seg_map_t *map = NULL;
		uint size, j, nsegs;

		PHYSADDRLOSET(pa,
			      (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) -
			       di->dataoffsetlow));
		PHYSADDRHISET(pa,
			      (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) -
			       di->dataoffsethigh));

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[i];
			size = map->origsize;
			nsegs = map->nsegs;
		} else {
			size =
			    (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) &
			     D64_CTRL2_BC_MASK);
			nsegs = 1;
		}

		for (j = nsegs; j > 0; j--) {
			W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
			W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);

			txp = di->txp[i];
			di->txp[i] = NULL;
			if (j > 1)
				i = NEXTTXD(i);
		}

		pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
	}

	di->txin = i;

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return txp;

 bogus:
	DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d\n",
		  start, end, di->txout));
	return NULL;
}
static void *BCMFASTPATH dma64_getnextrxp(dma_info_t *di, bool forceall)
{
	uint i, curr;
	void *rxp;
	dmaaddr_t pa;

	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma64_rxenabled(di));

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return NULL;

	curr =
	    B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
		 di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return NULL;

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	ASSERT(rxp);
	di->rxp[i] = NULL;

	PHYSADDRLOSET(pa,
		      (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) -
		       di->dataoffsetlow));
	PHYSADDRHISET(pa,
		      (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) -
		       di->dataoffsethigh));

	/* clear this packet from the descriptor ring */
	pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);

	W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
	W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);

	di->rxin = NEXTRXD(i);

	return rxp;
}
static bool _dma64_addrext(dma64regs_t *dma64regs)
{
	u32 w;

	/* probe AE support: set the bit, read it back, then clear it */
	OR_REG(&dma64regs->control, D64_XC_AE);
	w = R_REG(&dma64regs->control);
	AND_REG(&dma64regs->control, ~D64_XC_AE);
	return (w & D64_XC_AE) == D64_XC_AE;
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void dma64_txrotate(dma_info_t *di)
{
	u16 ad;
	uint nactive;
	uint rot;
	u16 old, new;
	u32 w;
	u16 first, last;

	ASSERT(dma64_txsuspendedidle(di));

	nactive = _dma_txactive(di);
	ad = (u16) (B2I
		    ((((R_REG(&di->d64txregs->status1) &
			D64_XS1_AD_MASK)
		       - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t));
	rot = TXD(ad - di->txin);

	ASSERT(rot < di->ntxd);

	/* full-ring case is a lot harder - don't worry about this */
	if (rot >= (di->ntxd - nactive)) {
		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
		return;
	}

	first = di->txin;
	last = PREVTXD(di->txout);

	/* move entries starting at last and moving backwards to first */
	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
		new = TXD(old + rot);

		/*
		 * Move the tx dma descriptor.
		 * EOT is set only in the last entry in the ring.
		 */
		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
		if (new == (di->ntxd - 1))
			w |= D64_CTRL1_EOT;
		W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));

		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
		W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));

		W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
		W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));

		/* zap the old tx dma descriptor address field */
		W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
		W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));

		/* move the corresponding txp[] entry */
		ASSERT(di->txp[new] == NULL);
		di->txp[new] = di->txp[old];

		if (DMASGLIST_ENAB) {
			memcpy(&di->txp_dmah[new], &di->txp_dmah[old],
			       sizeof(hnddma_seg_map_t));
			memset(&di->txp_dmah[old], 0, sizeof(hnddma_seg_map_t));
		}

		di->txp[old] = NULL;
	}

	/* update txin and txout */
	di->txin = ad;
	di->txout = TXD(di->txout + rot);
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	/* kick the chip */
	W_REG(&di->d64txregs->ptr,
	      di->xmtptrbase + I2B(di->txout, dma64dd_t));
}
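/*
 * Note: dma64_txrotate() is only legal while tx is suspended and idle (see
 * the ASSERT above); it shifts the still-pending descriptors forward by
 * (ActiveDescriptor - txin) so the engine can continue from the hardware's
 * active position.
 */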
uint dma_addrwidth(si_t *sih, void *dmaregs)
{
	/* Perform 64-bit checks only if we want to advertise 64-bit (> 32-bit) capability */
	/* DMA engine is 64-bit capable */
	if ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
		/* backplane is 64-bit capable */
		if (si_backplane64(sih))
			/* If bus is System Backplane or PCIE then we can access 64-bits */
			if ((sih->bustype == SI_BUS) ||
			    ((sih->bustype == PCI_BUS) &&
			     (sih->buscoretype == PCIE_CORE_ID)))
				return DMADDRWIDTH_64;
	}
	ASSERT(0);		/* DMA hardware not supported by this driver */
	return DMADDRWIDTH_64;
}
/*
 * Mac80211 initiated actions sometimes require packets in the DMA queue to be
 * modified. The modified portion of the packet is not under control of the DMA
 * engine. This function calls a caller-supplied function for each packet in
 * the caller specified dma chain.
 */
void dma_walk_packets(struct hnddma_pub *dmah, void (*callback_fnc)
		      (void *pkt, void *arg_a), void *arg_a)
{
	dma_info_t *di = (dma_info_t *) dmah;
	uint i = di->txin;
	uint end = di->txout;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;

	while (i != end) {
		skb = (struct sk_buff *)di->txp[i];
		if (skb != NULL) {
			tx_info = (struct ieee80211_tx_info *)skb->cb;
			(callback_fnc)(tx_info, arg_a);
		}
		i = NEXTTXD(i);
	}
}