/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/
#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <asm/div64.h>
/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}
static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = (bufsize - ring->frameoffset)
	    & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}
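/*
 * Worked example (illustrative only; assumes the SSB layer's
 * SSB_DMA_TRANSLATION_MASK == 0xC0000000 and SSB_DMA_TRANSLATION_SHIFT == 30):
 * for dmaaddr == 0x8F000000, addr keeps the untranslated low bits
 * (0x0F000000) before the routing base from ssb_dma_translation() is
 * ORed in, and addrext == (0x8F000000 & 0xC0000000) >> 30 == 2, which
 * lands in the ADDREXT field of the 32-bit descriptor control word.
 */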
static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};
/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}
static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= (bufsize - ring->frameoffset)
	    & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}
static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};
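/*
 * Note (added for clarity): the two vtables above are the only code that
 * knows the 32-bit vs. 64-bit descriptor layout. The rest of the driver
 * picks one vtable per ring in b43_setup_dmaring() and always dispatches
 * through it, e.g.:
 *
 *	ring->ops = (type == B43_DMA_64BIT) ? &dma64_ops : &dma32_ops;
 *	ring->ops->fill_descriptor(ring, desc, dmaaddr, len, 0, 1, 1);
 */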
static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}
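/*
 * Example of the wraparound arithmetic: with nr_slots == 4,
 * next_slot(ring, 3) == 0 and prev_slot(ring, 0) == 3. next_slot() also
 * accepts -1 (yielding slot 0), which is why current_slot is initialized
 * to -1 for an empty TX ring.
 */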
#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
    void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}
static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}
static inline
    dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			      unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = ssb_dma_map_single(ring->dev->dev,
					     buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = ssb_dma_map_single(ring->dev->dev,
					     buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
    void unmap_descbuffer(struct b43_dmaring *ring,
			  dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		ssb_dma_unmap_single(ring->dev->dev,
				     addr, len, DMA_TO_DEVICE);
	} else {
		ssb_dma_unmap_single(ring->dev->dev,
				     addr, len, DMA_FROM_DEVICE);
	}
}
static inline
    void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
				 dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	ssb_dma_sync_single_for_cpu(ring->dev->dev,
				    addr, len, DMA_FROM_DEVICE);
}

static inline
    void sync_descbuffer_for_device(struct b43_dmaring *ring,
				    dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	ssb_dma_sync_single_for_device(ring->dev->dev,
				       addr, len, DMA_FROM_DEVICE);
}
static inline
    void free_descriptor_buffer(struct b43_dmaring *ring,
				struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}
static int alloc_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
	 * has shown that 4K is sufficient for the latter as long as the buffer
	 * does not cross an 8K boundary.
	 *
	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
	 * which accounts for the GFP_DMA flag below.
	 *
	 * The flags here must match the flags in free_ringmemory below!
	 */
	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;
	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
						  B43_DMA_RINGMEMSIZE,
						  &(ring->dmabase), flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;

	ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
				ring->descbase, ring->dmabase, flags);
}
/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}
/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}
/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}
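/*
 * Example: on a 30-bit engine a buffer mapped at addr == 0x3FFFF000 with
 * buffersize == 0x1000 ends exactly at 1ULL << 30 and is accepted; one
 * byte more would exceed the engine's addressable range, so the mapping
 * would be undone and rejected above.
 */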
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}

static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr;
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}
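/*
 * The check in b43_rx_buffer_is_poisoned() works because ANDing the eight
 * bytes after the frameoffset yields 0xFF if and only if every byte still
 * carries the 0xFF poison written above, i.e. the device never wrote a
 * frame (PLCP header plus padding) into the buffer.
 */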
static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}
/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}
/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}
/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}
static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}
static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_64BIT_MASK;
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_32BIT_MASK;

	return DMA_30BIT_MASK;
}
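/*
 * How the probe above works: if the core advertises 64-bit DMA capability
 * in TMSHIGH, take it. Otherwise write the ADDREXT mask into the first
 * 32-bit TX control register and read it back; if the bits stick, the
 * engine implements address extension (32-bit DMA), and if not, only
 * 30 bits of address are wired up.
 */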
static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_30BIT_MASK)
		return B43_DMA_30BIT;
	if (dmamask == DMA_32BIT_MASK)
		return B43_DMA_32BIT;
	if (dmamask == DMA_64BIT_MASK)
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}
/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET;
		} else
			B43_WARN_ON(1);
	}
	spin_lock_init(&ring->lock);
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		ring->txhdr_cache = kcalloc(ring->nr_slots,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = ssb_dma_map_single(dev->dev,
					      ring->txhdr_cache,
					      b43_txhdr_size(dev),
					      DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = ssb_dma_map_single(dev->dev,
						      ring->txhdr_cache,
						      b43_txhdr_size(dev),
						      DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {
				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		ssb_dma_unmap_single(dev->dev,
				     dma_test, b43_txhdr_size(dev),
				     DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}
#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })
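/*
 * Example (illustrative): with failed_packets == 3 and nr_packets == 200,
 * permille_failed == divide(3 * 1000, 200) == 15, which the statistics
 * printout below renders as "1.5%" via divide(15, 10) and modulo(15, 10).
 */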
/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}
#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}
static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = 0;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = ssb_dma_set_mask(dev->dev, mask);
		if (!err)
			break;
		if (mask == DMA_64BIT_MASK) {
			mask = DMA_32BIT_MASK;
			fallback = 1;
			continue;
		}
		if (mask == DMA_32BIT_MASK) {
			mask = DMA_30BIT_MASK;
			fallback = 1;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}
int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->id.revision < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	goto out;
}
/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}
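/*
 * Example: for ring->index == 2 and slot == 5, the cookie is
 * ((2 + 1) << 12) | 5 == 0x3005. parse_cookie() below reverses this by
 * masking with 0xF000 to select the ring and 0x0FFF to recover the slot.
 */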
/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	default:
		B43_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}
static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

#define SLOTS_PER_PACKET  2

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}
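/*
 * To summarize the descriptor layout built above: each frame occupies
 * SLOTS_PER_PACKET == 2 ring slots. The first carries the device TX
 * header (start=1, end=0, irq=0), the second the 802.11 payload
 * (start=0, end=1, irq=1), so the device raises an interrupt only once
 * per frame, after the last fragment.
 */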
static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}
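/*
 * Since next_overflow is last_injected_overflow + HZ jiffies, at most one
 * artificial overflow is injected per second of wall-clock time.
 */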
/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring * select_ring_by_priority(struct b43_wldev *dev,
						    u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (b43_modparam_qos) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}
int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	unsigned long flags;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
		b43warn(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}
	/* Check if the queue was stopped in mac80211,
	 * but we got called nevertheless.
	 * That would be a mac80211 bug. */
	B43_WARN_ON(ring->stopped);

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out_unlock;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out_unlock;
	}
	ring->nr_tx_packets++;
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}
/* Called with IRQs disabled. */
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int slot;
	bool frame_succeed;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;

	spin_lock(&ring->lock);	/* IRQs are already disabled. */

	B43_WARN_ON(!ring->tx);
	ops = ring->ops;
	while (1) {
		B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		desc = ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
					 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission.
			 */
			frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);

			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43_WARN_ON(meta->skb);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}

	spin_unlock(&ring->lock);
}
void b43_dma_get_tx_stats(struct b43_wldev *dev,
			  struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct b43_dmaring *ring;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_queues; i++) {
		ring = select_ring_by_priority(dev, i);

		spin_lock_irqsave(&ring->lock, flags);
		stats[i].len = ring->used_slots / SLOTS_PER_PACKET;
		stats[i].limit = ring->nr_slots / SLOTS_PER_PACKET;
		stats[i].count = ring->nr_tx_packets;
		spin_unlock_irqrestore(&ring->lock, flags);
	}
}
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}
void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}
static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}
#ifdef CONFIG_B43_PIO
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}
/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));

	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}
#endif /* CONFIG_B43_PIO */