/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}
static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = (bufsize - ring->frameoffset)
	    & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}
static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}
static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}
static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}
static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}
static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}
static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};
/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}
static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= (bufsize - ring->frameoffset)
	    & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}
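
/*
 * Companion example for the 64-bit engine (compiled out, illustrative
 * only): the low 32 address bits go into address_low unchanged, while
 * the translation bits live in the high word. Sample value made up.
 */
#if 0
static void b43_example_dma64_addr_split(void)
{
	u64 dmaaddr = 0x00000001C0001000ULL;
	u32 addrlo = (u32)(dmaaddr & 0xFFFFFFFF);	/* 0xC0001000 */
	u32 addrhi = (u32)((dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK); /* 0x1 */
	u32 addrext = (u32)(((dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT);		/* 0x0 */
}
#endif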
static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}
static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}
static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}
static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}
static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}
static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};
static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}
static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}
static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}
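
/*
 * Usage note: next_slot()/prev_slot() implement the ring wraparound.
 * With nr_slots == 256, next_slot(ring, 255) == 0 and
 * prev_slot(ring, 0) == 255. next_slot() additionally accepts -1
 * ("no slot used yet", the initial value of ring->current_slot on TX
 * rings) and maps it to slot 0.
 */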
#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */
/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}
/* Mac80211-queue to b43-ring mapping */
static struct b43_dmaring *priority_to_txring(struct b43_wldev *dev,
					      int queue_priority)
{
	struct b43_dmaring *ring;

/*FIXME: For now we always run on TX-ring-1 */
	return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		B43_WARN_ON(1);
		/* fallthrough */
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	}

	return ring;
}
/* b43-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43_dmaring *ring)
{
	static const u8 idx_to_prio[] = { 3, 2, 1, 0, };
	unsigned int index;

/*FIXME: have only one queue, for now */
	return 0;

	index = ring->index;
	if (B43_WARN_ON(index >= ARRAY_SIZE(idx_to_prio)))
		index = 0;
	return idx_to_prio[index];
}
static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}
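
/*
 * Example: b43_dmacontroller_base(B43_DMA_64BIT, 1) yields
 * B43_MMIO_DMA64_BASE1, the MMIO offset of the second 64-bit DMA
 * controller. All per-ring register accesses (b43_dma_read/write)
 * are made relative to this base.
 */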
static inline
dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			  unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}
static inline
void unmap_descbuffer(struct b43_dmaring *ring,
		      dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}
static inline
void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
			     dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}
static inline
void sync_descbuffer_for_device(struct b43_dmaring *ring,
				dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}
static inline
void free_descriptor_buffer(struct b43_dmaring *ring,
			    struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}
static int alloc_ringmemory(struct b43_dmaring *ring)
{
	struct device *dma_dev = ring->dev->dev->dma_dev;
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment.
	 * Testing has shown that 4K is sufficient for the latter as long
	 * as the buffer does not cross an 8K boundary.
	 *
	 * For unknown reasons - possibly a hardware error - the BCM4311
	 * rev 02, which uses 64-bit DMA, needs the ring buffer in very
	 * low memory, which accounts for the GFP_DMA flag below.
	 */
	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;
	ring->descbase = dma_alloc_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
					    &(ring->dmabase), flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}
static void free_ringmemory(struct b43_dmaring *ring)
{
	struct device *dma_dev = ring->dev->dev->dma_dev;

	dma_free_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}
/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}
/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}
/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}
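
/*
 * Worked example of why the range check above is needed on top of
 * dma_mapping_error(): a 30-bit engine can only address the low 1GB,
 * so a perfectly valid kernel mapping such as addr 0x3FFFF000 with a
 * 0x2000 byte buffer fails the check (0x3FFFF000 + 0x2000 exceeds
 * 1ULL << 30). The caller must then retry from GFP_DMA memory, as
 * done in setup_rx_descbuffer() and dma_tx_fragment().
 */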
static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	struct b43_rxhdr_fw4 *rxhdr;
	struct b43_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}
/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}
/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}
/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}
static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}
static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_64BIT_MASK;
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_32BIT_MASK;

	return DMA_30BIT_MASK;
}
static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_30BIT_MASK)
		return B43_DMA_30BIT;
	if (dmamask == DMA_32BIT_MASK)
		return B43_DMA_32BIT;
	if (dmamask == DMA_64BIT_MASK)
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}
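
/*
 * Example: on a core without the SSB_TMSHIGH_DMA64 capability bit but
 * with a working address-extension field, supported_dma_mask() returns
 * DMA_32BIT_MASK, and dma_mask_to_engine_type() maps that to
 * B43_DMA_32BIT, which selects dma32_ops and the 32-bit descriptor
 * layout in b43_setup_dmaring().
 */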
/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {
				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->dev->dma_dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	ring->dev = dev;
	ring->nr_slots = nr_slots;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET;
		} else
			B43_WARN_ON(1);
	}
	ring->type = type;
	spin_lock_init(&ring->lock);
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}
/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring)
{
	if (!ring)
		return;

	b43dbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots: %d/%d\n",
	       (unsigned int)(ring->type),
	       ring->mmio_base,
	       (ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}
void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;

	b43_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	b43_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	b43_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	b43_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	b43_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	b43_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	b43_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	b43_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}
static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = 0;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = ssb_dma_set_mask(dev->dev, mask);
		if (!err)
			break;
		if (mask == DMA_64BIT_MASK) {
			mask = DMA_32BIT_MASK;
			fallback = 1;
			continue;
		}
		if (mask == DMA_32BIT_MASK) {
			mask = DMA_30BIT_MASK;
			fallback = 1;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}
int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43_setup_dmaring(dev, 0, 1, type);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43_setup_dmaring(dev, 1, 1, type);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43_setup_dmaring(dev, 2, 1, type);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43_setup_dmaring(dev, 3, 1, type);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43_setup_dmaring(dev, 4, 1, type);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43_setup_dmaring(dev, 5, 1, type);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = b43_setup_dmaring(dev, 3, 0, type);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}
/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16) slot;

	return cookie;
}
/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		B43_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}
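
/*
 * Round-trip example (using the per-ring controller IDs as
 * reconstructed above): tx_ring2 and slot 0x2A give
 * generate_cookie() == 0xC02A; parse_cookie(dev, 0xC02A, &slot) then
 * selects dma->tx_ring2 and sets slot to 0x2A.
 */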
static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb,
			   struct ieee80211_tx_control *ctl)
{
	const struct b43_dma_ops *ops = ring->ops;
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

#define SLOTS_PER_PACKET 2
	B43_WARN_ON(skb_shinfo(skb)->nr_frags);

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, ctl, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}
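
/*
 * Note on the slot layout: each frame consumes SLOTS_PER_PACKET (2)
 * ring slots - one descriptor for the cached TX header and one for
 * the skb payload. Only the payload meta carries the skb and has
 * is_last_fragment set, which is what b43_dma_handle_txstatus() keys
 * off when walking the slots of a completed transmission.
 */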
static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}
int b43_dma_tx(struct b43_wldev *dev,
	       struct sk_buff *skb, struct ieee80211_tx_control *ctl)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	unsigned long flags;

	if (unlikely(skb->len < 2 + 2 + 6)) {
		/* Too short, this can't be a valid frame. */
		return -EINVAL;
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring4;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = priority_to_txring(dev, ctl->queue);
	}

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
		b43warn(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}
	/* Check if the queue was stopped in mac80211,
	 * but we got called nevertheless.
	 * That would be a mac80211 bug. */
	B43_WARN_ON(ring->stopped);

	err = dma_tx_fragment(ring, skb, ctl);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out_unlock;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out_unlock;
	}
	ring->nr_tx_packets++;
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}
/* Called with IRQs disabled. */
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int slot;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;

	spin_lock(&ring->lock);	/* IRQs are already disabled. */

	B43_WARN_ON(!ring->tx);
	ops = ring->ops;
	while (1) {
		B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		desc = ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
					 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);

		if (meta->is_last_fragment) {
			B43_WARN_ON(!meta->skb);
			/* Call back to inform the ieee80211 subsystem about
			 * the status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			if (status->acked) {
				meta->txstat.flags |= IEEE80211_TX_STATUS_ACK;
			} else {
				if (!(meta->txstat.control.flags
				      & IEEE80211_TXCTL_NO_ACK))
					meta->txstat.excessive_retries = 1;
			}
			if (status->frame_count == 0) {
				/* The frame was not transmitted at all. */
				meta->txstat.retry_count = 0;
			} else
				meta->txstat.retry_count = status->frame_count - 1;
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
						    &(meta->txstat));
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43_WARN_ON(meta->skb);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}

	spin_unlock(&ring->lock);
}
void b43_dma_get_tx_stats(struct b43_wldev *dev,
			  struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct b43_dmaring *ring;
	struct ieee80211_tx_queue_stats_data *data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_queues; i++) {
		data = &(stats->data[i]);
		ring = priority_to_txring(dev, i);

		spin_lock_irqsave(&ring->lock, flags);
		data->len = ring->used_slots / SLOTS_PER_PACKET;
		data->limit = ring->nr_slots / SLOTS_PER_PACKET;
		data->count = ring->nr_tx_packets;
		spin_unlock_irqrestore(&ring->lock, flags);
	}
}
*ring
, int *slot
)
1485 const struct b43_dma_ops
*ops
= ring
->ops
;
1486 struct b43_dmadesc_generic
*desc
;
1487 struct b43_dmadesc_meta
*meta
;
1488 struct b43_rxhdr_fw4
*rxhdr
;
1489 struct sk_buff
*skb
;
1494 desc
= ops
->idx2desc(ring
, *slot
, &meta
);
1496 sync_descbuffer_for_cpu(ring
, meta
->dmaaddr
, ring
->rx_buffersize
);
1499 if (ring
->index
== 3) {
1500 /* We received an xmit status. */
1501 struct b43_hwtxstatus
*hw
= (struct b43_hwtxstatus
*)skb
->data
;
1504 while (hw
->cookie
== 0) {
1511 b43_handle_hwtxstatus(ring
->dev
, hw
);
1512 /* recycle the descriptor buffer. */
1513 sync_descbuffer_for_device(ring
, meta
->dmaaddr
,
1514 ring
->rx_buffersize
);
1518 rxhdr
= (struct b43_rxhdr_fw4
*)skb
->data
;
1519 len
= le16_to_cpu(rxhdr
->frame_len
);
1526 len
= le16_to_cpu(rxhdr
->frame_len
);
1527 } while (len
== 0 && i
++ < 5);
1528 if (unlikely(len
== 0)) {
1529 /* recycle the descriptor buffer. */
1530 sync_descbuffer_for_device(ring
, meta
->dmaaddr
,
1531 ring
->rx_buffersize
);
1535 if (unlikely(len
> ring
->rx_buffersize
)) {
1536 /* The data did not fit into one descriptor buffer
1537 * and is split over multiple buffers.
1538 * This should never happen, as we try to allocate buffers
1539 * big enough. So simply ignore this packet.
1545 desc
= ops
->idx2desc(ring
, *slot
, &meta
);
1546 /* recycle the descriptor buffer. */
1547 sync_descbuffer_for_device(ring
, meta
->dmaaddr
,
1548 ring
->rx_buffersize
);
1549 *slot
= next_slot(ring
, *slot
);
1551 tmp
-= ring
->rx_buffersize
;
1555 b43err(ring
->dev
->wl
, "DMA RX buffer too small "
1556 "(len: %u, buffer: %u, nr-dropped: %d)\n",
1557 len
, ring
->rx_buffersize
, cnt
);
1561 dmaaddr
= meta
->dmaaddr
;
1562 err
= setup_rx_descbuffer(ring
, desc
, meta
, GFP_ATOMIC
);
1563 if (unlikely(err
)) {
1564 b43dbg(ring
->dev
->wl
, "DMA RX: setup_rx_descbuffer() failed\n");
1565 sync_descbuffer_for_device(ring
, dmaaddr
, ring
->rx_buffersize
);
1569 unmap_descbuffer(ring
, dmaaddr
, ring
->rx_buffersize
, 0);
1570 skb_put(skb
, len
+ ring
->frameoffset
);
1571 skb_pull(skb
, ring
->frameoffset
);
1573 b43_rx(ring
->dev
, skb
, rxhdr
);
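
/*
 * RX buffer layout, for reference: the device writes ring->frameoffset
 * bytes of hardware RX header (struct b43_rxhdr_fw4) followed by the
 * 802.11 frame. Hence skb_put(skb, len + ring->frameoffset) to account
 * for everything the device wrote, then skb_pull(skb, ring->frameoffset)
 * so that skb->data points at the frame when it reaches b43_rx().
 */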
void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}
static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}
static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}
void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring0);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring1);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring2);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring3);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring4);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring5);
}
void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring5);
	b43_dma_tx_resume_ring(dev->dma.tx_ring4);
	b43_dma_tx_resume_ring(dev->dma.tx_ring3);
	b43_dma_tx_resume_ring(dev->dma.tx_ring2);
	b43_dma_tx_resume_ring(dev->dma.tx_ring1);
	b43_dma_tx_resume_ring(dev->dma.tx_ring0);
	b43_power_saving_ctl_bits(dev, 0);
}