3 Broadcom BCM43xx wireless driver
5 DMA ringbuffer and descriptor allocation/management
7 Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de>
9 Some code in this file is derived from the b44.c driver
10 Copyright (C) 2002 David S. Miller
11 Copyright (C) Pekka Pietikainen
13 This program is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2 of the License, or
16 (at your option) any later version.
18 This program is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with this program; see the file COPYING. If not, write to
25 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
26 Boston, MA 02110-1301, USA.
31 #include "bcm43xx_dma.h"
32 #include "bcm43xx_main.h"
33 #include "bcm43xx_debugfs.h"
34 #include "bcm43xx_power.h"
35 #include "bcm43xx_xmit.h"
37 #include <linux/dmapool.h>
38 #include <linux/pci.h>
39 #include <linux/delay.h>
40 #include <linux/skbuff.h>
41 #include <asm/semaphore.h>
44 static inline int free_slots(struct bcm43xx_dmaring
*ring
)
46 return (ring
->nr_slots
- ring
->used_slots
);
49 static inline int next_slot(struct bcm43xx_dmaring
*ring
, int slot
)
51 assert(slot
>= -1 && slot
<= ring
->nr_slots
- 1);
52 if (slot
== ring
->nr_slots
- 1)
57 static inline int prev_slot(struct bcm43xx_dmaring
*ring
, int slot
)
59 assert(slot
>= 0 && slot
<= ring
->nr_slots
- 1);
61 return ring
->nr_slots
- 1;
65 /* Request a slot for usage. */
67 int request_slot(struct bcm43xx_dmaring
*ring
)
72 assert(!ring
->suspended
);
73 assert(free_slots(ring
) != 0);
75 slot
= next_slot(ring
, ring
->current_slot
);
76 ring
->current_slot
= slot
;
79 /* Check the number of available slots and suspend TX,
80 * if we are running low on free slots.
82 if (unlikely(free_slots(ring
) < ring
->suspend_mark
)) {
83 netif_stop_queue(ring
->bcm
->net_dev
);
86 #ifdef CONFIG_BCM43XX_DEBUG
87 if (ring
->used_slots
> ring
->max_used_slots
)
88 ring
->max_used_slots
= ring
->used_slots
;
89 #endif /* CONFIG_BCM43XX_DEBUG*/
94 /* Return a slot to the free slots. */
96 void return_slot(struct bcm43xx_dmaring
*ring
, int slot
)
102 /* Check if TX is suspended and check if we have
103 * enough free slots to resume it again.
105 if (unlikely(ring
->suspended
)) {
106 if (free_slots(ring
) >= ring
->resume_mark
) {
108 netif_wake_queue(ring
->bcm
->net_dev
);
114 dma_addr_t
map_descbuffer(struct bcm43xx_dmaring
*ring
,
122 dmaaddr
= dma_map_single(&ring
->bcm
->pci_dev
->dev
,
126 dmaaddr
= dma_map_single(&ring
->bcm
->pci_dev
->dev
,
135 void unmap_descbuffer(struct bcm43xx_dmaring
*ring
,
141 dma_unmap_single(&ring
->bcm
->pci_dev
->dev
,
145 dma_unmap_single(&ring
->bcm
->pci_dev
->dev
,
152 void sync_descbuffer_for_cpu(struct bcm43xx_dmaring
*ring
,
158 dma_sync_single_for_cpu(&ring
->bcm
->pci_dev
->dev
,
159 addr
, len
, DMA_FROM_DEVICE
);
163 void sync_descbuffer_for_device(struct bcm43xx_dmaring
*ring
,
169 dma_sync_single_for_device(&ring
->bcm
->pci_dev
->dev
,
170 addr
, len
, DMA_FROM_DEVICE
);
173 /* Unmap and free a descriptor buffer. */
175 void free_descriptor_buffer(struct bcm43xx_dmaring
*ring
,
176 struct bcm43xx_dmadesc
*desc
,
177 struct bcm43xx_dmadesc_meta
*meta
,
182 dev_kfree_skb_irq(meta
->skb
);
184 dev_kfree_skb(meta
->skb
);
188 static int alloc_ringmemory(struct bcm43xx_dmaring
*ring
)
190 struct device
*dev
= &(ring
->bcm
->pci_dev
->dev
);
192 ring
->vbase
= dma_alloc_coherent(dev
, BCM43xx_DMA_RINGMEMSIZE
,
193 &(ring
->dmabase
), GFP_KERNEL
);
195 printk(KERN_ERR PFX
"DMA ringmemory allocation failed\n");
198 if (ring
->dmabase
+ BCM43xx_DMA_RINGMEMSIZE
> BCM43xx_DMA_BUSADDRMAX
) {
199 printk(KERN_ERR PFX
">>>FATAL ERROR<<< DMA RINGMEMORY >1G "
200 "(0x%08x, len: %lu)\n",
201 ring
->dmabase
, BCM43xx_DMA_RINGMEMSIZE
);
202 dma_free_coherent(dev
, BCM43xx_DMA_RINGMEMSIZE
,
203 ring
->vbase
, ring
->dmabase
);
206 assert(!(ring
->dmabase
& 0x000003FF));
207 memset(ring
->vbase
, 0, BCM43xx_DMA_RINGMEMSIZE
);
212 static void free_ringmemory(struct bcm43xx_dmaring
*ring
)
214 struct device
*dev
= &(ring
->bcm
->pci_dev
->dev
);
216 dma_free_coherent(dev
, BCM43xx_DMA_RINGMEMSIZE
,
217 ring
->vbase
, ring
->dmabase
);
220 /* Reset the RX DMA channel */
221 int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private
*bcm
,
228 mmio_base
+ BCM43xx_DMA_RX_CONTROL
,
230 for (i
= 0; i
< 1000; i
++) {
231 value
= bcm43xx_read32(bcm
,
232 mmio_base
+ BCM43xx_DMA_RX_STATUS
);
233 value
&= BCM43xx_DMA_RXSTAT_STAT_MASK
;
234 if (value
== BCM43xx_DMA_RXSTAT_STAT_DISABLED
) {
241 printk(KERN_ERR PFX
"Error: Wait on DMA RX status timed out.\n");
248 /* Reset the RX DMA channel */
249 int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private
*bcm
,
255 for (i
= 0; i
< 1000; i
++) {
256 value
= bcm43xx_read32(bcm
,
257 mmio_base
+ BCM43xx_DMA_TX_STATUS
);
258 value
&= BCM43xx_DMA_TXSTAT_STAT_MASK
;
259 if (value
== BCM43xx_DMA_TXSTAT_STAT_DISABLED
||
260 value
== BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT
||
261 value
== BCM43xx_DMA_TXSTAT_STAT_STOPPED
)
266 mmio_base
+ BCM43xx_DMA_TX_CONTROL
,
268 for (i
= 0; i
< 1000; i
++) {
269 value
= bcm43xx_read32(bcm
,
270 mmio_base
+ BCM43xx_DMA_TX_STATUS
);
271 value
&= BCM43xx_DMA_TXSTAT_STAT_MASK
;
272 if (value
== BCM43xx_DMA_TXSTAT_STAT_DISABLED
) {
279 printk(KERN_ERR PFX
"Error: Wait on DMA TX status timed out.\n");
282 /* ensure the reset is completed. */
288 static int setup_rx_descbuffer(struct bcm43xx_dmaring
*ring
,
289 struct bcm43xx_dmadesc
*desc
,
290 struct bcm43xx_dmadesc_meta
*meta
,
293 struct bcm43xx_rxhdr
*rxhdr
;
297 const int slot
= (int)(desc
- ring
->vbase
);
300 assert(slot
>= 0 && slot
< ring
->nr_slots
);
303 skb
= __dev_alloc_skb(ring
->rx_buffersize
, gfp_flags
);
306 dmaaddr
= map_descbuffer(ring
, skb
->data
, ring
->rx_buffersize
, 0);
307 if (unlikely(dmaaddr
+ ring
->rx_buffersize
> BCM43xx_DMA_BUSADDRMAX
)) {
308 unmap_descbuffer(ring
, dmaaddr
, ring
->rx_buffersize
, 0);
309 dev_kfree_skb_any(skb
);
310 printk(KERN_ERR PFX
">>>FATAL ERROR<<< DMA RX SKB >1G "
311 "(0x%08x, len: %u)\n",
312 dmaaddr
, ring
->rx_buffersize
);
316 meta
->dmaaddr
= dmaaddr
;
317 skb
->dev
= ring
->bcm
->net_dev
;
318 desc_addr
= (u32
)(dmaaddr
+ ring
->memoffset
);
319 desc_ctl
= (BCM43xx_DMADTOR_BYTECNT_MASK
&
320 (u32
)(ring
->rx_buffersize
- ring
->frameoffset
));
321 if (slot
== ring
->nr_slots
- 1)
322 desc_ctl
|= BCM43xx_DMADTOR_DTABLEEND
;
323 set_desc_addr(desc
, desc_addr
);
324 set_desc_ctl(desc
, desc_ctl
);
326 rxhdr
= (struct bcm43xx_rxhdr
*)(skb
->data
);
327 rxhdr
->frame_length
= 0;
333 /* Allocate the initial descbuffers.
334 * This is used for an RX ring only.
336 static int alloc_initial_descbuffers(struct bcm43xx_dmaring
*ring
)
338 int i
, err
= -ENOMEM
;
339 struct bcm43xx_dmadesc
*desc
;
340 struct bcm43xx_dmadesc_meta
*meta
;
342 for (i
= 0; i
< ring
->nr_slots
; i
++) {
343 desc
= ring
->vbase
+ i
;
344 meta
= ring
->meta
+ i
;
346 err
= setup_rx_descbuffer(ring
, desc
, meta
, GFP_KERNEL
);
350 ring
->used_slots
= ring
->nr_slots
;
356 for (i
--; i
>= 0; i
--) {
357 desc
= ring
->vbase
+ i
;
358 meta
= ring
->meta
+ i
;
360 unmap_descbuffer(ring
, meta
->dmaaddr
, ring
->rx_buffersize
, 0);
361 dev_kfree_skb(meta
->skb
);
366 /* Do initial setup of the DMA controller.
367 * Reset the controller, write the ring busaddress
368 * and switch the "enable" bit on.
370 static int dmacontroller_setup(struct bcm43xx_dmaring
*ring
)
376 /* Set Transmit Control register to "transmit enable" */
377 bcm43xx_dma_write(ring
, BCM43xx_DMA_TX_CONTROL
,
378 BCM43xx_DMA_TXCTRL_ENABLE
);
379 /* Set Transmit Descriptor ring address. */
380 bcm43xx_dma_write(ring
, BCM43xx_DMA_TX_DESC_RING
,
381 ring
->dmabase
+ ring
->memoffset
);
383 err
= alloc_initial_descbuffers(ring
);
386 /* Set Receive Control "receive enable" and frame offset */
387 value
= (ring
->frameoffset
<< BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT
);
388 value
|= BCM43xx_DMA_RXCTRL_ENABLE
;
389 bcm43xx_dma_write(ring
, BCM43xx_DMA_RX_CONTROL
, value
);
390 /* Set Receive Descriptor ring address. */
391 bcm43xx_dma_write(ring
, BCM43xx_DMA_RX_DESC_RING
,
392 ring
->dmabase
+ ring
->memoffset
);
393 /* Init the descriptor pointer. */
394 bcm43xx_dma_write(ring
, BCM43xx_DMA_RX_DESC_INDEX
, 200);
401 /* Shutdown the DMA controller. */
402 static void dmacontroller_cleanup(struct bcm43xx_dmaring
*ring
)
405 bcm43xx_dmacontroller_tx_reset(ring
->bcm
, ring
->mmio_base
);
406 /* Zero out Transmit Descriptor ring address. */
407 bcm43xx_dma_write(ring
, BCM43xx_DMA_TX_DESC_RING
, 0);
409 bcm43xx_dmacontroller_rx_reset(ring
->bcm
, ring
->mmio_base
);
410 /* Zero out Receive Descriptor ring address. */
411 bcm43xx_dma_write(ring
, BCM43xx_DMA_RX_DESC_RING
, 0);
415 static void free_all_descbuffers(struct bcm43xx_dmaring
*ring
)
417 struct bcm43xx_dmadesc
*desc
;
418 struct bcm43xx_dmadesc_meta
*meta
;
421 if (!ring
->used_slots
)
423 for (i
= 0; i
< ring
->nr_slots
; i
++) {
424 desc
= ring
->vbase
+ i
;
425 meta
= ring
->meta
+ i
;
432 unmap_descbuffer(ring
, meta
->dmaaddr
,
435 unmap_descbuffer(ring
, meta
->dmaaddr
,
436 ring
->rx_buffersize
, 0);
438 free_descriptor_buffer(ring
, desc
, meta
, 0);
442 /* Main initialization function. */
444 struct bcm43xx_dmaring
* bcm43xx_setup_dmaring(struct bcm43xx_private
*bcm
,
445 u16 dma_controller_base
,
446 int nr_descriptor_slots
,
449 struct bcm43xx_dmaring
*ring
;
452 ring
= kzalloc(sizeof(*ring
), GFP_KERNEL
);
456 ring
->meta
= kzalloc(sizeof(*ring
->meta
) * nr_descriptor_slots
,
461 ring
->memoffset
= BCM43xx_DMA_DMABUSADDROFFSET
;
462 #ifdef CONFIG_BCM947XX
463 if (bcm
->pci_dev
->bus
->number
== 0)
468 ring
->nr_slots
= nr_descriptor_slots
;
469 ring
->suspend_mark
= ring
->nr_slots
* BCM43xx_TXSUSPEND_PERCENT
/ 100;
470 ring
->resume_mark
= ring
->nr_slots
* BCM43xx_TXRESUME_PERCENT
/ 100;
471 assert(ring
->suspend_mark
< ring
->resume_mark
);
472 ring
->mmio_base
= dma_controller_base
;
475 ring
->current_slot
= -1;
477 switch (dma_controller_base
) {
478 case BCM43xx_MMIO_DMA1_BASE
:
479 ring
->rx_buffersize
= BCM43xx_DMA1_RXBUFFERSIZE
;
480 ring
->frameoffset
= BCM43xx_DMA1_RX_FRAMEOFFSET
;
482 case BCM43xx_MMIO_DMA4_BASE
:
483 ring
->rx_buffersize
= BCM43xx_DMA4_RXBUFFERSIZE
;
484 ring
->frameoffset
= BCM43xx_DMA4_RX_FRAMEOFFSET
;
491 err
= alloc_ringmemory(ring
);
494 err
= dmacontroller_setup(ring
);
496 goto err_free_ringmemory
;
502 free_ringmemory(ring
);
511 /* Main cleanup function. */
512 static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring
*ring
)
517 dprintk(KERN_INFO PFX
"DMA 0x%04x (%s) max used slots: %d/%d\n",
519 (ring
->tx
) ? "TX" : "RX",
520 ring
->max_used_slots
, ring
->nr_slots
);
521 /* Device IRQs are disabled prior entering this function,
522 * so no need to take care of concurrency with rx handler stuff.
524 dmacontroller_cleanup(ring
);
525 free_all_descbuffers(ring
);
526 free_ringmemory(ring
);
532 void bcm43xx_dma_free(struct bcm43xx_private
*bcm
)
534 struct bcm43xx_dma
*dma
= bcm43xx_current_dma(bcm
);
536 bcm43xx_destroy_dmaring(dma
->rx_ring1
);
537 dma
->rx_ring1
= NULL
;
538 bcm43xx_destroy_dmaring(dma
->rx_ring0
);
539 dma
->rx_ring0
= NULL
;
540 bcm43xx_destroy_dmaring(dma
->tx_ring3
);
541 dma
->tx_ring3
= NULL
;
542 bcm43xx_destroy_dmaring(dma
->tx_ring2
);
543 dma
->tx_ring2
= NULL
;
544 bcm43xx_destroy_dmaring(dma
->tx_ring1
);
545 dma
->tx_ring1
= NULL
;
546 bcm43xx_destroy_dmaring(dma
->tx_ring0
);
547 dma
->tx_ring0
= NULL
;
550 int bcm43xx_dma_init(struct bcm43xx_private
*bcm
)
552 struct bcm43xx_dma
*dma
= bcm43xx_current_dma(bcm
);
553 struct bcm43xx_dmaring
*ring
;
556 /* setup TX DMA channels. */
557 ring
= bcm43xx_setup_dmaring(bcm
, BCM43xx_MMIO_DMA1_BASE
,
558 BCM43xx_TXRING_SLOTS
, 1);
561 dma
->tx_ring0
= ring
;
563 ring
= bcm43xx_setup_dmaring(bcm
, BCM43xx_MMIO_DMA2_BASE
,
564 BCM43xx_TXRING_SLOTS
, 1);
566 goto err_destroy_tx0
;
567 dma
->tx_ring1
= ring
;
569 ring
= bcm43xx_setup_dmaring(bcm
, BCM43xx_MMIO_DMA3_BASE
,
570 BCM43xx_TXRING_SLOTS
, 1);
572 goto err_destroy_tx1
;
573 dma
->tx_ring2
= ring
;
575 ring
= bcm43xx_setup_dmaring(bcm
, BCM43xx_MMIO_DMA4_BASE
,
576 BCM43xx_TXRING_SLOTS
, 1);
578 goto err_destroy_tx2
;
579 dma
->tx_ring3
= ring
;
581 /* setup RX DMA channels. */
582 ring
= bcm43xx_setup_dmaring(bcm
, BCM43xx_MMIO_DMA1_BASE
,
583 BCM43xx_RXRING_SLOTS
, 0);
585 goto err_destroy_tx3
;
586 dma
->rx_ring0
= ring
;
588 if (bcm
->current_core
->rev
< 5) {
589 ring
= bcm43xx_setup_dmaring(bcm
, BCM43xx_MMIO_DMA4_BASE
,
590 BCM43xx_RXRING_SLOTS
, 0);
592 goto err_destroy_rx0
;
593 dma
->rx_ring1
= ring
;
596 dprintk(KERN_INFO PFX
"DMA initialized\n");
602 bcm43xx_destroy_dmaring(dma
->rx_ring0
);
603 dma
->rx_ring0
= NULL
;
605 bcm43xx_destroy_dmaring(dma
->tx_ring3
);
606 dma
->tx_ring3
= NULL
;
608 bcm43xx_destroy_dmaring(dma
->tx_ring2
);
609 dma
->tx_ring2
= NULL
;
611 bcm43xx_destroy_dmaring(dma
->tx_ring1
);
612 dma
->tx_ring1
= NULL
;
614 bcm43xx_destroy_dmaring(dma
->tx_ring0
);
615 dma
->tx_ring0
= NULL
;
619 /* Generate a cookie for the TX header. */
620 static u16
generate_cookie(struct bcm43xx_dmaring
*ring
,
625 /* Use the upper 4 bits of the cookie as
626 * DMA controller ID and store the slot number
627 * in the lower 12 bits
629 switch (ring
->mmio_base
) {
632 case BCM43xx_MMIO_DMA1_BASE
:
634 case BCM43xx_MMIO_DMA2_BASE
:
637 case BCM43xx_MMIO_DMA3_BASE
:
640 case BCM43xx_MMIO_DMA4_BASE
:
644 assert(((u16
)slot
& 0xF000) == 0x0000);
650 /* Inspect a cookie and find out to which controller/slot it belongs. */
652 struct bcm43xx_dmaring
* parse_cookie(struct bcm43xx_private
*bcm
,
653 u16 cookie
, int *slot
)
655 struct bcm43xx_dma
*dma
= bcm43xx_current_dma(bcm
);
656 struct bcm43xx_dmaring
*ring
= NULL
;
658 switch (cookie
& 0xF000) {
660 ring
= dma
->tx_ring0
;
663 ring
= dma
->tx_ring1
;
666 ring
= dma
->tx_ring2
;
669 ring
= dma
->tx_ring3
;
674 *slot
= (cookie
& 0x0FFF);
675 assert(*slot
>= 0 && *slot
< ring
->nr_slots
);
680 static void dmacontroller_poke_tx(struct bcm43xx_dmaring
*ring
,
683 /* Everything is ready to start. Buffers are DMA mapped and
684 * associated with slots.
685 * "slot" is the last slot of the new frame we want to transmit.
686 * Close your seat belts now, please.
689 slot
= next_slot(ring
, slot
);
690 bcm43xx_dma_write(ring
, BCM43xx_DMA_TX_DESC_INDEX
,
691 (u32
)(slot
* sizeof(struct bcm43xx_dmadesc
)));
694 static int dma_tx_fragment(struct bcm43xx_dmaring
*ring
,
699 struct bcm43xx_dmadesc
*desc
;
700 struct bcm43xx_dmadesc_meta
*meta
;
704 assert(skb_shinfo(skb
)->nr_frags
== 0);
706 slot
= request_slot(ring
);
707 desc
= ring
->vbase
+ slot
;
708 meta
= ring
->meta
+ slot
;
710 /* Add a device specific TX header. */
711 assert(skb_headroom(skb
) >= sizeof(struct bcm43xx_txhdr
));
712 /* Reserve enough headroom for the device tx header. */
713 __skb_push(skb
, sizeof(struct bcm43xx_txhdr
));
714 /* Now calculate and add the tx header.
715 * The tx header includes the PLCP header.
717 bcm43xx_generate_txhdr(ring
->bcm
,
718 (struct bcm43xx_txhdr
*)skb
->data
,
719 skb
->data
+ sizeof(struct bcm43xx_txhdr
),
720 skb
->len
- sizeof(struct bcm43xx_txhdr
),
722 generate_cookie(ring
, slot
));
725 meta
->dmaaddr
= map_descbuffer(ring
, skb
->data
, skb
->len
, 1);
726 if (unlikely(meta
->dmaaddr
+ skb
->len
> BCM43xx_DMA_BUSADDRMAX
)) {
727 return_slot(ring
, slot
);
728 printk(KERN_ERR PFX
">>>FATAL ERROR<<< DMA TX SKB >1G "
729 "(0x%08x, len: %u)\n",
730 meta
->dmaaddr
, skb
->len
);
734 desc_addr
= (u32
)(meta
->dmaaddr
+ ring
->memoffset
);
735 desc_ctl
= BCM43xx_DMADTOR_FRAMESTART
| BCM43xx_DMADTOR_FRAMEEND
;
736 desc_ctl
|= BCM43xx_DMADTOR_COMPIRQ
;
737 desc_ctl
|= (BCM43xx_DMADTOR_BYTECNT_MASK
&
738 (u32
)(meta
->skb
->len
- ring
->frameoffset
));
739 if (slot
== ring
->nr_slots
- 1)
740 desc_ctl
|= BCM43xx_DMADTOR_DTABLEEND
;
742 set_desc_ctl(desc
, desc_ctl
);
743 set_desc_addr(desc
, desc_addr
);
744 /* Now transfer the whole frame. */
745 dmacontroller_poke_tx(ring
, slot
);
750 int bcm43xx_dma_tx(struct bcm43xx_private
*bcm
,
751 struct ieee80211_txb
*txb
)
753 /* We just received a packet from the kernel network subsystem.
754 * Add headers and DMA map the memory. Poke
755 * the device to send the stuff.
756 * Note that this is called from atomic context.
758 struct bcm43xx_dmaring
*ring
= bcm43xx_current_dma(bcm
)->tx_ring1
;
763 if (unlikely(free_slots(ring
) < txb
->nr_frags
)) {
764 /* The queue should be stopped,
765 * if we are low on free slots.
766 * If this ever triggers, we have to lower the suspend_mark.
768 dprintkl(KERN_ERR PFX
"Out of DMA descriptor slots!\n");
772 for (i
= 0; i
< txb
->nr_frags
; i
++) {
773 skb
= txb
->fragments
[i
];
774 /* Take skb from ieee80211_txb_free */
775 txb
->fragments
[i
] = NULL
;
776 dma_tx_fragment(ring
, skb
, i
);
777 //TODO: handle failure of dma_tx_fragment
779 ieee80211_txb_free(txb
);
784 void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private
*bcm
,
785 struct bcm43xx_xmitstatus
*status
)
787 struct bcm43xx_dma
*dma
= bcm43xx_current_dma(bcm
);
788 struct bcm43xx_dmaring
*ring
;
789 struct bcm43xx_dmadesc
*desc
;
790 struct bcm43xx_dmadesc_meta
*meta
;
791 int is_last_fragment
;
794 ring
= parse_cookie(bcm
, status
->cookie
, &slot
);
797 assert(get_desc_ctl(ring
->vbase
+ slot
) & BCM43xx_DMADTOR_FRAMESTART
);
799 assert(slot
>= 0 && slot
< ring
->nr_slots
);
800 desc
= ring
->vbase
+ slot
;
801 meta
= ring
->meta
+ slot
;
803 is_last_fragment
= !!(get_desc_ctl(desc
) & BCM43xx_DMADTOR_FRAMEEND
);
804 unmap_descbuffer(ring
, meta
->dmaaddr
, meta
->skb
->len
, 1);
805 free_descriptor_buffer(ring
, desc
, meta
, 1);
806 /* Everything belonging to the slot is unmapped
807 * and freed, so we can return it.
809 return_slot(ring
, slot
);
811 if (is_last_fragment
)
813 slot
= next_slot(ring
, slot
);
815 bcm
->stats
.last_tx
= jiffies
;
818 static void dma_rx(struct bcm43xx_dmaring
*ring
,
821 struct bcm43xx_dmadesc
*desc
;
822 struct bcm43xx_dmadesc_meta
*meta
;
823 struct bcm43xx_rxhdr
*rxhdr
;
829 desc
= ring
->vbase
+ *slot
;
830 meta
= ring
->meta
+ *slot
;
832 sync_descbuffer_for_cpu(ring
, meta
->dmaaddr
, ring
->rx_buffersize
);
835 if (ring
->mmio_base
== BCM43xx_MMIO_DMA4_BASE
) {
836 /* We received an xmit status. */
837 struct bcm43xx_hwxmitstatus
*hw
= (struct bcm43xx_hwxmitstatus
*)skb
->data
;
838 struct bcm43xx_xmitstatus stat
;
840 stat
.cookie
= le16_to_cpu(hw
->cookie
);
841 stat
.flags
= hw
->flags
;
842 stat
.cnt1
= hw
->cnt1
;
843 stat
.cnt2
= hw
->cnt2
;
844 stat
.seq
= le16_to_cpu(hw
->seq
);
845 stat
.unknown
= le16_to_cpu(hw
->unknown
);
847 bcm43xx_debugfs_log_txstat(ring
->bcm
, &stat
);
848 bcm43xx_dma_handle_xmitstatus(ring
->bcm
, &stat
);
849 /* recycle the descriptor buffer. */
850 sync_descbuffer_for_device(ring
, meta
->dmaaddr
, ring
->rx_buffersize
);
854 rxhdr
= (struct bcm43xx_rxhdr
*)skb
->data
;
855 len
= le16_to_cpu(rxhdr
->frame_length
);
862 len
= le16_to_cpu(rxhdr
->frame_length
);
863 } while (len
== 0 && i
++ < 5);
864 if (unlikely(len
== 0)) {
865 /* recycle the descriptor buffer. */
866 sync_descbuffer_for_device(ring
, meta
->dmaaddr
,
867 ring
->rx_buffersize
);
871 if (unlikely(len
> ring
->rx_buffersize
)) {
872 /* The data did not fit into one descriptor buffer
873 * and is split over multiple buffers.
874 * This should never happen, as we try to allocate buffers
875 * big enough. So simply ignore this packet.
881 desc
= ring
->vbase
+ *slot
;
882 meta
= ring
->meta
+ *slot
;
883 /* recycle the descriptor buffer. */
884 sync_descbuffer_for_device(ring
, meta
->dmaaddr
,
885 ring
->rx_buffersize
);
886 *slot
= next_slot(ring
, *slot
);
888 tmp
-= ring
->rx_buffersize
;
892 printkl(KERN_ERR PFX
"DMA RX buffer too small "
893 "(len: %u, buffer: %u, nr-dropped: %d)\n",
894 len
, ring
->rx_buffersize
, cnt
);
897 len
-= IEEE80211_FCS_LEN
;
899 dmaaddr
= meta
->dmaaddr
;
900 err
= setup_rx_descbuffer(ring
, desc
, meta
, GFP_ATOMIC
);
902 dprintkl(KERN_ERR PFX
"DMA RX: setup_rx_descbuffer() failed\n");
903 sync_descbuffer_for_device(ring
, dmaaddr
,
904 ring
->rx_buffersize
);
908 unmap_descbuffer(ring
, dmaaddr
, ring
->rx_buffersize
, 0);
909 skb_put(skb
, len
+ ring
->frameoffset
);
910 skb_pull(skb
, ring
->frameoffset
);
912 err
= bcm43xx_rx(ring
->bcm
, skb
, rxhdr
);
914 dev_kfree_skb_irq(skb
);
922 void bcm43xx_dma_rx(struct bcm43xx_dmaring
*ring
)
926 int slot
, current_slot
;
927 #ifdef CONFIG_BCM43XX_DEBUG
932 status
= bcm43xx_dma_read(ring
, BCM43xx_DMA_RX_STATUS
);
933 descptr
= (status
& BCM43xx_DMA_RXSTAT_DPTR_MASK
);
934 current_slot
= descptr
/ sizeof(struct bcm43xx_dmadesc
);
935 assert(current_slot
>= 0 && current_slot
< ring
->nr_slots
);
937 slot
= ring
->current_slot
;
938 for ( ; slot
!= current_slot
; slot
= next_slot(ring
, slot
)) {
940 #ifdef CONFIG_BCM43XX_DEBUG
941 if (++used_slots
> ring
->max_used_slots
)
942 ring
->max_used_slots
= used_slots
;
945 bcm43xx_dma_write(ring
, BCM43xx_DMA_RX_DESC_INDEX
,
946 (u32
)(slot
* sizeof(struct bcm43xx_dmadesc
)));
947 ring
->current_slot
= slot
;
950 void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring
*ring
)
953 bcm43xx_power_saving_ctl_bits(ring
->bcm
, -1, 1);
954 bcm43xx_dma_write(ring
, BCM43xx_DMA_TX_CONTROL
,
955 bcm43xx_dma_read(ring
, BCM43xx_DMA_TX_CONTROL
)
956 | BCM43xx_DMA_TXCTRL_SUSPEND
);
959 void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring
*ring
)
962 bcm43xx_dma_write(ring
, BCM43xx_DMA_TX_CONTROL
,
963 bcm43xx_dma_read(ring
, BCM43xx_DMA_TX_CONTROL
)
964 & ~BCM43xx_DMA_TXCTRL_SUSPEND
);
965 bcm43xx_power_saving_ctl_bits(ring
->bcm
, -1, -1);
968 /* vim: set ts=8 sw=8 sts=8: */