3 Broadcom BCM43xx wireless driver
7 Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING. If not, write to
21 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
22 Boston, MA 02110-1301, USA.
27 #include "bcm43xx_pio.h"
28 #include "bcm43xx_main.h"
30 #include <linux/delay.h>
34 u16
bcm43xx_pio_read(struct bcm43xx_pioqueue
*queue
,
37 return bcm43xx_read16(queue
->bcm
, queue
->mmio_base
+ offset
);
41 void bcm43xx_pio_write(struct bcm43xx_pioqueue
*queue
,
42 u16 offset
, u16 value
)
44 bcm43xx_write16(queue
->bcm
, queue
->mmio_base
+ offset
, value
);
48 void tx_start(struct bcm43xx_pioqueue
*queue
)
50 bcm43xx_pio_write(queue
, BCM43xx_PIO_TXCTL
, BCM43xx_PIO_TXCTL_INIT
);
54 void tx_octet(struct bcm43xx_pioqueue
*queue
,
57 if (queue
->bcm
->current_core
->rev
< 3) {
58 bcm43xx_pio_write(queue
, BCM43xx_PIO_TXDATA
, octet
);
59 bcm43xx_pio_write(queue
, BCM43xx_PIO_TXCTL
, BCM43xx_PIO_TXCTL_WRITEHI
);
61 bcm43xx_pio_write(queue
, BCM43xx_PIO_TXCTL
, BCM43xx_PIO_TXCTL_WRITEHI
);
62 bcm43xx_pio_write(queue
, BCM43xx_PIO_TXDATA
, octet
);
67 void tx_data(struct bcm43xx_pioqueue
*queue
,
74 if (queue
->bcm
->current_core
->rev
< 3) {
75 data
= be16_to_cpu( *((u16
*)packet
) );
76 bcm43xx_pio_write(queue
, BCM43xx_PIO_TXDATA
, data
);
79 bcm43xx_pio_write(queue
, BCM43xx_PIO_TXCTL
,
80 BCM43xx_PIO_TXCTL_WRITELO
| BCM43xx_PIO_TXCTL_WRITEHI
);
81 for ( ; i
< octets
- 1; i
+= 2) {
82 data
= be16_to_cpu( *((u16
*)(packet
+ i
)) );
83 bcm43xx_pio_write(queue
, BCM43xx_PIO_TXDATA
, data
);
86 tx_octet(queue
, packet
[octets
- 1]);
90 void tx_complete(struct bcm43xx_pioqueue
*queue
,
95 if (queue
->bcm
->current_core
->rev
< 3) {
96 data
= be16_to_cpu( *((u16
*)(skb
->data
+ skb
->len
- 2)) );
97 bcm43xx_pio_write(queue
, BCM43xx_PIO_TXDATA
, data
);
98 bcm43xx_pio_write(queue
, BCM43xx_PIO_TXCTL
,
99 BCM43xx_PIO_TXCTL_WRITEHI
| BCM43xx_PIO_TXCTL_COMPLETE
);
101 bcm43xx_pio_write(queue
, BCM43xx_PIO_TXCTL
, BCM43xx_PIO_TXCTL_COMPLETE
);
/* Build the 16-bit TX cookie for a queued packet: upper 4 bits select
 * the PIO controller (derived from queue->mmio_base), lower 12 bits
 * carry the packet's index in the queue's packet cache.
 * NOTE(review): the case bodies assigning the controller-ID nibble,
 * the default case and the return statement are missing from this
 * extract — recover them from the original driver source. */
106 u16
generate_cookie(struct bcm43xx_pioqueue
*queue
,
111 /* We use the upper 4 bits for the PIO
112 * controller ID and the lower 12 bits
113 * for the packet index (in the cache).
115 switch (queue
->mmio_base
) {
118 case BCM43xx_MMIO_PIO1_BASE
:
120 case BCM43xx_MMIO_PIO2_BASE
:
123 case BCM43xx_MMIO_PIO3_BASE
:
126 case BCM43xx_MMIO_PIO4_BASE
:
/* Guard: the packet index must fit in the low 12 bits. */
130 assert(((u16
)packetindex
& 0xF000) == 0x0000);
131 cookie
|= (u16
)packetindex
;
/* Resolve a TX-status cookie back to its PIO queue; *packet receives
 * the address of the matching slot in the queue's packet cache.
 * NOTE(review): the `cookie` parameter, the switch case labels, the
 * default case and the final `return queue;` are missing from this
 * extract — recover them from the original driver source. */
137 struct bcm43xx_pioqueue
* parse_cookie(struct bcm43xx_private
*bcm
,
139 struct bcm43xx_pio_txpacket
**packet
)
141 struct bcm43xx_pioqueue
*queue
= NULL
;
/* Upper nibble of the cookie selects one of the four PIO queues. */
144 switch (cookie
& 0xF000) {
146 queue
= bcm
->current_core
->pio
->queue0
;
149 queue
= bcm
->current_core
->pio
->queue1
;
152 queue
= bcm
->current_core
->pio
->queue2
;
155 queue
= bcm
->current_core
->pio
->queue3
;
/* Lower 12 bits index into the queue's packet cache. */
161 packetindex
= (cookie
& 0x0FFF);
162 assert(packetindex
>= 0 && packetindex
< BCM43xx_PIO_MAXTXPACKETS
);
163 *packet
= queue
->__tx_packets_cache
+ packetindex
;
/* Prepend the hardware TX header to the fragment's skb and push the
 * whole fragment into the device FIFO, then signal completion.
 * NOTE(review): the `struct sk_buff *skb` parameter, the `octets`
 * computation and a probable tx_start() call are missing from this
 * extract — recover them from the original driver source. */
169 void pio_tx_write_fragment(struct bcm43xx_pioqueue
*queue
,
171 struct bcm43xx_pio_txpacket
*packet
)
/* PIO cannot handle paged skbs; the headroom must fit the txhdr. */
175 assert(skb_shinfo(skb
)->nr_frags
== 0);
176 assert(skb_headroom(skb
) >= sizeof(struct bcm43xx_txhdr
));
178 __skb_push(skb
, sizeof(struct bcm43xx_txhdr
));
179 bcm43xx_generate_txhdr(queue
->bcm
,
180 (struct bcm43xx_txhdr
*)skb
->data
,
181 skb
->data
+ sizeof(struct bcm43xx_txhdr
),
182 skb
->len
- sizeof(struct bcm43xx_txhdr
),
183 (packet
->xmitted_frags
== 0),
184 generate_cookie(queue
, pio_txpacket_getindex(packet
)));
188 if (queue
->bcm
->current_core
->rev
< 3) //FIXME: && this is the last packet in the queue.
190 tx_data(queue
, (u8
*)skb
->data
, octets
);
191 tx_complete(queue
, skb
);
/* Transmit the remaining fragments of `packet` to the device, keeping
 * the per-queue accounting (tx_devq_packets / tx_devq_used) in sync,
 * and move the packet onto the queue's txrunning list when done.
 * NOTE(review): the return statements, locals, and the bodies of the
 * "device queue full" bail-outs are missing from this extract —
 * recover them from the original driver source. */
195 int pio_tx_packet(struct bcm43xx_pio_txpacket
*packet
)
197 struct bcm43xx_pioqueue
*queue
= packet
->queue
;
198 struct ieee80211_txb
*txb
= packet
->txb
;
/* Resume from the first fragment not yet transmitted. */
203 for (i
= packet
->xmitted_frags
; i
< txb
->nr_frags
; i
++) {
204 skb
= txb
->fragments
[i
];
/* On-device size includes the hardware TX header. */
206 octets
= (u16
)skb
->len
+ sizeof(struct bcm43xx_txhdr
);
208 assert(queue
->tx_devq_size
>= octets
);
209 assert(queue
->tx_devq_packets
<= BCM43xx_PIO_MAXTXDEVQPACKETS
);
210 assert(queue
->tx_devq_used
<= queue
->tx_devq_size
);
211 /* Check if there is sufficient free space on the device
212 * TX queue. If not, return and let the TX-work-handler
215 if (queue
->tx_devq_packets
== BCM43xx_PIO_MAXTXDEVQPACKETS
)
217 if (queue
->tx_devq_used
+ octets
> queue
->tx_devq_size
)
219 /* Now poke the device. */
220 pio_tx_write_fragment(queue
, skb
, packet
);
222 /* Account for the packet size.
223 * (We must not overflow the device TX queue)
225 queue
->tx_devq_packets
++;
226 queue
->tx_devq_used
+= octets
;
228 assert(packet
->xmitted_frags
<= packet
->txb
->nr_frags
);
229 packet
->xmitted_frags
++;
230 packet
->xmitted_octets
+= octets
;
232 list_move_tail(&packet
->list
, &queue
->txrunning
);
/* Release a TX packet: free its ieee80211 txb, put the slot back on
 * the queue's free list, and give back its device-queue accounting.
 * NOTE(review): at least one statement between the txb free and the
 * list_move (presumably clearing packet->txb) is missing from this
 * extract — confirm against the original driver source. */
237 static void free_txpacket(struct bcm43xx_pio_txpacket
*packet
)
239 struct bcm43xx_pioqueue
*queue
= packet
->queue
;
241 ieee80211_txb_free(packet
->txb
);
243 list_move(&packet
->list
, &packet
->queue
->txfree
);
/* Undo the accounting done when the packet was transmitted. */
245 assert(queue
->tx_devq_used
>= packet
->xmitted_octets
);
246 queue
->tx_devq_used
-= packet
->xmitted_octets
;
247 assert(queue
->tx_devq_packets
>= packet
->xmitted_frags
);
248 queue
->tx_devq_packets
-= packet
->xmitted_frags
;
/* Workqueue handler: under the queue's TX lock, walk the pending
 * txqueue and try to push each packet to the device, dropping packets
 * whose fragments can never fit the device queue.
 * NOTE(review): loop-control statements (continue/break), the locals
 * and the handling of pio_tx_packet()'s return value are missing from
 * this extract — recover them from the original driver source. */
251 static void txwork_handler(void *d
)
253 struct bcm43xx_pioqueue
*queue
= d
;
255 struct bcm43xx_pio_txpacket
*packet
, *tmp_packet
;
258 spin_lock_irqsave(&queue
->txlock
, flags
);
259 list_for_each_entry_safe(packet
, tmp_packet
, &queue
->txqueue
, list
) {
260 assert(packet
->xmitted_frags
< packet
->txb
->nr_frags
);
261 if (packet
->xmitted_frags
== 0) {
265 /* Check if the device queue is big
266 * enough for every fragment. If not, drop the
269 for (i
= 0; i
< packet
->txb
->nr_frags
; i
++) {
270 skb
= packet
->txb
->fragments
[i
];
271 if (unlikely(skb
->len
> queue
->tx_devq_size
)) {
272 dprintkl(KERN_ERR PFX
"PIO TX device queue too small. "
273 "Dropping packet...\n");
274 free_txpacket(packet
);
279 /* Now try to transmit the packet.
280 * This may not completely succeed.
282 err
= pio_tx_packet(packet
);
288 spin_unlock_irqrestore(&queue
->txlock
, flags
);
291 static void setup_txqueues(struct bcm43xx_pioqueue
*queue
)
293 struct bcm43xx_pio_txpacket
*packet
;
296 for (i
= 0; i
< BCM43xx_PIO_MAXTXPACKETS
; i
++) {
297 packet
= queue
->__tx_packets_cache
+ i
;
299 packet
->queue
= queue
;
300 INIT_LIST_HEAD(&packet
->list
);
302 list_add(&packet
->list
, &queue
->txfree
);
/* Allocate and initialize one PIO queue for the MMIO window at
 * pio_mmio_base: set up its lists/lock/workqueue item, enable
 * register byte-swapping on the core, probe the device TX buffer size
 * and populate the packet cache.
 * NOTE(review): the allocation-failure check, the `queue->bcm = bcm;`
 * assignment, the error labels/goto paths and the final return are
 * missing from this extract — recover them from the original driver
 * source. */
307 struct bcm43xx_pioqueue
* bcm43xx_setup_pioqueue(struct bcm43xx_private
*bcm
,
310 struct bcm43xx_pioqueue
*queue
;
314 queue
= kmalloc(sizeof(*queue
), GFP_KERNEL
);
317 memset(queue
, 0, sizeof(*queue
));
320 queue
->mmio_base
= pio_mmio_base
;
322 INIT_LIST_HEAD(&queue
->txfree
);
323 INIT_LIST_HEAD(&queue
->txqueue
);
324 INIT_LIST_HEAD(&queue
->txrunning
);
325 spin_lock_init(&queue
->txlock
);
326 INIT_WORK(&queue
->txwork
, txwork_handler
, queue
);
/* Make the core byte-swap PIO register transfers for us. */
328 value
= bcm43xx_read32(bcm
, BCM43xx_MMIO_STATUS_BITFIELD
);
329 value
|= BCM43xx_SBF_XFER_REG_BYTESWAP
;
330 bcm43xx_write32(bcm
, BCM43xx_MMIO_STATUS_BITFIELD
, value
);
/* Probe the device-side TX buffer size and reserve the adjust margin. */
332 qsize
= bcm43xx_read16(bcm
, queue
->mmio_base
+ BCM43xx_PIO_TXQBUFSIZE
);
333 if (qsize
<= BCM43xx_PIO_TXQADJUST
) {
334 printk(KERN_ERR PFX
"PIO tx queue too small (%u)\n", qsize
);
337 qsize
-= BCM43xx_PIO_TXQADJUST
;
338 queue
->tx_devq_size
= qsize
;
340 setup_txqueues(queue
);
351 static void cancel_transfers(struct bcm43xx_pioqueue
*queue
)
353 struct bcm43xx_pio_txpacket
*packet
, *tmp_packet
;
355 netif_tx_disable(queue
->bcm
->net_dev
);
356 assert(queue
->bcm
->shutting_down
);
357 cancel_delayed_work(&queue
->txwork
);
358 flush_workqueue(queue
->bcm
->workqueue
);
360 list_for_each_entry_safe(packet
, tmp_packet
, &queue
->txrunning
, list
)
361 free_txpacket(packet
);
362 list_for_each_entry_safe(packet
, tmp_packet
, &queue
->txqueue
, list
)
363 free_txpacket(packet
);
/* Destroy one PIO queue.
 * NOTE(review): lines around the visible body are missing from this
 * extract — presumably a NULL-queue early return before the call and a
 * kfree(queue) after it, since callers pass possibly-NULL pointers and
 * queues are kmalloc'd in bcm43xx_setup_pioqueue. Confirm against the
 * original driver source. */
366 static void bcm43xx_destroy_pioqueue(struct bcm43xx_pioqueue
*queue
)
371 cancel_transfers(queue
);
375 void bcm43xx_pio_free(struct bcm43xx_private
*bcm
)
377 bcm43xx_destroy_pioqueue(bcm
->current_core
->pio
->queue3
);
378 bcm
->current_core
->pio
->queue3
= NULL
;
379 bcm43xx_destroy_pioqueue(bcm
->current_core
->pio
->queue2
);
380 bcm
->current_core
->pio
->queue2
= NULL
;
381 bcm43xx_destroy_pioqueue(bcm
->current_core
->pio
->queue1
);
382 bcm
->current_core
->pio
->queue1
= NULL
;
383 bcm43xx_destroy_pioqueue(bcm
->current_core
->pio
->queue0
);
384 bcm
->current_core
->pio
->queue0
= NULL
;
/* Create the four PIO queues of the current core (PIO1..PIO4 MMIO
 * bases map to queue0..queue3) and, on old cores (rev < 3), arm the
 * PIO workaround IRQ bit. The trailing destroy/NULL pairs are the
 * error-unwind path for partially-completed setup.
 * NOTE(review): the per-queue allocation-failure checks, the goto
 * labels, `err` handling and the return statements are missing from
 * this extract — recover them from the original driver source. */
387 int bcm43xx_pio_init(struct bcm43xx_private
*bcm
)
389 struct bcm43xx_pioqueue
*queue
;
392 queue
= bcm43xx_setup_pioqueue(bcm
, BCM43xx_MMIO_PIO1_BASE
);
395 bcm
->current_core
->pio
->queue0
= queue
;
397 queue
= bcm43xx_setup_pioqueue(bcm
, BCM43xx_MMIO_PIO2_BASE
);
400 bcm
->current_core
->pio
->queue1
= queue
;
402 queue
= bcm43xx_setup_pioqueue(bcm
, BCM43xx_MMIO_PIO3_BASE
);
405 bcm
->current_core
->pio
->queue2
= queue
;
407 queue
= bcm43xx_setup_pioqueue(bcm
, BCM43xx_MMIO_PIO4_BASE
);
410 bcm
->current_core
->pio
->queue3
= queue
;
/* Old cores need the PIO workaround interrupt enabled. */
412 if (bcm
->current_core
->rev
< 3)
413 bcm
->irq_savedstate
|= BCM43xx_IRQ_PIO_WORKAROUND
;
415 dprintk(KERN_INFO PFX
"PIO initialized\n");
/* Error-unwind: destroy queues created so far, newest first. */
421 bcm43xx_destroy_pioqueue(bcm
->current_core
->pio
->queue2
);
422 bcm
->current_core
->pio
->queue2
= NULL
;
424 bcm43xx_destroy_pioqueue(bcm
->current_core
->pio
->queue1
);
425 bcm
->current_core
->pio
->queue1
= NULL
;
427 bcm43xx_destroy_pioqueue(bcm
->current_core
->pio
->queue0
);
428 bcm
->current_core
->pio
->queue0
= NULL
;
/* Queue an ieee80211 txb for transmission: claim a free packet slot
 * under the TX lock, move it onto txqueue, stop the netdev queue if the
 * free list is exhausted, and kick the TX work handler.
 * NOTE(review): the locals, the `packet->txb = txb;` assignment and
 * the return statements (including the early bail-out when the device
 * reports TXCTL_SUSPEND) are missing from this extract — recover them
 * from the original driver source. */
433 int pio_transfer_txb(struct bcm43xx_pioqueue
*queue
,
434 struct ieee80211_txb
*txb
)
436 struct bcm43xx_pio_txpacket
*packet
;
440 spin_lock_irqsave(&queue
->txlock
, flags
);
441 assert(!queue
->tx_suspended
);
442 assert(!list_empty(&queue
->txfree
));
/* Bail out early if the device itself has suspended TX. */
444 tmp
= bcm43xx_pio_read(queue
, BCM43xx_PIO_TXCTL
);
445 if (tmp
& BCM43xx_PIO_TXCTL_SUSPEND
) {
446 spin_unlock_irqrestore(&queue
->txlock
, flags
);
450 packet
= list_entry(queue
->txfree
.next
, struct bcm43xx_pio_txpacket
, list
);
453 list_move_tail(&packet
->list
, &queue
->txqueue
);
454 packet
->xmitted_octets
= 0;
455 packet
->xmitted_frags
= 0;
457 /* Suspend TX, if we are out of packets in the "free" queue. */
458 if (unlikely(list_empty(&queue
->txfree
))) {
459 netif_stop_queue(queue
->bcm
->net_dev
);
460 queue
->tx_suspended
= 1;
463 spin_unlock_irqrestore(&queue
->txlock
, flags
);
464 queue_work(queue
->bcm
->workqueue
, &queue
->txwork
);
469 int fastcall
bcm43xx_pio_transfer_txb(struct bcm43xx_private
*bcm
,
470 struct ieee80211_txb
*txb
)
472 return pio_transfer_txb(bcm
->current_core
->pio
->queue1
, txb
);
/* Handle a hardware transmit-status report: map its cookie back to the
 * (queue, packet) pair, release the packet, wake the netdev queue if
 * TX had been suspended for lack of free slots, and restart the TX
 * work handler if more packets are pending.
 * NOTE(review): the return type line, locals and the queue_work()
 * argument line are missing from this extract — recover them from the
 * original driver source. */
476 bcm43xx_pio_handle_xmitstatus(struct bcm43xx_private
*bcm
,
477 struct bcm43xx_xmitstatus
*status
)
479 struct bcm43xx_pioqueue
*queue
;
480 struct bcm43xx_pio_txpacket
*packet
;
483 queue
= parse_cookie(bcm
, status
->cookie
, &packet
);
485 spin_lock_irqsave(&queue
->txlock
, flags
);
486 free_txpacket(packet
);
/* A slot just freed up; un-suspend TX if we had stalled. */
487 if (unlikely(queue
->tx_suspended
)) {
488 queue
->tx_suspended
= 0;
489 netif_wake_queue(queue
->bcm
->net_dev
);
492 /* If there are packets on the txqueue,
493 * start the work handler again.
495 if (!list_empty(&queue
->txqueue
)) {
496 queue_work(queue
->bcm
->workqueue
,
499 spin_unlock_irqrestore(&queue
->txlock
, flags
);
502 static void pio_rx_error(struct bcm43xx_pioqueue
*queue
,
505 printk("PIO RX error: %s\n", error
);
506 bcm43xx_pio_write(queue
, BCM43xx_PIO_RXCTL
, BCM43xx_PIO_RXCTL_READY
);
/* Receive one frame over PIO: ack DATAAVAILABLE, poll for READY, read
 * the length word and the RX-header preamble, validate the frame,
 * route PIO4 traffic to the transmit-status handler, then copy the
 * payload word-by-word into a freshly allocated skb and pass it up.
 * NOTE(review): the return type line, several locals, the return/drop
 * statements, a probable skb_put() and error handling after
 * bcm43xx_rx() are missing from this extract — recover them from the
 * original driver source. */
510 bcm43xx_pio_rx(struct bcm43xx_pioqueue
*queue
)
512 u16 preamble
[21] = { 0 };
513 struct bcm43xx_rxhdr
*rxhdr
;
517 int preamble_readwords
;
520 tmp
= bcm43xx_pio_read(queue
, BCM43xx_PIO_RXCTL
);
521 if (!(tmp
& BCM43xx_PIO_RXCTL_DATAAVAILABLE
)) {
522 dprintkl(KERN_ERR PFX
"PIO RX: No data available\n");
/* Ack the data-available bit, then poll (bounded) for READY. */
525 bcm43xx_pio_write(queue
, BCM43xx_PIO_RXCTL
, BCM43xx_PIO_RXCTL_DATAAVAILABLE
);
527 for (i
= 0; i
< 10; i
++) {
528 tmp
= bcm43xx_pio_read(queue
, BCM43xx_PIO_RXCTL
);
529 if (tmp
& BCM43xx_PIO_RXCTL_READY
)
533 dprintkl(KERN_ERR PFX
"PIO RX timed out\n");
/* First RX word is the frame length; sanity-check it. */
537 len
= le16_to_cpu(bcm43xx_pio_read(queue
, BCM43xx_PIO_RXDATA
));
538 if (unlikely(len
> 0x700)) {
539 pio_rx_error(queue
, "len > 0x700");
542 if (unlikely(len
== 0 && queue
->mmio_base
!= BCM43xx_MMIO_PIO4_BASE
)) {
543 pio_rx_error(queue
, "len == 0");
/* Read the RX-header preamble; PIO4's status header is shorter. */
546 preamble
[0] = cpu_to_le16(len
);
547 if (queue
->mmio_base
== BCM43xx_MMIO_PIO4_BASE
)
548 preamble_readwords
= 14 / sizeof(u16
);
550 preamble_readwords
= 18 / sizeof(u16
);
551 for (i
= 0; i
< preamble_readwords
; i
++) {
552 tmp
= bcm43xx_pio_read(queue
, BCM43xx_PIO_RXDATA
);
553 preamble
[i
+ 1] = cpu_to_be16(tmp
);
555 rxhdr
= (struct bcm43xx_rxhdr
*)preamble
;
556 if (unlikely(rxhdr
->flags2
& BCM43xx_RXHDR_FLAGS2_INVALIDFRAME
)) {
557 pio_rx_error(queue
, "invalid frame");
558 if (queue
->mmio_base
== BCM43xx_MMIO_PIO1_BASE
) {
559 for (i
= 0; i
< 15; i
++)
560 bcm43xx_pio_read(queue
, BCM43xx_PIO_RXDATA
); /* dummy read. */
/* PIO4 carries transmit-status reports, not data frames. */
566 if (queue
->mmio_base
== BCM43xx_MMIO_PIO4_BASE
) {
567 bcm43xx_rx_transmitstatus(queue
->bcm
,
568 (const struct bcm43xx_hwxmitstatus
*)(preamble
+ 1));
572 skb
= dev_alloc_skb(len
);
573 if (unlikely(!skb
)) {
574 pio_rx_error(queue
, "out of memory");
/* Copy the payload 16 bits at a time. */
578 for (i
= 0; i
< len
- 1; i
+= 2) {
579 tmp
= cpu_to_be16(bcm43xx_pio_read(queue
, BCM43xx_PIO_RXDATA
));
580 *((u16
*)(skb
->data
+ i
)) = tmp
;
/* Odd length: the last word straddles the buffer edges. */
583 tmp
= cpu_to_be16(bcm43xx_pio_read(queue
, BCM43xx_PIO_RXDATA
));
584 skb
->data
[len
- 1] = (tmp
& 0x00FF);
585 skb
->data
[0] = (tmp
& 0xFF00) >> 8;
587 err
= bcm43xx_rx(queue
->bcm
, skb
, rxhdr
);
589 dev_kfree_skb_irq(skb
);
592 /* vim: set ts=8 sw=8 sts=8: */