/*
 * This file is part of wl12xx
 *
 * Copyright (c) 1998-2007 Texas Instruments Incorporated
 * Copyright (C) 2008 Nokia Corporation
 *
 * Contact: Kalle Valo <kalle.valo@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>

/* driver-local headers; names assumed from the wl12xx driver layout */
#include "wl12xx.h"
#include "reg.h"
#include "spi.h"
#include "tx.h"
#include "ps.h"
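
/*
 * The firmware exposes a double-buffered TX packet ring: the host keeps a
 * "data in" counter and the firmware reports a "data out" counter.  The
 * helper below compares the two (accounting for counter wrap-around) and
 * reports whether both chunks of the double buffer are currently in use.
 */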
static bool wl12xx_tx_double_buffer_busy(struct wl12xx *wl, u32 data_out_count)
{
	int used, data_in_count;

	data_in_count = wl->data_in_count;

	if (data_in_count < data_out_count)
		/* data_in_count has wrapped */
		data_in_count += TX_STATUS_DATA_OUT_COUNT_MASK + 1;

	used = data_in_count - data_out_count;

	WARN_ON(used > DP_TX_PACKET_RING_CHUNK_NUM);

	if (used >= DP_TX_PACKET_RING_CHUNK_NUM)
		return true;
	else
		return false;
}
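
/*
 * Read the firmware TX control word and check whether the double buffer
 * has room for another packet.  Returns 0 when a chunk is free and
 * -EBUSY when both chunks are still owned by the firmware.
 */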
static int wl12xx_tx_path_status(struct wl12xx *wl)
{
	u32 status, addr, data_out_count;
	bool busy;

	addr = wl->data_path->tx_control_addr;
	status = wl12xx_mem_read32(wl, addr);
	data_out_count = status & TX_STATUS_DATA_OUT_COUNT_MASK;
	busy = wl12xx_tx_double_buffer_busy(wl, data_out_count);

	if (busy)
		return -EBUSY;

	return 0;
}
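
/*
 * Reserve a slot in wl->tx_frames for this skb.  The slot index doubles
 * as the TX descriptor id, which the TX-complete path later uses to look
 * the frame up again.
 */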
static int wl12xx_tx_id(struct wl12xx *wl, struct sk_buff *skb)
{
	int i;

	for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
		if (wl->tx_frames[i] == NULL) {
			wl->tx_frames[i] = skb;
			return i;
		}

	return -EBUSY;
}
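
/*
 * Translate mac80211 TX control flags and the 802.11 frame control field
 * into the hardware TX descriptor control bits (ack policy, TX-complete
 * reporting and QoS).
 */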
static void wl12xx_tx_control(struct tx_double_buffer_desc *tx_hdr,
			      struct ieee80211_tx_info *control, u16 fc)
{
	*(u16 *)&tx_hdr->control = 0;

	tx_hdr->control.rate_policy = 0;

	tx_hdr->control.packet_type = 0;

	if (control->flags & IEEE80211_TX_CTL_NO_ACK)
		tx_hdr->control.ack_policy = 1;

	tx_hdr->control.tx_complete = 1;

	if ((fc & IEEE80211_FTYPE_DATA) &&
	    ((fc & IEEE80211_STYPE_QOS_DATA) ||
	     (fc & IEEE80211_STYPE_QOS_NULLFUNC)))
		tx_hdr->control.qos = 1;
}
/* RSN + MIC = 8 + 8 = 16 bytes (worst case - AES). */
#define MAX_MSDU_SECURITY_LENGTH      16
#define MAX_MPDU_SECURITY_LENGTH      16
#define WLAN_QOS_HDR_LEN              26
#define MAX_MPDU_HEADER_AND_SECURITY  (MAX_MPDU_SECURITY_LENGTH + \
				       WLAN_QOS_HDR_LEN)
#define HW_BLOCK_SIZE                 252
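
/*
 * Estimate how many HW_BLOCK_SIZE memory blocks the firmware needs for
 * this frame, taking the fragmentation threshold and the worst-case
 * security overhead into account, and store the result in the descriptor.
 */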
static void wl12xx_tx_frag_block_num(struct tx_double_buffer_desc *tx_hdr)
{
	u16 payload_len, frag_threshold, mem_blocks;
	u16 num_mpdus, mem_blocks_per_frag;

	frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
	tx_hdr->frag_threshold = cpu_to_le16(frag_threshold);

	payload_len = tx_hdr->length + MAX_MSDU_SECURITY_LENGTH;

	if (payload_len > frag_threshold) {
		mem_blocks_per_frag =
			((frag_threshold + MAX_MPDU_HEADER_AND_SECURITY) /
			 HW_BLOCK_SIZE) + 1;
		num_mpdus = payload_len / frag_threshold;
		mem_blocks = num_mpdus * mem_blocks_per_frag;
		payload_len -= num_mpdus * frag_threshold;
		num_mpdus++;
	} else {
		mem_blocks_per_frag = 0;
		mem_blocks = 0;
		num_mpdus = 1;
	}

	mem_blocks += (payload_len / HW_BLOCK_SIZE) + 1;

	if (num_mpdus > 1)
		mem_blocks += min(num_mpdus, mem_blocks_per_frag);

	tx_hdr->num_mem_blocks = mem_blocks;
}
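
/*
 * Push the double-buffer TX descriptor in front of the frame and fill it
 * in: length, rate, expiry time, descriptor id, queue, control bits and
 * the memory-block estimate.
 */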
static int wl12xx_tx_fill_hdr(struct wl12xx *wl, struct sk_buff *skb,
			      struct ieee80211_tx_info *control)
{
	struct tx_double_buffer_desc *tx_hdr;
	struct ieee80211_rate *rate;
	u16 fc;
	int id;

	if (!skb)
		return -EINVAL;

	id = wl12xx_tx_id(wl, skb);
	if (id < 0)
		return id;

	fc = *(u16 *)skb->data;
	tx_hdr = (struct tx_double_buffer_desc *) skb_push(skb,
							   sizeof(*tx_hdr));

	tx_hdr->length = cpu_to_le16(skb->len - sizeof(*tx_hdr));
	rate = ieee80211_get_tx_rate(wl->hw, control);
	tx_hdr->rate = cpu_to_le16(rate->hw_value);
	tx_hdr->expiry_time = cpu_to_le32(1 << 16);
	tx_hdr->id = id;

	/* FIXME: how to get the correct queue id? */
	tx_hdr->xmit_queue = 0;

	wl12xx_tx_control(tx_hdr, control, fc);
	wl12xx_tx_frag_block_num(tx_hdr);

	return 0;
}
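
/*
 * Besides copying the frame, this also reserves the extra TKIP IV space
 * expected by the firmware, works around unaligned skbs (the DMA needs a
 * 4-byte boundary) and picks the ring chunk based on the data_in parity.
 */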
/* We copy the packet to the target */
static int wl12xx_tx_send_packet(struct wl12xx *wl, struct sk_buff *skb,
				 struct ieee80211_tx_info *control)
{
	struct tx_double_buffer_desc *tx_hdr;
	int len;
	u32 addr;

	if (!skb)
		return -EINVAL;

	tx_hdr = (struct tx_double_buffer_desc *) skb->data;

	if (control->control.hw_key &&
	    control->control.hw_key->alg == ALG_TKIP) {
		int hdrlen;
		u16 fc;
		u8 *pos;

		fc = *(u16 *)(skb->data + sizeof(*tx_hdr));
		tx_hdr->length += WL12XX_TKIP_IV_SPACE;

		hdrlen = ieee80211_hdrlen(fc);

		pos = skb_push(skb, WL12XX_TKIP_IV_SPACE);
		memmove(pos, pos + WL12XX_TKIP_IV_SPACE,
			sizeof(*tx_hdr) + hdrlen);
	}

	/* Revisit. This is a workaround for getting non-aligned packets.
	   This happens at least with EAPOL packets from the user space.
	   Our DMA requires packets to be aligned on a 4-byte boundary.
	*/
	if (unlikely((long)skb->data & 0x03)) {
		int offset = (4 - (long)skb->data) & 0x03;
		wl12xx_debug(DEBUG_TX, "skb offset %d", offset);

		/* check whether the current skb can be used */
		if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
			unsigned char *src = skb->data;

			/* align the buffer on a 4-byte boundary */
			skb_reserve(skb, offset);
			memmove(skb->data, src, skb->len);
		} else {
			wl12xx_info("No handler, fixme!");
			return -EINVAL;
		}
	}

	/* Our skb->data at this point includes the HW header */
	len = WL12XX_TX_ALIGN(skb->len);

	if (wl->data_in_count & 0x1)
		addr = wl->data_path->tx_packet_ring_addr +
			wl->data_path->tx_packet_ring_chunk_size;
	else
		addr = wl->data_path->tx_packet_ring_addr;

	wl12xx_spi_mem_write(wl, addr, skb->data, len);

	wl12xx_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x",
		     tx_hdr->id, skb, tx_hdr->length, tx_hdr->rate);

	return 0;
}
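
/*
 * Kick the firmware: trigger the TX processor that matches the chunk we
 * just filled (PROC0/PROC1 by data_in_count parity) and advance the
 * "data in" counter.
 */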
static void wl12xx_tx_trigger(struct wl12xx *wl)
{
	u32 data, addr;

	if (wl->data_in_count & 0x1) {
		addr = ACX_REG_INTERRUPT_TRIG_H;
		data = INTR_TRIG_TX_PROC1;
	} else {
		addr = ACX_REG_INTERRUPT_TRIG;
		data = INTR_TRIG_TX_PROC0;
	}

	wl12xx_reg_write32(wl, addr, data);

	/* Bumping data in */
	wl->data_in_count = (wl->data_in_count + 1) &
		TX_STATUS_DATA_OUT_COUNT_MASK;
}
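
/*
 * Full per-frame TX path: program the default key if needed, make sure
 * the double buffer has room, build the descriptor, copy the frame to
 * the target and trigger the firmware.
 */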
/* caller must hold wl->mutex */
static int wl12xx_tx_frame(struct wl12xx *wl, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info;
	int ret = 0;
	u8 idx;

	info = IEEE80211_SKB_CB(skb);

	if (info->control.hw_key) {
		idx = info->control.hw_key->hw_key_idx;
		if (unlikely(wl->default_key != idx)) {
			ret = wl12xx_acx_default_key(wl, idx);
			if (ret < 0)
				return ret;
		}
	}

	ret = wl12xx_tx_path_status(wl);
	if (ret < 0)
		return ret;

	ret = wl12xx_tx_fill_hdr(wl, skb, info);
	if (ret < 0)
		return ret;

	ret = wl12xx_tx_send_packet(wl, skb, info);
	if (ret < 0)
		return ret;

	wl12xx_tx_trigger(wl);

	return ret;
}
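
/*
 * Workqueue handler that drains wl->tx_queue.  It wakes the chip from ELP
 * for the first frame and stops the mac80211 queues when the firmware
 * buffer fills up, requeueing the frame that did not fit.
 */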
void wl12xx_tx_work(struct work_struct *work)
{
	struct wl12xx *wl = container_of(work, struct wl12xx, tx_work);
	struct sk_buff *skb;
	bool woken_up = false;
	int ret;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL12XX_STATE_OFF))
		goto out;

	while ((skb = skb_dequeue(&wl->tx_queue))) {
		if (!woken_up) {
			wl12xx_ps_elp_wakeup(wl);
			woken_up = true;
		}

		ret = wl12xx_tx_frame(wl, skb);
		if (ret == -EBUSY) {
			/* firmware buffer is full, stop queues */
			wl12xx_debug(DEBUG_TX, "tx_work: fw buffer full, "
				     "stop queues");
			ieee80211_stop_queues(wl->hw);
			wl->tx_queue_stopped = true;
			skb_queue_head(&wl->tx_queue, skb);
			goto out;
		} else if (ret < 0) {
			dev_kfree_skb(skb);
			goto out;
		}
	}

out:
	if (woken_up)
		wl12xx_ps_elp_sleep(wl);

	mutex_unlock(&wl->mutex);
}
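
/*
 * Build a short, human-readable summary of the TX result status bits for
 * the debug log; each set error bit contributes one mnemonic character.
 */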
static const char *wl12xx_tx_parse_status(u8 status)
{
	/* 8 bit status field, one character per bit plus null */
	static char buf[9];
	int len = 0;

	memset(buf, 0, sizeof(buf));

	if (status & TX_DMA_ERROR)
		buf[len++] = 'm';
	if (status & TX_DISABLED)
		buf[len++] = 'd';
	if (status & TX_RETRY_EXCEEDED)
		buf[len++] = 'r';
	if (status & TX_TIMEOUT)
		buf[len++] = 't';
	if (status & TX_KEY_NOT_FOUND)
		buf[len++] = 'k';
	if (status & TX_ENCRYPT_FAIL)
		buf[len++] = 'e';
	if (status & TX_UNAVAILABLE_PRIORITY)
		buf[len++] = 'p';

	/* bit 0 is unused apparently */

	return buf;
}
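
/*
 * Handle one TX result: record ACK status and retry counts, strip the
 * private TX header (and TKIP IV space) again, hand the skb back to
 * mac80211 and restart a stopped queue if possible.
 */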
static void wl12xx_tx_packet_cb(struct wl12xx *wl,
				struct tx_result *result)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int hdrlen, ret;
	u8 *frame;

	skb = wl->tx_frames[result->id];
	if (skb == NULL) {
		wl12xx_error("SKB for packet %d is NULL", result->id);
		return;
	}

	info = IEEE80211_SKB_CB(skb);

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
	    (result->status == TX_SUCCESS))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.rates[0].count = result->ack_failures + 1;
	wl->stats.retry_count += result->ack_failures;

	/*
	 * We have to remove our private TX header before pushing
	 * the skb back to mac80211.
	 */
	frame = skb_pull(skb, sizeof(struct tx_double_buffer_desc));
	if (info->control.hw_key &&
	    info->control.hw_key->alg == ALG_TKIP) {
		hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(frame + WL12XX_TKIP_IV_SPACE, frame, hdrlen);
		skb_pull(skb, WL12XX_TKIP_IV_SPACE);
	}

	wl12xx_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x (%s)",
		     result->id, skb, result->ack_failures, result->rate,
		     result->status, wl12xx_tx_parse_status(result->status));

	ieee80211_tx_status(wl->hw, skb);

	wl->tx_frames[result->id] = NULL;

	if (wl->tx_queue_stopped) {
		wl12xx_debug(DEBUG_TX, "cb: queue was stopped");

		skb = skb_dequeue(&wl->tx_queue);

		/* The skb can be NULL because tx_work might have been
		   scheduled before the queue was stopped making the
		   tx queue empty */
		if (skb) {
			ret = wl12xx_tx_frame(wl, skb);
			if (ret == -EBUSY) {
				/* firmware buffer is still full */
				wl12xx_debug(DEBUG_TX, "cb: fw buffer "
					     "still full");
				skb_queue_head(&wl->tx_queue, skb);
				return;
			} else if (ret < 0) {
				dev_kfree_skb(skb);
				return;
			}
		}

		wl12xx_debug(DEBUG_TX, "cb: waking queues");
		ieee80211_wake_queues(wl->hw);
		wl->tx_queue_stopped = false;
	}
}
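
/*
 * TX results live in a small ring of tx_result entries in target memory.
 * Each processed entry is acknowledged by clearing its done_1/done_2
 * flags and writing the cleared entries back, in one or two chunks when
 * the ring index has wrapped.
 */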
/* Called upon reception of a TX complete interrupt */
void wl12xx_tx_complete(struct wl12xx *wl)
{
	int i, result_index, num_complete = 0;
	struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;

	if (unlikely(wl->state != WL12XX_STATE_ON))
		return;

	/* First we read the result */
	wl12xx_spi_mem_read(wl, wl->data_path->tx_complete_addr,
			    result, sizeof(result));

	result_index = wl->next_tx_complete;

	for (i = 0; i < ARRAY_SIZE(result); i++) {
		result_ptr = &result[result_index];

		if (result_ptr->done_1 == 1 &&
		    result_ptr->done_2 == 1) {
			wl12xx_tx_packet_cb(wl, result_ptr);

			result_ptr->done_1 = 0;
			result_ptr->done_2 = 0;

			result_index = (result_index + 1) &
				(FW_TX_CMPLT_BLOCK_SIZE - 1);
			num_complete++;
		} else {
			break;
		}
	}

	/* Every completed frame needs to be acknowledged */
	if (num_complete) {
		/*
		 * If we've wrapped, we have to clear
		 * the results in 2 steps.
		 */
		if (result_index > wl->next_tx_complete) {
			/* Only 1 write is needed */
			wl12xx_spi_mem_write(wl,
					     wl->data_path->tx_complete_addr +
					     (wl->next_tx_complete *
					      sizeof(struct tx_result)),
					     &result[wl->next_tx_complete],
					     num_complete *
					     sizeof(struct tx_result));

		} else if (result_index < wl->next_tx_complete) {
			/* 2 writes are needed */
			wl12xx_spi_mem_write(wl,
					     wl->data_path->tx_complete_addr +
					     (wl->next_tx_complete *
					      sizeof(struct tx_result)),
					     &result[wl->next_tx_complete],
					     (FW_TX_CMPLT_BLOCK_SIZE -
					      wl->next_tx_complete) *
					     sizeof(struct tx_result));

			wl12xx_spi_mem_write(wl,
					     wl->data_path->tx_complete_addr,
					     result,
					     (num_complete -
					      FW_TX_CMPLT_BLOCK_SIZE +
					      wl->next_tx_complete) *
					     sizeof(struct tx_result));

		} else {
			/* We have to write the whole array */
			wl12xx_spi_mem_write(wl,
					     wl->data_path->tx_complete_addr,
					     result,
					     FW_TX_CMPLT_BLOCK_SIZE *
					     sizeof(struct tx_result));
		}
	}

	wl->next_tx_complete = result_index;
}
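
/*
 * Drop everything that is still queued or in flight, reporting TX status
 * back to mac80211 only for frames that explicitly requested it.
 */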
/* caller must hold wl->mutex */
void wl12xx_tx_flush(struct wl12xx *wl)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

/*	control->flags = 0; FIXME */

	while ((skb = skb_dequeue(&wl->tx_queue))) {
		info = IEEE80211_SKB_CB(skb);

		wl12xx_debug(DEBUG_TX, "flushing skb 0x%p", skb);

		if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
			continue;

		ieee80211_tx_status(wl->hw, skb);
	}

	for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
		if (wl->tx_frames[i] != NULL) {
			skb = wl->tx_frames[i];
			info = IEEE80211_SKB_CB(skb);

			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
				continue;

			ieee80211_tx_status(wl->hw, skb);
			wl->tx_frames[i] = NULL;
		}
}