/******************************************************************************
 *
 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <net/mac80211.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
/*
 * mac80211 queues, ACs, hardware queues, FIFOs.
 *
 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
 *
 * Mac80211 uses the following numbers, which we get from it
 * by way of skb_get_queue_mapping(skb):
 *
 *	VO	0
 *	VI	1
 *	BE	2
 *	BK	3
 *
 * Regular (not A-MPDU) frames are put into hardware queues corresponding
 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
 * own queue per aggregation session (RA/TID combination); such queues are
 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
 * order to map frames to the right queue, we also need an AC->hw queue
 * mapping. This is implemented here.
 *
 * Due to the way hw queues are set up (by the hw specific modules like
 * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
 * mapping.
 */
static const u8 tid_to_ac[] = {
	/* this matches the mac80211 numbers */
	2, 3, 3, 2, 1, 1, 0, 0
};

static const u8 ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};
static inline int get_fifo_from_ac(u8 ac)
{
	return ac_to_fifo[ac];
}
static inline int get_ac_from_tid(u16 tid)
{
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return tid_to_ac[tid];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}
static inline int get_fifo_from_tid(u16 tid)
{
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return get_fifo_from_ac(tid_to_ac[tid]);

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}
/**
 * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
				    struct iwl_tx_queue *txq,
				    u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != priv->cmd_queue) {
		sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
		sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;

		switch (sec_ctl & TX_CMD_SEC_MSK) {
		case TX_CMD_SEC_CCM:
			len += CCMP_MIC_LEN;
			break;
		case TX_CMD_SEC_TKIP:
			len += TKIP_ICV_LEN;
			break;
		case TX_CMD_SEC_WEP:
			len += WEP_IV_LEN + WEP_ICV_LEN;
			break;
		}
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
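/**
 * iwlagn_txq_inval_byte_cnt_tbl - Invalidate the byte-count entry being reclaimed
 *
 * Overwrite the scheduler byte-count entry at the queue's read pointer with a
 * minimal (1-byte) value so the scheduler no longer accounts for the frame.
 */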
void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
				   struct iwl_tx_queue *txq)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != priv->cmd_queue)
		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}
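/**
 * iwlagn_tx_queue_set_q2ratid - Map a receiver-address/TID pair to a scheduler queue
 *
 * Each 32-bit word of the scheduler translation table holds the mapping for
 * two queues; odd-numbered queues use the upper 16 bits of the word.
 */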
static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = priv->scd_base_addr +
			IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);

	return 0;
}
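/**
 * iwlagn_tx_queue_stop_scheduler - Clear a queue's ACTIVE bit in the scheduler
 */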
static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(priv,
		IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		(1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
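/**
 * iwlagn_set_wr_ptrs - Point the device write pointer and the scheduler read
 * pointer of a Tx queue at the given index
 */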
void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
			int txq_id, u32 index)
{
	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(txq_id), index);
}
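/**
 * iwlagn_tx_queue_set_status - (De)activate a Tx queue and map it to a FIFO
 *
 * A non-zero scd_retry marks the queue as an aggregation (block-ack) queue.
 */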
void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(priv, IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
			(active << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL) |
			IWLAGN_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}
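/**
 * iwlagn_txq_agg_enable - Configure and activate a Tx queue for aggregation
 *
 * Maps the RA/TID pair to the queue, sets the window size and frame limit and
 * places the read/write pointers at the starting sequence number.
 */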
int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
			  int tx_fifo, int sta_id, int tid, u16 ssn_idx)
{
	unsigned long flags;
	u16 ra_tid;
	int ret;

	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
	     <= txq_id)) {
		IWL_WARN(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			priv->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	/* Modify device's station table to Tx this TID */
	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(priv, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(priv, priv->scd_base_addr +
			IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((SCD_WIN_SIZE <<
			IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((SCD_FRAME_LIMIT <<
			IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
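/**
 * iwlagn_txq_agg_disable - Tear down an aggregation queue
 *
 * Stops the queue in the scheduler, removes it from the aggregation set and
 * returns it to the pool of inactive queues.
 */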
int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
			   u16 ssn_idx, u8 tx_fifo)
{
	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
	     <= txq_id)) {
		IWL_ERR(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			priv->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(priv, txq_id);

	iwl_clear_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1 << txq_id));

	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	/* assumes that ssn_idx is valid (!= 0xFFF) */
	iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);

	iwl_clear_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv, txq_id);
	iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);

	return 0;
}
/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask;
 * must be called under priv->lock and with MAC access held.
 */
void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask);
}
static inline int get_queue_from_ac(u16 ac)
{
	return ac;
}
/*
 * Build the basic (non-rate, non-crypto) part of the REPLY_TX command.
 */
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
				      struct sk_buff *skb,
				      struct iwl_tx_cmd *tx_cmd,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr,
				      u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	else if (info->band == IEEE80211_BAND_2GHZ &&
		 priv->cfg->advanced_bt_coexist &&
		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
		 ieee80211_is_reassoc_req(fc) ||
		 skb->protocol == cpu_to_be16(ETH_P_PAE)))
		tx_flags |= TX_CMD_FLG_IGNORE_BT;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
#define RTS_DFAULT_RETRY_LIMIT		60
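/*
 * Fill in retry limits, legacy rate and antenna selection for the Tx command;
 * data frames defer rate selection to the uCode station table.
 */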
static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
				     struct iwl_tx_cmd *tx_cmd,
				     struct ieee80211_tx_info *info,
				     __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;

	/* Set retry limit on RTS packets */
	rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
	if (data_retry_limit < rts_retry_limit)
		rts_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	}

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
	    (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(&priv->bands[info->band],
					     info->control.sta);
	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up antennas */
	if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) {
		/* operated as 1x1 in full concurrency mode */
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
				first_antenna(priv->hw_params.valid_tx_ant));
	} else
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
						      priv->hw_params.valid_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
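/*
 * Program the per-frame hardware crypto parameters (cipher, key and related
 * flags) in the Tx command, from the key that mac80211 attached to the frame.
 */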
static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
					 struct ieee80211_tx_info *info,
					 struct iwl_tx_cmd *tx_cmd,
					 struct sk_buff *skb_frag,
					 int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
/*
 * Start the REPLY_TX command process: build a Tx command for one frame and
 * hand it to the device.
 */
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_cmd *tx_cmd;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	int swq_id, txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, len_org, firstlen, secondlen;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;

	if (info->control.vif)
		ctx = iwl_rxon_ctx_from_vif(info->control.vif);

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	hdr_len = ieee80211_hdrlen(fc);

	/* Find index into station table for destination station */
	sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
			       hdr->addr1);
		goto drop_unlock;
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (sta)
		sta_priv = (void *)sta->drv_priv;

	if (sta_priv && sta_priv->asleep) {
		WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	txq_id = get_queue_from_ac(skb_get_queue_mapping(skb));

	/* irqs already disabled/saved above when locking priv->lock */
	spin_lock(&priv->sta_lock);

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
			spin_unlock(&priv->sta_lock);
			goto drop_unlock;
		}
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
		}
	}

	txq = &priv->txq[txq_id];
	swq_id = txq->swq_id;
	q = &txq->q;

	if (unlikely(iwl_queue_space(q) < q->high_mark)) {
		spin_unlock(&priv->sta_lock);
		goto drop_unlock;
	}

	if (ieee80211_is_data_qos(fc)) {
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
		if (!ieee80211_has_morefrags(fc))
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	}

	spin_unlock(&priv->sta_lock);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb = skb;
	txq->txb[q->write_ptr].ctx = ctx;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	out_meta = &txq->meta[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
	iwl_dbg_log_tx_data_frame(priv, len, hdr);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);

	iwl_update_stats(priv, true, fc, len);
	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	firstlen = len = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    &out_cmd->hdr, len,
				    PCI_DMA_BIDIRECTIONAL);
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, len);
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   txcmd_phys, len, 1, 0);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
							   phys_addr, len,
							   0, 0);
	}

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	/* take back ownership of DMA buffer to enable update */
	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
				    len, PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(out_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
						le16_to_cpu(tx_cmd->len));

	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
				       len, PCI_DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &out_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */

	/* avoid atomic ops if it isn't an associated client */
	if (sta_priv && sta_priv->client)
		atomic_inc(&sta_priv->pending_frames);

	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			iwl_stop_queue(priv, txq->swq_id);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	return -1;
}
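/*
 * Helpers for the coherent DMA blocks used by the Tx path (the scheduler
 * byte-count tables and the keep-warm buffer).
 */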
static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
					struct iwl_dma_ptr *ptr, size_t size)
{
	ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
				       GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}
*priv
,
811 struct iwl_dma_ptr
*ptr
)
813 if (unlikely(!ptr
->addr
))
816 dma_free_coherent(&priv
->pci_dev
->dev
, ptr
->size
, ptr
->addr
, ptr
->dma
);
817 memset(ptr
, 0, sizeof(*ptr
));
/**
 * iwlagn_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (txq_id == priv->cmd_queue)
			iwl_cmd_queue_free(priv);
		else
			iwl_tx_queue_free(priv, txq_id);

	iwlagn_free_dma_ptr(priv, &priv->kw);

	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);

	/* free tx queue structure */
	iwl_free_txq_mem(priv);
}
/**
 * iwlagn_txq_ctx_alloc - allocate TX queue context
 *
 * Allocate all Tx DMA structures and initialize them.
 */
int iwlagn_txq_ctx_alloc(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwlagn_hw_txq_ctx_free(priv);

	ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error_kw;
	}

	/* allocate tx queue structure */
	ret = iwl_alloc_txq_mem(priv);
	if (ret)
		goto error;

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

 error:
	iwlagn_hw_txq_ctx_free(priv);
	iwlagn_free_dma_ptr(priv, &priv->kw);
 error_kw:
	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
	return ret;
}
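/**
 * iwlagn_txq_ctx_reset - Reset existing Tx queues without reallocating them
 *
 * Turns off the Tx DMA FIFOs, re-points the keep-warm buffer and resets each
 * queue in place, reusing the allocations made by iwlagn_txq_ctx_alloc().
 */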
void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
{
	int txq_id, slots_num;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = txq_id == priv->cmd_queue ?
			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
	}
}
/**
 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
 */
void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
{
	int ch;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				1000))
			IWL_ERR(priv, "Failing on timeout while stopping"
				" DMA channel %d [0x%08x]", ch,
				iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&priv->lock, flags);
}
962 * Find first available (lowest unused) Tx Queue, mark it "active".
963 * Called only when finding queue for aggregation.
964 * Should never return anything < 7, because they should already
965 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}
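/*
 * Start an aggregation session for a station/TID: pick a free aggregation
 * queue, record the starting sequence number and enable the queue. The ADDBA
 * response to mac80211 is deferred while frames are still pending in the HW
 * queue.
 */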
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	tx_fifo = get_fifo_from_tid(tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
			__func__, sta->addr, tid);

	sta_id = iwl_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwlagn_txq_ctx_activate_free(priv);
	if (txq_id == -1) {
		IWL_ERR(priv, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return ret;
}
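/*
 * Stop an aggregation session. If the aggregation queue still holds frames,
 * only mark it as draining (DELBA); the queue is disabled later from the
 * reclaim path once it has emptied.
 */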
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct iwl_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	tx_fifo_id = get_fifo_from_tid(tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
	}

	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
 turn_off:
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs */
	spin_unlock(&priv->sta_lock);
	spin_lock(&priv->lock);

	/*
	 * The only reason this call can fail is a queue number out of range,
	 * which can happen if the uCode is reloaded and all the station
	 * information is lost. If it is outside the range, there is no need
	 * to deactivate the uCode queue; just return "success" to allow
	 * mac80211 to clean up its own data.
	 */
	priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
					     tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
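/*
 * Called from the reclaim path with priv->sta_lock held: once the HW queue
 * has drained, complete a pending ADDBA or DELBA flow for this station/TID.
 */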
int iwlagn_txq_check_empty(struct iwl_priv *priv,
			   int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
	struct iwl_rxon_context *ctx;

	ctx = &priv->contexts[priv->stations[sta_id].ctxid];

	lockdep_assert_held(&priv->sta_lock);

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = get_fifo_from_tid(tid);
			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	}

	return 0;
}
static void iwlagn_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
	struct ieee80211_sta *sta;
	struct iwl_station_priv *sta_priv;

	rcu_read_lock();
	sta = ieee80211_find_sta(tx_info->ctx->vif, hdr->addr1);
	if (sta) {
		sta_priv = (void *)sta->drv_priv;
		/* avoid atomic ops if this isn't a client */
		if (sta_priv->client &&
		    atomic_dec_return(&sta_priv->pending_frames) == 0)
			ieee80211_sta_block_awake(priv->hw, sta, false);
	}
	rcu_read_unlock();

	ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
}
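/**
 * iwlagn_tx_queue_reclaim - Reclaim all TFDs before (not including) @index
 *
 * Returns the number of reclaimed QoS data frames.
 */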
int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		iwlagn_tx_status(priv, tx_info);

		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;
		tx_info->skb = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
	}
	return nfreed;
}
/**
 * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 */
static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
				 struct iwl_ht_agg *agg,
				 struct iwl_compressed_ba_resp *ba_resp)
{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	u64 bitmap, sent_bitmap;
	int successes = 0;
	struct ieee80211_tx_info *info;

	if (unlikely(!agg->wait_for_ba)) {
		IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* tbd: something is wrong with indices */
		sh += 0x100;

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
		return -1;
	}

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	sent_bitmap = bitmap & agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	i = 0;
	while (sent_bitmap) {
		ack = sent_bitmap & 1ULL;
		successes += ack;
		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
			agg->start_idx + i);
		sent_bitmap >>= 1;
		++i;
	}

	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_len = successes;
	info->status.ampdu_len = agg->frame_count;
	iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);

	return 0;
}
/*
 * translate ucode response to mac80211 tx status control values
 */
void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
				 struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->control.rates[0];

	info->antenna_sel_tx =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		r->flags |= IEEE80211_TX_RC_MCS;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		r->flags |= IEEE80211_TX_RC_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}
/**
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
				   struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;
	unsigned long flags;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;
	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * FIXME: this is a uCode bug which needs to be addressed;
		 * log the information and return for now.
		 * Since it can happen very often, and in order not to fill
		 * the syslog, don't enable the logging by default.
		 */
		IWL_DEBUG_TX_REPLY(priv,
			"BA scd_flow %d does not match txq_id %d\n",
			scd_flow, agg->txq_id);
		return;
	}

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	spin_lock_irqsave(&priv->sta_lock, flags);

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index);
		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_wake_queue(priv, txq->swq_id);

		iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
	}

	spin_unlock_irqrestore(&priv->sta_lock, flags);
}