/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"
/*
 * New versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	return iwl_mvm_has_new_rx_api(mvm) ?
		sizeof(struct iwl_mvm_add_sta_cmd) :
		sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
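/*
 * Illustrative call pattern for the helper above (a sketch, not an
 * additional API): the full iwl_mvm_add_sta_cmd is always filled in, but
 * only the first iwl_mvm_add_sta_cmd_size(mvm) bytes are handed to
 * iwl_mvm_send_cmd_pdu_status(), so firmware that only understands the
 * v7 layout never sees the newer trailing fields.
 */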
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assume the AP's sta_id (of the sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_STATION_COUNT;
}
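/*
 * The helper below builds an ADD_STA command from the mac80211 station
 * state: the channel-width "FAT" flags accumulate through an intentional
 * switch fall-through (a 160MHz-capable station also gets the 80/40/20MHz
 * flags), the MIMO flags follow rx_nss and may be overridden by SMPS, and
 * the A-MPDU size/density fields come from the HT/VHT capabilities.
 */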
/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (flags & STA_MODIFY_QUEUES)
			add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}
static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
	struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
					  sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}
static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
				 struct ieee80211_sta *sta)
{
	unsigned long used_hw_queues;
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
	u32 ac;

	lockdep_assert_held(&mvm->mutex);

	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

	/* Find available queues, and allocate them to the ACs */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate STA queue\n");
			return -EBUSY;
		}

		__set_bit(queue, &used_hw_queues);
		mvmsta->hw_queue[ac] = queue;
	}

	/* Found a place for all queues - enable them */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
				      mvmsta->hw_queue[ac],
				      iwl_mvm_ac_to_tx_fifo[ac], 0,
				      wdg_timeout);
		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
	}

	return 0;
}
static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
				    struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned long sta_msk;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* disable the TDLS STA-specific queues */
	sta_msk = mvmsta->tfd_queue_msk;
	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}
/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}
/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	spin_lock_bh(&mvm->queue_info_lock);
	/* Unmap MAC queues and TIDs from this queue */
	mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
	mvm->queue_info[queue].hw_queue_refcount = 0;
	mvm->queue_info[queue].tid_bitmap = 0;
	spin_unlock_bh(&mvm->queue_info_lock);

	return disable_agg_tids;
}
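/*
 * Pick an already-allocated DATA queue for a station that needs another TXQ
 * when none are free. A worked example of the priority ladder below: if the
 * station only owns a BK DATA queue and the new frame is VI, priorities 1,
 * 2 and 3a all fail and priority 3b picks the BK queue, so the VI traffic
 * ends up sharing it.
 */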
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE, so the only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!((queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE &&
	       queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE) ||
	      (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE &&
	       queue <= IWL_MVM_DQA_MAX_DATA_QUEUE) ||
	      (queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE))) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		queue = -ENOSPC;
	}

	return queue;
}
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_ac_to_tx_fifo[ac],
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		struct iwl_scd_txq_cfg_cmd cmd = {
			.scd_queue = queue,
			.enable = 0,
		};
		u8 ac;

		disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);

		spin_lock_bh(&mvm->queue_info_lock);
		ac = mvm->queue_info[queue].mac80211_ac;
		cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
		cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[ac];
		spin_unlock_bh(&mvm->queue_info_lock);

		/* Disable the queue */
		iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids,
					     true);
		iwl_trans_txq_disable(mvm->trans, queue, false);
		ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
					   &cmd);
		if (ret) {
			IWL_ERR(mvm,
				"Failed to free inactive queue %d (ret=%d)\n",
				queue, ret);

			/* Re-mark the inactive queue as inactive */
			spin_lock_bh(&mvm->queue_info_lock);
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
			spin_unlock_bh(&mvm->queue_info_lock);

			return ret;
		}
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}

		/* Mark queue as shared in transport */
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

		/* TODO: a redirection may be required - DQA phase 2 */
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
			   wdg_timeout);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}
static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}
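/*
 * Flush the TX frames that were deferred while the TID had no TXQ:
 * allocate a queue if needed, splice the deferred skbs out under the
 * station lock, then either transmit them or free them if no queue could
 * be allocated.
 */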
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}
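/*
 * Worker scheduled from the TX path whenever frames had to be deferred
 * because no DQA stream (TXQ) existed yet for the sta/TID that sent them.
 */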
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}
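/*
 * Reserve (but don't yet enable) a DATA queue for a new station, so that
 * adding the station cannot fail later for lack of queues. For a non-TDLS
 * client on a station interface the dedicated BSS-client queue is preferred
 * when it is still free.
 */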
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_STATION_COUNT)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;

	/* HW restart, don't assume the memory has been zeroed */
	atomic_set(&mvm->pending_frames[sta_id], 0);
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/*
	 * Allocate new queues for a TDLS station, unless we're in DQA mode,
	 * and then they'll be allocated dynamically
	 */
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
		ret = iwl_mvm_tdls_sta_init(mvm, sta);
		if (ret)
			return ret;
	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
	}

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		if (!iwl_mvm_is_dqa_supported(mvm))
			continue;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data),
				   GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		mvm_sta->dup_data = dup_data;
	}

	if (iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
		iwl_mvm_tdls_sta_deinit(mvm, sta);
	return ret;
}
int iwl_mvm_update_sta(struct iwl_mvm *mvm,
		       struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
}
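/*
 * Toggle the firmware's drain flow for a station: with drain=true the fw
 * stops accepting new frames for the station and flushes out what it has;
 * drain=false re-enables normal flow. The usual call sequence (as used by
 * iwl_mvm_rm_sta() below; shown here only for illustration) is:
 *
 *	iwl_mvm_drain_sta(mvm, mvmsta, true);
 *	iwl_mvm_flush_tx_path(mvm, mvmsta->tfd_queue_msk, 0);
 *	iwl_trans_wait_tx_queue_empty(mvm->trans, mvmsta->tfd_queue_msk);
 *	iwl_mvm_drain_sta(mvm, mvmsta, false);
 */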
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}
/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station, validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}
void iwl_mvm_sta_drained_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
	u8 sta_id;

	/*
	 * The mutex is needed because of the SYNC cmd, but not only: if the
	 * work would run concurrently with iwl_mvm_rm_sta, it would run before
	 * iwl_mvm_rm_sta sets the station as busy, and exit. Then
	 * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
	 * it up.
	 */
	mutex_lock(&mvm->mutex);

	for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
		int ret;
		struct ieee80211_sta *sta =
			rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						  lockdep_is_held(&mvm->mutex));

		/*
		 * This station is in use or RCU-removed; the latter happens in
		 * managed mode, where mac80211 removes the station before we
		 * can remove it from firmware (we can only do that after the
		 * MAC is marked unassociated), and possibly while the deauth
		 * frame to disconnect from the AP is still queued. Then, the
		 * station pointer is -ENOENT when the last skb is reclaimed.
		 */
		if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
			continue;

		if (PTR_ERR(sta) == -EINVAL) {
			IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
				sta_id);
			continue;
		}

		if (!sta) {
			IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
				sta_id);
			continue;
		}

		WARN_ON(PTR_ERR(sta) != -EBUSY);
		/* This station was removed and we waited until it got drained,
		 * we can now proceed and remove it.
		 */
		ret = iwl_mvm_rm_sta_common(mvm, sta_id);
		if (ret) {
			IWL_ERR(mvm,
				"Couldn't remove sta %d after it was drained\n",
				sta_id);
			continue;
		}
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
		clear_bit(sta_id, mvm->sta_drained);

		if (mvm->tfd_drained[sta_id]) {
			unsigned long i, msk = mvm->tfd_drained[sta_id];

			for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
				iwl_mvm_disable_txq(mvm, i, i,
						    IWL_MAX_TID_COUNT, 0);

			mvm->tfd_drained[sta_id] = 0;
			IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
				       sta_id, msk);
		}
	}

	mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ac;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
			continue;

		ac = iwl_mvm_tid_to_ac_queue(i);
		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
				    vif->hw_queue[ac], i, 0);
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
	}
}
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	if ((vif->type == NL80211_IFTYPE_STATION &&
	     mvmvif->ap_sta_id == mvm_sta->sta_id) ||
	    iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
		if (ret)
			return ret;
		/* flush its queues here since we are freeing mvm_sta */
		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
		if (ret)
			return ret;
		ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
						    mvm_sta->tfd_queue_msk);
		if (ret)
			return ret;
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

		/* If DQA is supported - the queues can be disabled now */
		if (iwl_mvm_is_dqa_supported(mvm))
			iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == mvm_sta->sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;

		/* clear d0i3_ap_sta_id if no longer relevant */
		if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
			mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	/*
	 * There are frames pending on the AC queues for this station.
	 * We need to wait until all the frames are drained...
	 */
	if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
				   ERR_PTR(-EBUSY));
		spin_unlock_bh(&mvm_sta->lock);

		/* disable TDLS sta queues on drain complete */
		if (sta->tdls) {
			mvm->tfd_drained[mvm_sta->sta_id] =
							mvm_sta->tfd_queue_msk;
			IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
				       mvm_sta->sta_id);
		}

		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	} else {
		spin_unlock_bh(&mvm_sta->lock);

		if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
			iwl_mvm_tdls_sta_deinit(mvm, sta);

		ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
	}

	return ret;
}
int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}
static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
				    struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_STATION_COUNT;
}
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));

	cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
					mvm->cfg->base_params->wd_timeout :
					IWL_WATCHDOG_DISABLED;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_is_dqa_supported(mvm))
		iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
				      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED);
	if (ret)
		return ret;

	if (iwl_mvm_is_dqa_supported(mvm)) {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = IWL_MVM_TX_FIFO_MCAST,
			.sta_id = mvm->aux_sta.sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};

		iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
				   wdg_timeout);
	}

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
	return ret;
}
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);
	return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					  mvmvif->id, 0);
}
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}
void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}
/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add.
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_is_dqa_supported(mvm)) {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = IWL_MVM_TX_FIFO_VO,
			.sta_id = mvmvif->bcast_sta.sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};
		unsigned int wdg_timeout =
			iwl_mvm_get_wd_timeout(mvm, vif, false, false);
		int queue;

		if ((vif->type == NL80211_IFTYPE_AP) &&
		    (mvmvif->bcast_sta.tfd_queue_msk &
		     BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
			queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
		else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
			 (mvmvif->bcast_sta.tfd_queue_msk &
			  BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
			queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
		else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
			return -EINVAL;

		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
				   wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
		return -ENOSPC;

	return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					  mvmvif->id, mvmvif->color);
}
/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 qmask = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_is_dqa_supported(mvm))
		qmask = iwl_mvm_mac_get_queues_mask(vif);

	if (vif->type == NL80211_IFTYPE_AP) {
		/*
		 * The firmware defines the TFD queue mask to only be relevant
		 * for *unicast* queues, so the multicast (CAB) queue shouldn't
		 * be included.
		 */
		qmask &= ~BIT(vif->cab_queue);

		if (iwl_mvm_is_dqa_supported(mvm))
			qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
	} else if (iwl_mvm_is_dqa_supported(mvm) &&
		   vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
	}

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
					ieee80211_vif_type_p2p(vif));
}
/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add. */
int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}
void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}
/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}
#define IWL_MAX_RX_BA_SESSIONS 16
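/*
 * On devices with multiple RX queues, deleting a BA session must be
 * synchronized with all RX queues so that none of them is still reordering
 * frames for the dead session; the notification below is sent to every
 * queue and, because metadata.sync is set, waited on before returning.
 */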
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}
static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&reorder_buf->entries[j]);
		/*
		 * Prevent timer re-arm. This prevents a very far fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					u32 sta_id,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u8 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		reorder_buf->reorder_timer.function =
			iwl_mvm_reorder_timer_expired;
		reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
		init_timer(&reorder_buf->reorder_timer);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->sta_id = sta_id;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&reorder_buf->entries[j]);
	}
}
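/*
 * Start or stop an RX BA (block-ack) session. On start, the per-queue
 * reorder buffers are allocated before the fw command is sent so a fw
 * failure can be unwound without leaving RX state behind; the fw returns
 * the BAID, which the RX path later uses to look up the session data.
 */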
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    sizeof(baid_data->reorder_buf[0]),
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
			       start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		init_timer(&baid_data->session_timer);
		baid_data->session_timer.function =
			iwl_mvm_rx_agg_session_expired;
		baid_data->session_timer.data =
			(unsigned long)&mvm->baid_map[baid];
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
					    baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else if (mvm->rx_ba_sessions > 0) {
		u8 baid = mvm_sta->tid_to_baid[tid];

		/* check that restart flow didn't zero the counter */
		mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		if (!iwl_mvm_is_dqa_supported(mvm))
			mvm_sta->tfd_queue_msk &= ~BIT(queue);
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}
const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	int txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Note the possible cases:
	 *  1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
	 *  2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
	 *	one and mark it as reserved
	 *  3. In DQA mode, but no traffic yet on this TID: same treatment as in
	 *	non-DQA mode, since the TXQ hasn't yet been allocated
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (!iwl_mvm_is_dqa_supported(mvm) ||
	    mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						 mvm->first_agg_queue,
						 mvm->last_agg_queue);
		if (txq_id < 0) {
			ret = txq_id;
			spin_unlock_bh(&mvm->queue_info_lock);
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto release_locks;
		}

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	}
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	if (tid_data->ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;

release_locks:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}
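/*
 * Move a TX agg session into its operational state: the TXQ is either newly
 * enabled or, in DQA mode, reconfigured in place when the BA window shrank.
 * Note that buf_size is clamped to LINK_QUAL_AGG_FRAME_LIMIT_DEF because
 * the fw keeps a single per-station aggregation limit.
 */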
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	/* In DQA mode, the existing queue might need to be reconfigured */
	if (iwl_mvm_is_dqa_supported(mvm)) {
		spin_lock_bh(&mvm->queue_info_lock);
		/* Maybe there is no need to even alloc a queue... */
		if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
			alloc_queue = false;
		spin_unlock_bh(&mvm->queue_info_lock);

		/*
		 * Only reconfig the SCD for the queue if the window size has
		 * changed from current (become smaller)
		 */
		if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
			/*
			 * If reconfiguring an existing queue, it first must be
			 * drained
			 */
			ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
							    BIT(queue));
			if (ret) {
				IWL_ERR(mvm,
					"Error draining queue before reconfig\n");
				return ret;
			}

			ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
						   mvmsta->sta_id, tid,
						   buf_size, ssn);
			if (ret) {
				IWL_ERR(mvm,
					"Error reconfiguring TXQ #%d\n", queue);
				return ret;
			}
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
	if (ret)
		return -EIO;

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		/* There are still packets for this RA / TID in the HW */
		if (tid_data->ssn != tid_data->next_reclaimed) {
			tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
			err = 0;
			break;
		}

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
		}
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}
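/*
 * When packets are still pending (ssn != next_reclaimed above), the
 * session is parked in IWL_EMPTYING_HW_QUEUE_DELBA rather than torn
 * down immediately; the actual teardown is then expected to complete
 * from the reclaim path (see the iwl_mvm_check_ratid_empty reference
 * in iwl_mvm_sta_tx_agg_flush below) once the HW queue drains.
 */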
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);
		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
		iwl_trans_wait_tx_queue_empty(mvm->trans,
					      mvmsta->tfd_queue_msk);
		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
					    tid, 0);
		}
	}

	return 0;
}
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}
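/*
 * Worked example of the selection above (hypothetical counter values):
 * with offsets 1 and 3 unused and fw_key_deleted = { 9, 4, 2, 7, ... },
 * only the free offsets 1 (counter 4) and 3 (counter 7) are compared,
 * and offset 3 wins because its key was deleted longest ago; just-freed
 * slots are therefore recycled as late as possible.
 */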
static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		return iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	}

	return NULL;
}
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				struct iwl_mvm_sta *mvm_sta,
				struct ieee80211_key_conf *keyconf, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	int i;
	u8 sta_id = mvm_sta->sta_id;

	keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		cmd.tkip_rx_tsc_byte2 = tkip_iv32;
		for (i = 0; i < 5; i++)
			cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_offset = key_offset;
	cmd.key_flags = key_flags;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
					   sizeof(cmd), &cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
						  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}
static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
		    (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5)))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}
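/*
 * The shifts above rebuild the 48-bit IGTK packet number from the
 * byte array handed back by ieee80211_get_key_rx_seq(), in which
 * pn[0] is the most significant byte. For example (hypothetical
 * value), pn = {0x00, 0x00, 0x00, 0x00, 0x01, 0x02} yields
 * receive_seq_cnt == cpu_to_le64(0x0102).
 */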
static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}
static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
	}

	return ret;
}
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_flags = key_flags;
	cmd.key_offset = keyconf->hw_key_idx;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
					  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (!mvm_sta) {
		IWL_ERR(mvm, "Failed to find station\n");
		return -EINVAL;
	}
	sta_id = mvm_sta->sta_id;

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, and thus
	 * there is a need to retrieve the sta from the local station table.
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}
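/*
 * The WEP double upload above means one fw_key_table offset serves
 * both key lookups: the first __iwl_mvm_set_sta_key() call installs
 * the key with the flags implied by 'mcast', and the second re-sends
 * the identical key material with '!mcast', so unicast and multicast
 * traffic both resolve to the same slot. Only one bit is set in
 * fw_key_table for the pair, which is why removal is also done twice
 * (see iwl_mvm_remove_sta_key below).
 */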
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_STATION_COUNT;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (!mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	sta_id = mvm_sta->sta_id;

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
	rcu_read_unlock();
}
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool agg)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation queues then check if all
	 * the queues combined that we're releasing frames from have
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (agg) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];
			if (WARN(tid_data->state != IWL_AGG_ON &&
				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
				 "TID %d state is %d\n",
				 tid, tid_data->state)) {
				spin_unlock_bh(&mvmsta->lock);
				ieee80211_sta_eosp(sta);
				return;
			}

			n_queued = iwl_mvm_tid_queued(tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
	} else {
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
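/*
 * Worked example of the accounting above (hypothetical numbers): with
 * cnt == 4 and two aggregation TIDs queueing 3 and 5 frames, the first
 * TID leaves remaining == 1; the second has n_queued == 5 > 1, so
 * more_data is forced on and remaining drops to 0. The command then
 * carries sleep_tx_count == 4 - 0 == 4, i.e. the release is capped at
 * what the service period allows while signalling that frames remain.
 * Conversely, if only 2 frames were queued in total, sleep_tx_count
 * would be adjusted down to 2.
 */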
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * Tell mac80211 to start/stop queuing tx for this station,
	 * but don't stop queuing if there are still pending frames
	 * for this station.
	 */
	if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}
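/*
 * Note the asymmetry in the condition above: blocking (disable ==
 * true) is always propagated to mac80211, while unblocking is skipped
 * as long as pending_frames is non-zero, so a station is not woken up
 * in mac80211 while frames for it are still in flight; the unblock is
 * then expected to happen once the pending frame count drains.
 */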
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}
}
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}