ath9k: rework tx queue selection and fix queue stopping/waking
drivers/net/wireless/ath/ath9k/virtual.c

/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>

#include "ath9k.h"

struct ath9k_vif_iter_data {
	const u8 *hw_macaddr;
	u8 mask[ETH_ALEN];
};

static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath9k_vif_iter_data *iter_data = data;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
}

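/*
 * Compute the BSSID mask from all active interfaces: only the address bits
 * that are identical across every active MAC address (relative to the
 * hardware MAC address) remain set, so the hardware address filter accepts
 * frames for each virtual wiphy.
 */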
void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_vif_iter_data iter_data;
	int i;

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);

	if (vif)
		ath9k_vif_iter(&iter_data, vif->addr, vif);

	/* Get list of all active MAC addresses */
	spin_lock_bh(&sc->wiphy_lock);
	ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
						   &iter_data);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] == NULL)
			continue;
		ieee80211_iterate_active_interfaces_atomic(
			sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data);
	}
	spin_unlock_bh(&sc->wiphy_lock);

	memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
	ath_hw_setbssidmask(common);
}

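/*
 * Register a new virtual wiphy that shares this ath_softc. A free slot in
 * sc->sec_wiphy is reused (or the array is grown), and the new wiphy gets a
 * locally administered MAC address derived from the hardware address and
 * its slot index.
 */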
int ath9k_wiphy_add(struct ath_softc *sc)
{
	int i, error;
	struct ath_wiphy *aphy;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hw *hw;
	u8 addr[ETH_ALEN];

	hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
	if (hw == NULL)
		return -ENOMEM;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] == NULL)
			break;
	}

	if (i == sc->num_sec_wiphy) {
		/* No empty slot available; increase array length */
		struct ath_wiphy **n;
		n = krealloc(sc->sec_wiphy,
			     (sc->num_sec_wiphy + 1) *
			     sizeof(struct ath_wiphy *),
			     GFP_ATOMIC);
		if (n == NULL) {
			spin_unlock_bh(&sc->wiphy_lock);
			ieee80211_free_hw(hw);
			return -ENOMEM;
		}
		n[i] = NULL;
		sc->sec_wiphy = n;
		sc->num_sec_wiphy++;
	}

	SET_IEEE80211_DEV(hw, sc->dev);

	aphy = hw->priv;
	aphy->sc = sc;
	aphy->hw = hw;
	sc->sec_wiphy[i] = aphy;
	aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
	spin_unlock_bh(&sc->wiphy_lock);

	memcpy(addr, common->macaddr, ETH_ALEN);
	addr[0] |= 0x02; /* Locally managed address */
	/*
	 * XOR virtual wiphy index into the least significant bits to generate
	 * a different MAC address for each virtual wiphy.
	 */
	addr[5] ^= i & 0xff;
	addr[4] ^= (i & 0xff00) >> 8;
	addr[3] ^= (i & 0xff0000) >> 16;

	SET_IEEE80211_PERM_ADDR(hw, addr);

	ath9k_set_hw_capab(sc, hw);

	error = ieee80211_register_hw(hw);

	if (error == 0) {
		/* Make sure wiphy scheduler is started (if enabled) */
		ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int);
	}

	return error;
}

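/* Unregister and free a secondary virtual wiphy and clear its slot. */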
int ath9k_wiphy_del(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	int i;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (aphy == sc->sec_wiphy[i]) {
			sc->sec_wiphy[i] = NULL;
			spin_unlock_bh(&sc->wiphy_lock);
			ieee80211_unregister_hw(aphy->hw);
			ieee80211_free_hw(aphy->hw);
			return 0;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return -ENOENT;
}

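/*
 * Build and transmit a self-generated nullfunc data frame to the AP. With
 * ps set, the PM bit is included so the AP buffers frames while this wiphy
 * is paused; TX status is requested so ath9k_tx_status() can advance the
 * pause state machine.
 */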
static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
			       struct ieee80211_vif *vif, const u8 *bssid,
			       int ps)
{
	struct ath_softc *sc = aphy->sc;
	struct ath_tx_control txctl;
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	__le16 fc;
	struct ieee80211_tx_info *info;

	skb = dev_alloc_skb(24);
	if (skb == NULL)
		return -ENOMEM;
	hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
	memset(hdr, 0, 24);
	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
			 IEEE80211_FCTL_TODS);
	if (ps)
		fc |= cpu_to_le16(IEEE80211_FCTL_PM);
	hdr->frame_control = fc;
	memcpy(hdr->addr1, bssid, ETH_ALEN);
	memcpy(hdr->addr2, aphy->hw->wiphy->perm_addr, ETH_ALEN);
	memcpy(hdr->addr3, bssid, ETH_ALEN);

	info = IEEE80211_SKB_CB(skb);
	memset(info, 0, sizeof(*info));
	info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS;
	info->control.vif = vif;
	info->control.rates[0].idx = 0;
	info->control.rates[0].count = 4;
	info->control.rates[1].idx = -1;

	memset(&txctl, 0, sizeof(struct ath_tx_control));
	txctl.txq = sc->tx.txq_map[WME_AC_VO];
	txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;

	if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
		goto exit;

	return 0;
exit:
	dev_kfree_skb_any(skb);
	return -1;
}

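/*
 * Return true while any wiphy (primary or secondary) is still in the
 * ATH_WIPHY_PAUSING state; the locked wrapper follows.
 */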
static bool __ath9k_wiphy_pausing(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state == ATH_WIPHY_PAUSING)
		return true;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_PAUSING)
			return true;
	}
	return false;
}

static bool ath9k_wiphy_pausing(struct ath_softc *sc)
{
	bool ret;
	spin_lock_bh(&sc->wiphy_lock);
	ret = __ath9k_wiphy_pausing(sc);
	spin_unlock_bh(&sc->wiphy_lock);
	return ret;
}

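/* Same check as above, but for the ATH_WIPHY_SCAN state. */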
static bool __ath9k_wiphy_scanning(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state == ATH_WIPHY_SCAN)
		return true;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_SCAN)
			return true;
	}
	return false;
}

bool ath9k_wiphy_scanning(struct ath_softc *sc)
{
	bool ret;
	spin_lock_bh(&sc->wiphy_lock);
	ret = __ath9k_wiphy_scanning(sc);
	spin_unlock_bh(&sc->wiphy_lock);
	return ret;
}

static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy);

/* caller must hold wiphy_lock */
static void __ath9k_wiphy_unpause_ch(struct ath_wiphy *aphy)
{
	if (aphy == NULL)
		return;
	if (aphy->chan_idx != aphy->sc->chan_idx)
		return; /* wiphy not on the selected channel */
	__ath9k_wiphy_unpause(aphy);
}

static void ath9k_wiphy_unpause_channel(struct ath_softc *sc)
{
	int i;
	spin_lock_bh(&sc->wiphy_lock);
	__ath9k_wiphy_unpause_ch(sc->pri_wiphy);
	for (i = 0; i < sc->num_sec_wiphy; i++)
		__ath9k_wiphy_unpause_ch(sc->sec_wiphy[i]);
	spin_unlock_bh(&sc->wiphy_lock);
}

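/*
 * Worker that performs the actual channel change once all wiphys scheduled
 * for pausing have reached the paused state, then unpauses the wiphys that
 * operate on the newly selected channel.
 */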
void ath9k_wiphy_chan_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc, chan_work);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_wiphy *aphy = sc->next_wiphy;

	if (aphy == NULL)
		return;

	/*
	 * All pending interfaces paused; ready to change
	 * channels.
	 */

	/* Change channels */
	mutex_lock(&sc->mutex);
	/* XXX: remove me eventually */
	ath9k_update_ichannel(sc, aphy->hw,
			      &sc->sc_ah->channels[sc->chan_idx]);

	/* sync hw configuration for hw code */
	common->hw = aphy->hw;

	ath_update_chainmask(sc, sc->chan_is_ht);
	if (ath_set_channel(sc, aphy->hw,
			    &sc->sc_ah->channels[sc->chan_idx]) < 0) {
		printk(KERN_DEBUG "ath9k: Failed to set channel for new "
		       "virtual wiphy\n");
		mutex_unlock(&sc->mutex);
		return;
	}
	mutex_unlock(&sc->mutex);

	ath9k_wiphy_unpause_channel(sc);
}

/*
 * ath9k version of ieee80211_tx_status() for TX frames that are generated
 * internally in the driver.
 */
void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if ((tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_PAUSE) &&
	    aphy->state == ATH_WIPHY_PAUSING) {
		if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
			printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
			       "frame\n", wiphy_name(hw->wiphy));
			/*
			 * The AP did not reply; ignore this to allow us to
			 * continue.
			 */
		}
		aphy->state = ATH_WIPHY_PAUSED;
		if (!ath9k_wiphy_pausing(aphy->sc)) {
			/*
			 * Drop from tasklet to work to allow mutex for channel
			 * change.
			 */
			ieee80211_queue_work(aphy->sc->hw,
					     &aphy->sc->chan_work);
		}
	}

	dev_kfree_skb(skb);
}

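/*
 * Mark one wiphy as paused and, if no wiphy is left in the PAUSING state,
 * queue the channel change work.
 */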
static void ath9k_mark_paused(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	aphy->state = ATH_WIPHY_PAUSED;
	if (!__ath9k_wiphy_pausing(sc))
		ieee80211_queue_work(sc->hw, &sc->chan_work);
}

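/*
 * Interface iterator used when pausing a wiphy: station interfaces notify
 * the AP with a PS nullfunc frame before being marked paused, AP interfaces
 * are paused immediately (beaconing stops via the aphy->state change).
 */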
static void ath9k_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath_wiphy *aphy = data;
	struct ath_vif *avp = (void *) vif->drv_priv;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		if (!vif->bss_conf.assoc) {
			ath9k_mark_paused(aphy);
			break;
		}
		/* TODO: could avoid this if already in PS mode */
		if (ath9k_send_nullfunc(aphy, vif, avp->bssid, 1)) {
			printk(KERN_DEBUG "%s: failed to send PS nullfunc\n",
			       __func__);
			ath9k_mark_paused(aphy);
		}
		break;
	case NL80211_IFTYPE_AP:
		/* Beacon transmission is paused by aphy->state change */
		ath9k_mark_paused(aphy);
		break;
	default:
		break;
	}
}

/* caller must hold wiphy_lock */
static int __ath9k_wiphy_pause(struct ath_wiphy *aphy)
{
	ieee80211_stop_queues(aphy->hw);
	aphy->state = ATH_WIPHY_PAUSING;
	/*
	 * TODO: handle PAUSING->PAUSED for the case where there are multiple
	 * active vifs (now we do it on the first vif getting ready; should be
	 * on the last)
	 */
	ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter,
						   aphy);
	return 0;
}

int ath9k_wiphy_pause(struct ath_wiphy *aphy)
{
	int ret;
	spin_lock_bh(&aphy->sc->wiphy_lock);
	ret = __ath9k_wiphy_pause(aphy);
	spin_unlock_bh(&aphy->sc->wiphy_lock);
	return ret;
}

static void ath9k_unpause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath_wiphy *aphy = data;
	struct ath_vif *avp = (void *) vif->drv_priv;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		if (!vif->bss_conf.assoc)
			break;
		ath9k_send_nullfunc(aphy, vif, avp->bssid, 0);
		break;
	case NL80211_IFTYPE_AP:
		/* Beacon transmission is re-enabled by aphy->state change */
		break;
	default:
		break;
	}
}

/* caller must hold wiphy_lock */
static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy)
{
	ieee80211_iterate_active_interfaces_atomic(aphy->hw,
						   ath9k_unpause_iter, aphy);
	aphy->state = ATH_WIPHY_ACTIVE;
	ieee80211_wake_queues(aphy->hw);
	return 0;
}

int ath9k_wiphy_unpause(struct ath_wiphy *aphy)
{
	int ret;
	spin_lock_bh(&aphy->sc->wiphy_lock);
	ret = __ath9k_wiphy_unpause(aphy);
	spin_unlock_bh(&aphy->sc->wiphy_lock);
	return ret;
}

static void __ath9k_wiphy_mark_all_paused(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE)
		sc->pri_wiphy->state = ATH_WIPHY_PAUSED;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE)
			sc->sec_wiphy[i]->state = ATH_WIPHY_PAUSED;
	}
}

/* caller must hold wiphy_lock */
static void __ath9k_wiphy_pause_all(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
		__ath9k_wiphy_pause(sc->pri_wiphy);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
			__ath9k_wiphy_pause(sc->sec_wiphy[i]);
	}
}

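/*
 * Select a wiphy to become the active one: record its channel as the next
 * channel, pause all currently active wiphys and, once they are paused,
 * queue the channel change work (either immediately or from
 * ath9k_tx_status() when the last pause frame completes).
 */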
int ath9k_wiphy_select(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	bool now;

	spin_lock_bh(&sc->wiphy_lock);
	if (__ath9k_wiphy_scanning(sc)) {
		/*
		 * For now, we are using mac80211 sw scan and it expects to
		 * have full control over channel changes, so avoid wiphy
		 * scheduling during a scan. This could be optimized if the
		 * scanning control were moved into the driver.
		 */
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY;
	}
	if (__ath9k_wiphy_pausing(sc)) {
		if (sc->wiphy_select_failures == 0)
			sc->wiphy_select_first_fail = jiffies;
		sc->wiphy_select_failures++;
		if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2)) {
			printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
			       "out; disable/enable hw to recover\n");
			__ath9k_wiphy_mark_all_paused(sc);
			/*
			 * TODO: this workaround to fix hardware is unlikely to
			 * be specific to virtual wiphy changes. It can happen
			 * on normal channel change, too, and as such, this
			 * should really be made more generic. For example,
			 * trigger radio disable/enable on GTT interrupt burst
			 * (say, 10 GTT interrupts received without any TX
			 * frame being completed)
			 */
			spin_unlock_bh(&sc->wiphy_lock);
			ath_radio_disable(sc, aphy->hw);
			ath_radio_enable(sc, aphy->hw);
			/* Only the primary wiphy hw is used for queuing work */
			ieee80211_queue_work(aphy->sc->hw,
					     &aphy->sc->chan_work);
			return -EBUSY; /* previous select still in progress */
		}
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY; /* previous select still in progress */
	}
	sc->wiphy_select_failures = 0;

	/* Store the new channel */
	sc->chan_idx = aphy->chan_idx;
	sc->chan_is_ht = aphy->chan_is_ht;
	sc->next_wiphy = aphy;

	__ath9k_wiphy_pause_all(sc);
	now = !__ath9k_wiphy_pausing(aphy->sc);
	spin_unlock_bh(&sc->wiphy_lock);

	if (now) {
		/* Ready to request channel change immediately */
		ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work);
	}

	/*
	 * wiphys will be unpaused in ath9k_tx_status() once channel has been
	 * changed if any wiphy needs time to become paused.
	 */

	return 0;
}

bool ath9k_wiphy_started(struct ath_softc *sc)
{
	int i;
	spin_lock_bh(&sc->wiphy_lock);
	if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) {
		spin_unlock_bh(&sc->wiphy_lock);
		return true;
	}
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) {
			spin_unlock_bh(&sc->wiphy_lock);
			return true;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return false;
}

static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy,
				   struct ath_wiphy *selected)
{
	if (selected->state == ATH_WIPHY_SCAN) {
		if (aphy == selected)
			return;
		/*
		 * Pause all other wiphys for the duration of the scan even if
		 * they are on the current channel now.
		 */
	} else if (aphy->chan_idx == selected->chan_idx)
		return;
	aphy->state = ATH_WIPHY_PAUSED;
	ieee80211_stop_queues(aphy->hw);
}

void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
				  struct ath_wiphy *selected)
{
	int i;
	spin_lock_bh(&sc->wiphy_lock);
	if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
		ath9k_wiphy_pause_chan(sc->pri_wiphy, selected);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
			ath9k_wiphy_pause_chan(sc->sec_wiphy[i], selected);
	}
	spin_unlock_bh(&sc->wiphy_lock);
}

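/*
 * Delayed work implementing the round-robin virtual wiphy scheduler: advance
 * to the next non-inactive secondary wiphy, fall back to the primary wiphy
 * when the list wraps, try to switch to the chosen wiphy, and re-arm the
 * work with the configured scheduler interval.
 */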
void ath9k_wiphy_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    wiphy_work.work);
	struct ath_wiphy *aphy = NULL;
	bool first = true;

	spin_lock_bh(&sc->wiphy_lock);

	if (sc->wiphy_scheduler_int == 0) {
		/* wiphy scheduler is disabled */
		spin_unlock_bh(&sc->wiphy_lock);
		return;
	}

try_again:
	sc->wiphy_scheduler_index++;
	while (sc->wiphy_scheduler_index <= sc->num_sec_wiphy) {
		aphy = sc->sec_wiphy[sc->wiphy_scheduler_index - 1];
		if (aphy && aphy->state != ATH_WIPHY_INACTIVE)
			break;

		sc->wiphy_scheduler_index++;
		aphy = NULL;
	}
	if (aphy == NULL) {
		sc->wiphy_scheduler_index = 0;
		if (sc->pri_wiphy->state == ATH_WIPHY_INACTIVE) {
			if (first) {
				first = false;
				goto try_again;
			}
			/* No wiphy is ready to be scheduled */
		} else
			aphy = sc->pri_wiphy;
	}

	spin_unlock_bh(&sc->wiphy_lock);

	if (aphy &&
	    aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN &&
	    ath9k_wiphy_select(aphy)) {
		printk(KERN_DEBUG "ath9k: Failed to schedule virtual wiphy "
		       "change\n");
	}

	ieee80211_queue_delayed_work(sc->hw,
				     &sc->wiphy_work,
				     sc->wiphy_scheduler_int);
}

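/*
 * Set the wiphy scheduler interval (in milliseconds); zero disables the
 * scheduler. Any pending scheduler work is cancelled before re-arming.
 */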
void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int)
{
	cancel_delayed_work_sync(&sc->wiphy_work);
	sc->wiphy_scheduler_int = msecs_to_jiffies(msec_int);
	if (sc->wiphy_scheduler_int)
		ieee80211_queue_delayed_work(sc->hw, &sc->wiphy_work,
					     sc->wiphy_scheduler_int);
}

/* caller must hold wiphy_lock */
bool ath9k_all_wiphys_idle(struct ath_softc *sc)
{
	unsigned int i;
	if (!sc->pri_wiphy->idle)
		return false;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (!aphy)
			continue;
		if (!aphy->idle)
			return false;
	}
	return true;
}

/* caller must hold wiphy_lock */
void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle)
{
	struct ath_softc *sc = aphy->sc;

	aphy->idle = idle;
	ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
		  "Marking %s as %s\n",
		  wiphy_name(aphy->hw->wiphy),
		  idle ? "idle" : "not-idle");
}

/* Only bother starting a queue on an active virtual wiphy */
bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	unsigned int i;
	bool txq_started = false;

	spin_lock_bh(&sc->wiphy_lock);

	/* Start the primary wiphy */
	if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) {
		ieee80211_wake_queue(hw, skb_queue);
		txq_started = true;
		goto unlock;
	}

	/* Now start the secondary wiphy queues */
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (!aphy)
			continue;
		if (aphy->state != ATH_WIPHY_ACTIVE)
			continue;

		hw = aphy->hw;
		ieee80211_wake_queue(hw, skb_queue);
		txq_started = true;
		break;
	}

unlock:
	spin_unlock_bh(&sc->wiphy_lock);
	return txq_started;
}

/* Go ahead and propagate information to all virtual wiphys, it won't hurt */
void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	unsigned int i;

	spin_lock_bh(&sc->wiphy_lock);

	/* Stop the primary wiphy */
	ieee80211_stop_queue(hw, skb_queue);

	/* Now stop the secondary wiphy queues */
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (!aphy)
			continue;
		hw = aphy->hw;
		ieee80211_stop_queue(hw, skb_queue);
	}
	spin_unlock_bh(&sc->wiphy_lock);
}