/*
 * Atheros CARL9170 driver
 *
 * mac80211 interaction code
 *
 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, see
 * http://www.gnu.org/licenses/.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * Copyright (c) 2007-2008 Atheros Communications, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/random.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include "hw.h"
#include "carl9170.h"
#include "cmd.h"
static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");

int modparam_noht;
module_param_named(noht, modparam_noht, int, S_IRUGO);
MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
#define RATE(_bitrate, _hw_rate, _txpidx, _flags) {	\
	.bitrate	= (_bitrate),			\
	.flags		= (_flags),			\
	.hw_value	= (_hw_rate) | (_txpidx) << 4,	\
}

struct ieee80211_rate __carl9170_ratetable[] = {
	RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
	/* ... */
};

#define carl9170_g_ratetable		(__carl9170_ratetable + 0)
#define carl9170_g_ratetable_size	12
#define carl9170_a_ratetable		(__carl9170_ratetable + 4)
#define carl9170_a_ratetable_size	8
/*
 * NB: The hw_value is used as an index into the carl9170_phy_freq_params
 * array in phy.c so that we don't have to do frequency lookups!
 */
#define CHAN(_freq, _idx) {		\
	.center_freq	= (_freq),	\
	.hw_value	= (_idx),	\
	.max_power	= 18, /* XXX */	\
}

static struct ieee80211_channel carl9170_2ghz_chantable[] = {
	/* ... */
};

static struct ieee80211_channel carl9170_5ghz_chantable[] = {
	/* ... */
};
#define CARL9170_HT_CAP							\
{									\
	.ht_supported	= true,						\
	.cap		= IEEE80211_HT_CAP_MAX_AMSDU |			\
			  IEEE80211_HT_CAP_SUP_WIDTH_20_40 |		\
			  IEEE80211_HT_CAP_SGI_40 |			\
			  IEEE80211_HT_CAP_DSSSCCK40 |			\
			  IEEE80211_HT_CAP_SM_PS,			\
	.ampdu_factor	= IEEE80211_HT_MAX_AMPDU_64K,			\
	.ampdu_density	= IEEE80211_HT_MPDU_DENSITY_8,			\
	.mcs		= {						\
		.rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, },	\
		.rx_highest = cpu_to_le16(300),				\
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,		\
	},								\
}
static struct ieee80211_supported_band carl9170_band_2GHz = {
	.channels	= carl9170_2ghz_chantable,
	.n_channels	= ARRAY_SIZE(carl9170_2ghz_chantable),
	.bitrates	= carl9170_g_ratetable,
	.n_bitrates	= carl9170_g_ratetable_size,
	.ht_cap		= CARL9170_HT_CAP,
};

static struct ieee80211_supported_band carl9170_band_5GHz = {
	.channels	= carl9170_5ghz_chantable,
	.n_channels	= ARRAY_SIZE(carl9170_5ghz_chantable),
	.bitrates	= carl9170_a_ratetable,
	.n_bitrates	= carl9170_a_ratetable_size,
	.ht_cap		= CARL9170_HT_CAP,
};
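/*
 * Garbage-collect dead TX A-MPDU sessions: every tid_info that has
 * reached CARL9170_TID_STATE_SHUTDOWN is marked as KILLED, unlinked
 * from ar->tx_ampdu_list under the list lock, and any frames still
 * sitting in its queue are completed as failed before the structure
 * is freed.
 */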
static void carl9170_ampdu_gc(struct ar9170 *ar)
{
	struct carl9170_sta_tid *tid_info;
	LIST_HEAD(tid_gc);

	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
		spin_lock_bh(&ar->tx_ampdu_list_lock);
		if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
			tid_info->state = CARL9170_TID_STATE_KILLED;
			list_del_rcu(&tid_info->list);
			ar->tx_ampdu_list_len--;
			list_add_tail(&tid_info->tmp_list, &tid_gc);
		}
		spin_unlock_bh(&ar->tx_ampdu_list_lock);
	}
	rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);

	synchronize_rcu();

	while (!list_empty(&tid_gc)) {
		struct sk_buff *skb;

		tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
					    tmp_list);

		while ((skb = __skb_dequeue(&tid_info->queue)))
			carl9170_tx_status(ar, skb, false);

		list_del_init(&tid_info->tmp_list);
		kfree(tid_info);
	}
}
static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
{
	if (drop_queued) {
		int i;

		/*
		 * We can only drop frames which have not been uploaded
		 * to the device yet.
		 */

		for (i = 0; i < ar->hw->queues; i++) {
			struct sk_buff *skb;

			while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
				struct ieee80211_tx_info *info;

				info = IEEE80211_SKB_CB(skb);
				if (info->flags & IEEE80211_TX_CTL_AMPDU)
					atomic_dec(&ar->tx_ampdu_upload);

				carl9170_tx_status(ar, skb, false);
			}
		}
	}

	/* Wait for all other outstanding frames to timeout. */
	if (atomic_read(&ar->tx_total_queued))
		WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
}
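/*
 * Suspend all active block-ack sessions and hand their still-queued
 * frames back to mac80211 as failed transmissions.
 */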
static void carl9170_flush_ba(struct ar9170 *ar)
{
	struct sk_buff_head free;
	struct carl9170_sta_tid *tid_info;
	struct sk_buff *skb;

	__skb_queue_head_init(&free);

	spin_lock_bh(&ar->tx_ampdu_list_lock);
	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
		if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
			tid_info->state = CARL9170_TID_STATE_SUSPEND;

			spin_lock(&tid_info->lock);
			while ((skb = __skb_dequeue(&tid_info->queue)))
				__skb_queue_tail(&free, skb);
			spin_unlock(&tid_info->lock);
		}
	}
	spin_unlock_bh(&ar->tx_ampdu_list_lock);

	while ((skb = __skb_dequeue(&free)))
		carl9170_tx_status(ar, skb, false);
}
static void carl9170_zap_queues(struct ar9170 *ar)
{
	struct carl9170_vif_info *cvif;
	unsigned int i;

	carl9170_ampdu_gc(ar);

	carl9170_flush_ba(ar);
	carl9170_flush(ar, true);

	for (i = 0; i < ar->hw->queues; i++) {
		spin_lock_bh(&ar->tx_status[i].lock);
		while (!skb_queue_empty(&ar->tx_status[i])) {
			struct sk_buff *skb;

			skb = skb_peek(&ar->tx_status[i]);
			carl9170_tx_get_skb(skb);
			spin_unlock_bh(&ar->tx_status[i].lock);
			carl9170_tx_drop(ar, skb);
			spin_lock_bh(&ar->tx_status[i].lock);
			carl9170_tx_put_skb(skb);
		}
		spin_unlock_bh(&ar->tx_status[i].lock);
	}

	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);

	/* reinitialize queues statistics */
	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
	for (i = 0; i < ar->hw->queues; i++)
		ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;

	for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
		ar->mem_bitmap[i] = 0;

	list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
		spin_lock_bh(&ar->beacon_lock);
		dev_kfree_skb_any(cvif->beacon);
		cvif->beacon = NULL;
		spin_unlock_bh(&ar->beacon_lock);
	}

	atomic_set(&ar->tx_ampdu_upload, 0);
	atomic_set(&ar->tx_ampdu_scheduler, 0);
	atomic_set(&ar->tx_total_pending, 0);
	atomic_set(&ar->tx_total_queued, 0);
	atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
}
#define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop)		\
do {									\
	queue.aifs = ai_fs;						\
	queue.cw_min = cwmin;						\
	queue.cw_max = cwmax;						\
	queue.txop = _txop;						\
} while (0)
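/*
 * carl9170_op_start() brings the interface up: it wipes the software
 * queues, programs the default EDCA parameters (AIFS, CWmin, CWmax,
 * TXOP) for the VO/VI/BE/BK and special queues, clears the hardware
 * key cache and finally starts the RX DMA queue and the statistics
 * worker.
 */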
static int carl9170_op_start(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;
	int err, i;

	mutex_lock(&ar->mutex);

	carl9170_zap_queues(ar);

	/* reset QoS defaults */
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3, 7, 47);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7, 15, 94);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023, 0);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023, 0);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);

	ar->current_factor = ar->current_density = -1;
	/* "The first key is unique." */
	ar->usedkeys = 1;
	ar->filter_state = 0;
	ar->ps.last_action = jiffies;
	ar->ps.last_slept = jiffies;
	ar->erp_mode = CARL9170_ERP_AUTO;
	ar->rx_software_decryption = false;
	ar->disable_offload = false;

	for (i = 0; i < ar->hw->queues; i++) {
		ar->queue_stop_timeout[i] = jiffies;
		ar->max_queue_stop_timeout[i] = 0;
	}

	atomic_set(&ar->mem_allocs, 0);

	err = carl9170_usb_open(ar);
	if (err)
		goto out;

	err = carl9170_init_mac(ar);
	if (err)
		goto out;

	err = carl9170_set_qos(ar);
	if (err)
		goto out;

	if (ar->fw.rx_filter) {
		err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
			CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
		if (err)
			goto out;
	}

	err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
				 AR9170_DMA_TRIGGER_RXQ);
	if (err)
		goto out;

	/* Clear key-cache */
	for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
					  0, NULL, 0);
		if (err)
			goto out;

		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
					  1, NULL, 0);
		if (err)
			goto out;

		if (i < AR9170_CAM_MAX_USER) {
			err = carl9170_disable_key(ar, i);
			if (err)
				goto out;
		}
	}

	carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);

	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));

	ieee80211_wake_queues(ar->hw);
	err = 0;

out:
	mutex_unlock(&ar->mutex);
	return err;
}
static void carl9170_cancel_worker(struct ar9170 *ar)
{
	cancel_delayed_work_sync(&ar->stat_work);
	cancel_delayed_work_sync(&ar->tx_janitor);
#ifdef CONFIG_CARL9170_LEDS
	cancel_delayed_work_sync(&ar->led_work);
#endif /* CONFIG_CARL9170_LEDS */
	cancel_work_sync(&ar->ps_work);
	cancel_work_sync(&ar->ping_work);
	cancel_work_sync(&ar->ampdu_work);
}
static void carl9170_op_stop(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;

	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);

	ieee80211_stop_queues(ar->hw);

	mutex_lock(&ar->mutex);
	if (IS_ACCEPTING_CMD(ar)) {
		rcu_assign_pointer(ar->beacon_iter, NULL);

		carl9170_led_set_state(ar, 0);

		carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
		carl9170_usb_stop(ar);
	}

	carl9170_zap_queues(ar);
	mutex_unlock(&ar->mutex);

	carl9170_cancel_worker(ar);
}
static void carl9170_restart_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 restart_work);
	int err;

	ar->filter_state = 0;
	carl9170_cancel_worker(ar);

	mutex_lock(&ar->mutex);
	err = carl9170_usb_restart(ar);
	if (net_ratelimit()) {
		if (err)
			dev_err(&ar->udev->dev, "Failed to restart device "
				"(%d).\n", err);
		else
			dev_info(&ar->udev->dev, "device restarted "
				 "successfully.\n");
	}

	carl9170_zap_queues(ar);
	mutex_unlock(&ar->mutex);

	if (!err) {
		ar->restart_counter++;
		atomic_set(&ar->pending_restarts, 0);

		ieee80211_restart_hw(ar->hw);
	} else {
		/*
		 * The reset was unsuccessful and the device seems to
		 * be dead. But there's still one option: a low-level
		 * usb subsystem reset...
		 */

		carl9170_usb_reset(ar);
	}
}
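/*
 * carl9170_restart() is the central error-recovery entry point. The
 * pending_restarts counter filters out surplus reset requests; the
 * actual recovery is either deferred to restart_work or, if the
 * firmware no longer accepts commands, done via a full USB reset.
 */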
void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
{
	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);

	/*
	 * Sometimes, an error can trigger several different reset events.
	 * By ignoring these *surplus* reset events, the device won't be
	 * killed again, right after it has recovered.
	 */
	if (atomic_inc_return(&ar->pending_restarts) > 1) {
		dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
		return;
	}

	ieee80211_stop_queues(ar->hw);

	dev_err(&ar->udev->dev, "restart device (%d)\n", r);

	if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
	    !WARN_ON(r >= __CARL9170_RR_LAST))
		ar->last_reason = r;

	if (IS_ACCEPTING_CMD(ar) && !ar->needs_full_reset)
		ieee80211_queue_work(ar->hw, &ar->restart_work);
	else
		carl9170_usb_reset(ar);

	/*
	 * At this point, the device instance might have vanished/disabled.
	 * So, don't put any code which accesses the ar9170 struct
	 * without proper protection.
	 */
}
static void carl9170_ping_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
	int err;

	mutex_lock(&ar->mutex);
	err = carl9170_echo_test(ar, 0xdeadbeef);
	if (err)
		carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
	mutex_unlock(&ar->mutex);
}
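/*
 * Program the firmware with the MAC address and operating mode of the
 * interface. Hardware crypto offload is only kept for plain station
 * and AP interfaces; every other interface type falls back to software
 * de-/encryption.
 */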
static int carl9170_init_interface(struct ar9170 *ar,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = &ar->common;
	int err;

	WARN_ON_ONCE(IS_STARTED(ar));

	memcpy(common->macaddr, vif->addr, ETH_ALEN);

	if (modparam_nohwcrypt ||
	    ((vif->type != NL80211_IFTYPE_STATION) &&
	     (vif->type != NL80211_IFTYPE_AP))) {
		ar->rx_software_decryption = true;
		ar->disable_offload = true;
	}

	err = carl9170_set_operating_mode(ar);
	return err;
}
static int carl9170_op_add_interface(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
	struct ieee80211_vif *main_vif;
	struct ar9170 *ar = hw->priv;
	int vif_id = -1, err = 0;

	mutex_lock(&ar->mutex);

	if (vif_priv->active) {
		/*
		 * Skip the interface structure initialization,
		 * if the vif survived the _restart call.
		 */
		vif_id = vif_priv->id;
		vif_priv->enable_beacon = false;

		spin_lock_bh(&ar->beacon_lock);
		dev_kfree_skb_any(vif_priv->beacon);
		vif_priv->beacon = NULL;
		spin_unlock_bh(&ar->beacon_lock);

		goto init;
	}

	main_vif = carl9170_get_main_vif(ar);

	if (main_vif) {
		switch (main_vif->type) {
		case NL80211_IFTYPE_STATION:
			if (vif->type == NL80211_IFTYPE_STATION)
				break;

			err = -EBUSY;
			goto unlock;

		case NL80211_IFTYPE_AP:
			if ((vif->type == NL80211_IFTYPE_STATION) ||
			    (vif->type == NL80211_IFTYPE_WDS) ||
			    (vif->type == NL80211_IFTYPE_AP))
				break;

			err = -EBUSY;
			goto unlock;

		default:
			err = -EOPNOTSUPP;
			goto unlock;
		}
	}

	vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);

	BUG_ON(ar->vif_priv[vif_id].id != vif_id);

	vif_priv->active = true;
	vif_priv->id = vif_id;
	vif_priv->enable_beacon = false;

	list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
	rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);

init:
	if (carl9170_get_main_vif(ar) == vif) {
		rcu_assign_pointer(ar->beacon_iter, vif_priv);

		err = carl9170_init_interface(ar, vif);
	} else {
		err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
	}
	if (err)
		goto unlock;

	if (ar->fw.tx_seq_table) {
		err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
					 0);
	}

unlock:
	if (err && (vif_id >= 0)) {
		vif_priv->active = false;
		bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
		rcu_assign_pointer(ar->vif_priv[vif_id].vif, NULL);
		list_del_rcu(&vif_priv->list);
		mutex_unlock(&ar->mutex);
		return err;
	}

	ar->ps.off_override |= PS_OFF_VIF;

	mutex_unlock(&ar->mutex);
	return err;
}
static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif)
{
	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
	struct ieee80211_vif *main_vif;
	struct ar9170 *ar = hw->priv;
	unsigned int id;

	mutex_lock(&ar->mutex);

	if (WARN_ON_ONCE(!vif_priv->active))
		goto unlock;

	main_vif = carl9170_get_main_vif(ar);

	id = vif_priv->id;

	vif_priv->active = false;
	WARN_ON(vif_priv->enable_beacon);
	vif_priv->enable_beacon = false;
	list_del_rcu(&vif_priv->list);
	rcu_assign_pointer(ar->vif_priv[id].vif, NULL);

	if (vif == main_vif) {
		WARN_ON(carl9170_init_interface(ar,
						carl9170_get_main_vif(ar)));

		carl9170_set_operating_mode(ar);
	} else {
		WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
	}

	carl9170_update_beacon(ar, false);
	carl9170_flush_cab(ar, id);

	spin_lock_bh(&ar->beacon_lock);
	dev_kfree_skb_any(vif_priv->beacon);
	vif_priv->beacon = NULL;
	spin_unlock_bh(&ar->beacon_lock);

	bitmap_release_region(&ar->vif_bitmap, id, 0);

	carl9170_set_beacon_timers(ar);

	ar->ps.off_override &= ~PS_OFF_VIF;

unlock:
	mutex_unlock(&ar->mutex);
}
void carl9170_ps_check(struct ar9170 *ar)
{
	ieee80211_queue_work(ar->hw, &ar->ps_work);
}
/* caller must hold ar->mutex */
static int carl9170_ps_update(struct ar9170 *ar)
{
	bool ps = false;
	int err = 0;

	if (!ar->ps.off_override)
		ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);

	if (ps != ar->ps.state) {
		err = carl9170_powersave(ar, ps);
		if (err)
			return err;

		if (ar->ps.state && !ps) {
			ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
				ar->ps.last_action);
		}

		ar->ps.last_slept = jiffies;

		ar->ps.last_action = jiffies;
		ar->ps.state = ps;
	}

	return 0;
}
static void carl9170_ps_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 ps_work);

	mutex_lock(&ar->mutex);
	if (IS_STARTED(ar))
		WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
	mutex_unlock(&ar->mutex);
}
static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
{
	int err;

	if (noise) {
		err = carl9170_get_noisefloor(ar);
		if (err)
			return err;
	}

	if (ar->fw.hw_counters) {
		err = carl9170_collect_tally(ar);
		if (err)
			return err;
	}

	if (flush)
		memset(&ar->tally, 0, sizeof(ar->tally));

	return 0;
}
static void carl9170_stat_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
	int err;

	mutex_lock(&ar->mutex);
	err = carl9170_update_survey(ar, false, true);
	mutex_unlock(&ar->mutex);

	if (err)
		return;

	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
}
static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ar9170 *ar = hw->priv;
	int err = 0;

	mutex_lock(&ar->mutex);
	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
		/* ... */
	}

	if (changed & IEEE80211_CONF_CHANGE_PS) {
		err = carl9170_ps_update(ar);
		if (err)
			goto out;
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		/* ... */
	}

	if (changed & IEEE80211_CONF_CHANGE_SMPS) {
		/* ... */
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		/* adjust slot time for 5 GHz */
		err = carl9170_set_slot_time(ar);
		if (err)
			goto out;

		err = carl9170_update_survey(ar, true, false);
		if (err)
			goto out;

		err = carl9170_set_channel(ar, hw->conf.channel,
					   hw->conf.channel_type,
					   CARL9170_RFI_NONE);
		if (err)
			goto out;

		err = carl9170_update_survey(ar, false, true);
		if (err)
			goto out;

		err = carl9170_set_dyn_sifs_ack(ar);
		if (err)
			goto out;

		err = carl9170_set_rts_cts_rate(ar);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&ar->mutex);
	return err;
}
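/*
 * The hardware filters multicast traffic with a 64-bit hash: each
 * address is reduced to the upper six bits of its last octet and the
 * corresponding bit is set in the mask. Bit (0xff >> 2) is always set
 * so that broadcast frames are never filtered out.
 */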
static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
					 struct netdev_hw_addr_list *mc_list)
{
	struct netdev_hw_addr *ha;
	u64 mchash;

	/* always get broadcast frames */
	mchash = 1ULL << (0xff >> 2);

	netdev_hw_addr_list_for_each(ha, mc_list)
		mchash |= 1ULL << (ha->addr[5] >> 2);

	return mchash;
}
static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
					 unsigned int changed_flags,
					 unsigned int *new_flags,
					 u64 multicast)
{
	struct ar9170 *ar = hw->priv;

	/* mask supported flags */
	*new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;

	if (!IS_ACCEPTING_CMD(ar))
		return;

	mutex_lock(&ar->mutex);

	ar->filter_state = *new_flags;
	/*
	 * We can support more by setting the sniffer bit and
	 * then checking the error flags, later.
	 */

	if (*new_flags & FIF_ALLMULTI)
		multicast = ~0ULL;

	if (multicast != ar->cur_mc_hash)
		WARN_ON(carl9170_update_multicast(ar, multicast));

	if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
		ar->sniffer_enabled = !!(*new_flags &
			(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));

		WARN_ON(carl9170_set_operating_mode(ar));
	}

	if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
		u32 rx_filter = 0;

		if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
			rx_filter |= CARL9170_RX_FILTER_BAD;

		if (!(*new_flags & FIF_CONTROL))
			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;

		if (!(*new_flags & FIF_PSPOLL))
			rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;

		if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
			rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
			rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
		}

		WARN_ON(carl9170_rx_filter(ar, rx_filter));
	}

	mutex_unlock(&ar->mutex);
}
static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 struct ieee80211_bss_conf *bss_conf,
					 u32 changed)
{
	struct ar9170 *ar = hw->priv;
	struct ath_common *common = &ar->common;
	struct carl9170_vif_info *vif_priv;
	struct ieee80211_vif *main_vif;
	int err = 0;

	mutex_lock(&ar->mutex);
	vif_priv = (void *) vif->drv_priv;
	main_vif = carl9170_get_main_vif(ar);
	if (WARN_ON(!main_vif))
		goto out;

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		struct carl9170_vif_info *iter;
		int i = 0;

		vif_priv->enable_beacon = bss_conf->enable_beacon;
		list_for_each_entry_rcu(iter, &ar->vif_list, list) {
			if (iter->active && iter->enable_beacon)
				i++;
		}

		ar->beacon_enabled = i;
	}

	if (changed & BSS_CHANGED_BEACON) {
		err = carl9170_update_beacon(ar, false);
		if (err)
			goto out;
	}

	if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
		       BSS_CHANGED_BEACON_INT)) {

		if (main_vif != vif) {
			bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
			bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
		}

		/*
		 * Therefore a hard limit for the broadcast traffic should
		 * prevent false alarms.
		 */
		if (vif->type != NL80211_IFTYPE_STATION &&
		    (bss_conf->beacon_int * bss_conf->dtim_period >=
		     (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
			err = -EINVAL;
			goto out;
		}

		err = carl9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_HT) {
		/* ... */
	}

	if (main_vif != vif)
		goto out;

	/*
	 * The following settings can only be changed by the
	 * main interface.
	 */

	if (changed & BSS_CHANGED_BSSID) {
		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
		err = carl9170_set_operating_mode(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_ASSOC) {
		ar->common.curaid = bss_conf->aid;
		err = carl9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		err = carl9170_set_slot_time(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_BASIC_RATES) {
		err = carl9170_set_mac_rates(ar);
		if (err)
			goto out;
	}

out:
	WARN_ON_ONCE(err && IS_STARTED(ar));
	mutex_unlock(&ar->mutex);
}
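/*
 * The 64-bit TSF is fetched with a firmware command: CARL9170_CMD_READ_TSF
 * returns a carl9170_tsf_rsp that carries the complete counter value in
 * one transfer.
 */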
static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_tsf_rsp tsf;
	int err;

	mutex_lock(&ar->mutex);
	err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
				0, NULL, sizeof(tsf), &tsf);
	mutex_unlock(&ar->mutex);
	if (WARN_ON(err))
		return 0;

	return le64_to_cpu(tsf.tsf_64);
}
static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       struct ieee80211_key_conf *key)
{
	struct ar9170 *ar = hw->priv;
	int err = 0, i;
	u8 ktype;

	if (ar->disable_offload || !vif)
		return -EOPNOTSUPP;

	/*
	 * We have to fall back to software encryption whenever
	 * the user chooses to participate in an IBSS or is connected
	 * to more than one network.
	 *
	 * This is very unfortunate, because some machines cannot handle
	 * the high throughput speed in 802.11n networks.
	 */

	if (!is_main_vif(ar, vif)) {
		mutex_lock(&ar->mutex);
		goto err_softw;
	}

	/*
	 * While the hardware does support a *catch-all* key for offloading
	 * group-key en-/decryption, the way it decides which keyId maps to
	 * which key remains a mystery...
	 */
	if ((vif->type != NL80211_IFTYPE_STATION &&
	     vif->type != NL80211_IFTYPE_ADHOC) &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		ktype = AR9170_ENC_ALG_WEP64;
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		ktype = AR9170_ENC_ALG_WEP128;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		ktype = AR9170_ENC_ALG_TKIP;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		ktype = AR9170_ENC_ALG_AESCCMP;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mutex_lock(&ar->mutex);
	if (cmd == SET_KEY) {
		if (!IS_STARTED(ar)) {
			err = -EOPNOTSUPP;
			goto out;
		}

		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
			/* group keys live in the slots after the user keys */
			i = 64 + key->keyidx;
		} else {
			for (i = 0; i < 64; i++)
				if (!(ar->usedkeys & BIT(i)))
					break;
		}

		key->hw_key_idx = i;

		err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
					  ktype, 0, key->key,
					  min_t(u8, 16, key->keylen));
		if (err)
			goto out;

		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
			err = carl9170_upload_key(ar, i, sta ? sta->addr :
						  NULL, ktype, 1,
						  key->key + 16, 16);
			if (err)
				goto out;

			/*
			 * The hardware is not capable of generating the MMIC
			 * for fragmented frames!
			 */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		}

		ar->usedkeys |= BIT(i);

		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	} else {
		if (!IS_STARTED(ar)) {
			/* The device is gone... together with the key ;-) */
			err = 0;
			goto out;
		}

		if (key->hw_key_idx < 64) {
			ar->usedkeys &= ~BIT(key->hw_key_idx);

			err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
						  AR9170_ENC_ALG_NONE, 0,
						  NULL, 0);
			if (err)
				goto out;

			if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
				err = carl9170_upload_key(ar, key->hw_key_idx,
							  NULL,
							  AR9170_ENC_ALG_NONE,
							  1, NULL, 0);
				if (err)
					goto out;
			}
		}

		err = carl9170_disable_key(ar, key->hw_key_idx);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&ar->mutex);
	return err;

err_softw:
	if (!ar->rx_software_decryption) {
		ar->rx_software_decryption = true;
		carl9170_set_operating_mode(ar);
	}
	mutex_unlock(&ar->mutex);
	return -ENOSPC;
}
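/*
 * Per-station A-MPDU TX limits are derived from the peer's HT
 * capabilities: a peer that requires more than 8 us of MPDU start
 * spacing (ampdu_density > 6) gets no HT transmission at all, and
 * ampdu_max_len is computed from the advertised ampdu_factor.
 */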
static int carl9170_op_sta_add(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta)
{
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	unsigned int i;

	atomic_set(&sta_info->pending_frames, 0);

	if (sta->ht_cap.ht_supported) {
		if (sta->ht_cap.ampdu_density > 6) {
			/*
			 * The HW does not support a 16us AMPDU density.
			 * No HT-Xmit for this station.
			 */

			return 0;
		}

		for (i = 0; i < CARL9170_NUM_TID; i++)
			rcu_assign_pointer(sta_info->agg[i], NULL);

		sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
		sta_info->ht_sta = true;
	}

	return 0;
}
*hw
,
1271 struct ieee80211_vif
*vif
,
1272 struct ieee80211_sta
*sta
)
1274 struct ar9170
*ar
= hw
->priv
;
1275 struct carl9170_sta_info
*sta_info
= (void *) sta
->drv_priv
;
1277 bool cleanup
= false;
1279 if (sta
->ht_cap
.ht_supported
) {
1281 sta_info
->ht_sta
= false;
1284 for (i
= 0; i
< CARL9170_NUM_TID
; i
++) {
1285 struct carl9170_sta_tid
*tid_info
;
1287 tid_info
= rcu_dereference(sta_info
->agg
[i
]);
1288 rcu_assign_pointer(sta_info
->agg
[i
], NULL
);
1293 spin_lock_bh(&ar
->tx_ampdu_list_lock
);
1294 if (tid_info
->state
> CARL9170_TID_STATE_SHUTDOWN
)
1295 tid_info
->state
= CARL9170_TID_STATE_SHUTDOWN
;
1296 spin_unlock_bh(&ar
->tx_ampdu_list_lock
);
1302 carl9170_ampdu_gc(ar
);
1308 static int carl9170_op_conf_tx(struct ieee80211_hw
*hw
, u16 queue
,
1309 const struct ieee80211_tx_queue_params
*param
)
1311 struct ar9170
*ar
= hw
->priv
;
1314 mutex_lock(&ar
->mutex
);
1315 if (queue
< ar
->hw
->queues
) {
1316 memcpy(&ar
->edcf
[ar9170_qmap
[queue
]], param
, sizeof(*param
));
1317 ret
= carl9170_set_qos(ar
);
1322 mutex_unlock(&ar
->mutex
);
static void carl9170_ampdu_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 ampdu_work);

	if (!IS_STARTED(ar))
		return;

	mutex_lock(&ar->mutex);
	carl9170_ampdu_gc(ar);
	mutex_unlock(&ar->mutex);
}
static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    enum ieee80211_ampdu_mlme_action action,
				    struct ieee80211_sta *sta,
				    u16 tid, u16 *ssn, u8 buf_size)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	struct carl9170_sta_tid *tid_info;

	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		if (!sta_info->ht_sta)
			return -EOPNOTSUPP;

		if (rcu_dereference(sta_info->agg[tid])) {
			/* a session for this TID already exists */
			return -EBUSY;
		}

		tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
				   GFP_ATOMIC);
		if (!tid_info)
			return -ENOMEM;

		tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
		tid_info->state = CARL9170_TID_STATE_PROGRESS;
		tid_info->tid = tid;
		tid_info->max = sta_info->ampdu_max_len;

		INIT_LIST_HEAD(&tid_info->list);
		INIT_LIST_HEAD(&tid_info->tmp_list);
		skb_queue_head_init(&tid_info->queue);
		spin_lock_init(&tid_info->lock);

		spin_lock_bh(&ar->tx_ampdu_list_lock);
		ar->tx_ampdu_list_len++;
		list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
		rcu_assign_pointer(sta_info->agg[tid], tid_info);
		spin_unlock_bh(&ar->tx_ampdu_list_lock);

		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	case IEEE80211_AMPDU_TX_STOP:
		tid_info = rcu_dereference(sta_info->agg[tid]);
		if (tid_info) {
			spin_lock_bh(&ar->tx_ampdu_list_lock);
			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
			spin_unlock_bh(&ar->tx_ampdu_list_lock);
		}

		rcu_assign_pointer(sta_info->agg[tid], NULL);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		ieee80211_queue_work(ar->hw, &ar->ampdu_work);
		break;

	case IEEE80211_AMPDU_TX_OPERATIONAL:
		tid_info = rcu_dereference(sta_info->agg[tid]);

		sta_info->stats[tid].clear = true;
		sta_info->stats[tid].req = false;

		if (tid_info) {
			bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
			tid_info->state = CARL9170_TID_STATE_IDLE;
		}

		if (WARN_ON_ONCE(!tid_info))
			return -EFAULT;

		break;

	case IEEE80211_AMPDU_RX_START:
	case IEEE80211_AMPDU_RX_STOP:
		/* Handled by hardware */
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
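/*
 * Some boards wire a WPS push-button to the device. It is exposed to
 * userspace as an ordinary input device that reports KEY_WPS_BUTTON
 * events.
 */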
#ifdef CONFIG_CARL9170_WPC
static int carl9170_register_wps_button(struct ar9170 *ar)
{
	struct input_dev *input;
	int err;

	if (!(ar->features & CARL9170_WPS_BUTTON))
		return 0;

	input = input_allocate_device();
	if (!input)
		return -ENOMEM;

	snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
		 wiphy_name(ar->hw->wiphy));

	snprintf(ar->wps.phys, sizeof(ar->wps.phys),
		 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));

	input->name = ar->wps.name;
	input->phys = ar->wps.phys;
	input->id.bustype = BUS_USB;
	input->dev.parent = &ar->hw->wiphy->dev;

	input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);

	err = input_register_device(input);
	if (err) {
		input_free_device(input);
		return err;
	}

	ar->wps.pbc = input;
	return 0;
}
#endif /* CONFIG_CARL9170_WPC */
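/*
 * The AR9170 exposes a hardware random number register
 * (AR9170_RAND_REG_NUM). The driver reads it in
 * CARL9170_MAX_CMD_PAYLOAD_LEN sized bursts over the command
 * interface, fills a small cache and exports the data through the
 * standard hwrng framework.
 */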
#ifdef CONFIG_CARL9170_HWRNG
static int carl9170_rng_get(struct ar9170 *ar)
{

#define RW	(CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
#define RB	(CARL9170_MAX_CMD_PAYLOAD_LEN)

	static const __le32 rng_load[RW] = {
		[0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};

	u32 buf[RW];
	unsigned int i, off = 0, transfer, count;
	int err;

	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);

	if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
		return -EAGAIN;

	count = ARRAY_SIZE(ar->rng.cache);
	while (count) {
		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
					RB, (u8 *) rng_load,
					RB, (u8 *) buf);
		if (err)
			return err;

		transfer = min_t(unsigned int, count, RW);
		for (i = 0; i < transfer; i++)
			ar->rng.cache[off + i] = buf[i];

		off += transfer;
		count -= transfer;
	}

	ar->rng.cache_idx = 0;

#undef RW
#undef RB
	return 0;
}

static int carl9170_rng_read(struct hwrng *rng, u32 *data)
{
	struct ar9170 *ar = (struct ar9170 *)rng->priv;
	int ret;

	mutex_lock(&ar->mutex);
	if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
		ret = carl9170_rng_get(ar);
		if (ret) {
			mutex_unlock(&ar->mutex);
			return ret;
		}
	}

	*data = ar->rng.cache[ar->rng.cache_idx++];
	mutex_unlock(&ar->mutex);

	return sizeof(u32);
}

static void carl9170_unregister_hwrng(struct ar9170 *ar)
{
	if (ar->rng.initialized) {
		hwrng_unregister(&ar->rng.rng);
		ar->rng.initialized = false;
	}
}

static int carl9170_register_hwrng(struct ar9170 *ar)
{
	int err;

	snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
		 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
	ar->rng.rng.name = ar->rng.name;
	ar->rng.rng.data_read = carl9170_rng_read;
	ar->rng.rng.priv = (unsigned long)ar;

	if (WARN_ON(ar->rng.initialized))
		return -EALREADY;

	err = hwrng_register(&ar->rng.rng);
	if (err) {
		dev_err(&ar->udev->dev, "Failed to register the random "
			"number generator (%d)\n", err);
		return err;
	}

	ar->rng.initialized = true;

	err = carl9170_rng_get(ar);
	if (err) {
		carl9170_unregister_hwrng(ar);
		return err;
	}

	return 0;
}
#endif /* CONFIG_CARL9170_HWRNG */
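/*
 * Survey data is assembled from the last noise-floor measurement and,
 * if the firmware provides hardware tally counters, the per-channel
 * active/busy/tx time collected by carl9170_update_survey().
 */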
static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
				  struct survey_info *survey)
{
	struct ar9170 *ar = hw->priv;
	struct ieee80211_channel *chan;
	struct ieee80211_supported_band *band;
	int err, b, i;

	chan = ar->channel;
	if (!chan)
		return -ENODEV;

	if (idx == chan->hw_value) {
		mutex_lock(&ar->mutex);
		err = carl9170_update_survey(ar, false, true);
		mutex_unlock(&ar->mutex);
		if (err)
			return err;
	}

	for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
		band = ar->hw->wiphy->bands[b];

		if (!band)
			continue;

		for (i = 0; i < band->n_channels; i++) {
			if (band->channels[i].hw_value == idx) {
				chan = &band->channels[i];
				goto found;
			}
		}
	}
	return -ENOENT;

found:
	memcpy(survey, &ar->survey[idx], sizeof(*survey));

	survey->channel = chan;
	survey->filled = SURVEY_INFO_NOISE_DBM;

	if (ar->channel == chan)
		survey->filled |= SURVEY_INFO_IN_USE;

	if (ar->fw.hw_counters) {
		survey->filled |= SURVEY_INFO_CHANNEL_TIME |
				  SURVEY_INFO_CHANNEL_TIME_BUSY |
				  SURVEY_INFO_CHANNEL_TIME_TX;
	}

	return 0;
}
static void carl9170_op_flush(struct ieee80211_hw *hw, bool drop)
{
	struct ar9170 *ar = hw->priv;
	unsigned int vid;

	mutex_lock(&ar->mutex);
	for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
		carl9170_flush_cab(ar, vid);

	carl9170_flush(ar, drop);
	mutex_unlock(&ar->mutex);
}
static int carl9170_op_get_stats(struct ieee80211_hw *hw,
				 struct ieee80211_low_level_stats *stats)
{
	struct ar9170 *ar = hw->priv;

	memset(stats, 0, sizeof(*stats));
	stats->dot11ACKFailureCount = ar->tx_ack_failures;
	stats->dot11FCSErrorCount = ar->tx_fcs_errors;

	return 0;
}
static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   enum sta_notify_cmd cmd,
				   struct ieee80211_sta *sta)
{
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;

	switch (cmd) {
	case STA_NOTIFY_SLEEP:
		sta_info->sleeping = true;
		if (atomic_read(&sta_info->pending_frames))
			ieee80211_sta_block_awake(hw, sta, true);
		break;

	case STA_NOTIFY_AWAKE:
		sta_info->sleeping = false;
		break;
	}
}
static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;

	return !!atomic_read(&ar->tx_total_queued);
}
static const struct ieee80211_ops carl9170_ops = {
	.start			= carl9170_op_start,
	.stop			= carl9170_op_stop,
	.tx			= carl9170_op_tx,
	.flush			= carl9170_op_flush,
	.add_interface		= carl9170_op_add_interface,
	.remove_interface	= carl9170_op_remove_interface,
	.config			= carl9170_op_config,
	.prepare_multicast	= carl9170_op_prepare_multicast,
	.configure_filter	= carl9170_op_configure_filter,
	.conf_tx		= carl9170_op_conf_tx,
	.bss_info_changed	= carl9170_op_bss_info_changed,
	.get_tsf		= carl9170_op_get_tsf,
	.set_key		= carl9170_op_set_key,
	.sta_add		= carl9170_op_sta_add,
	.sta_remove		= carl9170_op_sta_remove,
	.sta_notify		= carl9170_op_sta_notify,
	.get_survey		= carl9170_op_get_survey,
	.get_stats		= carl9170_op_get_stats,
	.ampdu_action		= carl9170_op_ampdu_action,
	.tx_frames_pending	= carl9170_tx_frames_pending,
};
void *carl9170_alloc(size_t priv_size)
{
	struct ieee80211_hw *hw;
	struct ar9170 *ar;
	struct sk_buff *skb;
	int i;

	/*
	 * this buffer is used for rx stream reconstruction.
	 * Under heavy load this device (or the transport layer?)
	 * tends to split the streams into separate rx descriptors.
	 */

	skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
	if (!skb)
		goto err_nomem;

	hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
	if (!hw)
		goto err_nomem;

	ar = hw->priv;
	ar->hw = hw;
	ar->rx_failover = skb;

	memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
	ar->rx_has_plcp = false;

	/*
	 * Here's a hidden pitfall!
	 *
	 * All 4 AC queues work perfectly well under _legacy_ operation.
	 * However as soon as aggregation is enabled, the traffic flow
	 * gets very bumpy. Therefore we have to _switch_ to a
	 * software AC with a single HW queue.
	 */
	hw->queues = __AR9170_NUM_TXQ;

	mutex_init(&ar->mutex);
	spin_lock_init(&ar->beacon_lock);
	spin_lock_init(&ar->cmd_lock);
	spin_lock_init(&ar->tx_stats_lock);
	spin_lock_init(&ar->tx_ampdu_list_lock);
	spin_lock_init(&ar->mem_lock);
	spin_lock_init(&ar->state_lock);
	atomic_set(&ar->pending_restarts, 0);

	for (i = 0; i < ar->hw->queues; i++) {
		skb_queue_head_init(&ar->tx_status[i]);
		skb_queue_head_init(&ar->tx_pending[i]);
	}

	INIT_WORK(&ar->ps_work, carl9170_ps_work);
	INIT_WORK(&ar->ping_work, carl9170_ping_work);
	INIT_WORK(&ar->restart_work, carl9170_restart_work);
	INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
	INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
	INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
	INIT_LIST_HEAD(&ar->tx_ampdu_list);
	rcu_assign_pointer(ar->tx_ampdu_iter,
			   (struct carl9170_sta_tid *) &ar->tx_ampdu_list);

	bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
	INIT_LIST_HEAD(&ar->vif_list);
	init_completion(&ar->tx_flush);

	/* firmware decides which modes we support */
	hw->wiphy->interface_modes = 0;

	hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
		     IEEE80211_HW_REPORTS_TX_ACK_STATUS |
		     IEEE80211_HW_SUPPORTS_PS |
		     IEEE80211_HW_PS_NULLFUNC_STACK |
		     IEEE80211_HW_NEED_DTIM_PERIOD |
		     IEEE80211_HW_SIGNAL_DBM;

	if (!modparam_noht) {
		/*
		 * see the comment above, why we allow the user
		 * to disable HT by a module parameter.
		 */
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
	}

	hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
	hw->sta_data_size = sizeof(struct carl9170_sta_info);
	hw->vif_data_size = sizeof(struct carl9170_vif_info);

	hw->max_rates = CARL9170_TX_MAX_RATES;
	hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;

	for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
		ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */

	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	return ar;

err_nomem:
	kfree_skb(skb);
	return ERR_PTR(-ENOMEM);
}
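/*
 * The EEPROM is fetched through the firmware's register-read command:
 * each CARL9170_CMD_RREG transfer returns RW consecutive 32-bit words,
 * so ar->eeprom is filled in RB-byte chunks starting at
 * AR9170_EEPROM_START.
 */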
static int carl9170_read_eeprom(struct ar9170 *ar)
{
#define RW	8	/* number of words to read at once */
#define RB	(sizeof(u32) * RW)
	u8 *eeprom = (void *)&ar->eeprom;
	__le32 offsets[RW];
	int i, j, err;

	BUILD_BUG_ON(sizeof(ar->eeprom) & 3);

	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);

	/* don't want to handle trailing remains */
	BUILD_BUG_ON(sizeof(ar->eeprom) % RB);

	for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
		for (j = 0; j < RW; j++)
			offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
						 RB * i + 4 * j);

		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
					RB, (u8 *) &offsets,
					RB, eeprom + RB * i);
		if (err)
			return err;
	}

#undef RW
#undef RB
	return 0;
}
static int carl9170_parse_eeprom(struct ar9170 *ar)
{
	struct ath_regulatory *regulatory = &ar->common.regulatory;
	unsigned int rx_streams, tx_streams, tx_params = 0;
	int bands = 0;
	int chans = 0;

	if (ar->eeprom.length == cpu_to_le16(0xffff))
		return -ENODATA;

	rx_streams = hweight8(ar->eeprom.rx_mask);
	tx_streams = hweight8(ar->eeprom.tx_mask);

	if (rx_streams != tx_streams) {
		tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;

		WARN_ON(!(tx_streams >= 1 && tx_streams <=
			  IEEE80211_HT_MCS_TX_MAX_STREAMS));

		tx_params |= (tx_streams - 1) <<
			     IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;

		carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
		carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
	}

	if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
		ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&carl9170_band_2GHz;
		chans += carl9170_band_2GHz.n_channels;
		bands++;
	}

	if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
		ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&carl9170_band_5GHz;
		chans += carl9170_band_5GHz.n_channels;
		bands++;
	}

	ar->survey = kzalloc(sizeof(struct survey_info) * chans, GFP_KERNEL);
	if (!ar->survey)
		return -ENOMEM;
	ar->num_channels = chans;

	/*
	 * I measured this, a bandswitch takes roughly
	 * 135 ms and a frequency switch about 80.
	 *
	 * FIXME: measure these values again once EEPROM settings
	 * are used, that will influence them!
	 */
	if (bands == 2)
		ar->hw->channel_change_time = 135 * 1000;
	else
		ar->hw->channel_change_time = 80 * 1000;

	regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
	regulatory->current_rd_ext = le16_to_cpu(ar->eeprom.reg_domain[1]);

	/* second part of wiphy init */
	SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);

	return 0;
}
static int carl9170_reg_notifier(struct wiphy *wiphy,
				 struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ar9170 *ar = hw->priv;

	return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
}
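/*
 * carl9170_register() runs once the firmware has been uploaded: it
 * reads and parses the EEPROM, sets up the regulatory state, registers
 * the mac80211 device and finally the optional LED, WPS-button and
 * hwrng sub-devices.
 */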
int carl9170_register(struct ar9170 *ar)
{
	struct ath_regulatory *regulatory = &ar->common.regulatory;
	int err = 0, i;

	if (WARN_ON(ar->mem_bitmap))
		return -EINVAL;

	ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) *
				 sizeof(unsigned long), GFP_KERNEL);

	if (!ar->mem_bitmap)
		return -ENOMEM;

	/* try to read EEPROM, init MAC addr */
	err = carl9170_read_eeprom(ar);
	if (err)
		return err;

	err = carl9170_fw_fix_eeprom(ar);
	if (err)
		return err;

	err = carl9170_parse_eeprom(ar);
	if (err)
		return err;

	err = ath_regd_init(regulatory, ar->hw->wiphy,
			    carl9170_reg_notifier);
	if (err)
		return err;

	if (modparam_noht) {
		carl9170_band_2GHz.ht_cap.ht_supported = false;
		carl9170_band_5GHz.ht_cap.ht_supported = false;
	}

	for (i = 0; i < ar->fw.vif_num; i++) {
		ar->vif_priv[i].id = i;
		ar->vif_priv[i].vif = NULL;
	}

	err = ieee80211_register_hw(ar->hw);
	if (err)
		return err;

	/* mac80211 interface is now registered */
	ar->registered = true;

	if (!ath_is_world_regd(regulatory))
		regulatory_hint(ar->hw->wiphy, regulatory->alpha2);

#ifdef CONFIG_CARL9170_DEBUGFS
	carl9170_debugfs_register(ar);
#endif /* CONFIG_CARL9170_DEBUGFS */

	err = carl9170_led_init(ar);
	if (err)
		goto err_unreg;

#ifdef CONFIG_CARL9170_LEDS
	err = carl9170_led_register(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_CARL9170_LEDS */

#ifdef CONFIG_CARL9170_WPC
	err = carl9170_register_wps_button(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_CARL9170_WPC */

#ifdef CONFIG_CARL9170_HWRNG
	err = carl9170_register_hwrng(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_CARL9170_HWRNG */

	dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
		 wiphy_name(ar->hw->wiphy));

	return 0;

err_unreg:
	carl9170_unregister(ar);
	return err;
}
void carl9170_unregister(struct ar9170 *ar)
{
	if (!ar->registered)
		return;

	ar->registered = false;

#ifdef CONFIG_CARL9170_LEDS
	carl9170_led_unregister(ar);
#endif /* CONFIG_CARL9170_LEDS */

#ifdef CONFIG_CARL9170_DEBUGFS
	carl9170_debugfs_unregister(ar);
#endif /* CONFIG_CARL9170_DEBUGFS */

#ifdef CONFIG_CARL9170_WPC
	if (ar->wps.pbc) {
		input_unregister_device(ar->wps.pbc);
		ar->wps.pbc = NULL;
	}
#endif /* CONFIG_CARL9170_WPC */

#ifdef CONFIG_CARL9170_HWRNG
	carl9170_unregister_hwrng(ar);
#endif /* CONFIG_CARL9170_HWRNG */

	carl9170_cancel_worker(ar);
	cancel_work_sync(&ar->restart_work);

	ieee80211_unregister_hw(ar->hw);
}
void carl9170_free(struct ar9170 *ar)
{
	WARN_ON(ar->registered);
	WARN_ON(IS_INITIALIZED(ar));

	kfree_skb(ar->rx_failover);
	ar->rx_failover = NULL;

	kfree(ar->mem_bitmap);
	ar->mem_bitmap = NULL;

	kfree(ar->survey);
	ar->survey = NULL;

	mutex_destroy(&ar->mutex);

	ieee80211_free_hw(ar->hw);
}