mac80211: add ieee80211_vif param to tsf functions
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] drivers/net/wireless/ath/carl9170/main.c
blob 8b780d6d470f4f4cfb9315382e82811d8cb5c3db
1 /*
2 * Atheros CARL9170 driver
4 * mac80211 interaction code
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
40 #include <linux/init.h>
41 #include <linux/slab.h>
42 #include <linux/module.h>
43 #include <linux/etherdevice.h>
44 #include <linux/random.h>
45 #include <net/mac80211.h>
46 #include <net/cfg80211.h>
47 #include "hw.h"
48 #include "carl9170.h"
49 #include "cmd.h"
51 static bool modparam_nohwcrypt;
52 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
53 MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
55 int modparam_noht;
56 module_param_named(noht, modparam_noht, int, S_IRUGO);
57 MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
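/*
 * Note: the hw_value below packs the hardware rate code into the low
 * nibble and the _txpidx value into bits 4 and up.
 */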
59 #define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
60 .bitrate = (_bitrate), \
61 .flags = (_flags), \
62 .hw_value = (_hw_rate) | (_txpidx) << 4, \
65 struct ieee80211_rate __carl9170_ratetable[] = {
66 RATE(10, 0, 0, 0),
67 RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
68 RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
69 RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
70 RATE(60, 0xb, 0, 0),
71 RATE(90, 0xf, 0, 0),
72 RATE(120, 0xa, 0, 0),
73 RATE(180, 0xe, 0, 0),
74 RATE(240, 0x9, 0, 0),
75 RATE(360, 0xd, 1, 0),
76 RATE(480, 0x8, 2, 0),
77 RATE(540, 0xc, 3, 0),
79 #undef RATE
81 #define carl9170_g_ratetable (__carl9170_ratetable + 0)
82 #define carl9170_g_ratetable_size 12
83 #define carl9170_a_ratetable (__carl9170_ratetable + 4)
84 #define carl9170_a_ratetable_size 8
87 * NB: The hw_value is used as an index into the carl9170_phy_freq_params
88 * array in phy.c so that we don't have to do frequency lookups!
90 #define CHAN(_freq, _idx) { \
91 .center_freq = (_freq), \
92 .hw_value = (_idx), \
93 .max_power = 18, /* XXX */ \
96 static struct ieee80211_channel carl9170_2ghz_chantable[] = {
97 CHAN(2412, 0),
98 CHAN(2417, 1),
99 CHAN(2422, 2),
100 CHAN(2427, 3),
101 CHAN(2432, 4),
102 CHAN(2437, 5),
103 CHAN(2442, 6),
104 CHAN(2447, 7),
105 CHAN(2452, 8),
106 CHAN(2457, 9),
107 CHAN(2462, 10),
108 CHAN(2467, 11),
109 CHAN(2472, 12),
110 CHAN(2484, 13),
113 static struct ieee80211_channel carl9170_5ghz_chantable[] = {
114 CHAN(4920, 14),
115 CHAN(4940, 15),
116 CHAN(4960, 16),
117 CHAN(4980, 17),
118 CHAN(5040, 18),
119 CHAN(5060, 19),
120 CHAN(5080, 20),
121 CHAN(5180, 21),
122 CHAN(5200, 22),
123 CHAN(5220, 23),
124 CHAN(5240, 24),
125 CHAN(5260, 25),
126 CHAN(5280, 26),
127 CHAN(5300, 27),
128 CHAN(5320, 28),
129 CHAN(5500, 29),
130 CHAN(5520, 30),
131 CHAN(5540, 31),
132 CHAN(5560, 32),
133 CHAN(5580, 33),
134 CHAN(5600, 34),
135 CHAN(5620, 35),
136 CHAN(5640, 36),
137 CHAN(5660, 37),
138 CHAN(5680, 38),
139 CHAN(5700, 39),
140 CHAN(5745, 40),
141 CHAN(5765, 41),
142 CHAN(5785, 42),
143 CHAN(5805, 43),
144 CHAN(5825, 44),
145 CHAN(5170, 45),
146 CHAN(5190, 46),
147 CHAN(5210, 47),
148 CHAN(5230, 48),
150 #undef CHAN
152 #define CARL9170_HT_CAP \
154 .ht_supported = true, \
155 .cap = IEEE80211_HT_CAP_MAX_AMSDU | \
156 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
157 IEEE80211_HT_CAP_SGI_40 | \
158 IEEE80211_HT_CAP_DSSSCCK40 | \
159 IEEE80211_HT_CAP_SM_PS, \
160 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, \
161 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
162 .mcs = { \
163 .rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \
164 .rx_highest = cpu_to_le16(300), \
165 .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
166 }, \
169 static struct ieee80211_supported_band carl9170_band_2GHz = {
170 .channels = carl9170_2ghz_chantable,
171 .n_channels = ARRAY_SIZE(carl9170_2ghz_chantable),
172 .bitrates = carl9170_g_ratetable,
173 .n_bitrates = carl9170_g_ratetable_size,
174 .ht_cap = CARL9170_HT_CAP,
177 static struct ieee80211_supported_band carl9170_band_5GHz = {
178 .channels = carl9170_5ghz_chantable,
179 .n_channels = ARRAY_SIZE(carl9170_5ghz_chantable),
180 .bitrates = carl9170_a_ratetable,
181 .n_bitrates = carl9170_a_ratetable_size,
182 .ht_cap = CARL9170_HT_CAP,
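/*
 * Garbage-collect TX A-MPDU sessions that have reached the SHUTDOWN
 * state: unlink them from tx_ampdu_list under the list lock, wait for
 * an RCU grace period, then drop their queued frames and free them.
 */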
185 static void carl9170_ampdu_gc(struct ar9170 *ar)
187 struct carl9170_sta_tid *tid_info;
188 LIST_HEAD(tid_gc);
190 rcu_read_lock();
191 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
192 spin_lock_bh(&ar->tx_ampdu_list_lock);
193 if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
194 tid_info->state = CARL9170_TID_STATE_KILLED;
195 list_del_rcu(&tid_info->list);
196 ar->tx_ampdu_list_len--;
197 list_add_tail(&tid_info->tmp_list, &tid_gc);
199 spin_unlock_bh(&ar->tx_ampdu_list_lock);
202 rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
203 rcu_read_unlock();
205 synchronize_rcu();
207 while (!list_empty(&tid_gc)) {
208 struct sk_buff *skb;
209 tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
210 tmp_list);
212 while ((skb = __skb_dequeue(&tid_info->queue)))
213 carl9170_tx_status(ar, skb, false);
215 list_del_init(&tid_info->tmp_list);
216 kfree(tid_info);
220 static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
222 if (drop_queued) {
223 int i;
226 * We can only drop frames which have not been uploaded
227 * to the device yet.
230 for (i = 0; i < ar->hw->queues; i++) {
231 struct sk_buff *skb;
233 while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
234 struct ieee80211_tx_info *info;
236 info = IEEE80211_SKB_CB(skb);
237 if (info->flags & IEEE80211_TX_CTL_AMPDU)
238 atomic_dec(&ar->tx_ampdu_upload);
240 carl9170_tx_status(ar, skb, false);
245 /* Wait for all other outstanding frames to timeout. */
246 if (atomic_read(&ar->tx_total_queued))
247 WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
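/*
 * Suspend every active TX A-MPDU session and report all of their
 * still-queued frames as dropped.
 */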
250 static void carl9170_flush_ba(struct ar9170 *ar)
252 struct sk_buff_head free;
253 struct carl9170_sta_tid *tid_info;
254 struct sk_buff *skb;
256 __skb_queue_head_init(&free);
258 rcu_read_lock();
259 spin_lock_bh(&ar->tx_ampdu_list_lock);
260 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
261 if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
262 tid_info->state = CARL9170_TID_STATE_SUSPEND;
264 spin_lock(&tid_info->lock);
265 while ((skb = __skb_dequeue(&tid_info->queue)))
266 __skb_queue_tail(&free, skb);
267 spin_unlock(&tid_info->lock);
270 spin_unlock_bh(&ar->tx_ampdu_list_lock);
271 rcu_read_unlock();
273 while ((skb = __skb_dequeue(&free)))
274 carl9170_tx_status(ar, skb, false);
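/*
 * Bring the transmit path back to a clean slate: collect dead A-MPDU
 * sessions, drop pending and queued frames, and reset the per-queue
 * statistics, the firmware memory bitmap and the cached beacons.
 */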
277 static void carl9170_zap_queues(struct ar9170 *ar)
279 struct carl9170_vif_info *cvif;
280 unsigned int i;
282 carl9170_ampdu_gc(ar);
284 carl9170_flush_ba(ar);
285 carl9170_flush(ar, true);
287 for (i = 0; i < ar->hw->queues; i++) {
288 spin_lock_bh(&ar->tx_status[i].lock);
289 while (!skb_queue_empty(&ar->tx_status[i])) {
290 struct sk_buff *skb;
292 skb = skb_peek(&ar->tx_status[i]);
293 carl9170_tx_get_skb(skb);
294 spin_unlock_bh(&ar->tx_status[i].lock);
295 carl9170_tx_drop(ar, skb);
296 spin_lock_bh(&ar->tx_status[i].lock);
297 carl9170_tx_put_skb(skb);
299 spin_unlock_bh(&ar->tx_status[i].lock);
302 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
303 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
304 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);
306 /* reinitialize queue statistics */
307 memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
308 for (i = 0; i < ar->hw->queues; i++)
309 ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;
311 for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
312 ar->mem_bitmap[i] = 0;
314 rcu_read_lock();
315 list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
316 spin_lock_bh(&ar->beacon_lock);
317 dev_kfree_skb_any(cvif->beacon);
318 cvif->beacon = NULL;
319 spin_unlock_bh(&ar->beacon_lock);
321 rcu_read_unlock();
323 atomic_set(&ar->tx_ampdu_upload, 0);
324 atomic_set(&ar->tx_ampdu_scheduler, 0);
325 atomic_set(&ar->tx_total_pending, 0);
326 atomic_set(&ar->tx_total_queued, 0);
327 atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
330 #define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
331 do { \
332 queue.aifs = ai_fs; \
333 queue.cw_min = cwmin; \
334 queue.cw_max = cwmax; \
335 queue.txop = _txop; \
336 } while (0)
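/*
 * mac80211 start() callback: reset the driver state and QoS defaults,
 * open the USB transport, program the MAC, clear the key-cache and
 * kick off the periodic statistics worker.
 */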
338 static int carl9170_op_start(struct ieee80211_hw *hw)
340 struct ar9170 *ar = hw->priv;
341 int err, i;
343 mutex_lock(&ar->mutex);
345 carl9170_zap_queues(ar);
347 /* reset QoS defaults */
348 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3, 7, 47);
349 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7, 15, 94);
350 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023, 0);
351 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023, 0);
352 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);
354 ar->current_factor = ar->current_density = -1;
355 /* "The first key is unique." */
356 ar->usedkeys = 1;
357 ar->filter_state = 0;
358 ar->ps.last_action = jiffies;
359 ar->ps.last_slept = jiffies;
360 ar->erp_mode = CARL9170_ERP_AUTO;
361 ar->rx_software_decryption = false;
362 ar->disable_offload = false;
364 for (i = 0; i < ar->hw->queues; i++) {
365 ar->queue_stop_timeout[i] = jiffies;
366 ar->max_queue_stop_timeout[i] = 0;
369 atomic_set(&ar->mem_allocs, 0);
371 err = carl9170_usb_open(ar);
372 if (err)
373 goto out;
375 err = carl9170_init_mac(ar);
376 if (err)
377 goto out;
379 err = carl9170_set_qos(ar);
380 if (err)
381 goto out;
383 if (ar->fw.rx_filter) {
384 err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
385 CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
386 if (err)
387 goto out;
390 err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
391 AR9170_DMA_TRIGGER_RXQ);
392 if (err)
393 goto out;
395 /* Clear key-cache */
396 for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
397 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
398 0, NULL, 0);
399 if (err)
400 goto out;
402 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
403 1, NULL, 0);
404 if (err)
405 goto out;
407 if (i < AR9170_CAM_MAX_USER) {
408 err = carl9170_disable_key(ar, i);
409 if (err)
410 goto out;
414 carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);
416 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
417 round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
419 ieee80211_wake_queues(ar->hw);
420 err = 0;
422 out:
423 mutex_unlock(&ar->mutex);
424 return err;
427 static void carl9170_cancel_worker(struct ar9170 *ar)
429 cancel_delayed_work_sync(&ar->stat_work);
430 cancel_delayed_work_sync(&ar->tx_janitor);
431 #ifdef CONFIG_CARL9170_LEDS
432 cancel_delayed_work_sync(&ar->led_work);
433 #endif /* CONFIG_CARL9170_LEDS */
434 cancel_work_sync(&ar->ps_work);
435 cancel_work_sync(&ar->ping_work);
436 cancel_work_sync(&ar->ampdu_work);
439 static void carl9170_op_stop(struct ieee80211_hw *hw)
441 struct ar9170 *ar = hw->priv;
443 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
445 ieee80211_stop_queues(ar->hw);
447 mutex_lock(&ar->mutex);
448 if (IS_ACCEPTING_CMD(ar)) {
449 rcu_assign_pointer(ar->beacon_iter, NULL);
451 carl9170_led_set_state(ar, 0);
453 /* stop DMA */
454 carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
455 carl9170_usb_stop(ar);
458 carl9170_zap_queues(ar);
459 mutex_unlock(&ar->mutex);
461 carl9170_cancel_worker(ar);
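/*
 * Deferred device restart: try a USB-level restart first; if that
 * succeeds, hand control back to mac80211 via ieee80211_restart_hw(),
 * otherwise fall back to a low-level USB reset.
 */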
464 static void carl9170_restart_work(struct work_struct *work)
466 struct ar9170 *ar = container_of(work, struct ar9170,
467 restart_work);
468 int err;
470 ar->usedkeys = 0;
471 ar->filter_state = 0;
472 carl9170_cancel_worker(ar);
474 mutex_lock(&ar->mutex);
475 err = carl9170_usb_restart(ar);
476 if (net_ratelimit()) {
477 if (err) {
477 if (err) {
478 dev_err(&ar->udev->dev, "Failed to restart device "
479 "(%d).\n", err);
480 } else {
481 dev_info(&ar->udev->dev, "device restarted "
482 "successfully.\n");
486 carl9170_zap_queues(ar);
487 mutex_unlock(&ar->mutex);
488 if (!err) {
489 ar->restart_counter++;
490 atomic_set(&ar->pending_restarts, 0);
492 ieee80211_restart_hw(ar->hw);
493 } else {
495 * The reset was unsuccessful and the device seems to
496 * be dead. But there's still one option: a low-level
497 * usb subsystem reset...
500 carl9170_usb_reset(ar);
504 void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
506 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
509 * Sometimes, an error can trigger several different reset events.
510 * By ignoring these *surplus* reset events, the device won't be
511 * killed again, right after it has recovered.
513 if (atomic_inc_return(&ar->pending_restarts) > 1) {
514 dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
515 return;
518 ieee80211_stop_queues(ar->hw);
520 dev_err(&ar->udev->dev, "restart device (%d)\n", r);
522 if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
523 !WARN_ON(r >= __CARL9170_RR_LAST))
524 ar->last_reason = r;
526 if (!ar->registered)
527 return;
529 if (IS_ACCEPTING_CMD(ar) && !ar->needs_full_reset)
530 ieee80211_queue_work(ar->hw, &ar->restart_work);
531 else
532 carl9170_usb_reset(ar);
535 * At this point, the device instance might have vanished or been disabled.
536 * So don't put any code here that accesses the ar9170 struct
537 * without proper protection.
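/*
 * Liveness check: send an echo command to the firmware and trigger a
 * restart if the device does not answer.
 */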
541 static void carl9170_ping_work(struct work_struct *work)
543 struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
544 int err;
546 if (!IS_STARTED(ar))
547 return;
549 mutex_lock(&ar->mutex);
550 err = carl9170_echo_test(ar, 0xdeadbeef);
551 if (err)
552 carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
553 mutex_unlock(&ar->mutex);
556 static int carl9170_init_interface(struct ar9170 *ar,
557 struct ieee80211_vif *vif)
559 struct ath_common *common = &ar->common;
560 int err;
562 if (!vif) {
563 WARN_ON_ONCE(IS_STARTED(ar));
564 return 0;
567 memcpy(common->macaddr, vif->addr, ETH_ALEN);
569 if (modparam_nohwcrypt ||
570 ((vif->type != NL80211_IFTYPE_STATION) &&
571 (vif->type != NL80211_IFTYPE_AP))) {
572 ar->rx_software_decryption = true;
573 ar->disable_offload = true;
576 err = carl9170_set_operating_mode(ar);
577 return err;
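/*
 * add_interface() callback. The acceptable interface combinations
 * depend on the current main vif: a station main vif only tolerates
 * additional station interfaces, while an AP main vif tolerates
 * station, WDS and AP interfaces.
 */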
580 static int carl9170_op_add_interface(struct ieee80211_hw *hw,
581 struct ieee80211_vif *vif)
583 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
584 struct ieee80211_vif *main_vif;
585 struct ar9170 *ar = hw->priv;
586 int vif_id = -1, err = 0;
588 mutex_lock(&ar->mutex);
589 rcu_read_lock();
590 if (vif_priv->active) {
592 * Skip the interface structure initialization
593 * if the vif survived the _restart call.
595 vif_id = vif_priv->id;
596 vif_priv->enable_beacon = false;
598 spin_lock_bh(&ar->beacon_lock);
599 dev_kfree_skb_any(vif_priv->beacon);
600 vif_priv->beacon = NULL;
601 spin_unlock_bh(&ar->beacon_lock);
603 goto init;
606 main_vif = carl9170_get_main_vif(ar);
608 if (main_vif) {
609 switch (main_vif->type) {
610 case NL80211_IFTYPE_STATION:
611 if (vif->type == NL80211_IFTYPE_STATION)
612 break;
614 err = -EBUSY;
615 rcu_read_unlock();
617 goto unlock;
619 case NL80211_IFTYPE_AP:
620 if ((vif->type == NL80211_IFTYPE_STATION) ||
621 (vif->type == NL80211_IFTYPE_WDS) ||
622 (vif->type == NL80211_IFTYPE_AP))
623 break;
625 err = -EBUSY;
626 rcu_read_unlock();
627 goto unlock;
629 default:
630 rcu_read_unlock();
631 goto unlock;
635 vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);
637 if (vif_id < 0) {
638 rcu_read_unlock();
640 err = -ENOSPC;
641 goto unlock;
644 BUG_ON(ar->vif_priv[vif_id].id != vif_id);
646 vif_priv->active = true;
647 vif_priv->id = vif_id;
648 vif_priv->enable_beacon = false;
649 ar->vifs++;
650 list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
651 rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);
653 init:
654 if (carl9170_get_main_vif(ar) == vif) {
655 rcu_assign_pointer(ar->beacon_iter, vif_priv);
656 rcu_read_unlock();
658 err = carl9170_init_interface(ar, vif);
659 if (err)
660 goto unlock;
661 } else {
662 rcu_read_unlock();
663 err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
665 if (err)
666 goto unlock;
669 if (ar->fw.tx_seq_table) {
670 err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
672 if (err)
673 goto unlock;
676 unlock:
677 if (err && (vif_id >= 0)) {
678 vif_priv->active = false;
679 bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
680 ar->vifs--;
681 rcu_assign_pointer(ar->vif_priv[vif_id].vif, NULL);
682 list_del_rcu(&vif_priv->list);
683 mutex_unlock(&ar->mutex);
684 synchronize_rcu();
685 } else {
686 if (ar->vifs > 1)
687 ar->ps.off_override |= PS_OFF_VIF;
689 mutex_unlock(&ar->mutex);
692 return err;
695 static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
696 struct ieee80211_vif *vif)
698 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
699 struct ieee80211_vif *main_vif;
700 struct ar9170 *ar = hw->priv;
701 unsigned int id;
703 mutex_lock(&ar->mutex);
705 if (WARN_ON_ONCE(!vif_priv->active))
706 goto unlock;
708 ar->vifs--;
710 rcu_read_lock();
711 main_vif = carl9170_get_main_vif(ar);
713 id = vif_priv->id;
715 vif_priv->active = false;
716 WARN_ON(vif_priv->enable_beacon);
717 vif_priv->enable_beacon = false;
718 list_del_rcu(&vif_priv->list);
719 rcu_assign_pointer(ar->vif_priv[id].vif, NULL);
721 if (vif == main_vif) {
722 rcu_read_unlock();
724 if (ar->vifs) {
725 WARN_ON(carl9170_init_interface(ar,
726 carl9170_get_main_vif(ar)));
727 } else {
728 carl9170_set_operating_mode(ar);
730 } else {
731 rcu_read_unlock();
733 WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
736 carl9170_update_beacon(ar, false);
737 carl9170_flush_cab(ar, id);
739 spin_lock_bh(&ar->beacon_lock);
740 dev_kfree_skb_any(vif_priv->beacon);
741 vif_priv->beacon = NULL;
742 spin_unlock_bh(&ar->beacon_lock);
744 bitmap_release_region(&ar->vif_bitmap, id, 0);
746 carl9170_set_beacon_timers(ar);
748 if (ar->vifs == 1)
749 ar->ps.off_override &= ~PS_OFF_VIF;
751 unlock:
752 mutex_unlock(&ar->mutex);
754 synchronize_rcu();
757 void carl9170_ps_check(struct ar9170 *ar)
759 ieee80211_queue_work(ar->hw, &ar->ps_work);
762 /* caller must hold ar->mutex */
763 static int carl9170_ps_update(struct ar9170 *ar)
765 bool ps = false;
766 int err = 0;
768 if (!ar->ps.off_override)
769 ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);
771 if (ps != ar->ps.state) {
772 err = carl9170_powersave(ar, ps);
773 if (err)
774 return err;
776 if (ar->ps.state && !ps) {
777 ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
778 ar->ps.last_action);
781 if (ps)
782 ar->ps.last_slept = jiffies;
784 ar->ps.last_action = jiffies;
785 ar->ps.state = ps;
788 return 0;
791 static void carl9170_ps_work(struct work_struct *work)
793 struct ar9170 *ar = container_of(work, struct ar9170,
794 ps_work);
795 mutex_lock(&ar->mutex);
796 if (IS_STARTED(ar))
797 WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
798 mutex_unlock(&ar->mutex);
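/*
 * Refresh the survey data: re-read the noise floor (if requested) and,
 * when the firmware provides hardware counters, collect the channel
 * tally; "flush" discards the accumulated tally afterwards.
 */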
801 static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
803 int err;
805 if (noise) {
806 err = carl9170_get_noisefloor(ar);
807 if (err)
808 return err;
811 if (ar->fw.hw_counters) {
812 err = carl9170_collect_tally(ar);
813 if (err)
814 return err;
817 if (flush)
818 memset(&ar->tally, 0, sizeof(ar->tally));
820 return 0;
823 static void carl9170_stat_work(struct work_struct *work)
825 struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
826 int err;
828 mutex_lock(&ar->mutex);
829 err = carl9170_update_survey(ar, false, true);
830 mutex_unlock(&ar->mutex);
832 if (err)
833 return;
835 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
836 round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
839 static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
841 struct ar9170 *ar = hw->priv;
842 int err = 0;
844 mutex_lock(&ar->mutex);
845 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
846 /* TODO */
847 err = 0;
850 if (changed & IEEE80211_CONF_CHANGE_PS) {
851 err = carl9170_ps_update(ar);
852 if (err)
853 goto out;
856 if (changed & IEEE80211_CONF_CHANGE_POWER) {
857 /* TODO */
858 err = 0;
861 if (changed & IEEE80211_CONF_CHANGE_SMPS) {
862 /* TODO */
863 err = 0;
866 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
867 /* adjust slot time for 5 GHz */
868 err = carl9170_set_slot_time(ar);
869 if (err)
870 goto out;
872 err = carl9170_update_survey(ar, true, false);
873 if (err)
874 goto out;
876 err = carl9170_set_channel(ar, hw->conf.channel,
877 hw->conf.channel_type, CARL9170_RFI_NONE);
878 if (err)
879 goto out;
881 err = carl9170_update_survey(ar, false, true);
882 if (err)
883 goto out;
885 err = carl9170_set_dyn_sifs_ack(ar);
886 if (err)
887 goto out;
889 err = carl9170_set_rts_cts_rate(ar);
890 if (err)
891 goto out;
894 out:
895 mutex_unlock(&ar->mutex);
896 return err;
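/*
 * Build the 64-bit multicast hash: every address sets the bit selected
 * by the upper six bits of its last byte, e.g. 33:33:00:00:00:01
 * (addr[5] = 0x01) maps to bit 0. Bit 0xff >> 2 (= 63) is always set,
 * so broadcast frames always pass the filter.
 */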
899 static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
900 struct netdev_hw_addr_list *mc_list)
902 struct netdev_hw_addr *ha;
903 u64 mchash;
905 /* always get broadcast frames */
906 mchash = 1ULL << (0xff >> 2);
908 netdev_hw_addr_list_for_each(ha, mc_list)
909 mchash |= 1ULL << (ha->addr[5] >> 2);
911 return mchash;
914 static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
915 unsigned int changed_flags,
916 unsigned int *new_flags,
917 u64 multicast)
919 struct ar9170 *ar = hw->priv;
921 /* mask supported flags */
922 *new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;
924 if (!IS_ACCEPTING_CMD(ar))
925 return;
927 mutex_lock(&ar->mutex);
929 ar->filter_state = *new_flags;
931 * We can support more by setting the sniffer bit and
932 * then checking the error flags later.
935 if (*new_flags & FIF_ALLMULTI)
936 multicast = ~0ULL;
938 if (multicast != ar->cur_mc_hash)
939 WARN_ON(carl9170_update_multicast(ar, multicast));
941 if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
942 ar->sniffer_enabled = !!(*new_flags &
943 (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));
945 WARN_ON(carl9170_set_operating_mode(ar));
948 if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
949 u32 rx_filter = 0;
951 if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
952 rx_filter |= CARL9170_RX_FILTER_BAD;
954 if (!(*new_flags & FIF_CONTROL))
955 rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
957 if (!(*new_flags & FIF_PSPOLL))
958 rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
960 if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
961 rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
962 rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
965 WARN_ON(carl9170_rx_filter(ar, rx_filter));
968 mutex_unlock(&ar->mutex);
972 static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
973 struct ieee80211_vif *vif,
974 struct ieee80211_bss_conf *bss_conf,
975 u32 changed)
977 struct ar9170 *ar = hw->priv;
978 struct ath_common *common = &ar->common;
979 int err = 0;
980 struct carl9170_vif_info *vif_priv;
981 struct ieee80211_vif *main_vif;
983 mutex_lock(&ar->mutex);
984 vif_priv = (void *) vif->drv_priv;
985 main_vif = carl9170_get_main_vif(ar);
986 if (WARN_ON(!main_vif))
987 goto out;
989 if (changed & BSS_CHANGED_BEACON_ENABLED) {
990 struct carl9170_vif_info *iter;
991 int i = 0;
993 vif_priv->enable_beacon = bss_conf->enable_beacon;
994 rcu_read_lock();
995 list_for_each_entry_rcu(iter, &ar->vif_list, list) {
996 if (iter->active && iter->enable_beacon)
997 i++;
1000 rcu_read_unlock();
1002 ar->beacon_enabled = i;
1005 if (changed & BSS_CHANGED_BEACON) {
1006 err = carl9170_update_beacon(ar, false);
1007 if (err)
1008 goto out;
1011 if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
1012 BSS_CHANGED_BEACON_INT)) {
1014 if (main_vif != vif) {
1015 bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
1016 bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
1020 * Therefore a hard limit for the broadcast traffic should
1021 * prevent false alarms.
1023 if (vif->type != NL80211_IFTYPE_STATION &&
1024 (bss_conf->beacon_int * bss_conf->dtim_period >=
1025 (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
1026 err = -EINVAL;
1027 goto out;
1030 err = carl9170_set_beacon_timers(ar);
1031 if (err)
1032 goto out;
1035 if (changed & BSS_CHANGED_HT) {
1036 /* TODO */
1037 err = 0;
1038 if (err)
1039 goto out;
1042 if (main_vif != vif)
1043 goto out;
1046 * The following settings can only be changed by the
1047 * master interface.
1050 if (changed & BSS_CHANGED_BSSID) {
1051 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1052 err = carl9170_set_operating_mode(ar);
1053 if (err)
1054 goto out;
1057 if (changed & BSS_CHANGED_ASSOC) {
1058 ar->common.curaid = bss_conf->aid;
1059 err = carl9170_set_beacon_timers(ar);
1060 if (err)
1061 goto out;
1064 if (changed & BSS_CHANGED_ERP_SLOT) {
1065 err = carl9170_set_slot_time(ar);
1066 if (err)
1067 goto out;
1070 if (changed & BSS_CHANGED_BASIC_RATES) {
1071 err = carl9170_set_mac_rates(ar);
1072 if (err)
1073 goto out;
1076 out:
1077 WARN_ON_ONCE(err && IS_STARTED(ar));
1078 mutex_unlock(&ar->mutex);
1081 static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
1082 struct ieee80211_vif *vif)
1084 struct ar9170 *ar = hw->priv;
1085 struct carl9170_tsf_rsp tsf;
1086 int err;
1088 mutex_lock(&ar->mutex);
1089 err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
1090 0, NULL, sizeof(tsf), &tsf);
1091 mutex_unlock(&ar->mutex);
1092 if (WARN_ON(err))
1093 return 0;
1095 return le64_to_cpu(tsf.tsf_64);
1098 static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1099 struct ieee80211_vif *vif,
1100 struct ieee80211_sta *sta,
1101 struct ieee80211_key_conf *key)
1103 struct ar9170 *ar = hw->priv;
1104 int err = 0, i;
1105 u8 ktype;
1107 if (ar->disable_offload || !vif)
1108 return -EOPNOTSUPP;
1111 * We have to fall back to software encryption whenever
1112 * the user chooses to participate in an IBSS or is connected
1113 * to more than one network.
1115 * This is very unfortunate, because some machines cannot handle
1116 * the high throughput speeds of 802.11n networks.
1119 if (!is_main_vif(ar, vif)) {
1120 mutex_lock(&ar->mutex);
1121 goto err_softw;
1125 * While the hardware supports a *catch-all* key for offloading
1126 * group-key en-/decryption, the way the hardware
1127 * decides which keyId maps to which key remains a mystery...
1129 if ((vif->type != NL80211_IFTYPE_STATION &&
1130 vif->type != NL80211_IFTYPE_ADHOC) &&
1131 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1132 return -EOPNOTSUPP;
1134 switch (key->cipher) {
1135 case WLAN_CIPHER_SUITE_WEP40:
1136 ktype = AR9170_ENC_ALG_WEP64;
1137 break;
1138 case WLAN_CIPHER_SUITE_WEP104:
1139 ktype = AR9170_ENC_ALG_WEP128;
1140 break;
1141 case WLAN_CIPHER_SUITE_TKIP:
1142 ktype = AR9170_ENC_ALG_TKIP;
1143 break;
1144 case WLAN_CIPHER_SUITE_CCMP:
1145 ktype = AR9170_ENC_ALG_AESCCMP;
1146 break;
1147 default:
1148 return -EOPNOTSUPP;
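/*
 * Key-cache layout used below: pairwise keys take one of the 64
 * per-station slots (tracked in ar->usedkeys), while group keys live
 * in the slots above that (64 + keyidx).
 */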
1151 mutex_lock(&ar->mutex);
1152 if (cmd == SET_KEY) {
1153 if (!IS_STARTED(ar)) {
1154 err = -EOPNOTSUPP;
1155 goto out;
1158 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
1159 sta = NULL;
1161 i = 64 + key->keyidx;
1162 } else {
1163 for (i = 0; i < 64; i++)
1164 if (!(ar->usedkeys & BIT(i)))
1165 break;
1166 if (i == 64)
1167 goto err_softw;
1170 key->hw_key_idx = i;
1172 err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
1173 ktype, 0, key->key,
1174 min_t(u8, 16, key->keylen));
1175 if (err)
1176 goto out;
1178 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1179 err = carl9170_upload_key(ar, i, sta ? sta->addr :
1180 NULL, ktype, 1,
1181 key->key + 16, 16);
1182 if (err)
1183 goto out;
1186 * the hardware is not capable of generating the MMIC
1187 * for fragmented frames!
1189 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1192 if (i < 64)
1193 ar->usedkeys |= BIT(i);
1195 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1196 } else {
1197 if (!IS_STARTED(ar)) {
1198 /* The device is gone... together with the key ;-) */
1199 err = 0;
1200 goto out;
1203 if (key->hw_key_idx < 64) {
1204 ar->usedkeys &= ~BIT(key->hw_key_idx);
1205 } else {
1206 err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
1207 AR9170_ENC_ALG_NONE, 0,
1208 NULL, 0);
1209 if (err)
1210 goto out;
1212 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1213 err = carl9170_upload_key(ar, key->hw_key_idx,
1214 NULL,
1215 AR9170_ENC_ALG_NONE,
1216 1, NULL, 0);
1217 if (err)
1218 goto out;
1223 err = carl9170_disable_key(ar, key->hw_key_idx);
1224 if (err)
1225 goto out;
1228 out:
1229 mutex_unlock(&ar->mutex);
1230 return err;
1232 err_softw:
1233 if (!ar->rx_software_decryption) {
1234 ar->rx_software_decryption = true;
1235 carl9170_set_operating_mode(ar);
1237 mutex_unlock(&ar->mutex);
1238 return -ENOSPC;
1241 static int carl9170_op_sta_add(struct ieee80211_hw *hw,
1242 struct ieee80211_vif *vif,
1243 struct ieee80211_sta *sta)
1245 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1246 unsigned int i;
1248 atomic_set(&sta_info->pending_frames, 0);
1250 if (sta->ht_cap.ht_supported) {
1251 if (sta->ht_cap.ampdu_density > 6) {
1253 * HW does not support 16us AMPDU density.
1254 * No HT-Xmit for this station.
1257 return 0;
1260 for (i = 0; i < CARL9170_NUM_TID; i++)
1261 rcu_assign_pointer(sta_info->agg[i], NULL);
1263 sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
1264 sta_info->ht_sta = true;
1267 return 0;
1270 static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
1271 struct ieee80211_vif *vif,
1272 struct ieee80211_sta *sta)
1274 struct ar9170 *ar = hw->priv;
1275 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1276 unsigned int i;
1277 bool cleanup = false;
1279 if (sta->ht_cap.ht_supported) {
1281 sta_info->ht_sta = false;
1283 rcu_read_lock();
1284 for (i = 0; i < CARL9170_NUM_TID; i++) {
1285 struct carl9170_sta_tid *tid_info;
1287 tid_info = rcu_dereference(sta_info->agg[i]);
1288 rcu_assign_pointer(sta_info->agg[i], NULL);
1290 if (!tid_info)
1291 continue;
1293 spin_lock_bh(&ar->tx_ampdu_list_lock);
1294 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1295 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1296 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1297 cleanup = true;
1299 rcu_read_unlock();
1301 if (cleanup)
1302 carl9170_ampdu_gc(ar);
1305 return 0;
1308 static int carl9170_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
1309 const struct ieee80211_tx_queue_params *param)
1311 struct ar9170 *ar = hw->priv;
1312 int ret;
1314 mutex_lock(&ar->mutex);
1315 if (queue < ar->hw->queues) {
1316 memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
1317 ret = carl9170_set_qos(ar);
1318 } else {
1319 ret = -EINVAL;
1322 mutex_unlock(&ar->mutex);
1323 return ret;
1326 static void carl9170_ampdu_work(struct work_struct *work)
1328 struct ar9170 *ar = container_of(work, struct ar9170,
1329 ampdu_work);
1331 if (!IS_STARTED(ar))
1332 return;
1334 mutex_lock(&ar->mutex);
1335 carl9170_ampdu_gc(ar);
1336 mutex_unlock(&ar->mutex);
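/*
 * mac80211 A-MPDU glue: TX_START allocates a per-TID session and puts
 * it on tx_ampdu_list, TX_STOP marks the session SHUTDOWN and lets
 * ampdu_work garbage-collect it, TX_OPERATIONAL resets the block-ack
 * window state. RX aggregation is handled entirely by the hardware.
 */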
1339 static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
1340 struct ieee80211_vif *vif,
1341 enum ieee80211_ampdu_mlme_action action,
1342 struct ieee80211_sta *sta,
1343 u16 tid, u16 *ssn, u8 buf_size)
1345 struct ar9170 *ar = hw->priv;
1346 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1347 struct carl9170_sta_tid *tid_info;
1349 if (modparam_noht)
1350 return -EOPNOTSUPP;
1352 switch (action) {
1353 case IEEE80211_AMPDU_TX_START:
1354 if (!sta_info->ht_sta)
1355 return -EOPNOTSUPP;
1357 rcu_read_lock();
1358 if (rcu_dereference(sta_info->agg[tid])) {
1359 rcu_read_unlock();
1360 return -EBUSY;
1363 tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
1364 GFP_ATOMIC);
1365 if (!tid_info) {
1366 rcu_read_unlock();
1367 return -ENOMEM;
1370 tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
1371 tid_info->state = CARL9170_TID_STATE_PROGRESS;
1372 tid_info->tid = tid;
1373 tid_info->max = sta_info->ampdu_max_len;
1375 INIT_LIST_HEAD(&tid_info->list);
1376 INIT_LIST_HEAD(&tid_info->tmp_list);
1377 skb_queue_head_init(&tid_info->queue);
1378 spin_lock_init(&tid_info->lock);
1380 spin_lock_bh(&ar->tx_ampdu_list_lock);
1381 ar->tx_ampdu_list_len++;
1382 list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
1383 rcu_assign_pointer(sta_info->agg[tid], tid_info);
1384 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1385 rcu_read_unlock();
1387 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1388 break;
1390 case IEEE80211_AMPDU_TX_STOP:
1391 rcu_read_lock();
1392 tid_info = rcu_dereference(sta_info->agg[tid]);
1393 if (tid_info) {
1394 spin_lock_bh(&ar->tx_ampdu_list_lock);
1395 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1396 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1397 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1400 rcu_assign_pointer(sta_info->agg[tid], NULL);
1401 rcu_read_unlock();
1403 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1404 ieee80211_queue_work(ar->hw, &ar->ampdu_work);
1405 break;
1407 case IEEE80211_AMPDU_TX_OPERATIONAL:
1408 rcu_read_lock();
1409 tid_info = rcu_dereference(sta_info->agg[tid]);
1411 sta_info->stats[tid].clear = true;
1412 sta_info->stats[tid].req = false;
1414 if (tid_info) {
1415 bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
1416 tid_info->state = CARL9170_TID_STATE_IDLE;
1418 rcu_read_unlock();
1420 if (WARN_ON_ONCE(!tid_info))
1421 return -EFAULT;
1423 break;
1425 case IEEE80211_AMPDU_RX_START:
1426 case IEEE80211_AMPDU_RX_STOP:
1427 /* Handled by hardware */
1428 break;
1430 default:
1431 return -EOPNOTSUPP;
1434 return 0;
1437 #ifdef CONFIG_CARL9170_WPC
1438 static int carl9170_register_wps_button(struct ar9170 *ar)
1440 struct input_dev *input;
1441 int err;
1443 if (!(ar->features & CARL9170_WPS_BUTTON))
1444 return 0;
1446 input = input_allocate_device();
1447 if (!input)
1448 return -ENOMEM;
1450 snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
1451 wiphy_name(ar->hw->wiphy));
1453 snprintf(ar->wps.phys, sizeof(ar->wps.phys),
1454 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));
1456 input->name = ar->wps.name;
1457 input->phys = ar->wps.phys;
1458 input->id.bustype = BUS_USB;
1459 input->dev.parent = &ar->hw->wiphy->dev;
1461 input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
1463 err = input_register_device(input);
1464 if (err) {
1465 input_free_device(input);
1466 return err;
1469 ar->wps.pbc = input;
1470 return 0;
1472 #endif /* CONFIG_CARL9170_WPC */
1474 #ifdef CONFIG_CARL9170_HWRNG
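/*
 * Refill the RNG cache: issue a firmware register-read command that
 * samples AR9170_RAND_REG_NUM repeatedly and copy the results into
 * ar->rng.cache.
 */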
1475 static int carl9170_rng_get(struct ar9170 *ar)
1478 #define RW (CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
1479 #define RB (CARL9170_MAX_CMD_PAYLOAD_LEN)
1481 static const __le32 rng_load[RW] = {
1482 [0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};
1484 u32 buf[RW];
1486 unsigned int i, off = 0, transfer, count;
1487 int err;
1489 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);
1491 if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
1492 return -EAGAIN;
1494 count = ARRAY_SIZE(ar->rng.cache);
1495 while (count) {
1496 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1497 RB, (u8 *) rng_load,
1498 RB, (u8 *) buf);
1499 if (err)
1500 return err;
1502 transfer = min_t(unsigned int, count, RW);
1503 for (i = 0; i < transfer; i++)
1504 ar->rng.cache[off + i] = buf[i];
1506 off += transfer;
1507 count -= transfer;
1510 ar->rng.cache_idx = 0;
1512 #undef RW
1513 #undef RB
1514 return 0;
1517 static int carl9170_rng_read(struct hwrng *rng, u32 *data)
1519 struct ar9170 *ar = (struct ar9170 *)rng->priv;
1520 int ret = -EIO;
1522 mutex_lock(&ar->mutex);
1523 if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
1524 ret = carl9170_rng_get(ar);
1525 if (ret) {
1526 mutex_unlock(&ar->mutex);
1527 return ret;
1531 *data = ar->rng.cache[ar->rng.cache_idx++];
1532 mutex_unlock(&ar->mutex);
1534 return sizeof(u16);
1537 static void carl9170_unregister_hwrng(struct ar9170 *ar)
1539 if (ar->rng.initialized) {
1540 hwrng_unregister(&ar->rng.rng);
1541 ar->rng.initialized = false;
1545 static int carl9170_register_hwrng(struct ar9170 *ar)
1547 int err;
1549 snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
1550 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
1551 ar->rng.rng.name = ar->rng.name;
1552 ar->rng.rng.data_read = carl9170_rng_read;
1553 ar->rng.rng.priv = (unsigned long)ar;
1555 if (WARN_ON(ar->rng.initialized))
1556 return -EALREADY;
1558 err = hwrng_register(&ar->rng.rng);
1559 if (err) {
1560 dev_err(&ar->udev->dev, "Failed to register the random "
1561 "number generator (%d)\n", err);
1562 return err;
1565 ar->rng.initialized = true;
1567 err = carl9170_rng_get(ar);
1568 if (err) {
1569 carl9170_unregister_hwrng(ar);
1570 return err;
1573 return 0;
1575 #endif /* CONFIG_CARL9170_HWRNG */
1577 static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
1578 struct survey_info *survey)
1580 struct ar9170 *ar = hw->priv;
1581 struct ieee80211_channel *chan;
1582 struct ieee80211_supported_band *band;
1583 int err, b, i;
1585 chan = ar->channel;
1586 if (!chan)
1587 return -ENODEV;
1589 if (idx == chan->hw_value) {
1590 mutex_lock(&ar->mutex);
1591 err = carl9170_update_survey(ar, false, true);
1592 mutex_unlock(&ar->mutex);
1593 if (err)
1594 return err;
1597 for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
1598 band = ar->hw->wiphy->bands[b];
1600 if (!band)
1601 continue;
1603 for (i = 0; i < band->n_channels; i++) {
1604 if (band->channels[i].hw_value == idx) {
1605 chan = &band->channels[i];
1606 goto found;
1610 return -ENOENT;
1612 found:
1613 memcpy(survey, &ar->survey[idx], sizeof(*survey));
1615 survey->channel = chan;
1616 survey->filled = SURVEY_INFO_NOISE_DBM;
1618 if (ar->channel == chan)
1619 survey->filled |= SURVEY_INFO_IN_USE;
1621 if (ar->fw.hw_counters) {
1622 survey->filled |= SURVEY_INFO_CHANNEL_TIME |
1623 SURVEY_INFO_CHANNEL_TIME_BUSY |
1624 SURVEY_INFO_CHANNEL_TIME_TX;
1627 return 0;
1630 static void carl9170_op_flush(struct ieee80211_hw *hw, bool drop)
1632 struct ar9170 *ar = hw->priv;
1633 unsigned int vid;
1635 mutex_lock(&ar->mutex);
1636 for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
1637 carl9170_flush_cab(ar, vid);
1639 carl9170_flush(ar, drop);
1640 mutex_unlock(&ar->mutex);
1643 static int carl9170_op_get_stats(struct ieee80211_hw *hw,
1644 struct ieee80211_low_level_stats *stats)
1646 struct ar9170 *ar = hw->priv;
1648 memset(stats, 0, sizeof(*stats));
1649 stats->dot11ACKFailureCount = ar->tx_ack_failures;
1650 stats->dot11FCSErrorCount = ar->tx_fcs_errors;
1651 return 0;
1654 static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
1655 struct ieee80211_vif *vif,
1656 enum sta_notify_cmd cmd,
1657 struct ieee80211_sta *sta)
1659 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1661 switch (cmd) {
1662 case STA_NOTIFY_SLEEP:
1663 sta_info->sleeping = true;
1664 if (atomic_read(&sta_info->pending_frames))
1665 ieee80211_sta_block_awake(hw, sta, true);
1666 break;
1668 case STA_NOTIFY_AWAKE:
1669 sta_info->sleeping = false;
1670 break;
1674 static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
1676 struct ar9170 *ar = hw->priv;
1678 return !!atomic_read(&ar->tx_total_queued);
1681 static const struct ieee80211_ops carl9170_ops = {
1682 .start = carl9170_op_start,
1683 .stop = carl9170_op_stop,
1684 .tx = carl9170_op_tx,
1685 .flush = carl9170_op_flush,
1686 .add_interface = carl9170_op_add_interface,
1687 .remove_interface = carl9170_op_remove_interface,
1688 .config = carl9170_op_config,
1689 .prepare_multicast = carl9170_op_prepare_multicast,
1690 .configure_filter = carl9170_op_configure_filter,
1691 .conf_tx = carl9170_op_conf_tx,
1692 .bss_info_changed = carl9170_op_bss_info_changed,
1693 .get_tsf = carl9170_op_get_tsf,
1694 .set_key = carl9170_op_set_key,
1695 .sta_add = carl9170_op_sta_add,
1696 .sta_remove = carl9170_op_sta_remove,
1697 .sta_notify = carl9170_op_sta_notify,
1698 .get_survey = carl9170_op_get_survey,
1699 .get_stats = carl9170_op_get_stats,
1700 .ampdu_action = carl9170_op_ampdu_action,
1701 .tx_frames_pending = carl9170_tx_frames_pending,
1704 void *carl9170_alloc(size_t priv_size)
1706 struct ieee80211_hw *hw;
1707 struct ar9170 *ar;
1708 struct sk_buff *skb;
1709 int i;
1712 * this buffer is used for rx stream reconstruction.
1713 * Under heavy load this device (or the transport layer?)
1714 * tends to split the streams into separate rx descriptors.
1717 skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
1718 if (!skb)
1719 goto err_nomem;
1721 hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
1722 if (!hw)
1723 goto err_nomem;
1725 ar = hw->priv;
1726 ar->hw = hw;
1727 ar->rx_failover = skb;
1729 memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
1730 ar->rx_has_plcp = false;
1733 * Here's a hidden pitfall!
1735 * All 4 AC queues work perfectly well under _legacy_ operation.
1736 * However, as soon as aggregation is enabled, the traffic flow
1737 * gets very bumpy. Therefore we have to _switch_ to a
1738 * software AC with a single HW queue.
1740 hw->queues = __AR9170_NUM_TXQ;
1742 mutex_init(&ar->mutex);
1743 spin_lock_init(&ar->beacon_lock);
1744 spin_lock_init(&ar->cmd_lock);
1745 spin_lock_init(&ar->tx_stats_lock);
1746 spin_lock_init(&ar->tx_ampdu_list_lock);
1747 spin_lock_init(&ar->mem_lock);
1748 spin_lock_init(&ar->state_lock);
1749 atomic_set(&ar->pending_restarts, 0);
1750 ar->vifs = 0;
1751 for (i = 0; i < ar->hw->queues; i++) {
1752 skb_queue_head_init(&ar->tx_status[i]);
1753 skb_queue_head_init(&ar->tx_pending[i]);
1755 INIT_WORK(&ar->ps_work, carl9170_ps_work);
1756 INIT_WORK(&ar->ping_work, carl9170_ping_work);
1757 INIT_WORK(&ar->restart_work, carl9170_restart_work);
1758 INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
1759 INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
1760 INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
1761 INIT_LIST_HEAD(&ar->tx_ampdu_list);
1762 rcu_assign_pointer(ar->tx_ampdu_iter,
1763 (struct carl9170_sta_tid *) &ar->tx_ampdu_list);
1765 bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
1766 INIT_LIST_HEAD(&ar->vif_list);
1767 init_completion(&ar->tx_flush);
1769 /* firmware decides which modes we support */
1770 hw->wiphy->interface_modes = 0;
1772 hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
1773 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
1774 IEEE80211_HW_SUPPORTS_PS |
1775 IEEE80211_HW_PS_NULLFUNC_STACK |
1776 IEEE80211_HW_NEED_DTIM_PERIOD |
1777 IEEE80211_HW_SIGNAL_DBM;
1779 if (!modparam_noht) {
1781 * see the comment above for why we allow the user
1782 * to disable HT via a module parameter.
1784 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
1787 hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
1788 hw->sta_data_size = sizeof(struct carl9170_sta_info);
1789 hw->vif_data_size = sizeof(struct carl9170_vif_info);
1791 hw->max_rates = CARL9170_TX_MAX_RATES;
1792 hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;
1794 for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
1795 ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
1797 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1798 return ar;
1800 err_nomem:
1801 kfree_skb(skb);
1802 return ERR_PTR(-ENOMEM);
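/*
 * Read the EEPROM image through the firmware's register-read command,
 * RB bytes (RW little-endian words) at a time.
 */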
1805 static int carl9170_read_eeprom(struct ar9170 *ar)
1807 #define RW 8 /* number of words to read at once */
1808 #define RB (sizeof(u32) * RW)
1809 u8 *eeprom = (void *)&ar->eeprom;
1810 __le32 offsets[RW];
1811 int i, j, err;
1813 BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
1815 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
1816 #ifndef __CHECKER__
1817 /* we don't want to handle a trailing remainder */
1818 BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
1819 #endif
1821 for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
1822 for (j = 0; j < RW; j++)
1823 offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
1824 RB * i + 4 * j);
1826 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1827 RB, (u8 *) &offsets,
1828 RB, eeprom + RB * i);
1829 if (err)
1830 return err;
1833 #undef RW
1834 #undef RB
1835 return 0;
1838 static int carl9170_parse_eeprom(struct ar9170 *ar)
1840 struct ath_regulatory *regulatory = &ar->common.regulatory;
1841 unsigned int rx_streams, tx_streams, tx_params = 0;
1842 int bands = 0;
1843 int chans = 0;
1845 if (ar->eeprom.length == cpu_to_le16(0xffff))
1846 return -ENODATA;
1848 rx_streams = hweight8(ar->eeprom.rx_mask);
1849 tx_streams = hweight8(ar->eeprom.tx_mask);
1851 if (rx_streams != tx_streams) {
1852 tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1854 WARN_ON(!(tx_streams >= 1 && tx_streams <=
1855 IEEE80211_HT_MCS_TX_MAX_STREAMS));
1857 tx_params = (tx_streams - 1) <<
1858 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1860 carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1861 carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1864 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1865 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1866 &carl9170_band_2GHz;
1867 chans += carl9170_band_2GHz.n_channels;
1868 bands++;
1870 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1871 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1872 &carl9170_band_5GHz;
1873 chans += carl9170_band_5GHz.n_channels;
1874 bands++;
1877 if (!bands)
1878 return -EINVAL;
1880 ar->survey = kzalloc(sizeof(struct survey_info) * chans, GFP_KERNEL);
1881 if (!ar->survey)
1882 return -ENOMEM;
1883 ar->num_channels = chans;
1886 * I measured this: a band switch takes roughly
1887 * 135 ms and a frequency switch about 80 ms.
1889 * FIXME: measure these values again once the EEPROM settings
1890 * are used; they will influence them!
1892 if (bands == 2)
1893 ar->hw->channel_change_time = 135 * 1000;
1894 else
1895 ar->hw->channel_change_time = 80 * 1000;
1897 regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1898 regulatory->current_rd_ext = le16_to_cpu(ar->eeprom.reg_domain[1]);
1900 /* second part of wiphy init */
1901 SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
1903 return 0;
1906 static int carl9170_reg_notifier(struct wiphy *wiphy,
1907 struct regulatory_request *request)
1909 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1910 struct ar9170 *ar = hw->priv;
1912 return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
1915 int carl9170_register(struct ar9170 *ar)
1917 struct ath_regulatory *regulatory = &ar->common.regulatory;
1918 int err = 0, i;
1920 if (WARN_ON(ar->mem_bitmap))
1921 return -EINVAL;
1923 ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) *
1924 sizeof(unsigned long), GFP_KERNEL);
1926 if (!ar->mem_bitmap)
1927 return -ENOMEM;
1929 /* try to read EEPROM, init MAC addr */
1930 err = carl9170_read_eeprom(ar);
1931 if (err)
1932 return err;
1934 err = carl9170_fw_fix_eeprom(ar);
1935 if (err)
1936 return err;
1938 err = carl9170_parse_eeprom(ar);
1939 if (err)
1940 return err;
1942 err = ath_regd_init(regulatory, ar->hw->wiphy,
1943 carl9170_reg_notifier);
1944 if (err)
1945 return err;
1947 if (modparam_noht) {
1948 carl9170_band_2GHz.ht_cap.ht_supported = false;
1949 carl9170_band_5GHz.ht_cap.ht_supported = false;
1952 for (i = 0; i < ar->fw.vif_num; i++) {
1953 ar->vif_priv[i].id = i;
1954 ar->vif_priv[i].vif = NULL;
1957 err = ieee80211_register_hw(ar->hw);
1958 if (err)
1959 return err;
1961 /* mac80211 interface is now registered */
1962 ar->registered = true;
1964 if (!ath_is_world_regd(regulatory))
1965 regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
1967 #ifdef CONFIG_CARL9170_DEBUGFS
1968 carl9170_debugfs_register(ar);
1969 #endif /* CONFIG_CARL9170_DEBUGFS */
1971 err = carl9170_led_init(ar);
1972 if (err)
1973 goto err_unreg;
1975 #ifdef CONFIG_CARL9170_LEDS
1976 err = carl9170_led_register(ar);
1977 if (err)
1978 goto err_unreg;
1979 #endif /* CONFIG_CARL9170_LEDS */
1981 #ifdef CONFIG_CARL9170_WPC
1982 err = carl9170_register_wps_button(ar);
1983 if (err)
1984 goto err_unreg;
1985 #endif /* CONFIG_CARL9170_WPC */
1987 #ifdef CONFIG_CARL9170_HWRNG
1988 err = carl9170_register_hwrng(ar);
1989 if (err)
1990 goto err_unreg;
1991 #endif /* CONFIG_CARL9170_HWRNG */
1993 dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
1994 wiphy_name(ar->hw->wiphy));
1996 return 0;
1998 err_unreg:
1999 carl9170_unregister(ar);
2000 return err;
2003 void carl9170_unregister(struct ar9170 *ar)
2005 if (!ar->registered)
2006 return;
2008 ar->registered = false;
2010 #ifdef CONFIG_CARL9170_LEDS
2011 carl9170_led_unregister(ar);
2012 #endif /* CONFIG_CARL9170_LEDS */
2014 #ifdef CONFIG_CARL9170_DEBUGFS
2015 carl9170_debugfs_unregister(ar);
2016 #endif /* CONFIG_CARL9170_DEBUGFS */
2018 #ifdef CONFIG_CARL9170_WPC
2019 if (ar->wps.pbc) {
2020 input_unregister_device(ar->wps.pbc);
2021 ar->wps.pbc = NULL;
2023 #endif /* CONFIG_CARL9170_WPC */
2025 #ifdef CONFIG_CARL9170_HWRNG
2026 carl9170_unregister_hwrng(ar);
2027 #endif /* CONFIG_CARL9170_HWRNG */
2029 carl9170_cancel_worker(ar);
2030 cancel_work_sync(&ar->restart_work);
2032 ieee80211_unregister_hw(ar->hw);
2035 void carl9170_free(struct ar9170 *ar)
2037 WARN_ON(ar->registered);
2038 WARN_ON(IS_INITIALIZED(ar));
2040 kfree_skb(ar->rx_failover);
2041 ar->rx_failover = NULL;
2043 kfree(ar->mem_bitmap);
2044 ar->mem_bitmap = NULL;
2046 kfree(ar->survey);
2047 ar->survey = NULL;
2049 mutex_destroy(&ar->mutex);
2051 ieee80211_free_hw(ar->hw);