ath9k: Add debugfs support for mac/baseband samples
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers/net/wireless/ath/ath9k/init.c
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/ath9k_platform.h>

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int ath9k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");

static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");

bool is_ath9k_unloaded;

/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx)  { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

/*
 * Some 2 GHz radios are actually tunable on 2312-2732 in 5 MHz steps.
 * We only support the channels we know we have calibration data for
 * on all cards, so that this table can stay static.
 */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};

/*
 * Some 5 GHz radios are actually tunable on XXXX-YYYY in 5 MHz steps.
 * We only support the channels we know we have calibration data for
 * on all cards, so that this table can stay static.
 */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate), \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

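/*
 * Legacy (non-HT) rate table, bitrates in units of 100 kbps. The first
 * four entries are the 2 GHz-only CCK rates; the 5 GHz band registers
 * this table starting at index 4 so that it advertises only the OFDM
 * rates (see ath9k_init_channels_rates()).
 */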
static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};

#ifdef CONFIG_MAC80211_LEDS
static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};
#endif

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Reads and writes share the same lock. We do this to serialize
 * reads and writes on Atheros 802.11n PCI devices only. This is
 * required because the FIFO on these devices can sanely accept only
 * two requests at a time.
 */

static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		iowrite32(val, sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = ioread32(sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = ioread32(sc->mem + reg_offset);
	return val;
}

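/*
 * Read-modify-write a device register: clear the bits in 'clr', set the
 * bits in 'set' and write the result back. ath9k_reg_rmw() wraps the
 * unlocked helper below with the same serialization lock used for plain
 * reads and writes when serialize_regmode is enabled.
 */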
static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
				    u32 set, u32 clr)
{
	u32 val;

	val = ioread32(sc->mem + reg_offset);
	val &= ~clr;
	val |= set;
	iowrite32(val, sc->mem + reg_offset);

	return val;
}

static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	unsigned long uninitialized_var(flags);
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);

	return val;
}

/**************************/
/*     Initialization     */
/**************************/

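/*
 * Advertise the HT (802.11n) capabilities supported by this chip:
 * 40 MHz operation, short GI, STBC/LDPC where available, and an MCS
 * set sized to the number of TX/RX streams derived from the chainmasks.
 */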
static void setup_ht_cap(struct ath_softc *sc,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u8 tx_streams, rx_streams;
	int i, max_streams;

	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
		max_streams = 1;
	else if (AR_SREV_9300_20_OR_LATER(ah))
		max_streams = 3;
	else
		max_streams = 2;

	if (AR_SREV_9280_20_OR_LATER(ah)) {
		if (max_streams >= 2)
			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
	}

	/* set up supported mcs set */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
	rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);

	ath_dbg(common, ATH_DBG_CONFIG,
		"TX streams %d, RX streams: %d\n",
		tx_streams, rx_streams);

	if (tx_streams != rx_streams) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
					   IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}

	for (i = 0; i < rx_streams; i++)
		ht_info->mcs.rx_mask[i] = 0xff;

	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

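/* Called by cfg80211 on regulatory domain changes; defer to the common
 * ath regulatory code. */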
static int ath9k_reg_notifier(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath_softc *sc = hw->priv;
	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);

	return ath_reg_notifier_apply(wiphy, request, reg);
}

/*
 * This function will allocate both the DMA descriptor structure, and the
 * buffers it contains. These are used to contain the descriptors used
 * by the system.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_err(common, "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = (u8 *) dd->dd_desc;
	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
}

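/*
 * Set up Bluetooth coexistence according to the scheme reported by the
 * hardware: none, 2-wire, or 3-wire (which additionally needs the
 * btcoex timer and a stomp type bound to the BE queue).
 */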
static int ath9k_init_btcoex(struct ath_softc *sc)
{
	struct ath_txq *txq;
	int r;

	switch (sc->sc_ah->btcoex_hw.scheme) {
	case ATH_BTCOEX_CFG_NONE:
		break;
	case ATH_BTCOEX_CFG_2WIRE:
		ath9k_hw_btcoex_init_2wire(sc->sc_ah);
		break;
	case ATH_BTCOEX_CFG_3WIRE:
		ath9k_hw_btcoex_init_3wire(sc->sc_ah);
		r = ath_init_btcoex_timer(sc);
		if (r)
			return -1;
		txq = sc->tx.txq_map[WME_AC_BE];
		ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
		break;
	default:
		WARN_ON(1);
		break;
	}

	return 0;
}

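/* Allocate the beacon, CAB and per-AC data TX queues. */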
static int ath9k_init_queues(struct ath_softc *sc)
{
	int i = 0;

	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	for (i = 0; i < WME_NUM_AC; i++) {
		sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
		sc->tx.txq_map[i]->mac80211_qnum = i;
	}

	return 0;
}

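/*
 * Duplicate the static channel tables into per-device band structures
 * and attach the legacy rate table for each band the hardware supports.
 */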
static int ath9k_init_channels_rates(struct ath_softc *sc)
{
	void *channels;

	BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
		     ARRAY_SIZE(ath9k_5ghz_chantable) !=
		     ATH9K_NUM_CHANNELS);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
		channels = kmemdup(ath9k_2ghz_chantable,
				   sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
		if (!channels)
			return -ENOMEM;

		sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
			ARRAY_SIZE(ath9k_2ghz_chantable);
		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates);
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
		channels = kmemdup(ath9k_5ghz_chantable,
				   sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
		if (!channels) {
			if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
				kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
			return -ENOMEM;
		}

		sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
			ARRAY_SIZE(ath9k_5ghz_chantable);
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			ath9k_legacy_rates + 4;
		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates) - 4;
	}
	return 0;
}

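/*
 * Miscellaneous defaults: ANI calibration timer, TX power limit,
 * aggregation flags, chainmasks, antenna diversity and beacon slots.
 */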
static void ath9k_init_misc(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->config.txpowlimit = ATH_TXPOWER_MAX;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
	common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;

	ath9k_hw_set_diversity(sc->sc_ah, true);
	sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

	memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

	sc->beacon.slottime = ATH9K_SLOT_TIME_9;

	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
		sc->beacon.bslot[i] = NULL;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
}

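/*
 * Allocate and initialize the per-device software state: register ops,
 * platform data overrides, locks and tasklets, followed by chip-level
 * init via ath9k_hw_init() and the queue, btcoex and channel/rate setup.
 */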
static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath9k_platform_data *pdata = sc->dev->platform_data;
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw = sc->hw;
	ah->hw_version.devid = devid;
	ah->reg_ops.read = ath9k_ioread32;
	ah->reg_ops.write = ath9k_iowrite32;
	ah->reg_ops.rmw = ath9k_reg_rmw;
	atomic_set(&ah->intr_ref_cnt, -1);
	sc->sc_ah = ah;

	if (!pdata) {
		ah->ah_flags |= AH_USE_EEPROM;
		sc->sc_ah->led_pin = -1;
	} else {
		sc->sc_ah->gpio_mask = pdata->gpio_mask;
		sc->sc_ah->gpio_val = pdata->gpio_val;
		sc->sc_ah->led_pin = pdata->led_pin;
		ah->is_clk_25mhz = pdata->is_clk_25mhz;
		ah->get_mac_revision = pdata->get_mac_revision;
		ah->external_reset = pdata->external_reset;
	}

	common = ath9k_hw_common(ah);
	common->ops = &ah->reg_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;
	common->btcoex_enabled = ath9k_btcoex_enable == 1;
	common->disable_ani = false;
	spin_lock_init(&common->cc_lock);

	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
#ifdef CONFIG_ATH9K_DEBUGFS
	spin_lock_init(&sc->nodes_lock);
	spin_lock_init(&sc->debug.samp_lock);
	INIT_LIST_HEAD(&sc->nodes);
#endif
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	/* Initializes the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	if (pdata && pdata->macaddr)
		memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret = ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ret = ath9k_init_channels_rates(sc);
	if (ret)
		goto err_btcoex;

	ath9k_cmn_init_crypto(sc->sc_ah);
	ath9k_init_misc(sc);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_hw_deinit(ah);
err_hw:

	kfree(ah);
	sc->sc_ah = NULL;

	return ret;
}

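/*
 * Compute the regulatory TX power limit for every channel in a band;
 * max_power_level is kept in half-dBm units, hence the division by two.
 */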
static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
	int i;

	sband = &sc->sbands[band];
	for (i = 0; i < sband->n_channels; i++) {
		chan = &sband->channels[i];
		ah->curchan = &ah->channels[chan->hw_value];
		ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
		ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
		chan->max_power = reg->max_power_level / 2;
	}
}

static void ath9k_init_txpower_limits(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_channel *curchan = ah->curchan;

	ah->txchainmask = common->tx_chainmask;
	if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
	if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);

	ah->curchan = curchan;
}

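/* Fill in the mac80211 hardware flags, interface modes, queue limits
 * and supported bands for this device. */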
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_PS_NULLFUNC_STACK |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_WDS) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	if (AR_SREV_5416(sc->sc_ah))
		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	hw->queues = 4;
	hw->max_rates = 4;
	hw->channel_change_time = 5000;
	hw->max_listen_interval = 10;
	hw->max_rate_tries = 10;
	hw->sta_data_size = sizeof(struct ath_node);
	hw->vif_data_size = sizeof(struct ath_vif);

#ifdef CONFIG_ATH9K_RATE_CONTROL
	hw->rate_control_algorithm = "ath9k_rate_control";
#endif

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&sc->sbands[IEEE80211_BAND_2GHZ];
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
	}

	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

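/*
 * Bus-independent probe path: bring up the software state, initialize
 * regulatory, set up TX/RX DMA, register with mac80211 and create the
 * debugfs entries.
 */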
int ath9k_init_device(u16 devid, struct ath_softc *sc,
		      const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, bus_ops);
	if (error != 0)
		goto error_init;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto error_regd;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto error_tx;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto error_rx;

	ath9k_init_txpower_limits(sc);

#ifdef CONFIG_MAC80211_LEDS
	/* must be initialized before ieee80211_register_hw */
	sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
		IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
		ARRAY_SIZE(ath9k_tpt_blink));
#endif

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto error_register;

	error = ath9k_init_debug(ah);
	if (error) {
		ath_err(common, "Unable to create debugfs files\n");
		goto error_world;
	}

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto error_world;
	}

	INIT_WORK(&sc->hw_check_work, ath_hw_check);
	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
	INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
	sc->last_rssi = ATH_RSSI_DUMMY_MARKER;

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	return 0;

error_world:
	ieee80211_unregister_hw(hw);
error_register:
	ath_rx_cleanup(sc);
error_rx:
	ath_tx_cleanup(sc);
error_tx:
	/* Nothing */
error_regd:
	ath9k_deinit_softc(sc);
error_init:
	return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

static void ath9k_deinit_softc(struct ath_softc *sc)
{
	int i = 0;

	if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);

	if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);

	if ((sc->btcoex.no_stomp_timer) &&
	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	ath9k_hw_deinit(sc->sc_ah);

	kfree(sc->sc_ah);
	sc->sc_ah = NULL;
}

void ath9k_deinit_device(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;

	ath9k_ps_wakeup(sc);

	wiphy_rfkill_stop_polling(sc->hw->wiphy);
	ath_deinit_leds(sc);

	ath9k_ps_restore(sc);

	ieee80211_unregister_hw(hw);
	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);
	ath9k_deinit_softc(sc);
}

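/* Free a DMA descriptor ring allocated by ath_descdma_setup(). */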
void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/

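/*
 * Module init: register the rate control algorithm first, then the PCI
 * and AHB bus glue; unwind in reverse order on failure.
 */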
static int __init ath9k_init(void)
{
	int error;

	/* Register rate control algorithm */
	error = ath_rate_control_register();
	if (error != 0) {
		printk(KERN_ERR
			"ath9k: Unable to register rate control "
			"algorithm: %d\n",
			error);
		goto err_out;
	}

	error = ath_pci_init();
	if (error < 0) {
		printk(KERN_ERR
			"ath9k: No PCI devices found, driver not installed.\n");
		error = -ENODEV;
		goto err_rate_unregister;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

 err_pci_exit:
	ath_pci_exit();

 err_rate_unregister:
	ath_rate_control_unregister();
 err_out:
	return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
	is_ath9k_unloaded = true;
	ath_ahb_exit();
	ath_pci_exit();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);