tomato.git: release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/net/wireless/iwlwifi/iwl-core.c
1 /******************************************************************************
3 * GPL LICENSE SUMMARY
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/etherdevice.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <net/mac80211.h>
36 #include "iwl-eeprom.h"
37 #include "iwl-dev.h"
38 #include "iwl-debug.h"
39 #include "iwl-core.h"
40 #include "iwl-io.h"
41 #include "iwl-power.h"
42 #include "iwl-sta.h"
43 #include "iwl-helpers.h"
46 MODULE_DESCRIPTION("iwl core");
47 MODULE_VERSION(IWLWIFI_VERSION);
48 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
49 MODULE_LICENSE("GPL");
52 * If bt_coex_active is set to true, the uCode will kill/defer
53 * every time the priority line is asserted (BT is sending signals on the
54 * priority line in the PCIx).
55 * If bt_coex_active is set to false, the uCode will ignore BT activity and
56 * perform normal operation.
58 * Users might experience transmit issues on some platforms due to WiFi/BT
59 * co-existence problems. The possible symptoms are:
60 * - able to scan and find all available APs
61 * - unable to associate with any AP
62 * On those platforms, WiFi communication can be restored by setting the
63 * "bt_coex_active" module parameter to "false"
65 * default: bt_coex_active = true (BT_COEX_ENABLE)
67 static bool bt_coex_active = true;
68 module_param(bt_coex_active, bool, S_IRUGO);
69 MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
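/*
 * Illustrative usage note (not part of the driver, module name assumed):
 * the parameter can be overridden at load time, e.g.
 * "modprobe iwlcore bt_coex_active=0", or inspected via
 * /sys/module/<module>/parameters/bt_coex_active, which is readable but
 * not writable at runtime because it is registered with S_IRUGO.
 */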
71 #define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
72 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
73 IWL_RATE_SISO_##s##M_PLCP, \
74 IWL_RATE_MIMO2_##s##M_PLCP,\
75 IWL_RATE_MIMO3_##s##M_PLCP,\
76 IWL_RATE_##r##M_IEEE, \
77 IWL_RATE_##ip##M_INDEX, \
78 IWL_RATE_##in##M_INDEX, \
79 IWL_RATE_##rp##M_INDEX, \
80 IWL_RATE_##rn##M_INDEX, \
81 IWL_RATE_##pp##M_INDEX, \
82 IWL_RATE_##np##M_INDEX }
84 u32 iwl_debug_level;
85 EXPORT_SYMBOL(iwl_debug_level);
88 * Parameter order:
89 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
91 * If there isn't a valid next or previous rate then INV is used which
92 * maps to IWL_RATE_INVALID
95 const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
96 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
97 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
98 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
99 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
100 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
101 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
102 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
103 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
104 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
105 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
106 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
107 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
108 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
110 EXPORT_SYMBOL(iwl_rates);
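/*
 * Illustrative expansion of the table macro (token pasting only, not part
 * of the driver): IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11) becomes
 *   [IWL_RATE_6M_INDEX] = { IWL_RATE_6M_PLCP,
 *                           IWL_RATE_SISO_6M_PLCP,
 *                           IWL_RATE_MIMO2_6M_PLCP,
 *                           IWL_RATE_MIMO3_6M_PLCP,
 *                           IWL_RATE_6M_IEEE,
 *                           IWL_RATE_5M_INDEX, IWL_RATE_9M_INDEX,
 *                           IWL_RATE_5M_INDEX, IWL_RATE_11M_INDEX,
 *                           IWL_RATE_5M_INDEX, IWL_RATE_11M_INDEX }
 * i.e. 6 Mbps, with 5.5 Mbps / 9 Mbps as the previous / next legacy rates.
 */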
112 int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
114 int idx = 0;
116 /* HT rate format */
117 if (rate_n_flags & RATE_MCS_HT_MSK) {
118 idx = (rate_n_flags & 0xff);
120 if (idx >= IWL_RATE_MIMO3_6M_PLCP)
121 idx = idx - IWL_RATE_MIMO3_6M_PLCP;
122 else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
123 idx = idx - IWL_RATE_MIMO2_6M_PLCP;
125 idx += IWL_FIRST_OFDM_RATE;
126 /* skip 9M, not supported in HT */
127 if (idx >= IWL_RATE_9M_INDEX)
128 idx += 1;
129 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
130 return idx;
132 /* legacy rate format, search for match in table */
133 } else {
134 for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
135 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
136 return idx;
139 return -1;
141 EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx);
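/*
 * Sketch of the two lookup paths above (illustrative only): for a legacy
 * rate, the low byte of rate_n_flags is matched against iwl_rates[].plcp,
 * so a frame sent at 11 Mbps returns IWL_RATE_11M_INDEX; for an HT rate
 * (RATE_MCS_HT_MSK set), any MIMO2/MIMO3 PLCP offset is stripped first and
 * the result is shifted past the CCK-only indices, skipping 9M, which has
 * no HT equivalent.
 */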
143 u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
145 int i;
146 u8 ind = ant;
148 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
149 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
150 if (valid & BIT(ind))
151 return ind;
153 return ant;
155 EXPORT_SYMBOL(iwl_toggle_tx_ant);
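/*
 * Illustrative example (not part of the driver, assuming RATE_ANT_NUM
 * covers three antennas): with valid = BIT(0) | BIT(2) (antennas A and C
 * connected) and ant = 0, the loop advances to index 1 (not valid), then
 * index 2 (valid) and returns 2, i.e. the next connected antenna after the
 * current one; if no other bit in 'valid' is set, the original antenna is
 * returned unchanged.
 */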
157 const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
158 EXPORT_SYMBOL(iwl_bcast_addr);
161 /* This function both allocates and initializes hw and priv. */
162 struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
163 struct ieee80211_ops *hw_ops)
165 struct iwl_priv *priv;
167 /* mac80211 allocates memory for this device instance, including
168 * space for this driver's private structure */
169 struct ieee80211_hw *hw =
170 ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops);
171 if (hw == NULL) {
172 pr_err("%s: Can not allocate network device\n",
173 cfg->name);
174 goto out;
177 priv = hw->priv;
178 priv->hw = hw;
180 out:
181 return hw;
183 EXPORT_SYMBOL(iwl_alloc_all);
185 void iwl_hw_detect(struct iwl_priv *priv)
187 priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
188 priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
189 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
191 EXPORT_SYMBOL(iwl_hw_detect);
194 * QoS support
196 static void iwl_update_qos(struct iwl_priv *priv)
198 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
199 return;
201 priv->qos_data.def_qos_parm.qos_flags = 0;
203 if (priv->qos_data.qos_active)
204 priv->qos_data.def_qos_parm.qos_flags |=
205 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
207 if (priv->current_ht_config.is_ht)
208 priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
210 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
211 priv->qos_data.qos_active,
212 priv->qos_data.def_qos_parm.qos_flags);
214 iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
215 sizeof(struct iwl_qosparam_cmd),
216 &priv->qos_data.def_qos_parm, NULL);
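/*
 * For reference (derived from the function above, illustrative only): with
 * QoS active on an HT association, def_qos_parm.qos_flags ends up as
 * QOS_PARAM_FLG_UPDATE_EDCA_MSK | QOS_PARAM_FLG_TGN_MSK before the
 * REPLY_QOS_PARAM command is queued asynchronously.
 */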
219 #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
220 #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
221 static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
222 struct ieee80211_sta_ht_cap *ht_info,
223 enum ieee80211_band band)
225 u16 max_bit_rate = 0;
226 u8 rx_chains_num = priv->hw_params.rx_chains_num;
227 u8 tx_chains_num = priv->hw_params.tx_chains_num;
229 ht_info->cap = 0;
230 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
232 ht_info->ht_supported = true;
234 if (priv->cfg->ht_greenfield_support)
235 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
236 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
237 max_bit_rate = MAX_BIT_RATE_20_MHZ;
238 if (priv->hw_params.ht40_channel & BIT(band)) {
239 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
240 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
241 ht_info->mcs.rx_mask[4] = 0x01;
242 max_bit_rate = MAX_BIT_RATE_40_MHZ;
245 if (priv->cfg->mod_params->amsdu_size_8K)
246 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
248 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
249 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
251 ht_info->mcs.rx_mask[0] = 0xFF;
252 if (rx_chains_num >= 2)
253 ht_info->mcs.rx_mask[1] = 0xFF;
254 if (rx_chains_num >= 3)
255 ht_info->mcs.rx_mask[2] = 0xFF;
257 /* Highest supported Rx data rate */
258 max_bit_rate *= rx_chains_num;
259 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
260 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
262 /* Tx MCS capabilities */
263 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
264 if (tx_chains_num != rx_chains_num) {
265 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
266 ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
267 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
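/*
 * Worked example (illustrative only): on a 2x2 device with HT40 allowed on
 * the band, the code above sets rx_mask[0] = rx_mask[1] = 0xFF (MCS 0-15)
 * and rx_highest = 150 Mbps * 2 chains = 300 Mbps; with HT40 not allowed
 * the advertised maximum stays at 72 Mbps * 2 = 144 Mbps.
 */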
272 * iwlcore_init_geos - Initialize mac80211's geo/channel info based on eeprom data
274 int iwlcore_init_geos(struct iwl_priv *priv)
276 struct iwl_channel_info *ch;
277 struct ieee80211_supported_band *sband;
278 struct ieee80211_channel *channels;
279 struct ieee80211_channel *geo_ch;
280 struct ieee80211_rate *rates;
281 int i = 0;
283 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
284 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
285 IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
286 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
287 return 0;
290 channels = kzalloc(sizeof(struct ieee80211_channel) *
291 priv->channel_count, GFP_KERNEL);
292 if (!channels)
293 return -ENOMEM;
295 rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
296 GFP_KERNEL);
297 if (!rates) {
298 kfree(channels);
299 return -ENOMEM;
302 /* 5.2GHz channels start after the 2.4GHz channels */
303 sband = &priv->bands[IEEE80211_BAND_5GHZ];
304 sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
305 /* just OFDM */
306 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
307 sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
309 if (priv->cfg->sku & IWL_SKU_N)
310 iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
311 IEEE80211_BAND_5GHZ);
313 sband = &priv->bands[IEEE80211_BAND_2GHZ];
314 sband->channels = channels;
315 /* OFDM & CCK */
316 sband->bitrates = rates;
317 sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
319 if (priv->cfg->sku & IWL_SKU_N)
320 iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
321 IEEE80211_BAND_2GHZ);
323 priv->ieee_channels = channels;
324 priv->ieee_rates = rates;
326 for (i = 0; i < priv->channel_count; i++) {
327 ch = &priv->channel_info[i];
329 if (!is_channel_valid(ch))
330 continue;
332 if (is_channel_a_band(ch))
333 sband = &priv->bands[IEEE80211_BAND_5GHZ];
334 else
335 sband = &priv->bands[IEEE80211_BAND_2GHZ];
337 geo_ch = &sband->channels[sband->n_channels++];
339 geo_ch->center_freq =
340 ieee80211_channel_to_frequency(ch->channel);
341 geo_ch->max_power = ch->max_power_avg;
342 geo_ch->max_antenna_gain = 0xff;
343 geo_ch->hw_value = ch->channel;
345 if (is_channel_valid(ch)) {
346 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
347 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
349 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
350 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
352 if (ch->flags & EEPROM_CHANNEL_RADAR)
353 geo_ch->flags |= IEEE80211_CHAN_RADAR;
355 geo_ch->flags |= ch->ht40_extension_channel;
357 if (ch->max_power_avg > priv->tx_power_device_lmt)
358 priv->tx_power_device_lmt = ch->max_power_avg;
359 } else {
360 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
363 IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
364 ch->channel, geo_ch->center_freq,
365 is_channel_a_band(ch) ? "5.2" : "2.4",
366 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
367 "restricted" : "valid",
368 geo_ch->flags);
371 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
372 priv->cfg->sku & IWL_SKU_A) {
373 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
374 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
375 priv->pci_dev->device,
376 priv->pci_dev->subsystem_device);
377 priv->cfg->sku &= ~IWL_SKU_A;
380 IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
381 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
382 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
384 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
386 return 0;
388 EXPORT_SYMBOL(iwlcore_init_geos);
391 * iwlcore_free_geos - undo allocations in iwlcore_init_geos
393 void iwlcore_free_geos(struct iwl_priv *priv)
395 kfree(priv->ieee_channels);
396 kfree(priv->ieee_rates);
397 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
399 EXPORT_SYMBOL(iwlcore_free_geos);
402 * iwlcore_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
403 * function.
405 void iwlcore_tx_cmd_protection(struct iwl_priv *priv,
406 struct ieee80211_tx_info *info,
407 __le16 fc, __le32 *tx_flags)
409 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
410 *tx_flags |= TX_CMD_FLG_RTS_MSK;
411 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
412 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
414 if (!ieee80211_is_mgmt(fc))
415 return;
417 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
418 case cpu_to_le16(IEEE80211_STYPE_AUTH):
419 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
420 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
421 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
422 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
423 *tx_flags |= TX_CMD_FLG_CTS_MSK;
424 break;
426 } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
427 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
428 *tx_flags |= TX_CMD_FLG_CTS_MSK;
429 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
432 EXPORT_SYMBOL(iwlcore_tx_cmd_protection);
435 static bool is_single_rx_stream(struct iwl_priv *priv)
437 return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
438 priv->current_ht_config.single_chain_sufficient;
441 static u8 iwl_is_channel_extension(struct iwl_priv *priv,
442 enum ieee80211_band band,
443 u16 channel, u8 extension_chan_offset)
445 const struct iwl_channel_info *ch_info;
447 ch_info = iwl_get_channel_info(priv, band, channel);
448 if (!is_channel_valid(ch_info))
449 return 0;
451 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
452 return !(ch_info->ht40_extension_channel &
453 IEEE80211_CHAN_NO_HT40PLUS);
454 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
455 return !(ch_info->ht40_extension_channel &
456 IEEE80211_CHAN_NO_HT40MINUS);
458 return 0;
461 u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
462 struct ieee80211_sta_ht_cap *sta_ht_inf)
464 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
466 if (!ht_conf->is_ht || !ht_conf->is_40mhz)
467 return 0;
469 /* We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
470 * the bit will not be set in the pure 40MHz case
472 if (sta_ht_inf) {
473 if (!sta_ht_inf->ht_supported)
474 return 0;
476 #ifdef CONFIG_IWLWIFI_DEBUGFS
477 if (priv->disable_ht40)
478 return 0;
479 #endif
480 return iwl_is_channel_extension(priv, priv->band,
481 le16_to_cpu(priv->staging_rxon.channel),
482 ht_conf->extension_chan_offset);
484 EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);
486 static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
488 u16 new_val = 0;
489 u16 beacon_factor = 0;
491 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
492 new_val = beacon_val / beacon_factor;
494 if (!new_val)
495 new_val = max_beacon_val;
497 return new_val;
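/*
 * Worked example (illustrative numbers only): with beacon_val = 10000 TU
 * and max_beacon_val = 4096 TU, beacon_factor = (10000 + 4096) / 4096 = 3
 * and new_val = 10000 / 3 = 3333 TU, i.e. the interval is divided down
 * until it fits under the uCode maximum while remaining close to an exact
 * divisor of the original beacon interval.
 */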
500 void iwl_setup_rxon_timing(struct iwl_priv *priv, struct ieee80211_vif *vif)
502 u64 tsf;
503 s32 interval_tm, rem;
504 unsigned long flags;
505 struct ieee80211_conf *conf = NULL;
506 u16 beacon_int;
508 conf = ieee80211_get_hw_conf(priv->hw);
510 spin_lock_irqsave(&priv->lock, flags);
511 priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
512 priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval);
514 beacon_int = vif->bss_conf.beacon_int;
516 if (vif->type == NL80211_IFTYPE_ADHOC) {
517 /* TODO: we need to get atim_window from upper stack
518 * for now we set it to 0 */
519 priv->rxon_timing.atim_window = 0;
520 } else {
521 priv->rxon_timing.atim_window = 0;
524 beacon_int = iwl_adjust_beacon_interval(beacon_int,
525 priv->hw_params.max_beacon_itrvl * TIME_UNIT);
526 priv->rxon_timing.beacon_interval = cpu_to_le16(beacon_int);
528 tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
529 interval_tm = beacon_int * TIME_UNIT;
530 rem = do_div(tsf, interval_tm);
531 priv->rxon_timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
533 spin_unlock_irqrestore(&priv->lock, flags);
534 IWL_DEBUG_ASSOC(priv,
535 "beacon interval %d beacon timer %d beacon tim %d\n",
536 le16_to_cpu(priv->rxon_timing.beacon_interval),
537 le32_to_cpu(priv->rxon_timing.beacon_init_val),
538 le16_to_cpu(priv->rxon_timing.atim_window));
540 EXPORT_SYMBOL(iwl_setup_rxon_timing);
542 void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
544 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
546 if (hw_decrypt)
547 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
548 else
549 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
552 EXPORT_SYMBOL(iwl_set_rxon_hwcrypto);
555 * iwl_check_rxon_cmd - validate RXON structure is valid
557 * NOTE: This is really only useful during development and can eventually
558 * be #ifdef'd out once the driver is stable and folks aren't actively
559 * making changes
561 int iwl_check_rxon_cmd(struct iwl_priv *priv)
563 int error = 0;
564 int counter = 1;
565 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
567 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
568 error |= le32_to_cpu(rxon->flags &
569 (RXON_FLG_TGJ_NARROW_BAND_MSK |
570 RXON_FLG_RADAR_DETECT_MSK));
571 if (error)
572 IWL_WARN(priv, "check 24G fields %d | %d\n",
573 counter++, error);
574 } else {
575 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
576 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
577 if (error)
578 IWL_WARN(priv, "check 52 fields %d | %d\n",
579 counter++, error);
580 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
581 if (error)
582 IWL_WARN(priv, "check 52 CCK %d | %d\n",
583 counter++, error);
585 error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
586 if (error)
587 IWL_WARN(priv, "check mac addr %d | %d\n", counter++, error);
589 /* make sure basic rates 6Mbps and 1Mbps are supported */
590 error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
591 ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
592 if (error)
593 IWL_WARN(priv, "check basic rate %d | %d\n", counter++, error);
595 error |= (le16_to_cpu(rxon->assoc_id) > 2007);
596 if (error)
597 IWL_WARN(priv, "check assoc id %d | %d\n", counter++, error);
599 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
600 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
601 if (error)
602 IWL_WARN(priv, "check CCK and short slot %d | %d\n",
603 counter++, error);
605 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
606 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
607 if (error)
608 IWL_WARN(priv, "check CCK & auto detect %d | %d\n",
609 counter++, error);
611 error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
612 RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
613 if (error)
614 IWL_WARN(priv, "check TGG and auto detect %d | %d\n",
615 counter++, error);
617 if (error)
618 IWL_WARN(priv, "Tuning to channel %d\n",
619 le16_to_cpu(rxon->channel));
621 if (error) {
622 IWL_ERR(priv, "Invalid iwl_rxon_assoc_cmd field values\n");
623 return -1;
625 return 0;
627 EXPORT_SYMBOL(iwl_check_rxon_cmd);
630 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
631 * @priv: staging_rxon is compared to active_rxon
633 * If the RXON structure is changing enough to require a new tune,
634 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
635 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
637 int iwl_full_rxon_required(struct iwl_priv *priv)
640 /* These items are only settable from the full RXON command */
641 if (!(iwl_is_associated(priv)) ||
642 compare_ether_addr(priv->staging_rxon.bssid_addr,
643 priv->active_rxon.bssid_addr) ||
644 compare_ether_addr(priv->staging_rxon.node_addr,
645 priv->active_rxon.node_addr) ||
646 compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
647 priv->active_rxon.wlap_bssid_addr) ||
648 (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
649 (priv->staging_rxon.channel != priv->active_rxon.channel) ||
650 (priv->staging_rxon.air_propagation !=
651 priv->active_rxon.air_propagation) ||
652 (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
653 priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
654 (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
655 priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
656 (priv->staging_rxon.ofdm_ht_triple_stream_basic_rates !=
657 priv->active_rxon.ofdm_ht_triple_stream_basic_rates) ||
658 (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
659 return 1;
661 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
662 * be updated with the RXON_ASSOC command -- however only some
663 * flag transitions are allowed using RXON_ASSOC */
665 /* Check if we are not switching bands */
666 if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
667 (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
668 return 1;
670 /* Check if we are switching association toggle */
671 if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
672 (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
673 return 1;
675 return 0;
677 EXPORT_SYMBOL(iwl_full_rxon_required);
679 u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv)
682 * Assign the lowest rate -- should really get this from
683 * the beacon skb from mac80211.
685 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
686 return IWL_RATE_1M_PLCP;
687 else
688 return IWL_RATE_6M_PLCP;
690 EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
692 void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
694 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
696 if (!ht_conf->is_ht) {
697 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
698 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
699 RXON_FLG_HT40_PROT_MSK |
700 RXON_FLG_HT_PROT_MSK);
701 return;
704 rxon->flags |= cpu_to_le32(ht_conf->ht_protection << RXON_FLG_HT_OPERATING_MODE_POS);
706 /* Set up channel bandwidth:
707 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
708 /* clear the HT channel mode before set the mode */
709 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
710 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
711 if (iwl_is_ht40_tx_allowed(priv, NULL)) {
712 /* pure ht40 */
713 if (ht_conf->ht_protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
714 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
715 /* Note: control channel is opposite of extension channel */
716 switch (ht_conf->extension_chan_offset) {
717 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
718 rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
719 break;
720 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
721 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
722 break;
724 } else {
725 /* Note: control channel is opposite of extension channel */
726 switch (ht_conf->extension_chan_offset) {
727 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
728 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
729 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
730 break;
731 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
732 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
733 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
734 break;
735 case IEEE80211_HT_PARAM_CHA_SEC_NONE:
736 default:
737 /* channel location only valid if in Mixed mode */
738 IWL_ERR(priv, "invalid extension channel offset\n");
739 break;
742 } else {
743 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
746 if (priv->cfg->ops->hcmd->set_rxon_chain)
747 priv->cfg->ops->hcmd->set_rxon_chain(priv);
749 IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
750 "extension channel offset 0x%x\n",
751 le32_to_cpu(rxon->flags), ht_conf->ht_protection,
752 ht_conf->extension_chan_offset);
754 EXPORT_SYMBOL(iwl_set_rxon_ht);
756 #define IWL_NUM_RX_CHAINS_MULTIPLE 3
757 #define IWL_NUM_RX_CHAINS_SINGLE 2
758 #define IWL_NUM_IDLE_CHAINS_DUAL 2
759 #define IWL_NUM_IDLE_CHAINS_SINGLE 1
762 * Determine how many receiver/antenna chains to use.
764 * More provides better reception via diversity. Fewer saves power
765 * at the expense of throughput, but only when not in powersave to
766 * start with.
768 * MIMO (dual stream) requires at least 2, but works better with 3.
769 * This does not determine *which* chains to use, just how many.
771 static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
773 /* # of Rx chains to use when expecting MIMO. */
774 if (is_single_rx_stream(priv))
775 return IWL_NUM_RX_CHAINS_SINGLE;
776 else
777 return IWL_NUM_RX_CHAINS_MULTIPLE;
781 * When we are in power-saving mode, unless the device supports spatial
782 * multiplexing power save, use the active count as the rx chain count.
784 static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
786 /* # Rx chains when idling, depending on SMPS mode */
787 switch (priv->current_ht_config.smps) {
788 case IEEE80211_SMPS_STATIC:
789 case IEEE80211_SMPS_DYNAMIC:
790 return IWL_NUM_IDLE_CHAINS_SINGLE;
791 case IEEE80211_SMPS_OFF:
792 return active_cnt;
793 default:
794 WARN(1, "invalid SMPS mode %d",
795 priv->current_ht_config.smps);
796 return active_cnt;
800 /* up to 4 chains */
801 static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
803 u8 res;
804 res = (chain_bitmap & BIT(0)) >> 0;
805 res += (chain_bitmap & BIT(1)) >> 1;
806 res += (chain_bitmap & BIT(2)) >> 2;
807 res += (chain_bitmap & BIT(3)) >> 3;
808 return res;
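/*
 * Note (illustrative): this is simply a population count of the low four
 * bits; the kernel's hweight8()/hweight32() helpers from <linux/bitops.h>
 * compute the same value, the open-coded form here just makes the 4-chain
 * limit explicit.
 */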
812 * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
814 * Selects how many and which Rx receivers/antennas/chains to use.
815 * This should not be used for scan command ... it puts data in wrong place.
817 void iwl_set_rxon_chain(struct iwl_priv *priv)
819 bool is_single = is_single_rx_stream(priv);
820 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
821 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
822 u32 active_chains;
823 u16 rx_chain;
825 /* Tell uCode which antennas are actually connected.
826 * Before first association, we assume all antennas are connected.
827 * Just after first association, iwl_chain_noise_calibration()
828 * checks which antennas actually *are* connected. */
829 if (priv->chain_noise_data.active_chains)
830 active_chains = priv->chain_noise_data.active_chains;
831 else
832 active_chains = priv->hw_params.valid_rx_ant;
834 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
836 /* How many receivers should we use? */
837 active_rx_cnt = iwl_get_active_rx_chain_count(priv);
838 idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);
841 /* correct rx chain count according to hw settings
842 * and chain noise calibration
844 valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
845 if (valid_rx_cnt < active_rx_cnt)
846 active_rx_cnt = valid_rx_cnt;
848 if (valid_rx_cnt < idle_rx_cnt)
849 idle_rx_cnt = valid_rx_cnt;
851 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
852 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
854 priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);
856 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
857 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
858 else
859 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
861 IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
862 priv->staging_rxon.rx_chain,
863 active_rx_cnt, idle_rx_cnt);
865 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
866 active_rx_cnt < idle_rx_cnt);
868 EXPORT_SYMBOL(iwl_set_rxon_chain);
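/*
 * Worked example (illustrative only): with all three chains reported
 * connected (active_chains = 0x7), MIMO expected, SMPS off and the device
 * in CAM (not in power-save), the code above yields active_rx_cnt = 3 and
 * idle_rx_cnt = 3, so rx_chain = (0x7 << RXON_RX_CHAIN_VALID_POS) |
 * (3 << RXON_RX_CHAIN_MIMO_CNT_POS) | (3 << RXON_RX_CHAIN_CNT_POS),
 * with RXON_RX_CHAIN_MIMO_FORCE_MSK set because active_rx_cnt >=
 * IWL_NUM_RX_CHAINS_SINGLE.
 */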
870 /* Return valid channel */
871 u8 iwl_get_single_channel_number(struct iwl_priv *priv,
872 enum ieee80211_band band)
874 const struct iwl_channel_info *ch_info;
875 int i;
876 u8 channel = 0;
878 /* only scan single channel, good enough to reset the RF */
879 /* pick the first valid not in-use channel */
880 if (band == IEEE80211_BAND_5GHZ) {
881 for (i = 14; i < priv->channel_count; i++) {
882 if (priv->channel_info[i].channel !=
883 le16_to_cpu(priv->staging_rxon.channel)) {
884 channel = priv->channel_info[i].channel;
885 ch_info = iwl_get_channel_info(priv,
886 band, channel);
887 if (is_channel_valid(ch_info))
888 break;
891 } else {
892 for (i = 0; i < 14; i++) {
893 if (priv->channel_info[i].channel !=
894 le16_to_cpu(priv->staging_rxon.channel)) {
895 channel =
896 priv->channel_info[i].channel;
897 ch_info = iwl_get_channel_info(priv,
898 band, channel);
899 if (is_channel_valid(ch_info))
900 break;
905 return channel;
907 EXPORT_SYMBOL(iwl_get_single_channel_number);
910 * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON
911 * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz
912 * @channel: Any channel valid for the requested phymode
914 * In addition to setting the staging RXON, priv->phymode is also set.
916 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
917 * in the staging RXON flag structure based on the phymode
919 int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
921 enum ieee80211_band band = ch->band;
922 u16 channel = ieee80211_frequency_to_channel(ch->center_freq);
924 if (!iwl_get_channel_info(priv, band, channel)) {
925 IWL_DEBUG_INFO(priv, "Could not set channel to %d [%d]\n",
926 channel, band);
927 return -EINVAL;
930 if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
931 (priv->band == band))
932 return 0;
934 priv->staging_rxon.channel = cpu_to_le16(channel);
935 if (band == IEEE80211_BAND_5GHZ)
936 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
937 else
938 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
940 priv->band = band;
942 IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
944 return 0;
946 EXPORT_SYMBOL(iwl_set_rxon_channel);
948 void iwl_set_flags_for_band(struct iwl_priv *priv,
949 enum ieee80211_band band,
950 struct ieee80211_vif *vif)
952 if (band == IEEE80211_BAND_5GHZ) {
953 priv->staging_rxon.flags &=
954 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
955 | RXON_FLG_CCK_MSK);
956 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
957 } else {
958 /* Copied from iwl_post_associate() */
959 if (vif && vif->bss_conf.use_short_slot)
960 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
961 else
962 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
964 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
965 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
966 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
969 EXPORT_SYMBOL(iwl_set_flags_for_band);
972 * initialize rxon structure with default values from eeprom
974 void iwl_connection_init_rx_config(struct iwl_priv *priv,
975 struct ieee80211_vif *vif)
977 const struct iwl_channel_info *ch_info;
978 enum nl80211_iftype type = NL80211_IFTYPE_STATION;
980 if (vif)
981 type = vif->type;
983 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
985 switch (type) {
986 case NL80211_IFTYPE_AP:
987 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
988 break;
990 case NL80211_IFTYPE_STATION:
991 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
992 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
993 break;
995 case NL80211_IFTYPE_ADHOC:
996 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
997 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
998 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
999 RXON_FILTER_ACCEPT_GRP_MSK;
1000 break;
1002 default:
1003 IWL_ERR(priv, "Unsupported interface type %d\n", type);
1004 break;
1008 ch_info = iwl_get_channel_info(priv, priv->band,
1009 le16_to_cpu(priv->active_rxon.channel));
1011 if (!ch_info)
1012 ch_info = &priv->channel_info[0];
1014 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
1015 priv->band = ch_info->band;
1017 iwl_set_flags_for_band(priv, priv->band, vif);
1019 priv->staging_rxon.ofdm_basic_rates =
1020 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
1021 priv->staging_rxon.cck_basic_rates =
1022 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
1024 /* clear both MIX and PURE40 mode flag */
1025 priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
1026 RXON_FLG_CHANNEL_MODE_PURE_40);
1028 if (vif)
1029 memcpy(priv->staging_rxon.node_addr, vif->addr, ETH_ALEN);
1031 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
1032 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
1033 priv->staging_rxon.ofdm_ht_triple_stream_basic_rates = 0xff;
1035 EXPORT_SYMBOL(iwl_connection_init_rx_config);
1037 void iwl_set_rate(struct iwl_priv *priv)
1039 const struct ieee80211_supported_band *hw = NULL;
1040 struct ieee80211_rate *rate;
1041 int i;
1043 hw = iwl_get_hw_mode(priv, priv->band);
1044 if (!hw) {
1045 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
1046 return;
1049 priv->active_rate = 0;
1051 for (i = 0; i < hw->n_bitrates; i++) {
1052 rate = &(hw->bitrates[i]);
1053 if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
1054 priv->active_rate |= (1 << rate->hw_value);
1057 IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
1059 priv->staging_rxon.cck_basic_rates =
1060 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
1062 priv->staging_rxon.ofdm_basic_rates =
1063 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
1065 EXPORT_SYMBOL(iwl_set_rate);
1067 void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
1069 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1070 return;
1072 if (priv->switch_rxon.switch_in_progress) {
1073 ieee80211_chswitch_done(priv->vif, is_success);
1074 mutex_lock(&priv->mutex);
1075 priv->switch_rxon.switch_in_progress = false;
1076 mutex_unlock(&priv->mutex);
1079 EXPORT_SYMBOL(iwl_chswitch_done);
1081 void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1083 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1084 struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
1085 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
1087 if (priv->switch_rxon.switch_in_progress) {
1088 if (!le32_to_cpu(csa->status) &&
1089 (csa->channel == priv->switch_rxon.channel)) {
1090 rxon->channel = csa->channel;
1091 priv->staging_rxon.channel = csa->channel;
1092 IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
1093 le16_to_cpu(csa->channel));
1094 iwl_chswitch_done(priv, true);
1095 } else {
1096 IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
1097 le16_to_cpu(csa->channel));
1098 iwl_chswitch_done(priv, false);
1102 EXPORT_SYMBOL(iwl_rx_csa);
1104 #ifdef CONFIG_IWLWIFI_DEBUG
1105 void iwl_print_rx_config_cmd(struct iwl_priv *priv)
1107 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
1109 IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
1110 iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
1111 IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
1112 IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
1113 IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
1114 le32_to_cpu(rxon->filter_flags));
1115 IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
1116 IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
1117 rxon->ofdm_basic_rates);
1118 IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
1119 IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
1120 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
1121 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
1123 EXPORT_SYMBOL(iwl_print_rx_config_cmd);
1124 #endif
1126 * iwl_irq_handle_error - called for HW or SW error interrupt from card
1128 void iwl_irq_handle_error(struct iwl_priv *priv)
1130 /* Set the FW error flag -- cleared on iwl_down */
1131 set_bit(STATUS_FW_ERROR, &priv->status);
1133 /* Cancel currently queued command. */
1134 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1136 IWL_ERR(priv, "Loaded firmware version: %s\n",
1137 priv->hw->wiphy->fw_version);
1139 priv->cfg->ops->lib->dump_nic_error_log(priv);
1140 if (priv->cfg->ops->lib->dump_csr)
1141 priv->cfg->ops->lib->dump_csr(priv);
1142 if (priv->cfg->ops->lib->dump_fh)
1143 priv->cfg->ops->lib->dump_fh(priv, NULL, false);
1144 priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
1145 #ifdef CONFIG_IWLWIFI_DEBUG
1146 if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
1147 iwl_print_rx_config_cmd(priv);
1148 #endif
1150 wake_up_interruptible(&priv->wait_command_queue);
1152 /* Keep the restart process from trying to send host
1153 * commands by clearing the INIT status bit */
1154 clear_bit(STATUS_READY, &priv->status);
1156 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
1157 IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
1158 "Restarting adapter due to uCode error.\n");
1160 if (priv->cfg->mod_params->restart_fw)
1161 queue_work(priv->workqueue, &priv->restart);
1164 EXPORT_SYMBOL(iwl_irq_handle_error);
1166 static int iwl_apm_stop_master(struct iwl_priv *priv)
1168 int ret = 0;
1170 /* stop device's busmaster DMA activity */
1171 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
1173 ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
1174 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
1175 if (ret)
1176 IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
1178 IWL_DEBUG_INFO(priv, "stop master\n");
1180 return ret;
1183 void iwl_apm_stop(struct iwl_priv *priv)
1185 IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
1187 /* Stop device's DMA activity */
1188 iwl_apm_stop_master(priv);
1190 /* Reset the entire device */
1191 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
1193 udelay(10);
1196 * Clear "initialization complete" bit to move adapter from
1197 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
1199 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1201 EXPORT_SYMBOL(iwl_apm_stop);
1205 * Start up NIC's basic functionality after it has been reset
1206 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
1207 * NOTE: This does not load uCode nor start the embedded processor
1209 int iwl_apm_init(struct iwl_priv *priv)
1211 int ret = 0;
1212 u16 lctl;
1214 IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
1217 * Use "set_bit" below rather than "write", to preserve any hardware
1218 * bits already set by default after reset.
1221 /* Disable L0S exit timer (platform NMI Work/Around) */
1222 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1223 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1226 * Disable L0s without affecting L1;
1227 * don't wait for ICH L0s (ICH bug W/A)
1229 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1230 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1232 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1233 iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
1236 * Enable HAP INTA (interrupt from management bus) to
1237 * wake device's PCI Express link L1a -> L0s
1238 * NOTE: This is a no-op for 3945 (non-existent bit)
1240 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1241 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1244 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
1245 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1246 * If so (likely), disable L0S, so device moves directly L0->L1;
1247 * costs negligible amount of power savings.
1248 * If not (unlikely), enable L0S, so there is at least some
1249 * power savings, even without L1.
1251 if (priv->cfg->set_l0s) {
1252 lctl = iwl_pcie_link_ctl(priv);
1253 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
1254 PCI_CFG_LINK_CTRL_VAL_L1_EN) {
1255 /* L1-ASPM enabled; disable(!) L0S */
1256 iwl_set_bit(priv, CSR_GIO_REG,
1257 CSR_GIO_REG_VAL_L0S_ENABLED);
1258 IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
1259 } else {
1260 /* L1-ASPM disabled; enable(!) L0S */
1261 iwl_clear_bit(priv, CSR_GIO_REG,
1262 CSR_GIO_REG_VAL_L0S_ENABLED);
1263 IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
1267 /* Configure analog phase-lock-loop before activating to D0A */
1268 if (priv->cfg->pll_cfg_val)
1269 iwl_set_bit(priv, CSR_ANA_PLL_CFG, priv->cfg->pll_cfg_val);
1272 * Set "initialization complete" bit to move adapter from
1273 * D0U* --> D0A* (powered-up active) state.
1275 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1278 * Wait for clock stabilization; once stabilized, access to
1279 * device-internal resources is supported, e.g. iwl_write_prph()
1280 * and accesses to uCode SRAM.
1282 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
1283 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1284 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1285 if (ret < 0) {
1286 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
1287 goto out;
1291 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
1292 * BSM (Bootstrap State Machine) is only in 3945 and 4965;
1293 * later devices (i.e. 5000 and later) have non-volatile SRAM,
1294 * and don't need BSM to restore data after power-saving sleep.
1296 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1297 * do not disable clocks. This preserves any hardware bits already
1298 * set by default in "CLK_CTRL_REG" after reset.
1300 if (priv->cfg->use_bsm)
1301 iwl_write_prph(priv, APMG_CLK_EN_REG,
1302 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
1303 else
1304 iwl_write_prph(priv, APMG_CLK_EN_REG,
1305 APMG_CLK_VAL_DMA_CLK_RQT);
1306 udelay(20);
1308 /* Disable L1-Active */
1309 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
1310 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1312 out:
1313 return ret;
1315 EXPORT_SYMBOL(iwl_apm_init);
1318 int iwl_set_hw_params(struct iwl_priv *priv)
1320 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
1321 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
1322 if (priv->cfg->mod_params->amsdu_size_8K)
1323 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
1324 else
1325 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
1327 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
1329 if (priv->cfg->mod_params->disable_11n)
1330 priv->cfg->sku &= ~IWL_SKU_N;
1332 /* Device-specific setup */
1333 return priv->cfg->ops->lib->set_hw_params(priv);
1335 EXPORT_SYMBOL(iwl_set_hw_params);
1337 int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1339 int ret = 0;
1340 s8 prev_tx_power = priv->tx_power_user_lmt;
1342 if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
1343 IWL_WARN(priv,
1344 "Requested user TXPOWER %d below lower limit %d.\n",
1345 tx_power,
1346 IWLAGN_TX_POWER_TARGET_POWER_MIN);
1347 return -EINVAL;
1350 if (tx_power > priv->tx_power_device_lmt) {
1351 IWL_WARN(priv,
1352 "Requested user TXPOWER %d above upper limit %d.\n",
1353 tx_power, priv->tx_power_device_lmt);
1354 return -EINVAL;
1357 if (priv->tx_power_user_lmt != tx_power)
1358 force = true;
1360 /* if nic is not up don't send command */
1361 if (iwl_is_ready_rf(priv)) {
1362 priv->tx_power_user_lmt = tx_power;
1363 if (force && priv->cfg->ops->lib->send_tx_power)
1364 ret = priv->cfg->ops->lib->send_tx_power(priv);
1365 else if (!priv->cfg->ops->lib->send_tx_power)
1366 ret = -EOPNOTSUPP;
1368 * if setting tx_power fails, restore the original tx power
1370 if (ret)
1371 priv->tx_power_user_lmt = prev_tx_power;
1375 * Even though this is an async host command, the command
1376 * will always report success from the uCode.
1377 * So once the driver has placed the command into the queue
1378 * successfully, the driver can use priv->tx_power_user_lmt
1379 * to reflect the current tx power
1381 return ret;
1383 EXPORT_SYMBOL(iwl_set_tx_power);
1385 irqreturn_t iwl_isr_legacy(int irq, void *data)
1387 struct iwl_priv *priv = data;
1388 u32 inta, inta_mask;
1389 u32 inta_fh;
1390 unsigned long flags;
1391 if (!priv)
1392 return IRQ_NONE;
1394 spin_lock_irqsave(&priv->lock, flags);
1396 /* Disable (but don't clear!) interrupts here to avoid
1397 * back-to-back ISRs and sporadic interrupts from our NIC.
1398 * If we have something to service, the tasklet will re-enable ints.
1399 * If we *don't* have something, we'll re-enable before leaving here. */
1400 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
1401 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
1403 /* Discover which interrupts are active/pending */
1404 inta = iwl_read32(priv, CSR_INT);
1405 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1407 /* Ignore interrupt if there's nothing in NIC to service.
1408 * This may be due to IRQ shared with another device,
1409 * or due to sporadic interrupts thrown from our NIC. */
1410 if (!inta && !inta_fh) {
1411 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0, inta_fh == 0\n");
1412 goto none;
1415 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1416 /* Hardware disappeared. It might have already raised
1417 * an interrupt */
1418 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1419 goto unplugged;
1422 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
1423 inta, inta_mask, inta_fh);
1425 inta &= ~CSR_INT_BIT_SCD;
1427 /* iwl_irq_tasklet() will service interrupts and re-enable them */
1428 if (likely(inta || inta_fh))
1429 tasklet_schedule(&priv->irq_tasklet);
1431 unplugged:
1432 spin_unlock_irqrestore(&priv->lock, flags);
1433 return IRQ_HANDLED;
1435 none:
1436 /* re-enable interrupts here since we don't have anything to service. */
1437 /* only re-enable if disabled by irq */
1438 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1439 iwl_enable_interrupts(priv);
1440 spin_unlock_irqrestore(&priv->lock, flags);
1441 return IRQ_NONE;
1443 EXPORT_SYMBOL(iwl_isr_legacy);
1445 void iwl_send_bt_config(struct iwl_priv *priv)
1447 struct iwl_bt_cmd bt_cmd = {
1448 .lead_time = BT_LEAD_TIME_DEF,
1449 .max_kill = BT_MAX_KILL_DEF,
1450 .kill_ack_mask = 0,
1451 .kill_cts_mask = 0,
1454 if (!bt_coex_active)
1455 bt_cmd.flags = BT_COEX_DISABLE;
1456 else
1457 bt_cmd.flags = BT_COEX_ENABLE;
1459 IWL_DEBUG_INFO(priv, "BT coex %s\n",
1460 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
1462 if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1463 sizeof(struct iwl_bt_cmd), &bt_cmd))
1464 IWL_ERR(priv, "failed to send BT Coex Config\n");
1466 EXPORT_SYMBOL(iwl_send_bt_config);
1468 int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1470 struct iwl_statistics_cmd statistics_cmd = {
1471 .configuration_flags =
1472 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
1475 if (flags & CMD_ASYNC)
1476 return iwl_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
1477 sizeof(struct iwl_statistics_cmd),
1478 &statistics_cmd, NULL);
1479 else
1480 return iwl_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
1481 sizeof(struct iwl_statistics_cmd),
1482 &statistics_cmd);
1484 EXPORT_SYMBOL(iwl_send_statistics_request);
1486 void iwl_rf_kill_ct_config(struct iwl_priv *priv)
1488 struct iwl_ct_kill_config cmd;
1489 struct iwl_ct_kill_throttling_config adv_cmd;
1490 unsigned long flags;
1491 int ret = 0;
1493 spin_lock_irqsave(&priv->lock, flags);
1494 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
1495 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1496 spin_unlock_irqrestore(&priv->lock, flags);
1497 priv->thermal_throttle.ct_kill_toggle = false;
1499 if (priv->cfg->support_ct_kill_exit) {
1500 adv_cmd.critical_temperature_enter =
1501 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1502 adv_cmd.critical_temperature_exit =
1503 cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
1505 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1506 sizeof(adv_cmd), &adv_cmd);
1507 if (ret)
1508 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1509 else
1510 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1511 "succeeded, "
1512 "critical temperature enter is %d,"
1513 "exit is %d\n",
1514 priv->hw_params.ct_kill_threshold,
1515 priv->hw_params.ct_kill_exit_threshold);
1516 } else {
1517 cmd.critical_temperature_R =
1518 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1520 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1521 sizeof(cmd), &cmd);
1522 if (ret)
1523 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1524 else
1525 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1526 "succeeded, "
1527 "critical temperature is %d\n",
1528 priv->hw_params.ct_kill_threshold);
1531 EXPORT_SYMBOL(iwl_rf_kill_ct_config);
1535 * CARD_STATE_CMD
1537 * Use: Sets the device's internal card state to enable, disable, or halt
1539 * When in the 'enable' state the card operates as normal.
1540 * When in the 'disable' state, the card enters into a low power mode.
1541 * When in the 'halt' state, the card is shut down and must be fully
1542 * restarted to come back on.
1544 int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
1546 struct iwl_host_cmd cmd = {
1547 .id = REPLY_CARD_STATE_CMD,
1548 .len = sizeof(u32),
1549 .data = &flags,
1550 .flags = meta_flag,
1553 return iwl_send_cmd(priv, &cmd);
1556 void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
1557 struct iwl_rx_mem_buffer *rxb)
1559 #ifdef CONFIG_IWLWIFI_DEBUG
1560 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1561 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
1562 IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
1563 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
1564 #endif
1566 EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);
1568 void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
1569 struct iwl_rx_mem_buffer *rxb)
1571 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1572 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1573 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
1574 "notification for %s:\n", len,
1575 get_cmd_string(pkt->hdr.cmd));
1576 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
1578 EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);
1580 void iwl_rx_reply_error(struct iwl_priv *priv,
1581 struct iwl_rx_mem_buffer *rxb)
1583 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1585 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
1586 "seq 0x%04X ser 0x%08X\n",
1587 le32_to_cpu(pkt->u.err_resp.error_type),
1588 get_cmd_string(pkt->u.err_resp.cmd_id),
1589 pkt->u.err_resp.cmd_id,
1590 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
1591 le32_to_cpu(pkt->u.err_resp.error_info));
1593 EXPORT_SYMBOL(iwl_rx_reply_error);
1595 void iwl_clear_isr_stats(struct iwl_priv *priv)
1597 memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
1600 int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1601 const struct ieee80211_tx_queue_params *params)
1603 struct iwl_priv *priv = hw->priv;
1604 unsigned long flags;
1605 int q;
1607 IWL_DEBUG_MAC80211(priv, "enter\n");
1609 if (!iwl_is_ready_rf(priv)) {
1610 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
1611 return -EIO;
1614 if (queue >= AC_NUM) {
1615 IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
1616 return 0;
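/*
 * mac80211 numbers its queues from highest to lowest priority (queue 0 is
 * VO), while the uCode EDCA parameter table is assumed to be indexed the
 * other way around, hence the index reversal below (inference from the
 * mapping used throughout this driver).
 */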
1619 q = AC_NUM - 1 - queue;
1621 spin_lock_irqsave(&priv->lock, flags);
1623 priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
1624 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
1625 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
1626 priv->qos_data.def_qos_parm.ac[q].edca_txop =
1627 cpu_to_le16((params->txop * 32));
1629 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
1631 spin_unlock_irqrestore(&priv->lock, flags);
1633 IWL_DEBUG_MAC80211(priv, "leave\n");
1634 return 0;
1636 EXPORT_SYMBOL(iwl_mac_conf_tx);
1638 static void iwl_ht_conf(struct iwl_priv *priv,
1639 struct ieee80211_vif *vif)
1641 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
1642 struct ieee80211_sta *sta;
1643 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
1645 IWL_DEBUG_MAC80211(priv, "enter:\n");
1647 if (!ht_conf->is_ht)
1648 return;
1650 ht_conf->ht_protection =
1651 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
1652 ht_conf->non_GF_STA_present =
1653 !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
1655 ht_conf->single_chain_sufficient = false;
1657 switch (vif->type) {
1658 case NL80211_IFTYPE_STATION:
1659 rcu_read_lock();
1660 sta = ieee80211_find_sta(vif, bss_conf->bssid);
1661 if (sta) {
1662 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1663 int maxstreams;
1665 maxstreams = (ht_cap->mcs.tx_params &
1666 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
1667 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1668 maxstreams += 1;
1670 if ((ht_cap->mcs.rx_mask[1] == 0) &&
1671 (ht_cap->mcs.rx_mask[2] == 0))
1672 ht_conf->single_chain_sufficient = true;
1673 if (maxstreams <= 1)
1674 ht_conf->single_chain_sufficient = true;
1675 } else {
1677 * If at all, this can only happen through a race
1678 * when the AP disconnects us while we're still
1679 * setting up the connection; in that case mac80211
1680 * will soon tell us about that.
1682 ht_conf->single_chain_sufficient = true;
1684 rcu_read_unlock();
1685 break;
1686 case NL80211_IFTYPE_ADHOC:
1687 ht_conf->single_chain_sufficient = true;
1688 break;
1689 default:
1690 break;
1693 IWL_DEBUG_MAC80211(priv, "leave\n");
1696 static inline void iwl_set_no_assoc(struct iwl_priv *priv)
1698 iwl_led_disassociate(priv);
1700 * inform the ucode that there is no longer an
1701 * association and that no more packets should be
1702 * sent
1704 priv->staging_rxon.filter_flags &=
1705 ~RXON_FILTER_ASSOC_MSK;
1706 priv->staging_rxon.assoc_id = 0;
1707 iwlcore_commit_rxon(priv);
1710 static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
1712 struct iwl_priv *priv = hw->priv;
1713 unsigned long flags;
1714 __le64 timestamp;
1716 IWL_DEBUG_MAC80211(priv, "enter\n");
1718 if (!iwl_is_ready_rf(priv)) {
1719 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
1720 return -EIO;
1723 spin_lock_irqsave(&priv->lock, flags);
1725 if (priv->ibss_beacon)
1726 dev_kfree_skb(priv->ibss_beacon);
1728 priv->ibss_beacon = skb;
1730 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
1731 priv->timestamp = le64_to_cpu(timestamp);
1733 IWL_DEBUG_MAC80211(priv, "leave\n");
1734 spin_unlock_irqrestore(&priv->lock, flags);
1736 priv->cfg->ops->lib->post_associate(priv, priv->vif);
1738 return 0;
1741 void iwl_bss_info_changed(struct ieee80211_hw *hw,
1742 struct ieee80211_vif *vif,
1743 struct ieee80211_bss_conf *bss_conf,
1744 u32 changes)
1746 struct iwl_priv *priv = hw->priv;
1747 int ret;
1749 IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
1751 if (!iwl_is_alive(priv))
1752 return;
1754 mutex_lock(&priv->mutex);
1756 if (changes & BSS_CHANGED_QOS) {
1757 unsigned long flags;
1759 spin_lock_irqsave(&priv->lock, flags);
1760 priv->qos_data.qos_active = bss_conf->qos;
1761 iwl_update_qos(priv);
1762 spin_unlock_irqrestore(&priv->lock, flags);
1765 if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
1766 dev_kfree_skb(priv->ibss_beacon);
1767 priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
1770 if (changes & BSS_CHANGED_BEACON_INT) {
1771 /* TODO: in AP mode, do something to make this take effect */
1774 if (changes & BSS_CHANGED_BSSID) {
1775 IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
1778 * If there is currently a HW scan going on in the
1779 * background then we need to cancel it else the RXON
1780 * below/in post_associate will fail.
1782 if (iwl_scan_cancel_timeout(priv, 100)) {
1783 IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
1784 IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
1785 mutex_unlock(&priv->mutex);
1786 return;
1789 /* mac80211 only sets assoc when in STATION mode */
1790 if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
1791 memcpy(priv->staging_rxon.bssid_addr,
1792 bss_conf->bssid, ETH_ALEN);
1794 /* currently needed in a few places */
1795 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
1796 } else {
1797 priv->staging_rxon.filter_flags &=
1798 ~RXON_FILTER_ASSOC_MSK;
1804 * This needs to be after setting the BSSID in case
1805 * mac80211 decides to do both changes at once because
1806 * it will invoke post_associate.
1808 if (vif->type == NL80211_IFTYPE_ADHOC &&
1809 changes & BSS_CHANGED_BEACON) {
1810 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
1812 if (beacon)
1813 iwl_mac_beacon_update(hw, beacon);
1816 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
1817 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
1818 bss_conf->use_short_preamble);
1819 if (bss_conf->use_short_preamble)
1820 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
1821 else
1822 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
1825 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
1826 IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
1827 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
1828 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
1829 else
1830 priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
1831 if (bss_conf->use_cts_prot)
1832 priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
1833 else
1834 priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
1837 if (changes & BSS_CHANGED_BASIC_RATES) {
1840 if (changes & BSS_CHANGED_HT) {
1841 iwl_ht_conf(priv, vif);
1843 if (priv->cfg->ops->hcmd->set_rxon_chain)
1844 priv->cfg->ops->hcmd->set_rxon_chain(priv);
1847 if (changes & BSS_CHANGED_ASSOC) {
1848 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
1849 if (bss_conf->assoc) {
1850 priv->timestamp = bss_conf->timestamp;
1852 iwl_led_associate(priv);
1854 if (!iwl_is_rfkill(priv))
1855 priv->cfg->ops->lib->post_associate(priv, vif);
1856 } else
1857 iwl_set_no_assoc(priv);
1860 if (changes && iwl_is_associated(priv) && bss_conf->aid) {
1861 IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
1862 changes);
1863 ret = iwl_send_rxon_assoc(priv);
1864 if (!ret) {
1865 /* Sync active_rxon with latest change. */
1866 memcpy((void *)&priv->active_rxon,
1867 &priv->staging_rxon,
1868 sizeof(struct iwl_rxon_cmd));
1872 if (changes & BSS_CHANGED_BEACON_ENABLED) {
1873 if (vif->bss_conf.enable_beacon) {
1874 memcpy(priv->staging_rxon.bssid_addr,
1875 bss_conf->bssid, ETH_ALEN);
1876 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
1877 iwlcore_config_ap(priv, vif);
1878 } else
1879 iwl_set_no_assoc(priv);
1882 if (changes & BSS_CHANGED_IBSS) {
1883 ret = priv->cfg->ops->lib->manage_ibss_station(priv, vif,
1884 bss_conf->ibss_joined);
1885 if (ret)
1886 IWL_ERR(priv, "failed to %s IBSS station %pM\n",
1887 bss_conf->ibss_joined ? "add" : "remove",
1888 bss_conf->bssid);
1891 mutex_unlock(&priv->mutex);
1893 IWL_DEBUG_MAC80211(priv, "leave\n");
1895 EXPORT_SYMBOL(iwl_bss_info_changed);
1897 static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif)
1899 iwl_connection_init_rx_config(priv, vif);
1901 if (priv->cfg->ops->hcmd->set_rxon_chain)
1902 priv->cfg->ops->hcmd->set_rxon_chain(priv);
1904 return iwlcore_commit_rxon(priv);
1907 int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1909 struct iwl_priv *priv = hw->priv;
1910 int err = 0;
1912 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
1913 vif->type, vif->addr);
1915 mutex_lock(&priv->mutex);
1917 if (WARN_ON(!iwl_is_ready_rf(priv))) {
1918 err = -EINVAL;
1919 goto out;
1922 if (priv->vif) {
1923 IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
1924 err = -EOPNOTSUPP;
1925 goto out;
1928 priv->vif = vif;
1929 priv->iw_mode = vif->type;
1931 err = iwl_set_mode(priv, vif);
1932 if (err)
1933 goto out_err;
1935 goto out;
1937 out_err:
1938 priv->vif = NULL;
1939 priv->iw_mode = NL80211_IFTYPE_STATION;
1940 out:
1941 mutex_unlock(&priv->mutex);
1943 IWL_DEBUG_MAC80211(priv, "leave\n");
1944 return err;
1946 EXPORT_SYMBOL(iwl_mac_add_interface);
1948 void iwl_mac_remove_interface(struct ieee80211_hw *hw,
1949 struct ieee80211_vif *vif)
1951 struct iwl_priv *priv = hw->priv;
1952 bool scan_completed = false;
1954 IWL_DEBUG_MAC80211(priv, "enter\n");
1956 mutex_lock(&priv->mutex);
1958 if (iwl_is_ready_rf(priv)) {
1959 iwl_scan_cancel_timeout(priv, 100);
1960 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1961 iwlcore_commit_rxon(priv);
1963 if (priv->vif == vif) {
1964 priv->vif = NULL;
1965 if (priv->scan_vif == vif) {
1966 scan_completed = true;
1967 priv->scan_vif = NULL;
1968 priv->scan_request = NULL;
1970 memset(priv->bssid, 0, ETH_ALEN);
1972 mutex_unlock(&priv->mutex);
1974 if (scan_completed)
1975 ieee80211_scan_completed(priv->hw, true);
1977 IWL_DEBUG_MAC80211(priv, "leave\n");
1980 EXPORT_SYMBOL(iwl_mac_remove_interface);
1983 /* iwl_mac_config - mac80211 config callback */
1985 int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
1987 struct iwl_priv *priv = hw->priv;
1988 const struct iwl_channel_info *ch_info;
1989 struct ieee80211_conf *conf = &hw->conf;
1990 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
1991 unsigned long flags = 0;
1992 int ret = 0;
1993 u16 ch;
1994 int scan_active = 0;
1996 mutex_lock(&priv->mutex);
1998 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
1999 conf->channel->hw_value, changed);
2001 if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
2002 test_bit(STATUS_SCANNING, &priv->status))) {
2003 scan_active = 1;
2004 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
2007 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
2008 IEEE80211_CONF_CHANGE_CHANNEL)) {
2009 /* mac80211 uses static for non-HT which is what we want */
2010 priv->current_ht_config.smps = conf->smps_mode;
2013 /* Recalculate chain counts.
2015 * If monitor mode is enabled then mac80211 will
2016 * set up the SM PS mode to OFF if an HT channel is
2017 * configured. */
2019 if (priv->cfg->ops->hcmd->set_rxon_chain)
2020 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2023 /* during scanning mac80211 will delay channel setting until
2024 * the scan finishes, with changed = 0 */
2026 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
2027 if (scan_active)
2028 goto set_ch_out;
2030 ch = ieee80211_frequency_to_channel(conf->channel->center_freq);
2031 ch_info = iwl_get_channel_info(priv, conf->channel->band, ch);
2032 if (!is_channel_valid(ch_info)) {
2033 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
2034 ret = -EINVAL;
2035 goto set_ch_out;
2038 spin_lock_irqsave(&priv->lock, flags);
2040 /* Configure HT40 channels */
2041 ht_conf->is_ht = conf_is_ht(conf);
2042 if (ht_conf->is_ht) {
2043 if (conf_is_ht40_minus(conf)) {
2044 ht_conf->extension_chan_offset =
2045 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
2046 ht_conf->is_40mhz = true;
2047 } else if (conf_is_ht40_plus(conf)) {
2048 ht_conf->extension_chan_offset =
2049 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
2050 ht_conf->is_40mhz = true;
2051 } else {
2052 ht_conf->extension_chan_offset =
2053 IEEE80211_HT_PARAM_CHA_SEC_NONE;
2054 ht_conf->is_40mhz = false;
2056 } else
2057 ht_conf->is_40mhz = false;
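/*
 * Illustrative example of the mapping above: an HT40+ association on
 * primary channel 36 places the extension (secondary) channel above the
 * control channel (channel 40), so conf_is_ht40_plus() selects
 * IEEE80211_HT_PARAM_CHA_SEC_ABOVE with is_40mhz set; HT40- on channel 40
 * (secondary at 36) would select ..._CHA_SEC_BELOW instead.
 */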
2058 /* Default to no protection. Protection mode will later be set
2059 * from BSS config in iwl_ht_conf */
2060 ht_conf->ht_protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
2062 /* if we are switching from ht to 2.4 clear flags
2063 * from any ht related info since 2.4 does not
2064 * support ht */
2065 if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
2066 priv->staging_rxon.flags = 0;
2068 iwl_set_rxon_channel(priv, conf->channel);
2069 iwl_set_rxon_ht(priv, ht_conf);
2071 iwl_set_flags_for_band(priv, conf->channel->band, priv->vif);
2072 spin_unlock_irqrestore(&priv->lock, flags);
2074 if (priv->cfg->ops->lib->update_bcast_station)
2075 ret = priv->cfg->ops->lib->update_bcast_station(priv);
2077 set_ch_out:
2078 /* The list of supported rates and rate mask can be different
2079 * for each band; since the band may have changed, reset
2080 * the rate mask to what mac80211 lists */
2081 iwl_set_rate(priv);
2084 if (changed & (IEEE80211_CONF_CHANGE_PS |
2085 IEEE80211_CONF_CHANGE_IDLE)) {
2086 ret = iwl_power_update_mode(priv, false);
2087 if (ret)
2088 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
2091 if (changed & IEEE80211_CONF_CHANGE_POWER) {
2092 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
2093 priv->tx_power_user_lmt, conf->power_level);
2095 iwl_set_tx_power(priv, conf->power_level, false);
2098 if (!iwl_is_ready(priv)) {
2099 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2100 goto out;
2103 if (scan_active)
2104 goto out;
2106 if (memcmp(&priv->active_rxon,
2107 &priv->staging_rxon, sizeof(priv->staging_rxon)))
2108 iwlcore_commit_rxon(priv);
2109 else
2110 IWL_DEBUG_INFO(priv, "Not re-sending same RXON configuration.\n");
2113 out:
2114 IWL_DEBUG_MAC80211(priv, "leave\n");
2115 mutex_unlock(&priv->mutex);
2116 return ret;
2118 EXPORT_SYMBOL(iwl_mac_config);
2120 void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
2122 struct iwl_priv *priv = hw->priv;
2123 unsigned long flags;
2125 mutex_lock(&priv->mutex);
2126 IWL_DEBUG_MAC80211(priv, "enter\n");
2128 spin_lock_irqsave(&priv->lock, flags);
2129 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
2130 spin_unlock_irqrestore(&priv->lock, flags);
2132 spin_lock_irqsave(&priv->lock, flags);
2134 /* new association get rid of ibss beacon skb */
2135 if (priv->ibss_beacon)
2136 dev_kfree_skb(priv->ibss_beacon);
2138 priv->ibss_beacon = NULL;
2140 priv->timestamp = 0;
2142 spin_unlock_irqrestore(&priv->lock, flags);
2144 if (!iwl_is_ready_rf(priv)) {
2145 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2146 mutex_unlock(&priv->mutex);
2147 return;
2150 /* we are restarting association process
2151 * clear RXON_FILTER_ASSOC_MSK bit */
2153 iwl_scan_cancel_timeout(priv, 100);
2154 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2155 iwlcore_commit_rxon(priv);
2157 iwl_set_rate(priv);
2159 mutex_unlock(&priv->mutex);
2161 IWL_DEBUG_MAC80211(priv, "leave\n");
2163 EXPORT_SYMBOL(iwl_mac_reset_tsf);
2165 int iwl_alloc_txq_mem(struct iwl_priv *priv)
2167 if (!priv->txq)
2168 priv->txq = kzalloc(
2169 sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues,
2170 GFP_KERNEL);
2171 if (!priv->txq) {
2172 IWL_ERR(priv, "Not enough memory for txq\n");
2173 return -ENOMEM;
2175 return 0;
2177 EXPORT_SYMBOL(iwl_alloc_txq_mem);
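/*
 * Note (editorial aside, not upstream code): an equivalent, overflow-checked
 * allocation could use kcalloc(priv->cfg->num_of_queues,
 * sizeof(struct iwl_tx_queue), GFP_KERNEL); the kzalloc() above relies on
 * num_of_queues being small enough that the multiplication cannot overflow.
 */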
2179 void iwl_free_txq_mem(struct iwl_priv *priv)
2181 kfree(priv->txq);
2182 priv->txq = NULL;
2184 EXPORT_SYMBOL(iwl_free_txq_mem);
2186 #ifdef CONFIG_IWLWIFI_DEBUGFS
2188 #define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
2190 void iwl_reset_traffic_log(struct iwl_priv *priv)
2192 priv->tx_traffic_idx = 0;
2193 priv->rx_traffic_idx = 0;
2194 if (priv->tx_traffic)
2195 memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
2196 if (priv->rx_traffic)
2197 memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
2200 int iwl_alloc_traffic_mem(struct iwl_priv *priv)
2202 u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
2204 if (iwl_debug_level & IWL_DL_TX) {
2205 if (!priv->tx_traffic) {
2206 priv->tx_traffic =
2207 kzalloc(traffic_size, GFP_KERNEL);
2208 if (!priv->tx_traffic)
2209 return -ENOMEM;
2212 if (iwl_debug_level & IWL_DL_RX) {
2213 if (!priv->rx_traffic) {
2214 priv->rx_traffic =
2215 kzalloc(traffic_size, GFP_KERNEL);
2216 if (!priv->rx_traffic)
2217 return -ENOMEM;
2220 iwl_reset_traffic_log(priv);
2221 return 0;
2223 EXPORT_SYMBOL(iwl_alloc_traffic_mem);
2225 void iwl_free_traffic_mem(struct iwl_priv *priv)
2227 kfree(priv->tx_traffic);
2228 priv->tx_traffic = NULL;
2230 kfree(priv->rx_traffic);
2231 priv->rx_traffic = NULL;
2233 EXPORT_SYMBOL(iwl_free_traffic_mem);
2235 void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
2236 u16 length, struct ieee80211_hdr *header)
2238 __le16 fc;
2239 u16 len;
2241 if (likely(!(iwl_debug_level & IWL_DL_TX)))
2242 return;
2244 if (!priv->tx_traffic)
2245 return;
2247 fc = header->frame_control;
2248 if (ieee80211_is_data(fc)) {
2249 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
2250 ? IWL_TRAFFIC_ENTRY_SIZE : length;
2251 memcpy((priv->tx_traffic +
2252 (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
2253 header, len);
2254 priv->tx_traffic_idx =
2255 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
2258 EXPORT_SYMBOL(iwl_dbg_log_tx_data_frame);
2260 void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
2261 u16 length, struct ieee80211_hdr *header)
2263 __le16 fc;
2264 u16 len;
2266 if (likely(!(iwl_debug_level & IWL_DL_RX)))
2267 return;
2269 if (!priv->rx_traffic)
2270 return;
2272 fc = header->frame_control;
2273 if (ieee80211_is_data(fc)) {
2274 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
2275 ? IWL_TRAFFIC_ENTRY_SIZE : length;
2276 memcpy((priv->rx_traffic +
2277 (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
2278 header, len);
2279 priv->rx_traffic_idx =
2280 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
2283 EXPORT_SYMBOL(iwl_dbg_log_rx_data_frame);
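/*
 * Both loggers above treat tx_traffic/rx_traffic as circular logs of
 * IWL_TRAFFIC_ENTRIES fixed-size slots: at most IWL_TRAFFIC_ENTRY_SIZE
 * bytes of each data frame header are copied into slot
 * (idx * IWL_TRAFFIC_ENTRY_SIZE), and the index wraps via the modulo, so
 * the newest frames silently overwrite the oldest ones.
 */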
2285 const char *get_mgmt_string(int cmd)
2287 switch (cmd) {
2288 IWL_CMD(MANAGEMENT_ASSOC_REQ);
2289 IWL_CMD(MANAGEMENT_ASSOC_RESP);
2290 IWL_CMD(MANAGEMENT_REASSOC_REQ);
2291 IWL_CMD(MANAGEMENT_REASSOC_RESP);
2292 IWL_CMD(MANAGEMENT_PROBE_REQ);
2293 IWL_CMD(MANAGEMENT_PROBE_RESP);
2294 IWL_CMD(MANAGEMENT_BEACON);
2295 IWL_CMD(MANAGEMENT_ATIM);
2296 IWL_CMD(MANAGEMENT_DISASSOC);
2297 IWL_CMD(MANAGEMENT_AUTH);
2298 IWL_CMD(MANAGEMENT_DEAUTH);
2299 IWL_CMD(MANAGEMENT_ACTION);
2300 default:
2301 return "UNKNOWN";
2306 const char *get_ctrl_string(int cmd)
2308 switch (cmd) {
2309 IWL_CMD(CONTROL_BACK_REQ);
2310 IWL_CMD(CONTROL_BACK);
2311 IWL_CMD(CONTROL_PSPOLL);
2312 IWL_CMD(CONTROL_RTS);
2313 IWL_CMD(CONTROL_CTS);
2314 IWL_CMD(CONTROL_ACK);
2315 IWL_CMD(CONTROL_CFEND);
2316 IWL_CMD(CONTROL_CFENDACK);
2317 default:
2318 return "UNKNOWN";
2323 void iwl_clear_traffic_stats(struct iwl_priv *priv)
2325 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
2326 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
2327 priv->led_tpt = 0;
2331 /* If CONFIG_IWLWIFI_DEBUGFS is defined, iwl_update_stats() records
2332 * all MGMT, CTRL and DATA packets on both the TX and RX paths;
2333 * use debugfs to display the tx/rx statistics.
2334 * If CONFIG_IWLWIFI_DEBUGFS is not defined, no MGMT and CTRL
2335 * information is recorded, but DATA packets are still counted
2336 * because iwl_led.c needs to control the LED blinking based on the
2337 * number of tx and rx data frames. */
2340 void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
2342 struct traffic_stats *stats;
2344 if (is_tx)
2345 stats = &priv->tx_stats;
2346 else
2347 stats = &priv->rx_stats;
2349 if (ieee80211_is_mgmt(fc)) {
2350 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
2351 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
2352 stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
2353 break;
2354 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
2355 stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
2356 break;
2357 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
2358 stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
2359 break;
2360 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
2361 stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
2362 break;
2363 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
2364 stats->mgmt[MANAGEMENT_PROBE_REQ]++;
2365 break;
2366 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
2367 stats->mgmt[MANAGEMENT_PROBE_RESP]++;
2368 break;
2369 case cpu_to_le16(IEEE80211_STYPE_BEACON):
2370 stats->mgmt[MANAGEMENT_BEACON]++;
2371 break;
2372 case cpu_to_le16(IEEE80211_STYPE_ATIM):
2373 stats->mgmt[MANAGEMENT_ATIM]++;
2374 break;
2375 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
2376 stats->mgmt[MANAGEMENT_DISASSOC]++;
2377 break;
2378 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2379 stats->mgmt[MANAGEMENT_AUTH]++;
2380 break;
2381 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2382 stats->mgmt[MANAGEMENT_DEAUTH]++;
2383 break;
2384 case cpu_to_le16(IEEE80211_STYPE_ACTION):
2385 stats->mgmt[MANAGEMENT_ACTION]++;
2386 break;
2388 } else if (ieee80211_is_ctl(fc)) {
2389 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
2390 case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
2391 stats->ctrl[CONTROL_BACK_REQ]++;
2392 break;
2393 case cpu_to_le16(IEEE80211_STYPE_BACK):
2394 stats->ctrl[CONTROL_BACK]++;
2395 break;
2396 case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
2397 stats->ctrl[CONTROL_PSPOLL]++;
2398 break;
2399 case cpu_to_le16(IEEE80211_STYPE_RTS):
2400 stats->ctrl[CONTROL_RTS]++;
2401 break;
2402 case cpu_to_le16(IEEE80211_STYPE_CTS):
2403 stats->ctrl[CONTROL_CTS]++;
2404 break;
2405 case cpu_to_le16(IEEE80211_STYPE_ACK):
2406 stats->ctrl[CONTROL_ACK]++;
2407 break;
2408 case cpu_to_le16(IEEE80211_STYPE_CFEND):
2409 stats->ctrl[CONTROL_CFEND]++;
2410 break;
2411 case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
2412 stats->ctrl[CONTROL_CFENDACK]++;
2413 break;
2415 } else {
2416 /* data */
2417 stats->data_cnt++;
2418 stats->data_bytes += len;
2420 iwl_leds_background(priv);
2422 EXPORT_SYMBOL(iwl_update_stats);
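/*
 * Example of the classification above: a received beacon (FCTL subtype
 * IEEE80211_STYPE_BEACON) bumps rx_stats.mgmt[MANAGEMENT_BEACON], while
 * any data frame increments data_cnt and adds its length to data_bytes;
 * iwl_leds_background() is invoked at the end of every call so the LED
 * blink rate can track the data traffic, as noted in the comment above.
 */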
2423 #endif
2425 static const char *get_csr_string(int cmd)
2427 switch (cmd) {
2428 IWL_CMD(CSR_HW_IF_CONFIG_REG);
2429 IWL_CMD(CSR_INT_COALESCING);
2430 IWL_CMD(CSR_INT);
2431 IWL_CMD(CSR_INT_MASK);
2432 IWL_CMD(CSR_FH_INT_STATUS);
2433 IWL_CMD(CSR_GPIO_IN);
2434 IWL_CMD(CSR_RESET);
2435 IWL_CMD(CSR_GP_CNTRL);
2436 IWL_CMD(CSR_HW_REV);
2437 IWL_CMD(CSR_EEPROM_REG);
2438 IWL_CMD(CSR_EEPROM_GP);
2439 IWL_CMD(CSR_OTP_GP_REG);
2440 IWL_CMD(CSR_GIO_REG);
2441 IWL_CMD(CSR_GP_UCODE_REG);
2442 IWL_CMD(CSR_GP_DRIVER_REG);
2443 IWL_CMD(CSR_UCODE_DRV_GP1);
2444 IWL_CMD(CSR_UCODE_DRV_GP2);
2445 IWL_CMD(CSR_LED_REG);
2446 IWL_CMD(CSR_DRAM_INT_TBL_REG);
2447 IWL_CMD(CSR_GIO_CHICKEN_BITS);
2448 IWL_CMD(CSR_ANA_PLL_CFG);
2449 IWL_CMD(CSR_HW_REV_WA_REG);
2450 IWL_CMD(CSR_DBG_HPET_MEM_REG);
2451 default:
2452 return "UNKNOWN";
2457 void iwl_dump_csr(struct iwl_priv *priv)
2459 int i;
2460 u32 csr_tbl[] = {
2461 CSR_HW_IF_CONFIG_REG,
2462 CSR_INT_COALESCING,
2463 CSR_INT,
2464 CSR_INT_MASK,
2465 CSR_FH_INT_STATUS,
2466 CSR_GPIO_IN,
2467 CSR_RESET,
2468 CSR_GP_CNTRL,
2469 CSR_HW_REV,
2470 CSR_EEPROM_REG,
2471 CSR_EEPROM_GP,
2472 CSR_OTP_GP_REG,
2473 CSR_GIO_REG,
2474 CSR_GP_UCODE_REG,
2475 CSR_GP_DRIVER_REG,
2476 CSR_UCODE_DRV_GP1,
2477 CSR_UCODE_DRV_GP2,
2478 CSR_LED_REG,
2479 CSR_DRAM_INT_TBL_REG,
2480 CSR_GIO_CHICKEN_BITS,
2481 CSR_ANA_PLL_CFG,
2482 CSR_HW_REV_WA_REG,
2483 CSR_DBG_HPET_MEM_REG
2485 IWL_ERR(priv, "CSR values:\n");
2486 IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
2487 "CSR_INT_PERIODIC_REG)\n");
2488 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
2489 IWL_ERR(priv, " %25s: 0X%08x\n",
2490 get_csr_string(csr_tbl[i]),
2491 iwl_read32(priv, csr_tbl[i]));
2494 EXPORT_SYMBOL(iwl_dump_csr);
2496 static const char *get_fh_string(int cmd)
2498 switch (cmd) {
2499 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
2500 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
2501 IWL_CMD(FH_RSCSR_CHNL0_WPTR);
2502 IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
2503 IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
2504 IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
2505 IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
2506 IWL_CMD(FH_TSSR_TX_STATUS_REG);
2507 IWL_CMD(FH_TSSR_TX_ERROR_REG);
2508 default:
2509 return "UNKNOWN";
2514 int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
2516 int i;
2517 #ifdef CONFIG_IWLWIFI_DEBUG
2518 int pos = 0;
2519 size_t bufsz = 0;
2520 #endif
2521 u32 fh_tbl[] = {
2522 FH_RSCSR_CHNL0_STTS_WPTR_REG,
2523 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
2524 FH_RSCSR_CHNL0_WPTR,
2525 FH_MEM_RCSR_CHNL0_CONFIG_REG,
2526 FH_MEM_RSSR_SHARED_CTRL_REG,
2527 FH_MEM_RSSR_RX_STATUS_REG,
2528 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
2529 FH_TSSR_TX_STATUS_REG,
2530 FH_TSSR_TX_ERROR_REG
2532 #ifdef CONFIG_IWLWIFI_DEBUG
2533 if (display) {
2534 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
2535 *buf = kmalloc(bufsz, GFP_KERNEL);
2536 if (!*buf)
2537 return -ENOMEM;
2538 pos += scnprintf(*buf + pos, bufsz - pos,
2539 "FH register values:\n");
2540 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
2541 pos += scnprintf(*buf + pos, bufsz - pos,
2542 " %34s: 0X%08x\n",
2543 get_fh_string(fh_tbl[i]),
2544 iwl_read_direct32(priv, fh_tbl[i]));
2546 return pos;
2548 #endif
2549 IWL_ERR(priv, "FH register values:\n");
2550 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
2551 IWL_ERR(priv, " %34s: 0X%08x\n",
2552 get_fh_string(fh_tbl[i]),
2553 iwl_read_direct32(priv, fh_tbl[i]));
2555 return 0;
2557 EXPORT_SYMBOL(iwl_dump_fh);
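/*
 * Buffer sizing for the display path above: each " %34s: 0X%08x\n" line
 * is at most 48 characters for the names in fh_tbl, so with 9 registers
 * the kmalloc'ed buffer of ARRAY_SIZE(fh_tbl) * 48 + 40 = 472 bytes has
 * room for every line plus the "FH register values:" header.
 */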
2559 static void iwl_force_rf_reset(struct iwl_priv *priv)
2561 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2562 return;
2564 if (!iwl_is_associated(priv)) {
2565 IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
2566 return;
2569 /* There is no easy, better way to force-reset the radio:
2570 * the only known method is switching channel, which forces the
2571 * radio to reset and retune.
2572 * Use an internal short scan (single channel) operation to
2573 * achieve this.
2574 * The driver should reset the radio when a number of consecutive
2575 * missed beacons, or any other uCode error condition, is detected. */
2577 IWL_DEBUG_INFO(priv, "perform radio reset.\n");
2578 iwl_internal_short_hw_scan(priv);
2582 int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
2584 struct iwl_force_reset *force_reset;
2586 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2587 return -EINVAL;
2589 if (test_bit(STATUS_SCANNING, &priv->status)) {
2590 IWL_DEBUG_INFO(priv, "scan in progress.\n");
2591 return -EINVAL;
2594 if (mode >= IWL_MAX_FORCE_RESET) {
2595 IWL_DEBUG_INFO(priv, "invalid reset request.\n");
2596 return -EINVAL;
2598 force_reset = &priv->force_reset[mode];
2599 force_reset->reset_request_count++;
2600 if (!external) {
2601 if (force_reset->last_force_reset_jiffies &&
2602 time_after(force_reset->last_force_reset_jiffies +
2603 force_reset->reset_duration, jiffies)) {
2604 IWL_DEBUG_INFO(priv, "force reset rejected\n");
2605 force_reset->reset_reject_count++;
2606 return -EAGAIN;
2609 force_reset->reset_success_count++;
2610 force_reset->last_force_reset_jiffies = jiffies;
2611 IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
2612 switch (mode) {
2613 case IWL_RF_RESET:
2614 iwl_force_rf_reset(priv);
2615 break;
2616 case IWL_FW_RESET:
2618 /* If the request is external (e.g. from debugfs), always
2619 * perform it, regardless of the module
2620 * parameter setting.
2621 * If the request is internal (uCode error or a failure
2622 * detected by the driver), the fw_restart module parameter
2623 * needs to be checked before performing the firmware reload. */
2625 if (!external && !priv->cfg->mod_params->restart_fw) {
2626 IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
2627 "module parameter setting\n");
2628 break;
2630 IWL_ERR(priv, "On demand firmware reload\n");
2631 /* Set the FW error flag -- cleared on iwl_down */
2632 set_bit(STATUS_FW_ERROR, &priv->status);
2633 wake_up_interruptible(&priv->wait_command_queue);
2635 /* Keep the restart process from trying to send host
2636 * commands by clearing the INIT status bit */
2638 clear_bit(STATUS_READY, &priv->status);
2639 queue_work(priv->workqueue, &priv->restart);
2640 break;
2642 return 0;
2644 EXPORT_SYMBOL(iwl_force_reset);
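/*
 * Throttling behaviour of the internal path above: if an internal caller
 * requests another reset before reset_duration jiffies have elapsed since
 * last_force_reset_jiffies, the request is counted in reset_reject_count
 * and fails with -EAGAIN; external (debugfs) requests skip this window
 * check entirely.
 */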
2647 /* iwl_bg_monitor_recover - Timer callback to check for stuck queues and recover
2649 * Under normal conditions (no queue is stuck), the timer is continually set to
2650 * fire monitor_recover_period milliseconds after the last timer
2651 * expired. When a queue's read_ptr stays at the same place, the timer is
2652 * shortened to 100 ms. This is done
2653 * 1) to reduce the chance that the read_ptr merely wrapped around (not stuck)
2654 * 2) to detect stuck queues quicker, before the station and AP
2655 * disassociate from each other.
2657 * This function monitors all the tx queues and recovers if any
2658 * of them is stuck:
2659 * 1. It first checks the cmd queue for a stuck condition. If it is stuck,
2660 * it recovers by resetting the firmware and returns.
2661 * 2. Then it checks for station association. If associated, it checks the
2662 * other queues. If any queue is stuck, it recovers by resetting
2663 * the firmware.
2664 * Note: the queue read_ptr has to be seen at the same place MAX_REPEAT+1
2665 * times before the queue is considered stuck. */
2668 /* The maximum number of times the read pointer of a tx queue may stay at
2669 * the same place without the queue being considered stuck. */
2671 #define MAX_REPEAT (2)
2672 static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
2674 struct iwl_tx_queue *txq;
2675 struct iwl_queue *q;
2677 txq = &priv->txq[cnt];
2678 q = &txq->q;
2679 /* if the queue is empty we skip it (fall through to return 0) */
2680 if (q->read_ptr != q->write_ptr) {
2681 if (q->read_ptr == q->last_read_ptr) {
2682 /* a queue has not been read from last time */
2683 if (q->repeat_same_read_ptr > MAX_REPEAT) {
2684 IWL_ERR(priv,
2685 "queue %d stuck %d time. Fw reload.\n",
2686 q->id, q->repeat_same_read_ptr);
2687 q->repeat_same_read_ptr = 0;
2688 iwl_force_reset(priv, IWL_FW_RESET, false);
2689 } else {
2690 q->repeat_same_read_ptr++;
2691 IWL_DEBUG_RADIO(priv,
2692 "queue %d, not read %d time\n",
2693 q->id,
2694 q->repeat_same_read_ptr);
2695 mod_timer(&priv->monitor_recover, jiffies +
2696 msecs_to_jiffies(IWL_ONE_HUNDRED_MSECS));
2698 return 1;
2699 } else {
2700 q->last_read_ptr = q->read_ptr;
2701 q->repeat_same_read_ptr = 0;
2704 return 0;
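/*
 * With MAX_REPEAT set to 2 above, each timer pass that finds a non-empty
 * queue with an unmoved read_ptr either bumps repeat_same_read_ptr (and
 * rearms the short 100 ms timer) or, once the counter already exceeds
 * MAX_REPEAT, triggers the firmware reload; any progress on the queue
 * resets the counter to 0.
 */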
2707 void iwl_bg_monitor_recover(unsigned long data)
2709 struct iwl_priv *priv = (struct iwl_priv *)data;
2710 int cnt;
2712 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2713 return;
2715 /* monitor and check for stuck cmd queue */
2716 if (iwl_check_stuck_queue(priv, IWL_CMD_QUEUE_NUM))
2717 return;
2719 /* monitor and check for other stuck queues */
2720 if (iwl_is_associated(priv)) {
2721 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
2722 /* skip as we already checked the command queue */
2723 if (cnt == IWL_CMD_QUEUE_NUM)
2724 continue;
2725 if (iwl_check_stuck_queue(priv, cnt))
2726 return;
2730 /* Reschedule the timer to occur in
2731 * priv->cfg->monitor_recover_period */
2733 mod_timer(&priv->monitor_recover,
2734 jiffies + msecs_to_jiffies(priv->cfg->monitor_recover_period));
2736 EXPORT_SYMBOL(iwl_bg_monitor_recover);
2740 /* extended beacon time format:
2741 * time in usec is converted into a 32-bit value in extended:internal format;
2742 * the extended part is the beacon count,
2743 * the internal part is the time in usec within one beacon interval. */
2745 u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
2747 u32 quot;
2748 u32 rem;
2749 u32 interval = beacon_interval * TIME_UNIT;
2751 if (!interval || !usec)
2752 return 0;
2754 quot = (usec / interval) &
2755 (iwl_beacon_time_mask_high(priv,
2756 priv->hw_params.beacon_time_tsf_bits) >>
2757 priv->hw_params.beacon_time_tsf_bits);
2758 rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
2759 priv->hw_params.beacon_time_tsf_bits);
2761 return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
2763 EXPORT_SYMBOL(iwl_usecs_to_beacons);
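/*
 * Worked example (assuming TIME_UNIT is 1024 usec, i.e. one TU, as defined
 * elsewhere in the driver): with beacon_interval = 100 TU the interval is
 * 102400 usec, so usec = 250000 packs as quot = 2 beacons and
 * rem = 45200 usec, i.e. (2 << beacon_time_tsf_bits) + 45200, subject to
 * the field masks applied above.
 */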
2765 /* base is usually what we get from ucode with each received frame,
2766 * the same as HW timer counter counting down */
2768 __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
2769 u32 addon, u32 beacon_interval)
2771 u32 base_low = base & iwl_beacon_time_mask_low(priv,
2772 priv->hw_params.beacon_time_tsf_bits);
2773 u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
2774 priv->hw_params.beacon_time_tsf_bits);
2775 u32 interval = beacon_interval * TIME_UNIT;
2776 u32 res = (base & iwl_beacon_time_mask_high(priv,
2777 priv->hw_params.beacon_time_tsf_bits)) +
2778 (addon & iwl_beacon_time_mask_high(priv,
2779 priv->hw_params.beacon_time_tsf_bits));
2781 if (base_low > addon_low)
2782 res += base_low - addon_low;
2783 else if (base_low < addon_low) {
2784 res += interval + base_low - addon_low;
2785 res += (1 << priv->hw_params.beacon_time_tsf_bits);
2786 } else
2787 res += (1 << priv->hw_params.beacon_time_tsf_bits);
2789 return cpu_to_le32(res);
2791 EXPORT_SYMBOL(iwl_add_beacon_time);
2793 #ifdef CONFIG_PM
2795 int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2797 struct iwl_priv *priv = pci_get_drvdata(pdev);
2800 /* This function is called when the system goes into suspend state.
2801 * mac80211 will call iwl_mac_stop() from the mac80211 suspend function
2802 * first, but since iwl_mac_stop() has no knowledge of who the caller is,
2803 * it will not call apm_ops.stop() to stop the DMA operation.
2804 * Call apm_ops.stop here to make sure we stop the DMA. */
2806 priv->cfg->ops->lib->apm_ops.stop(priv);
2808 pci_save_state(pdev);
2809 pci_disable_device(pdev);
2810 pci_set_power_state(pdev, PCI_D3hot);
2812 return 0;
2814 EXPORT_SYMBOL(iwl_pci_suspend);
2816 int iwl_pci_resume(struct pci_dev *pdev)
2818 struct iwl_priv *priv = pci_get_drvdata(pdev);
2819 int ret;
2820 bool hw_rfkill = false;
2823 /* We disable the RETRY_TIMEOUT register (0x41) to keep
2824 * PCI Tx retries from interfering with C3 CPU state. */
2826 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2828 pci_set_power_state(pdev, PCI_D0);
2829 ret = pci_enable_device(pdev);
2830 if (ret)
2831 return ret;
2832 pci_restore_state(pdev);
2833 iwl_enable_interrupts(priv);
2835 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
2836 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
2837 hw_rfkill = true;
2839 if (hw_rfkill)
2840 set_bit(STATUS_RF_KILL_HW, &priv->status);
2841 else
2842 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2844 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
2846 return 0;
2848 EXPORT_SYMBOL(iwl_pci_resume);
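/*
 * The rfkill probe above reads CSR_GP_CNTRL after restoring PCI state: if
 * CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW is clear, the hardware kill switch
 * is asserted, STATUS_RF_KILL_HW is set accordingly and the state is
 * reported to mac80211 via wiphy_rfkill_set_hw_state().
 */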
2850 #endif /* CONFIG_PM */