1 /******************************************************************************
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28 *****************************************************************************/
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/init.h>
32 #include <linux/slab.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/delay.h>
35 #include <linux/sched.h>
36 #include <linux/skbuff.h>
37 #include <linux/netdevice.h>
38 #include <linux/firmware.h>
39 #include <linux/etherdevice.h>
40 #include <linux/if_arp.h>
42 #include <net/mac80211.h>
44 #include <asm/div64.h>
46 #include "iwl-eeprom.h"
50 #include "iwl-helpers.h"
52 #include "iwl-agn-calib.h"
54 #include "iwl-shared.h"
56 #include "iwl-trans.h"
58 /******************************************************************************
62 ******************************************************************************/
65 * module name, copyright, version, etc.
67 #define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
69 #ifdef CONFIG_IWLWIFI_DEBUG
75 #define DRV_VERSION IWLWIFI_VERSION VD
78 MODULE_DESCRIPTION(DRV_DESCRIPTION
);
79 MODULE_VERSION(DRV_VERSION
);
80 MODULE_AUTHOR(DRV_COPYRIGHT
" " DRV_AUTHOR
);
81 MODULE_LICENSE("GPL");
82 MODULE_ALIAS("iwlagn");
84 void iwl_update_chain_flags(struct iwl_priv
*priv
)
86 struct iwl_rxon_context
*ctx
;
88 for_each_context(priv
, ctx
) {
89 iwlagn_set_rxon_chain(priv
, ctx
);
90 if (ctx
->active
.rx_chain
!= ctx
->staging
.rx_chain
)
91 iwlagn_commit_rxon(priv
, ctx
);
95 /* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
96 static void iwl_set_beacon_tim(struct iwl_priv
*priv
,
97 struct iwl_tx_beacon_cmd
*tx_beacon_cmd
,
98 u8
*beacon
, u32 frame_size
)
101 struct ieee80211_mgmt
*mgmt
= (struct ieee80211_mgmt
*)beacon
;
104 * The index is relative to frame start but we start looking at the
105 * variable-length part of the beacon.
107 tim_idx
= mgmt
->u
.beacon
.variable
- beacon
;
109 /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
110 while ((tim_idx
< (frame_size
- 2)) &&
111 (beacon
[tim_idx
] != WLAN_EID_TIM
))
112 tim_idx
+= beacon
[tim_idx
+1] + 2;
114 /* If TIM field was found, set variables */
115 if ((tim_idx
< (frame_size
- 1)) && (beacon
[tim_idx
] == WLAN_EID_TIM
)) {
116 tx_beacon_cmd
->tim_idx
= cpu_to_le16(tim_idx
);
117 tx_beacon_cmd
->tim_size
= beacon
[tim_idx
+1];
119 IWL_WARN(priv
, "Unable to find TIM Element in beacon\n");
122 int iwlagn_send_beacon_cmd(struct iwl_priv
*priv
)
124 struct iwl_tx_beacon_cmd
*tx_beacon_cmd
;
125 struct iwl_host_cmd cmd
= {
126 .id
= REPLY_TX_BEACON
,
129 struct ieee80211_tx_info
*info
;
135 * We have to set up the TX command, the TX Beacon command, and the
139 lockdep_assert_held(&priv
->shrd
->mutex
);
141 if (!priv
->beacon_ctx
) {
142 IWL_ERR(priv
, "trying to build beacon w/o beacon context!\n");
146 if (WARN_ON(!priv
->beacon_skb
))
149 /* Allocate beacon command */
150 if (!priv
->beacon_cmd
)
151 priv
->beacon_cmd
= kzalloc(sizeof(*tx_beacon_cmd
), GFP_KERNEL
);
152 tx_beacon_cmd
= priv
->beacon_cmd
;
156 frame_size
= priv
->beacon_skb
->len
;
158 /* Set up TX command fields */
159 tx_beacon_cmd
->tx
.len
= cpu_to_le16((u16
)frame_size
);
160 tx_beacon_cmd
->tx
.sta_id
= priv
->beacon_ctx
->bcast_sta_id
;
161 tx_beacon_cmd
->tx
.stop_time
.life_time
= TX_CMD_LIFE_TIME_INFINITE
;
162 tx_beacon_cmd
->tx
.tx_flags
= TX_CMD_FLG_SEQ_CTL_MSK
|
163 TX_CMD_FLG_TSF_MSK
| TX_CMD_FLG_STA_RATE_MSK
;
165 /* Set up TX beacon command fields */
166 iwl_set_beacon_tim(priv
, tx_beacon_cmd
, priv
->beacon_skb
->data
,
169 /* Set up packet rate and flags */
170 info
= IEEE80211_SKB_CB(priv
->beacon_skb
);
173 * Let's set up the rate at least somewhat correctly;
174 * it will currently not actually be used by the uCode,
175 * it uses the broadcast station's rate instead.
177 if (info
->control
.rates
[0].idx
< 0 ||
178 info
->control
.rates
[0].flags
& IEEE80211_TX_RC_MCS
)
181 rate
= info
->control
.rates
[0].idx
;
183 priv
->mgmt_tx_ant
= iwl_toggle_tx_ant(priv
, priv
->mgmt_tx_ant
,
184 hw_params(priv
).valid_tx_ant
);
185 rate_flags
= iwl_ant_idx_to_flags(priv
->mgmt_tx_ant
);
187 /* In mac80211, rates for 5 GHz start at 0 */
188 if (info
->band
== IEEE80211_BAND_5GHZ
)
189 rate
+= IWL_FIRST_OFDM_RATE
;
190 else if (rate
>= IWL_FIRST_CCK_RATE
&& rate
<= IWL_LAST_CCK_RATE
)
191 rate_flags
|= RATE_MCS_CCK_MSK
;
193 tx_beacon_cmd
->tx
.rate_n_flags
=
194 iwl_hw_set_rate_n_flags(rate
, rate_flags
);
197 cmd
.len
[0] = sizeof(*tx_beacon_cmd
);
198 cmd
.data
[0] = tx_beacon_cmd
;
199 cmd
.dataflags
[0] = IWL_HCMD_DFL_NOCOPY
;
200 cmd
.len
[1] = frame_size
;
201 cmd
.data
[1] = priv
->beacon_skb
->data
;
202 cmd
.dataflags
[1] = IWL_HCMD_DFL_NOCOPY
;
204 return iwl_trans_send_cmd(trans(priv
), &cmd
);
207 static void iwl_bg_beacon_update(struct work_struct
*work
)
209 struct iwl_priv
*priv
=
210 container_of(work
, struct iwl_priv
, beacon_update
);
211 struct sk_buff
*beacon
;
213 mutex_lock(&priv
->shrd
->mutex
);
214 if (!priv
->beacon_ctx
) {
215 IWL_ERR(priv
, "updating beacon w/o beacon context!\n");
219 if (priv
->beacon_ctx
->vif
->type
!= NL80211_IFTYPE_AP
) {
221 * The ucode will send beacon notifications even in
222 * IBSS mode, but we don't want to process them. But
223 * we need to defer the type check to here due to
224 * requiring locking around the beacon_ctx access.
229 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
230 beacon
= ieee80211_beacon_get(priv
->hw
, priv
->beacon_ctx
->vif
);
232 IWL_ERR(priv
, "update beacon failed -- keeping old\n");
236 /* new beacon skb is allocated every time; dispose previous.*/
237 dev_kfree_skb(priv
->beacon_skb
);
239 priv
->beacon_skb
= beacon
;
241 iwlagn_send_beacon_cmd(priv
);
243 mutex_unlock(&priv
->shrd
->mutex
);
246 static void iwl_bg_bt_runtime_config(struct work_struct
*work
)
248 struct iwl_priv
*priv
=
249 container_of(work
, struct iwl_priv
, bt_runtime_config
);
251 if (test_bit(STATUS_EXIT_PENDING
, &priv
->shrd
->status
))
254 /* dont send host command if rf-kill is on */
255 if (!iwl_is_ready_rf(priv
->shrd
))
257 iwlagn_send_advance_bt_config(priv
);
260 static void iwl_bg_bt_full_concurrency(struct work_struct
*work
)
262 struct iwl_priv
*priv
=
263 container_of(work
, struct iwl_priv
, bt_full_concurrency
);
264 struct iwl_rxon_context
*ctx
;
266 mutex_lock(&priv
->shrd
->mutex
);
268 if (test_bit(STATUS_EXIT_PENDING
, &priv
->shrd
->status
))
271 /* dont send host command if rf-kill is on */
272 if (!iwl_is_ready_rf(priv
->shrd
))
275 IWL_DEBUG_INFO(priv
, "BT coex in %s mode\n",
276 priv
->bt_full_concurrent
?
277 "full concurrency" : "3-wire");
280 * LQ & RXON updated cmds must be sent before BT Config cmd
281 * to avoid 3-wire collisions
283 for_each_context(priv
, ctx
) {
284 iwlagn_set_rxon_chain(priv
, ctx
);
285 iwlagn_commit_rxon(priv
, ctx
);
288 iwlagn_send_advance_bt_config(priv
);
290 mutex_unlock(&priv
->shrd
->mutex
);
294 * iwl_bg_statistics_periodic - Timer callback to queue statistics
296 * This callback is provided in order to send a statistics request.
298 * This timer function is continually reset to execute within
299 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
300 * was received. We need to ensure we receive the statistics in order
301 * to update the temperature used for calibrating the TXPOWER.
303 static void iwl_bg_statistics_periodic(unsigned long data
)
305 struct iwl_priv
*priv
= (struct iwl_priv
*)data
;
307 if (test_bit(STATUS_EXIT_PENDING
, &priv
->shrd
->status
))
310 /* dont send host command if rf-kill is on */
311 if (!iwl_is_ready_rf(priv
->shrd
))
314 iwl_send_statistics_request(priv
, CMD_ASYNC
, false);
318 static void iwl_print_cont_event_trace(struct iwl_priv
*priv
, u32 base
,
319 u32 start_idx
, u32 num_events
,
323 u32 ptr
; /* SRAM byte address of log data */
324 u32 ev
, time
, data
; /* event log data */
325 unsigned long reg_flags
;
328 ptr
= base
+ (4 * sizeof(u32
)) + (start_idx
* 2 * sizeof(u32
));
330 ptr
= base
+ (4 * sizeof(u32
)) + (start_idx
* 3 * sizeof(u32
));
332 /* Make sure device is powered up for SRAM reads */
333 spin_lock_irqsave(&bus(priv
)->reg_lock
, reg_flags
);
334 if (iwl_grab_nic_access(bus(priv
))) {
335 spin_unlock_irqrestore(&bus(priv
)->reg_lock
, reg_flags
);
339 /* Set starting address; reads will auto-increment */
340 iwl_write32(bus(priv
), HBUS_TARG_MEM_RADDR
, ptr
);
344 * "time" is actually "data" for mode 0 (no timestamp).
345 * place event id # at far right for easier visual parsing.
347 for (i
= 0; i
< num_events
; i
++) {
348 ev
= iwl_read32(bus(priv
), HBUS_TARG_MEM_RDAT
);
349 time
= iwl_read32(bus(priv
), HBUS_TARG_MEM_RDAT
);
351 trace_iwlwifi_dev_ucode_cont_event(priv
,
354 data
= iwl_read32(bus(priv
), HBUS_TARG_MEM_RDAT
);
355 trace_iwlwifi_dev_ucode_cont_event(priv
,
359 /* Allow device to power down */
360 iwl_release_nic_access(bus(priv
));
361 spin_unlock_irqrestore(&bus(priv
)->reg_lock
, reg_flags
);
364 static void iwl_continuous_event_trace(struct iwl_priv
*priv
)
366 u32 capacity
; /* event log capacity in # entries */
367 u32 base
; /* SRAM byte address of event log header */
368 u32 mode
; /* 0 - no timestamp, 1 - timestamp recorded */
369 u32 num_wraps
; /* # times uCode wrapped to top of log */
370 u32 next_entry
; /* index of next entry to be written by uCode */
372 base
= priv
->device_pointers
.error_event_table
;
373 if (iwlagn_hw_valid_rtc_data_addr(base
)) {
374 capacity
= iwl_read_targ_mem(bus(priv
), base
);
375 num_wraps
= iwl_read_targ_mem(bus(priv
),
376 base
+ (2 * sizeof(u32
)));
377 mode
= iwl_read_targ_mem(bus(priv
), base
+ (1 * sizeof(u32
)));
378 next_entry
= iwl_read_targ_mem(bus(priv
),
379 base
+ (3 * sizeof(u32
)));
383 if (num_wraps
== priv
->event_log
.num_wraps
) {
384 iwl_print_cont_event_trace(priv
,
385 base
, priv
->event_log
.next_entry
,
386 next_entry
- priv
->event_log
.next_entry
,
388 priv
->event_log
.non_wraps_count
++;
390 if ((num_wraps
- priv
->event_log
.num_wraps
) > 1)
391 priv
->event_log
.wraps_more_count
++;
393 priv
->event_log
.wraps_once_count
++;
394 trace_iwlwifi_dev_ucode_wrap_event(priv
,
395 num_wraps
- priv
->event_log
.num_wraps
,
396 next_entry
, priv
->event_log
.next_entry
);
397 if (next_entry
< priv
->event_log
.next_entry
) {
398 iwl_print_cont_event_trace(priv
, base
,
399 priv
->event_log
.next_entry
,
400 capacity
- priv
->event_log
.next_entry
,
403 iwl_print_cont_event_trace(priv
, base
, 0,
406 iwl_print_cont_event_trace(priv
, base
,
407 next_entry
, capacity
- next_entry
,
410 iwl_print_cont_event_trace(priv
, base
, 0,
414 priv
->event_log
.num_wraps
= num_wraps
;
415 priv
->event_log
.next_entry
= next_entry
;
419 * iwl_bg_ucode_trace - Timer callback to log ucode event
421 * The timer is continually set to execute every
422 * UCODE_TRACE_PERIOD milliseconds after the last timer expired
423 * this function is to perform continuous uCode event logging operation
426 static void iwl_bg_ucode_trace(unsigned long data
)
428 struct iwl_priv
*priv
= (struct iwl_priv
*)data
;
430 if (test_bit(STATUS_EXIT_PENDING
, &priv
->shrd
->status
))
433 if (priv
->event_log
.ucode_trace
) {
434 iwl_continuous_event_trace(priv
);
435 /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
436 mod_timer(&priv
->ucode_trace
,
437 jiffies
+ msecs_to_jiffies(UCODE_TRACE_PERIOD
));
441 static void iwl_bg_tx_flush(struct work_struct
*work
)
443 struct iwl_priv
*priv
=
444 container_of(work
, struct iwl_priv
, tx_flush
);
446 if (test_bit(STATUS_EXIT_PENDING
, &priv
->shrd
->status
))
449 /* do nothing if rf-kill is on */
450 if (!iwl_is_ready_rf(priv
->shrd
))
453 IWL_DEBUG_INFO(priv
, "device request: flush all tx frames\n");
454 iwlagn_dev_txfifo_flush(priv
, IWL_DROP_ALL
);
457 /******************************************************************************
459 * uCode download functions
461 ******************************************************************************/
463 static void iwl_free_fw_desc(struct iwl_priv
*priv
, struct fw_desc
*desc
)
466 dma_free_coherent(bus(priv
)->dev
, desc
->len
,
467 desc
->v_addr
, desc
->p_addr
);
472 static void iwl_free_fw_img(struct iwl_priv
*priv
, struct fw_img
*img
)
474 iwl_free_fw_desc(priv
, &img
->code
);
475 iwl_free_fw_desc(priv
, &img
->data
);
478 static void iwl_dealloc_ucode(struct iwl_priv
*priv
)
480 iwl_free_fw_img(priv
, &priv
->ucode_rt
);
481 iwl_free_fw_img(priv
, &priv
->ucode_init
);
482 iwl_free_fw_img(priv
, &priv
->ucode_wowlan
);
485 static int iwl_alloc_fw_desc(struct iwl_priv
*priv
, struct fw_desc
*desc
,
486 const void *data
, size_t len
)
493 desc
->v_addr
= dma_alloc_coherent(bus(priv
)->dev
, len
,
494 &desc
->p_addr
, GFP_KERNEL
);
499 memcpy(desc
->v_addr
, data
, len
);
503 static void iwl_init_context(struct iwl_priv
*priv
, u32 ucode_flags
)
508 * The default context is always valid,
509 * the PAN context depends on uCode.
511 priv
->shrd
->valid_contexts
= BIT(IWL_RXON_CTX_BSS
);
512 if (ucode_flags
& IWL_UCODE_TLV_FLAGS_PAN
)
513 priv
->shrd
->valid_contexts
|= BIT(IWL_RXON_CTX_PAN
);
515 for (i
= 0; i
< NUM_IWL_RXON_CTX
; i
++)
516 priv
->contexts
[i
].ctxid
= i
;
518 priv
->contexts
[IWL_RXON_CTX_BSS
].always_active
= true;
519 priv
->contexts
[IWL_RXON_CTX_BSS
].is_active
= true;
520 priv
->contexts
[IWL_RXON_CTX_BSS
].rxon_cmd
= REPLY_RXON
;
521 priv
->contexts
[IWL_RXON_CTX_BSS
].rxon_timing_cmd
= REPLY_RXON_TIMING
;
522 priv
->contexts
[IWL_RXON_CTX_BSS
].rxon_assoc_cmd
= REPLY_RXON_ASSOC
;
523 priv
->contexts
[IWL_RXON_CTX_BSS
].qos_cmd
= REPLY_QOS_PARAM
;
524 priv
->contexts
[IWL_RXON_CTX_BSS
].ap_sta_id
= IWL_AP_ID
;
525 priv
->contexts
[IWL_RXON_CTX_BSS
].wep_key_cmd
= REPLY_WEPKEY
;
526 priv
->contexts
[IWL_RXON_CTX_BSS
].exclusive_interface_modes
=
527 BIT(NL80211_IFTYPE_ADHOC
);
528 priv
->contexts
[IWL_RXON_CTX_BSS
].interface_modes
=
529 BIT(NL80211_IFTYPE_STATION
);
530 priv
->contexts
[IWL_RXON_CTX_BSS
].ap_devtype
= RXON_DEV_TYPE_AP
;
531 priv
->contexts
[IWL_RXON_CTX_BSS
].ibss_devtype
= RXON_DEV_TYPE_IBSS
;
532 priv
->contexts
[IWL_RXON_CTX_BSS
].station_devtype
= RXON_DEV_TYPE_ESS
;
533 priv
->contexts
[IWL_RXON_CTX_BSS
].unused_devtype
= RXON_DEV_TYPE_ESS
;
535 priv
->contexts
[IWL_RXON_CTX_PAN
].rxon_cmd
= REPLY_WIPAN_RXON
;
536 priv
->contexts
[IWL_RXON_CTX_PAN
].rxon_timing_cmd
=
537 REPLY_WIPAN_RXON_TIMING
;
538 priv
->contexts
[IWL_RXON_CTX_PAN
].rxon_assoc_cmd
=
539 REPLY_WIPAN_RXON_ASSOC
;
540 priv
->contexts
[IWL_RXON_CTX_PAN
].qos_cmd
= REPLY_WIPAN_QOS_PARAM
;
541 priv
->contexts
[IWL_RXON_CTX_PAN
].ap_sta_id
= IWL_AP_ID_PAN
;
542 priv
->contexts
[IWL_RXON_CTX_PAN
].wep_key_cmd
= REPLY_WIPAN_WEPKEY
;
543 priv
->contexts
[IWL_RXON_CTX_PAN
].bcast_sta_id
= IWLAGN_PAN_BCAST_ID
;
544 priv
->contexts
[IWL_RXON_CTX_PAN
].station_flags
= STA_FLG_PAN_STATION
;
545 priv
->contexts
[IWL_RXON_CTX_PAN
].interface_modes
=
546 BIT(NL80211_IFTYPE_STATION
) | BIT(NL80211_IFTYPE_AP
);
548 if (ucode_flags
& IWL_UCODE_TLV_FLAGS_P2P
)
549 priv
->contexts
[IWL_RXON_CTX_PAN
].interface_modes
|=
550 BIT(NL80211_IFTYPE_P2P_CLIENT
) |
551 BIT(NL80211_IFTYPE_P2P_GO
);
553 priv
->contexts
[IWL_RXON_CTX_PAN
].ap_devtype
= RXON_DEV_TYPE_CP
;
554 priv
->contexts
[IWL_RXON_CTX_PAN
].station_devtype
= RXON_DEV_TYPE_2STA
;
555 priv
->contexts
[IWL_RXON_CTX_PAN
].unused_devtype
= RXON_DEV_TYPE_P2P
;
557 BUILD_BUG_ON(NUM_IWL_RXON_CTX
!= 2);
561 struct iwlagn_ucode_capabilities
{
562 u32 max_probe_length
;
563 u32 standard_phy_calibration_size
;
567 static void iwl_ucode_callback(const struct firmware
*ucode_raw
, void *context
);
568 static int iwlagn_mac_setup_register(struct iwl_priv
*priv
,
569 struct iwlagn_ucode_capabilities
*capa
);
571 #define UCODE_EXPERIMENTAL_INDEX 100
572 #define UCODE_EXPERIMENTAL_TAG "exp"
574 static int __must_check
iwl_request_firmware(struct iwl_priv
*priv
, bool first
)
576 const char *name_pre
= priv
->cfg
->fw_name_pre
;
580 #ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
581 priv
->fw_index
= UCODE_EXPERIMENTAL_INDEX
;
582 strcpy(tag
, UCODE_EXPERIMENTAL_TAG
);
583 } else if (priv
->fw_index
== UCODE_EXPERIMENTAL_INDEX
) {
585 priv
->fw_index
= priv
->cfg
->ucode_api_max
;
586 sprintf(tag
, "%d", priv
->fw_index
);
589 sprintf(tag
, "%d", priv
->fw_index
);
592 if (priv
->fw_index
< priv
->cfg
->ucode_api_min
) {
593 IWL_ERR(priv
, "no suitable firmware found!\n");
597 sprintf(priv
->firmware_name
, "%s%s%s", name_pre
, tag
, ".ucode");
599 IWL_DEBUG_INFO(priv
, "attempting to load firmware %s'%s'\n",
600 (priv
->fw_index
== UCODE_EXPERIMENTAL_INDEX
)
601 ? "EXPERIMENTAL " : "",
602 priv
->firmware_name
);
604 return request_firmware_nowait(THIS_MODULE
, 1, priv
->firmware_name
,
606 GFP_KERNEL
, priv
, iwl_ucode_callback
);
609 struct iwlagn_firmware_pieces
{
610 const void *inst
, *data
, *init
, *init_data
, *wowlan_inst
, *wowlan_data
;
611 size_t inst_size
, data_size
, init_size
, init_data_size
,
612 wowlan_inst_size
, wowlan_data_size
;
616 u32 init_evtlog_ptr
, init_evtlog_size
, init_errlog_ptr
;
617 u32 inst_evtlog_ptr
, inst_evtlog_size
, inst_errlog_ptr
;
620 static int iwlagn_load_legacy_firmware(struct iwl_priv
*priv
,
621 const struct firmware
*ucode_raw
,
622 struct iwlagn_firmware_pieces
*pieces
)
624 struct iwl_ucode_header
*ucode
= (void *)ucode_raw
->data
;
625 u32 api_ver
, hdr_size
;
628 priv
->ucode_ver
= le32_to_cpu(ucode
->ver
);
629 api_ver
= IWL_UCODE_API(priv
->ucode_ver
);
634 if (ucode_raw
->size
< hdr_size
) {
635 IWL_ERR(priv
, "File size too small!\n");
638 pieces
->build
= le32_to_cpu(ucode
->u
.v2
.build
);
639 pieces
->inst_size
= le32_to_cpu(ucode
->u
.v2
.inst_size
);
640 pieces
->data_size
= le32_to_cpu(ucode
->u
.v2
.data_size
);
641 pieces
->init_size
= le32_to_cpu(ucode
->u
.v2
.init_size
);
642 pieces
->init_data_size
= le32_to_cpu(ucode
->u
.v2
.init_data_size
);
643 src
= ucode
->u
.v2
.data
;
649 if (ucode_raw
->size
< hdr_size
) {
650 IWL_ERR(priv
, "File size too small!\n");
654 pieces
->inst_size
= le32_to_cpu(ucode
->u
.v1
.inst_size
);
655 pieces
->data_size
= le32_to_cpu(ucode
->u
.v1
.data_size
);
656 pieces
->init_size
= le32_to_cpu(ucode
->u
.v1
.init_size
);
657 pieces
->init_data_size
= le32_to_cpu(ucode
->u
.v1
.init_data_size
);
658 src
= ucode
->u
.v1
.data
;
662 /* Verify size of file vs. image size info in file's header */
663 if (ucode_raw
->size
!= hdr_size
+ pieces
->inst_size
+
664 pieces
->data_size
+ pieces
->init_size
+
665 pieces
->init_data_size
) {
668 "uCode file size %d does not match expected size\n",
669 (int)ucode_raw
->size
);
674 src
+= pieces
->inst_size
;
676 src
+= pieces
->data_size
;
678 src
+= pieces
->init_size
;
679 pieces
->init_data
= src
;
680 src
+= pieces
->init_data_size
;
685 static int iwlagn_load_firmware(struct iwl_priv
*priv
,
686 const struct firmware
*ucode_raw
,
687 struct iwlagn_firmware_pieces
*pieces
,
688 struct iwlagn_ucode_capabilities
*capa
)
690 struct iwl_tlv_ucode_header
*ucode
= (void *)ucode_raw
->data
;
691 struct iwl_ucode_tlv
*tlv
;
692 size_t len
= ucode_raw
->size
;
694 int wanted_alternative
= iwlagn_mod_params
.wanted_ucode_alternative
;
698 enum iwl_ucode_tlv_type tlv_type
;
701 if (len
< sizeof(*ucode
)) {
702 IWL_ERR(priv
, "uCode has invalid length: %zd\n", len
);
706 if (ucode
->magic
!= cpu_to_le32(IWL_TLV_UCODE_MAGIC
)) {
707 IWL_ERR(priv
, "invalid uCode magic: 0X%x\n",
708 le32_to_cpu(ucode
->magic
));
713 * Check which alternatives are present, and "downgrade"
714 * when the chosen alternative is not present, warning
715 * the user when that happens. Some files may not have
716 * any alternatives, so don't warn in that case.
718 alternatives
= le64_to_cpu(ucode
->alternatives
);
719 tmp
= wanted_alternative
;
720 if (wanted_alternative
> 63)
721 wanted_alternative
= 63;
722 while (wanted_alternative
&& !(alternatives
& BIT(wanted_alternative
)))
723 wanted_alternative
--;
724 if (wanted_alternative
&& wanted_alternative
!= tmp
)
726 "uCode alternative %d not available, choosing %d\n",
727 tmp
, wanted_alternative
);
729 priv
->ucode_ver
= le32_to_cpu(ucode
->ver
);
730 pieces
->build
= le32_to_cpu(ucode
->build
);
733 len
-= sizeof(*ucode
);
735 while (len
>= sizeof(*tlv
)) {
741 tlv_len
= le32_to_cpu(tlv
->length
);
742 tlv_type
= le16_to_cpu(tlv
->type
);
743 tlv_alt
= le16_to_cpu(tlv
->alternative
);
744 tlv_data
= tlv
->data
;
747 IWL_ERR(priv
, "invalid TLV len: %zd/%u\n",
751 len
-= ALIGN(tlv_len
, 4);
752 data
+= sizeof(*tlv
) + ALIGN(tlv_len
, 4);
755 * Alternative 0 is always valid.
757 * Skip alternative TLVs that are not selected.
759 if (tlv_alt
!= 0 && tlv_alt
!= wanted_alternative
)
763 case IWL_UCODE_TLV_INST
:
764 pieces
->inst
= tlv_data
;
765 pieces
->inst_size
= tlv_len
;
767 case IWL_UCODE_TLV_DATA
:
768 pieces
->data
= tlv_data
;
769 pieces
->data_size
= tlv_len
;
771 case IWL_UCODE_TLV_INIT
:
772 pieces
->init
= tlv_data
;
773 pieces
->init_size
= tlv_len
;
775 case IWL_UCODE_TLV_INIT_DATA
:
776 pieces
->init_data
= tlv_data
;
777 pieces
->init_data_size
= tlv_len
;
779 case IWL_UCODE_TLV_BOOT
:
780 IWL_ERR(priv
, "Found unexpected BOOT ucode\n");
782 case IWL_UCODE_TLV_PROBE_MAX_LEN
:
783 if (tlv_len
!= sizeof(u32
))
784 goto invalid_tlv_len
;
785 capa
->max_probe_length
=
786 le32_to_cpup((__le32
*)tlv_data
);
788 case IWL_UCODE_TLV_PAN
:
790 goto invalid_tlv_len
;
791 capa
->flags
|= IWL_UCODE_TLV_FLAGS_PAN
;
793 case IWL_UCODE_TLV_FLAGS
:
794 /* must be at least one u32 */
795 if (tlv_len
< sizeof(u32
))
796 goto invalid_tlv_len
;
797 /* and a proper number of u32s */
798 if (tlv_len
% sizeof(u32
))
799 goto invalid_tlv_len
;
801 * This driver only reads the first u32 as
802 * right now no more features are defined,
803 * if that changes then either the driver
804 * will not work with the new firmware, or
805 * it'll not take advantage of new features.
807 capa
->flags
= le32_to_cpup((__le32
*)tlv_data
);
809 case IWL_UCODE_TLV_INIT_EVTLOG_PTR
:
810 if (tlv_len
!= sizeof(u32
))
811 goto invalid_tlv_len
;
812 pieces
->init_evtlog_ptr
=
813 le32_to_cpup((__le32
*)tlv_data
);
815 case IWL_UCODE_TLV_INIT_EVTLOG_SIZE
:
816 if (tlv_len
!= sizeof(u32
))
817 goto invalid_tlv_len
;
818 pieces
->init_evtlog_size
=
819 le32_to_cpup((__le32
*)tlv_data
);
821 case IWL_UCODE_TLV_INIT_ERRLOG_PTR
:
822 if (tlv_len
!= sizeof(u32
))
823 goto invalid_tlv_len
;
824 pieces
->init_errlog_ptr
=
825 le32_to_cpup((__le32
*)tlv_data
);
827 case IWL_UCODE_TLV_RUNT_EVTLOG_PTR
:
828 if (tlv_len
!= sizeof(u32
))
829 goto invalid_tlv_len
;
830 pieces
->inst_evtlog_ptr
=
831 le32_to_cpup((__le32
*)tlv_data
);
833 case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE
:
834 if (tlv_len
!= sizeof(u32
))
835 goto invalid_tlv_len
;
836 pieces
->inst_evtlog_size
=
837 le32_to_cpup((__le32
*)tlv_data
);
839 case IWL_UCODE_TLV_RUNT_ERRLOG_PTR
:
840 if (tlv_len
!= sizeof(u32
))
841 goto invalid_tlv_len
;
842 pieces
->inst_errlog_ptr
=
843 le32_to_cpup((__le32
*)tlv_data
);
845 case IWL_UCODE_TLV_ENHANCE_SENS_TBL
:
847 goto invalid_tlv_len
;
848 priv
->enhance_sensitivity_table
= true;
850 case IWL_UCODE_TLV_WOWLAN_INST
:
851 pieces
->wowlan_inst
= tlv_data
;
852 pieces
->wowlan_inst_size
= tlv_len
;
854 case IWL_UCODE_TLV_WOWLAN_DATA
:
855 pieces
->wowlan_data
= tlv_data
;
856 pieces
->wowlan_data_size
= tlv_len
;
858 case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE
:
859 if (tlv_len
!= sizeof(u32
))
860 goto invalid_tlv_len
;
861 capa
->standard_phy_calibration_size
=
862 le32_to_cpup((__le32
*)tlv_data
);
865 IWL_DEBUG_INFO(priv
, "unknown TLV: %d\n", tlv_type
);
871 IWL_ERR(priv
, "invalid TLV after parsing: %zd\n", len
);
872 iwl_print_hex_dump(priv
, IWL_DL_FW
, (u8
*)data
, len
);
879 IWL_ERR(priv
, "TLV %d has invalid size: %u\n", tlv_type
, tlv_len
);
880 iwl_print_hex_dump(priv
, IWL_DL_FW
, tlv_data
, tlv_len
);
886 * iwl_ucode_callback - callback when firmware was loaded
888 * If loaded successfully, copies the firmware into buffers
889 * for the card to fetch (via DMA).
891 static void iwl_ucode_callback(const struct firmware
*ucode_raw
, void *context
)
893 struct iwl_priv
*priv
= context
;
894 struct iwl_ucode_header
*ucode
;
896 struct iwlagn_firmware_pieces pieces
;
897 const unsigned int api_max
= priv
->cfg
->ucode_api_max
;
898 unsigned int api_ok
= priv
->cfg
->ucode_api_ok
;
899 const unsigned int api_min
= priv
->cfg
->ucode_api_min
;
903 struct iwlagn_ucode_capabilities ucode_capa
= {
904 .max_probe_length
= 200,
905 .standard_phy_calibration_size
=
906 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE
,
912 memset(&pieces
, 0, sizeof(pieces
));
915 if (priv
->fw_index
<= api_ok
)
917 "request for firmware file '%s' failed.\n",
918 priv
->firmware_name
);
922 IWL_DEBUG_INFO(priv
, "Loaded firmware file '%s' (%zd bytes).\n",
923 priv
->firmware_name
, ucode_raw
->size
);
925 /* Make sure that we got at least the API version number */
926 if (ucode_raw
->size
< 4) {
927 IWL_ERR(priv
, "File size way too small!\n");
931 /* Data from ucode file: header followed by uCode images */
932 ucode
= (struct iwl_ucode_header
*)ucode_raw
->data
;
935 err
= iwlagn_load_legacy_firmware(priv
, ucode_raw
, &pieces
);
937 err
= iwlagn_load_firmware(priv
, ucode_raw
, &pieces
,
943 api_ver
= IWL_UCODE_API(priv
->ucode_ver
);
944 build
= pieces
.build
;
947 * api_ver should match the api version forming part of the
948 * firmware filename ... but we don't check for that and only rely
949 * on the API version read from firmware header from here on forward
951 /* no api version check required for experimental uCode */
952 if (priv
->fw_index
!= UCODE_EXPERIMENTAL_INDEX
) {
953 if (api_ver
< api_min
|| api_ver
> api_max
) {
955 "Driver unable to support your firmware API. "
956 "Driver supports v%u, firmware is v%u.\n",
961 if (api_ver
< api_ok
) {
962 if (api_ok
!= api_max
)
963 IWL_ERR(priv
, "Firmware has old API version, "
964 "expected v%u through v%u, got v%u.\n",
965 api_ok
, api_max
, api_ver
);
967 IWL_ERR(priv
, "Firmware has old API version, "
968 "expected v%u, got v%u.\n",
970 IWL_ERR(priv
, "New firmware can be obtained from "
971 "http://www.intellinuxwireless.org/.\n");
976 sprintf(buildstr
, " build %u%s", build
,
977 (priv
->fw_index
== UCODE_EXPERIMENTAL_INDEX
)
982 IWL_INFO(priv
, "loaded firmware version %u.%u.%u.%u%s\n",
983 IWL_UCODE_MAJOR(priv
->ucode_ver
),
984 IWL_UCODE_MINOR(priv
->ucode_ver
),
985 IWL_UCODE_API(priv
->ucode_ver
),
986 IWL_UCODE_SERIAL(priv
->ucode_ver
),
989 snprintf(priv
->hw
->wiphy
->fw_version
,
990 sizeof(priv
->hw
->wiphy
->fw_version
),
992 IWL_UCODE_MAJOR(priv
->ucode_ver
),
993 IWL_UCODE_MINOR(priv
->ucode_ver
),
994 IWL_UCODE_API(priv
->ucode_ver
),
995 IWL_UCODE_SERIAL(priv
->ucode_ver
),
999 * For any of the failures below (before allocating pci memory)
1000 * we will try to load a version with a smaller API -- maybe the
1001 * user just got a corrupted version of the latest API.
1004 IWL_DEBUG_INFO(priv
, "f/w package hdr ucode version raw = 0x%x\n",
1006 IWL_DEBUG_INFO(priv
, "f/w package hdr runtime inst size = %Zd\n",
1008 IWL_DEBUG_INFO(priv
, "f/w package hdr runtime data size = %Zd\n",
1010 IWL_DEBUG_INFO(priv
, "f/w package hdr init inst size = %Zd\n",
1012 IWL_DEBUG_INFO(priv
, "f/w package hdr init data size = %Zd\n",
1013 pieces
.init_data_size
);
1015 /* Verify that uCode images will fit in card's SRAM */
1016 if (pieces
.inst_size
> hw_params(priv
).max_inst_size
) {
1017 IWL_ERR(priv
, "uCode instr len %Zd too large to fit in\n",
1022 if (pieces
.data_size
> hw_params(priv
).max_data_size
) {
1023 IWL_ERR(priv
, "uCode data len %Zd too large to fit in\n",
1028 if (pieces
.init_size
> hw_params(priv
).max_inst_size
) {
1029 IWL_ERR(priv
, "uCode init instr len %Zd too large to fit in\n",
1034 if (pieces
.init_data_size
> hw_params(priv
).max_data_size
) {
1035 IWL_ERR(priv
, "uCode init data len %Zd too large to fit in\n",
1036 pieces
.init_data_size
);
1040 /* Allocate ucode buffers for card's bus-master loading ... */
1042 /* Runtime instructions and 2 copies of data:
1043 * 1) unmodified from disk
1044 * 2) backup cache for save/restore during power-downs */
1045 if (iwl_alloc_fw_desc(priv
, &priv
->ucode_rt
.code
,
1046 pieces
.inst
, pieces
.inst_size
))
1048 if (iwl_alloc_fw_desc(priv
, &priv
->ucode_rt
.data
,
1049 pieces
.data
, pieces
.data_size
))
1052 /* Initialization instructions and data */
1053 if (pieces
.init_size
&& pieces
.init_data_size
) {
1054 if (iwl_alloc_fw_desc(priv
, &priv
->ucode_init
.code
,
1055 pieces
.init
, pieces
.init_size
))
1057 if (iwl_alloc_fw_desc(priv
, &priv
->ucode_init
.data
,
1058 pieces
.init_data
, pieces
.init_data_size
))
1062 /* WoWLAN instructions and data */
1063 if (pieces
.wowlan_inst_size
&& pieces
.wowlan_data_size
) {
1064 if (iwl_alloc_fw_desc(priv
, &priv
->ucode_wowlan
.code
,
1066 pieces
.wowlan_inst_size
))
1068 if (iwl_alloc_fw_desc(priv
, &priv
->ucode_wowlan
.data
,
1070 pieces
.wowlan_data_size
))
1074 /* Now that we can no longer fail, copy information */
1077 * The (size - 16) / 12 formula is based on the information recorded
1078 * for each event, which is of mode 1 (including timestamp) for all
1079 * new microcodes that include this information.
1081 priv
->init_evtlog_ptr
= pieces
.init_evtlog_ptr
;
1082 if (pieces
.init_evtlog_size
)
1083 priv
->init_evtlog_size
= (pieces
.init_evtlog_size
- 16)/12;
1085 priv
->init_evtlog_size
=
1086 priv
->cfg
->base_params
->max_event_log_size
;
1087 priv
->init_errlog_ptr
= pieces
.init_errlog_ptr
;
1088 priv
->inst_evtlog_ptr
= pieces
.inst_evtlog_ptr
;
1089 if (pieces
.inst_evtlog_size
)
1090 priv
->inst_evtlog_size
= (pieces
.inst_evtlog_size
- 16)/12;
1092 priv
->inst_evtlog_size
=
1093 priv
->cfg
->base_params
->max_event_log_size
;
1094 priv
->inst_errlog_ptr
= pieces
.inst_errlog_ptr
;
1096 priv
->new_scan_threshold_behaviour
=
1097 !!(ucode_capa
.flags
& IWL_UCODE_TLV_FLAGS_NEWSCAN
);
1099 if (!(priv
->cfg
->sku
& EEPROM_SKU_CAP_IPAN_ENABLE
))
1100 ucode_capa
.flags
&= ~IWL_UCODE_TLV_FLAGS_PAN
;
1103 * if not PAN, then don't support P2P -- might be a uCode
1104 * packaging bug or due to the eeprom check above
1106 if (!(ucode_capa
.flags
& IWL_UCODE_TLV_FLAGS_PAN
))
1107 ucode_capa
.flags
&= ~IWL_UCODE_TLV_FLAGS_P2P
;
1109 if (ucode_capa
.flags
& IWL_UCODE_TLV_FLAGS_PAN
) {
1110 priv
->sta_key_max_num
= STA_KEY_MAX_NUM_PAN
;
1111 priv
->shrd
->cmd_queue
= IWL_IPAN_CMD_QUEUE_NUM
;
1113 priv
->sta_key_max_num
= STA_KEY_MAX_NUM
;
1114 priv
->shrd
->cmd_queue
= IWL_DEFAULT_CMD_QUEUE_NUM
;
1118 * figure out the offset of chain noise reset and gain commands
1119 * base on the size of standard phy calibration commands table size
1121 if (ucode_capa
.standard_phy_calibration_size
>
1122 IWL_MAX_PHY_CALIBRATE_TBL_SIZE
)
1123 ucode_capa
.standard_phy_calibration_size
=
1124 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE
;
1126 priv
->phy_calib_chain_noise_reset_cmd
=
1127 ucode_capa
.standard_phy_calibration_size
;
1128 priv
->phy_calib_chain_noise_gain_cmd
=
1129 ucode_capa
.standard_phy_calibration_size
+ 1;
1131 /* initialize all valid contexts */
1132 iwl_init_context(priv
, ucode_capa
.flags
);
1134 /**************************************************
1135 * This is still part of probe() in a sense...
1137 * 9. Setup and register with mac80211 and debugfs
1138 **************************************************/
1139 err
= iwlagn_mac_setup_register(priv
, &ucode_capa
);
1143 err
= iwl_dbgfs_register(priv
, DRV_NAME
);
1145 IWL_ERR(priv
, "failed to create debugfs files. Ignoring error: %d\n", err
);
1147 /* We have our copies now, allow OS release its copies */
1148 release_firmware(ucode_raw
);
1149 complete(&priv
->firmware_loading_complete
);
1153 /* try next, if any */
1154 if (iwl_request_firmware(priv
, false))
1156 release_firmware(ucode_raw
);
1160 IWL_ERR(priv
, "failed to allocate pci memory\n");
1161 iwl_dealloc_ucode(priv
);
1163 complete(&priv
->firmware_loading_complete
);
1164 device_release_driver(bus(priv
)->dev
);
1165 release_firmware(ucode_raw
);
1168 static void iwl_rf_kill_ct_config(struct iwl_priv
*priv
)
1170 struct iwl_ct_kill_config cmd
;
1171 struct iwl_ct_kill_throttling_config adv_cmd
;
1172 unsigned long flags
;
1175 spin_lock_irqsave(&priv
->shrd
->lock
, flags
);
1176 iwl_write32(bus(priv
), CSR_UCODE_DRV_GP1_CLR
,
1177 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT
);
1178 spin_unlock_irqrestore(&priv
->shrd
->lock
, flags
);
1179 priv
->thermal_throttle
.ct_kill_toggle
= false;
1181 if (priv
->cfg
->base_params
->support_ct_kill_exit
) {
1182 adv_cmd
.critical_temperature_enter
=
1183 cpu_to_le32(hw_params(priv
).ct_kill_threshold
);
1184 adv_cmd
.critical_temperature_exit
=
1185 cpu_to_le32(hw_params(priv
).ct_kill_exit_threshold
);
1187 ret
= iwl_trans_send_cmd_pdu(trans(priv
),
1188 REPLY_CT_KILL_CONFIG_CMD
,
1189 CMD_SYNC
, sizeof(adv_cmd
), &adv_cmd
);
1191 IWL_ERR(priv
, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1193 IWL_DEBUG_INFO(priv
, "REPLY_CT_KILL_CONFIG_CMD "
1194 "succeeded, critical temperature enter is %d,"
1196 hw_params(priv
).ct_kill_threshold
,
1197 hw_params(priv
).ct_kill_exit_threshold
);
1199 cmd
.critical_temperature_R
=
1200 cpu_to_le32(hw_params(priv
).ct_kill_threshold
);
1202 ret
= iwl_trans_send_cmd_pdu(trans(priv
),
1203 REPLY_CT_KILL_CONFIG_CMD
,
1204 CMD_SYNC
, sizeof(cmd
), &cmd
);
1206 IWL_ERR(priv
, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1208 IWL_DEBUG_INFO(priv
, "REPLY_CT_KILL_CONFIG_CMD "
1210 "critical temperature is %d\n",
1211 hw_params(priv
).ct_kill_threshold
);
1215 static int iwlagn_send_calib_cfg_rt(struct iwl_priv
*priv
, u32 cfg
)
1217 struct iwl_calib_cfg_cmd calib_cfg_cmd
;
1218 struct iwl_host_cmd cmd
= {
1219 .id
= CALIBRATION_CFG_CMD
,
1220 .len
= { sizeof(struct iwl_calib_cfg_cmd
), },
1221 .data
= { &calib_cfg_cmd
, },
1224 memset(&calib_cfg_cmd
, 0, sizeof(calib_cfg_cmd
));
1225 calib_cfg_cmd
.ucd_calib_cfg
.once
.is_enable
= IWL_CALIB_RT_CFG_ALL
;
1226 calib_cfg_cmd
.ucd_calib_cfg
.once
.start
= cpu_to_le32(cfg
);
1228 return iwl_trans_send_cmd(trans(priv
), &cmd
);
1232 static int iwlagn_send_tx_ant_config(struct iwl_priv
*priv
, u8 valid_tx_ant
)
1234 struct iwl_tx_ant_config_cmd tx_ant_cmd
= {
1235 .valid
= cpu_to_le32(valid_tx_ant
),
1238 if (IWL_UCODE_API(priv
->ucode_ver
) > 1) {
1239 IWL_DEBUG_HC(priv
, "select valid tx ant: %u\n", valid_tx_ant
);
1240 return iwl_trans_send_cmd_pdu(trans(priv
),
1241 TX_ANT_CONFIGURATION_CMD
,
1243 sizeof(struct iwl_tx_ant_config_cmd
),
1246 IWL_DEBUG_HC(priv
, "TX_ANT_CONFIGURATION_CMD not supported\n");
1252 * iwl_alive_start - called after REPLY_ALIVE notification received
1253 * from protocol/runtime uCode (initialization uCode's
1254 * Alive gets handled by iwl_init_alive_start()).
1256 int iwl_alive_start(struct iwl_priv
*priv
)
1259 struct iwl_rxon_context
*ctx
= &priv
->contexts
[IWL_RXON_CTX_BSS
];
1261 /*TODO: this should go to the transport layer */
1262 iwl_reset_ict(trans(priv
));
1264 IWL_DEBUG_INFO(priv
, "Runtime Alive received.\n");
1266 /* After the ALIVE response, we can send host commands to the uCode */
1267 set_bit(STATUS_ALIVE
, &priv
->shrd
->status
);
1269 /* Enable watchdog to monitor the driver tx queues */
1270 iwl_setup_watchdog(priv
);
1272 if (iwl_is_rfkill(priv
->shrd
))
1275 /* download priority table before any calibration request */
1276 if (priv
->cfg
->bt_params
&&
1277 priv
->cfg
->bt_params
->advanced_bt_coexist
) {
1278 /* Configure Bluetooth device coexistence support */
1279 if (priv
->cfg
->bt_params
->bt_sco_disable
)
1280 priv
->bt_enable_pspoll
= false;
1282 priv
->bt_enable_pspoll
= true;
1284 priv
->bt_valid
= IWLAGN_BT_ALL_VALID_MSK
;
1285 priv
->kill_ack_mask
= IWLAGN_BT_KILL_ACK_MASK_DEFAULT
;
1286 priv
->kill_cts_mask
= IWLAGN_BT_KILL_CTS_MASK_DEFAULT
;
1287 iwlagn_send_advance_bt_config(priv
);
1288 priv
->bt_valid
= IWLAGN_BT_VALID_ENABLE_FLAGS
;
1289 priv
->cur_rssi_ctx
= NULL
;
1291 iwlagn_send_prio_tbl(priv
);
1293 /* FIXME: w/a to force change uCode BT state machine */
1294 ret
= iwlagn_send_bt_env(priv
, IWL_BT_COEX_ENV_OPEN
,
1295 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2
);
1298 ret
= iwlagn_send_bt_env(priv
, IWL_BT_COEX_ENV_CLOSE
,
1299 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2
);
1304 * default is 2-wire BT coexexistence support
1306 iwl_send_bt_config(priv
);
1309 if (hw_params(priv
).calib_rt_cfg
)
1310 iwlagn_send_calib_cfg_rt(priv
,
1311 hw_params(priv
).calib_rt_cfg
);
1313 ieee80211_wake_queues(priv
->hw
);
1315 priv
->active_rate
= IWL_RATES_MASK
;
1317 /* Configure Tx antenna selection based on H/W config */
1318 iwlagn_send_tx_ant_config(priv
, priv
->cfg
->valid_tx_ant
);
1320 if (iwl_is_associated_ctx(ctx
) && !priv
->shrd
->wowlan
) {
1321 struct iwl_rxon_cmd
*active_rxon
=
1322 (struct iwl_rxon_cmd
*)&ctx
->active
;
1323 /* apply any changes in staging */
1324 ctx
->staging
.filter_flags
|= RXON_FILTER_ASSOC_MSK
;
1325 active_rxon
->filter_flags
&= ~RXON_FILTER_ASSOC_MSK
;
1327 struct iwl_rxon_context
*tmp
;
1328 /* Initialize our rx_config data */
1329 for_each_context(priv
, tmp
)
1330 iwl_connection_init_rx_config(priv
, tmp
);
1332 iwlagn_set_rxon_chain(priv
, ctx
);
1335 if (!priv
->shrd
->wowlan
) {
1336 /* WoWLAN ucode will not reply in the same way, skip it */
1337 iwl_reset_run_time_calib(priv
);
1340 set_bit(STATUS_READY
, &priv
->shrd
->status
);
1342 /* Configure the adapter for unassociated operation */
1343 ret
= iwlagn_commit_rxon(priv
, ctx
);
1347 /* At this point, the NIC is initialized and operational */
1348 iwl_rf_kill_ct_config(priv
);
1350 IWL_DEBUG_INFO(priv
, "ALIVE processing complete.\n");
1352 return iwl_power_update_mode(priv
, true);
/* Forward declaration: defined later in this file. */
static void iwl_cancel_deferred_work(struct iwl_priv *priv);
1357 static void __iwl_down(struct iwl_priv
*priv
)
1361 IWL_DEBUG_INFO(priv
, DRV_NAME
" is going down\n");
1363 iwl_scan_cancel_timeout(priv
, 200);
1366 * If active, scanning won't cancel it, so say it expired.
1367 * No race since we hold the mutex here and a new one
1368 * can't come in at this time.
1370 ieee80211_remain_on_channel_expired(priv
->hw
);
1373 test_and_set_bit(STATUS_EXIT_PENDING
, &priv
->shrd
->status
);
1375 /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
1376 * to prevent rearm timer */
1377 del_timer_sync(&priv
->watchdog
);
1379 iwl_clear_ucode_stations(priv
, NULL
);
1380 iwl_dealloc_bcast_stations(priv
);
1381 iwl_clear_driver_stations(priv
);
1383 /* reset BT coex data */
1384 priv
->bt_status
= 0;
1385 priv
->cur_rssi_ctx
= NULL
;
1386 priv
->bt_is_sco
= 0;
1387 if (priv
->cfg
->bt_params
)
1388 priv
->bt_traffic_load
=
1389 priv
->cfg
->bt_params
->bt_init_traffic_load
;
1391 priv
->bt_traffic_load
= 0;
1392 priv
->bt_full_concurrent
= false;
1393 priv
->bt_ci_compliance
= 0;
1395 /* Wipe out the EXIT_PENDING status bit if we are not actually
1396 * exiting the module */
1398 clear_bit(STATUS_EXIT_PENDING
, &priv
->shrd
->status
);
1400 if (priv
->mac80211_registered
)
1401 ieee80211_stop_queues(priv
->hw
);
1403 iwl_trans_stop_device(trans(priv
));
1405 /* Clear out all status bits but a few that are stable across reset */
1406 priv
->shrd
->status
&=
1407 test_bit(STATUS_RF_KILL_HW
, &priv
->shrd
->status
) <<
1409 test_bit(STATUS_GEO_CONFIGURED
, &priv
->shrd
->status
) <<
1410 STATUS_GEO_CONFIGURED
|
1411 test_bit(STATUS_FW_ERROR
, &priv
->shrd
->status
) <<
1413 test_bit(STATUS_EXIT_PENDING
, &priv
->shrd
->status
) <<
1414 STATUS_EXIT_PENDING
;
1416 dev_kfree_skb(priv
->beacon_skb
);
1417 priv
->beacon_skb
= NULL
;
1420 static void iwl_down(struct iwl_priv
*priv
)
1422 mutex_lock(&priv
->shrd
->mutex
);
1424 mutex_unlock(&priv
->shrd
->mutex
);
1426 iwl_cancel_deferred_work(priv
);
1429 #define MAX_HW_RESTARTS 5
1431 static int __iwl_up(struct iwl_priv
*priv
)
1433 struct iwl_rxon_context
*ctx
;
1436 lockdep_assert_held(&priv
->shrd
->mutex
);
1438 if (test_bit(STATUS_EXIT_PENDING
, &priv
->shrd
->status
)) {
1439 IWL_WARN(priv
, "Exit pending; will not bring the NIC up\n");
1443 for_each_context(priv
, ctx
) {
1444 ret
= iwlagn_alloc_bcast_station(priv
, ctx
);
1446 iwl_dealloc_bcast_stations(priv
);
1451 ret
= iwlagn_run_init_ucode(priv
);
1453 IWL_ERR(priv
, "Failed to run INIT ucode: %d\n", ret
);
1457 ret
= iwlagn_load_ucode_wait_alive(priv
,
1461 IWL_ERR(priv
, "Failed to start RT ucode: %d\n", ret
);
1465 ret
= iwl_alive_start(priv
);
1471 set_bit(STATUS_EXIT_PENDING
, &priv
->shrd
->status
);
1473 clear_bit(STATUS_EXIT_PENDING
, &priv
->shrd
->status
);
1475 IWL_ERR(priv
, "Unable to initialize device.\n");
/*****************************************************************************
 *
 * Workqueue callbacks
 *
 *****************************************************************************/
1486 static void iwl_bg_run_time_calib_work(struct work_struct
*work
)
1488 struct iwl_priv
*priv
= container_of(work
, struct iwl_priv
,
1489 run_time_calib_work
);
1491 mutex_lock(&priv
->shrd
->mutex
);
1493 if (test_bit(STATUS_EXIT_PENDING
, &priv
->shrd
->status
) ||
1494 test_bit(STATUS_SCANNING
, &priv
->shrd
->status
)) {
1495 mutex_unlock(&priv
->shrd
->mutex
);
1499 if (priv
->start_calib
) {
1500 iwl_chain_noise_calibration(priv
);
1501 iwl_sensitivity_calibration(priv
);
1504 mutex_unlock(&priv
->shrd
->mutex
);
1507 static void iwlagn_prepare_restart(struct iwl_priv
*priv
)
1509 struct iwl_rxon_context
*ctx
;
1510 bool bt_full_concurrent
;
1511 u8 bt_ci_compliance
;
1516 lockdep_assert_held(&priv
->shrd
->mutex
);
1518 for_each_context(priv
, ctx
)
1523 * __iwl_down() will clear the BT status variables,
1524 * which is correct, but when we restart we really
1525 * want to keep them so restore them afterwards.
1527 * The restart process will later pick them up and
1528 * re-configure the hw when we reconfigure the BT
1531 bt_full_concurrent
= priv
->bt_full_concurrent
;
1532 bt_ci_compliance
= priv
->bt_ci_compliance
;
1533 bt_load
= priv
->bt_traffic_load
;
1534 bt_status
= priv
->bt_status
;
1535 bt_is_sco
= priv
->bt_is_sco
;
1539 priv
->bt_full_concurrent
= bt_full_concurrent
;
1540 priv
->bt_ci_compliance
= bt_ci_compliance
;
1541 priv
->bt_traffic_load
= bt_load
;
1542 priv
->bt_status
= bt_status
;
1543 priv
->bt_is_sco
= bt_is_sco
;
1546 static void iwl_bg_restart(struct work_struct
*data
)
1548 struct iwl_priv
*priv
= container_of(data
, struct iwl_priv
, restart
);
1550 if (test_bit(STATUS_EXIT_PENDING
, &priv
->shrd
->status
))
1553 if (test_and_clear_bit(STATUS_FW_ERROR
, &priv
->shrd
->status
)) {
1554 mutex_lock(&priv
->shrd
->mutex
);
1555 iwlagn_prepare_restart(priv
);
1556 mutex_unlock(&priv
->shrd
->mutex
);
1557 iwl_cancel_deferred_work(priv
);
1558 ieee80211_restart_hw(priv
->hw
);
/*****************************************************************************
 *
 * mac80211 entry point functions
 *
 *****************************************************************************/
1570 static const struct ieee80211_iface_limit iwlagn_sta_ap_limits
[] = {
1573 .types
= BIT(NL80211_IFTYPE_STATION
),
1577 .types
= BIT(NL80211_IFTYPE_AP
),
1581 static const struct ieee80211_iface_limit iwlagn_2sta_limits
[] = {
1584 .types
= BIT(NL80211_IFTYPE_STATION
),
1588 static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits
[] = {
1591 .types
= BIT(NL80211_IFTYPE_STATION
),
1595 .types
= BIT(NL80211_IFTYPE_P2P_GO
) |
1596 BIT(NL80211_IFTYPE_AP
),
1600 static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits
[] = {
1603 .types
= BIT(NL80211_IFTYPE_STATION
),
1607 .types
= BIT(NL80211_IFTYPE_P2P_CLIENT
),
1611 static const struct ieee80211_iface_combination
1612 iwlagn_iface_combinations_dualmode
[] = {
1613 { .num_different_channels
= 1,
1614 .max_interfaces
= 2,
1615 .beacon_int_infra_match
= true,
1616 .limits
= iwlagn_sta_ap_limits
,
1617 .n_limits
= ARRAY_SIZE(iwlagn_sta_ap_limits
),
1619 { .num_different_channels
= 1,
1620 .max_interfaces
= 2,
1621 .limits
= iwlagn_2sta_limits
,
1622 .n_limits
= ARRAY_SIZE(iwlagn_2sta_limits
),
1626 static const struct ieee80211_iface_combination
1627 iwlagn_iface_combinations_p2p
[] = {
1628 { .num_different_channels
= 1,
1629 .max_interfaces
= 2,
1630 .beacon_int_infra_match
= true,
1631 .limits
= iwlagn_p2p_sta_go_limits
,
1632 .n_limits
= ARRAY_SIZE(iwlagn_p2p_sta_go_limits
),
1634 { .num_different_channels
= 1,
1635 .max_interfaces
= 2,
1636 .limits
= iwlagn_p2p_2sta_limits
,
1637 .n_limits
= ARRAY_SIZE(iwlagn_p2p_2sta_limits
),
1642 * Not a mac80211 entry point function, but it fits in with all the
1643 * other mac80211 functions grouped here.
1645 static int iwlagn_mac_setup_register(struct iwl_priv
*priv
,
1646 struct iwlagn_ucode_capabilities
*capa
)
1649 struct ieee80211_hw
*hw
= priv
->hw
;
1650 struct iwl_rxon_context
*ctx
;
1652 hw
->rate_control_algorithm
= "iwl-agn-rs";
1654 /* Tell mac80211 our characteristics */
1655 hw
->flags
= IEEE80211_HW_SIGNAL_DBM
|
1656 IEEE80211_HW_AMPDU_AGGREGATION
|
1657 IEEE80211_HW_NEED_DTIM_PERIOD
|
1658 IEEE80211_HW_SPECTRUM_MGMT
|
1659 IEEE80211_HW_REPORTS_TX_ACK_STATUS
;
1662 * Including the following line will crash some AP's. This
1663 * workaround removes the stimulus which causes the crash until
1664 * the AP software can be fixed.
1665 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1668 hw
->flags
|= IEEE80211_HW_SUPPORTS_PS
|
1669 IEEE80211_HW_SUPPORTS_DYNAMIC_PS
;
1671 if (priv
->cfg
->sku
& EEPROM_SKU_CAP_11N_ENABLE
)
1672 hw
->flags
|= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS
|
1673 IEEE80211_HW_SUPPORTS_STATIC_SMPS
;
1675 if (capa
->flags
& IWL_UCODE_TLV_FLAGS_MFP
)
1676 hw
->flags
|= IEEE80211_HW_MFP_CAPABLE
;
1678 hw
->sta_data_size
= sizeof(struct iwl_station_priv
);
1679 hw
->vif_data_size
= sizeof(struct iwl_vif_priv
);
1681 for_each_context(priv
, ctx
) {
1682 hw
->wiphy
->interface_modes
|= ctx
->interface_modes
;
1683 hw
->wiphy
->interface_modes
|= ctx
->exclusive_interface_modes
;
1686 BUILD_BUG_ON(NUM_IWL_RXON_CTX
!= 2);
1688 if (hw
->wiphy
->interface_modes
& BIT(NL80211_IFTYPE_P2P_CLIENT
)) {
1689 hw
->wiphy
->iface_combinations
= iwlagn_iface_combinations_p2p
;
1690 hw
->wiphy
->n_iface_combinations
=
1691 ARRAY_SIZE(iwlagn_iface_combinations_p2p
);
1692 } else if (hw
->wiphy
->interface_modes
& BIT(NL80211_IFTYPE_AP
)) {
1693 hw
->wiphy
->iface_combinations
= iwlagn_iface_combinations_dualmode
;
1694 hw
->wiphy
->n_iface_combinations
=
1695 ARRAY_SIZE(iwlagn_iface_combinations_dualmode
);
1698 hw
->wiphy
->max_remain_on_channel_duration
= 1000;
1700 hw
->wiphy
->flags
|= WIPHY_FLAG_CUSTOM_REGULATORY
|
1701 WIPHY_FLAG_DISABLE_BEACON_HINTS
|
1702 WIPHY_FLAG_IBSS_RSN
;
1704 if (priv
->ucode_wowlan
.code
.len
&& device_can_wakeup(bus(priv
)->dev
)) {
1705 hw
->wiphy
->wowlan
.flags
= WIPHY_WOWLAN_MAGIC_PKT
|
1706 WIPHY_WOWLAN_DISCONNECT
|
1707 WIPHY_WOWLAN_EAP_IDENTITY_REQ
|
1708 WIPHY_WOWLAN_RFKILL_RELEASE
;
1709 if (!iwlagn_mod_params
.sw_crypto
)
1710 hw
->wiphy
->wowlan
.flags
|=
1711 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY
|
1712 WIPHY_WOWLAN_GTK_REKEY_FAILURE
;
1714 hw
->wiphy
->wowlan
.n_patterns
= IWLAGN_WOWLAN_MAX_PATTERNS
;
1715 hw
->wiphy
->wowlan
.pattern_min_len
=
1716 IWLAGN_WOWLAN_MIN_PATTERN_LEN
;
1717 hw
->wiphy
->wowlan
.pattern_max_len
=
1718 IWLAGN_WOWLAN_MAX_PATTERN_LEN
;
1721 if (iwlagn_mod_params
.power_save
)
1722 hw
->wiphy
->flags
|= WIPHY_FLAG_PS_ON_BY_DEFAULT
;
1724 hw
->wiphy
->flags
&= ~WIPHY_FLAG_PS_ON_BY_DEFAULT
;
1726 hw
->wiphy
->max_scan_ssids
= PROBE_OPTION_MAX
;
1727 /* we create the 802.11 header and a zero-length SSID element */
1728 hw
->wiphy
->max_scan_ie_len
= capa
->max_probe_length
- 24 - 2;
1730 /* Default value; 4 EDCA QOS priorities */
1733 hw
->max_listen_interval
= IWL_CONN_MAX_LISTEN_INTERVAL
;
1735 if (priv
->bands
[IEEE80211_BAND_2GHZ
].n_channels
)
1736 priv
->hw
->wiphy
->bands
[IEEE80211_BAND_2GHZ
] =
1737 &priv
->bands
[IEEE80211_BAND_2GHZ
];
1738 if (priv
->bands
[IEEE80211_BAND_5GHZ
].n_channels
)
1739 priv
->hw
->wiphy
->bands
[IEEE80211_BAND_5GHZ
] =
1740 &priv
->bands
[IEEE80211_BAND_5GHZ
];
1742 iwl_leds_init(priv
);
1744 ret
= ieee80211_register_hw(priv
->hw
);
1746 IWL_ERR(priv
, "Failed to register hw (error %d)\n", ret
);
1749 priv
->mac80211_registered
= 1;
1755 static int iwlagn_mac_start(struct ieee80211_hw
*hw
)
1757 struct iwl_priv
*priv
= hw
->priv
;
1760 IWL_DEBUG_MAC80211(priv
, "enter\n");
1762 /* we should be verifying the device is ready to be opened */
1763 mutex_lock(&priv
->shrd
->mutex
);
1764 ret
= __iwl_up(priv
);
1765 mutex_unlock(&priv
->shrd
->mutex
);
1769 IWL_DEBUG_INFO(priv
, "Start UP work done.\n");
1771 /* Now we should be done, and the READY bit should be set. */
1772 if (WARN_ON(!test_bit(STATUS_READY
, &priv
->shrd
->status
)))
1775 iwlagn_led_enable(priv
);
1778 IWL_DEBUG_MAC80211(priv
, "leave\n");
1782 static void iwlagn_mac_stop(struct ieee80211_hw
*hw
)
1784 struct iwl_priv
*priv
= hw
->priv
;
1786 IWL_DEBUG_MAC80211(priv
, "enter\n");
1795 flush_workqueue(priv
->shrd
->workqueue
);
1797 /* User space software may expect getting rfkill changes
1798 * even if interface is down */
1799 iwl_write32(bus(priv
), CSR_INT
, 0xFFFFFFFF);
1800 iwl_enable_rfkill_int(priv
);
1802 IWL_DEBUG_MAC80211(priv
, "leave\n");
1805 #ifdef CONFIG_PM_SLEEP
1806 static int iwlagn_send_patterns(struct iwl_priv
*priv
,
1807 struct cfg80211_wowlan
*wowlan
)
1809 struct iwlagn_wowlan_patterns_cmd
*pattern_cmd
;
1810 struct iwl_host_cmd cmd
= {
1811 .id
= REPLY_WOWLAN_PATTERNS
,
1812 .dataflags
[0] = IWL_HCMD_DFL_NOCOPY
,
1817 if (!wowlan
->n_patterns
)
1820 cmd
.len
[0] = sizeof(*pattern_cmd
) +
1821 wowlan
->n_patterns
* sizeof(struct iwlagn_wowlan_pattern
);
1823 pattern_cmd
= kmalloc(cmd
.len
[0], GFP_KERNEL
);
1827 pattern_cmd
->n_patterns
= cpu_to_le32(wowlan
->n_patterns
);
1829 for (i
= 0; i
< wowlan
->n_patterns
; i
++) {
1830 int mask_len
= DIV_ROUND_UP(wowlan
->patterns
[i
].pattern_len
, 8);
1832 memcpy(&pattern_cmd
->patterns
[i
].mask
,
1833 wowlan
->patterns
[i
].mask
, mask_len
);
1834 memcpy(&pattern_cmd
->patterns
[i
].pattern
,
1835 wowlan
->patterns
[i
].pattern
,
1836 wowlan
->patterns
[i
].pattern_len
);
1837 pattern_cmd
->patterns
[i
].mask_size
= mask_len
;
1838 pattern_cmd
->patterns
[i
].pattern_size
=
1839 wowlan
->patterns
[i
].pattern_len
;
1842 cmd
.data
[0] = pattern_cmd
;
1843 err
= iwl_trans_send_cmd(trans(priv
), &cmd
);
1849 static void iwlagn_mac_set_rekey_data(struct ieee80211_hw
*hw
,
1850 struct ieee80211_vif
*vif
,
1851 struct cfg80211_gtk_rekey_data
*data
)
1853 struct iwl_priv
*priv
= hw
->priv
;
1855 if (iwlagn_mod_params
.sw_crypto
)
1858 IWL_DEBUG_MAC80211(priv
, "enter\n");
1859 mutex_lock(&priv
->shrd
->mutex
);
1861 if (priv
->contexts
[IWL_RXON_CTX_BSS
].vif
!= vif
)
1864 memcpy(priv
->kek
, data
->kek
, NL80211_KEK_LEN
);
1865 memcpy(priv
->kck
, data
->kck
, NL80211_KCK_LEN
);
1866 priv
->replay_ctr
= cpu_to_le64(be64_to_cpup((__be64
*)&data
->replay_ctr
));
1867 priv
->have_rekey_data
= true;
1870 mutex_unlock(&priv
->shrd
->mutex
);
1871 IWL_DEBUG_MAC80211(priv
, "leave\n");
1874 struct wowlan_key_data
{
1875 struct iwl_rxon_context
*ctx
;
1876 struct iwlagn_wowlan_rsc_tsc_params_cmd
*rsc_tsc
;
1877 struct iwlagn_wowlan_tkip_params_cmd
*tkip
;
1879 bool error
, use_rsc_tsc
, use_tkip
;
1882 #ifdef CONFIG_PM_SLEEP
1883 static void iwlagn_convert_p1k(u16
*p1k
, __le16
*out
)
1887 for (i
= 0; i
< IWLAGN_P1K_SIZE
; i
++)
1888 out
[i
] = cpu_to_le16(p1k
[i
]);
1891 static void iwlagn_wowlan_program_keys(struct ieee80211_hw
*hw
,
1892 struct ieee80211_vif
*vif
,
1893 struct ieee80211_sta
*sta
,
1894 struct ieee80211_key_conf
*key
,
1897 struct iwl_priv
*priv
= hw
->priv
;
1898 struct wowlan_key_data
*data
= _data
;
1899 struct iwl_rxon_context
*ctx
= data
->ctx
;
1900 struct aes_sc
*aes_sc
, *aes_tx_sc
= NULL
;
1901 struct tkip_sc
*tkip_sc
, *tkip_tx_sc
= NULL
;
1902 struct iwlagn_p1k_cache
*rx_p1ks
;
1904 struct ieee80211_key_seq seq
;
1905 u32 cur_rx_iv32
= 0;
1906 u16 p1k
[IWLAGN_P1K_SIZE
];
1909 mutex_lock(&priv
->shrd
->mutex
);
1911 if ((key
->cipher
== WLAN_CIPHER_SUITE_WEP40
||
1912 key
->cipher
== WLAN_CIPHER_SUITE_WEP104
) &&
1913 !sta
&& !ctx
->key_mapping_keys
)
1914 ret
= iwl_set_default_wep_key(priv
, ctx
, key
);
1916 ret
= iwl_set_dynamic_key(priv
, ctx
, key
, sta
);
1919 IWL_ERR(priv
, "Error setting key during suspend!\n");
1923 switch (key
->cipher
) {
1924 case WLAN_CIPHER_SUITE_TKIP
:
1926 tkip_sc
= data
->rsc_tsc
->all_tsc_rsc
.tkip
.unicast_rsc
;
1927 tkip_tx_sc
= &data
->rsc_tsc
->all_tsc_rsc
.tkip
.tsc
;
1929 rx_p1ks
= data
->tkip
->rx_uni
;
1931 ieee80211_get_key_tx_seq(key
, &seq
);
1932 tkip_tx_sc
->iv16
= cpu_to_le16(seq
.tkip
.iv16
);
1933 tkip_tx_sc
->iv32
= cpu_to_le32(seq
.tkip
.iv32
);
1935 ieee80211_get_tkip_p1k_iv(key
, seq
.tkip
.iv32
, p1k
);
1936 iwlagn_convert_p1k(p1k
, data
->tkip
->tx
.p1k
);
1938 memcpy(data
->tkip
->mic_keys
.tx
,
1939 &key
->key
[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY
],
1940 IWLAGN_MIC_KEY_SIZE
);
1942 rx_mic_key
= data
->tkip
->mic_keys
.rx_unicast
;
1944 tkip_sc
= data
->rsc_tsc
->all_tsc_rsc
.tkip
.multicast_rsc
;
1945 rx_p1ks
= data
->tkip
->rx_multi
;
1946 rx_mic_key
= data
->tkip
->mic_keys
.rx_mcast
;
1950 * For non-QoS this relies on the fact that both the uCode and
1951 * mac80211 use TID 0 (as they need to to avoid replay attacks)
1952 * for checking the IV in the frames.
1954 for (i
= 0; i
< IWLAGN_NUM_RSC
; i
++) {
1955 ieee80211_get_key_rx_seq(key
, i
, &seq
);
1956 tkip_sc
[i
].iv16
= cpu_to_le16(seq
.tkip
.iv16
);
1957 tkip_sc
[i
].iv32
= cpu_to_le32(seq
.tkip
.iv32
);
1958 /* wrapping isn't allowed, AP must rekey */
1959 if (seq
.tkip
.iv32
> cur_rx_iv32
)
1960 cur_rx_iv32
= seq
.tkip
.iv32
;
1963 ieee80211_get_tkip_rx_p1k(key
, data
->bssid
, cur_rx_iv32
, p1k
);
1964 iwlagn_convert_p1k(p1k
, rx_p1ks
[0].p1k
);
1965 ieee80211_get_tkip_rx_p1k(key
, data
->bssid
,
1966 cur_rx_iv32
+ 1, p1k
);
1967 iwlagn_convert_p1k(p1k
, rx_p1ks
[1].p1k
);
1970 &key
->key
[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY
],
1971 IWLAGN_MIC_KEY_SIZE
);
1973 data
->use_tkip
= true;
1974 data
->use_rsc_tsc
= true;
1976 case WLAN_CIPHER_SUITE_CCMP
:
1978 u8
*pn
= seq
.ccmp
.pn
;
1980 aes_sc
= data
->rsc_tsc
->all_tsc_rsc
.aes
.unicast_rsc
;
1981 aes_tx_sc
= &data
->rsc_tsc
->all_tsc_rsc
.aes
.tsc
;
1983 ieee80211_get_key_tx_seq(key
, &seq
);
1984 aes_tx_sc
->pn
= cpu_to_le64(
1987 ((u64
)pn
[3] << 16) |
1988 ((u64
)pn
[2] << 24) |
1989 ((u64
)pn
[1] << 32) |
1990 ((u64
)pn
[0] << 40));
1992 aes_sc
= data
->rsc_tsc
->all_tsc_rsc
.aes
.multicast_rsc
;
1995 * For non-QoS this relies on the fact that both the uCode and
1996 * mac80211 use TID 0 for checking the IV in the frames.
1998 for (i
= 0; i
< IWLAGN_NUM_RSC
; i
++) {
1999 u8
*pn
= seq
.ccmp
.pn
;
2001 ieee80211_get_key_rx_seq(key
, i
, &seq
);
2002 aes_sc
->pn
= cpu_to_le64(
2005 ((u64
)pn
[3] << 16) |
2006 ((u64
)pn
[2] << 24) |
2007 ((u64
)pn
[1] << 32) |
2008 ((u64
)pn
[0] << 40));
2010 data
->use_rsc_tsc
= true;
2014 mutex_unlock(&priv
->shrd
->mutex
);
2017 static int iwlagn_mac_suspend(struct ieee80211_hw
*hw
,
2018 struct cfg80211_wowlan
*wowlan
)
2020 struct iwl_priv
*priv
= hw
->priv
;
2021 struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd
;
2022 struct iwl_rxon_cmd rxon
;
2023 struct iwl_rxon_context
*ctx
= &priv
->contexts
[IWL_RXON_CTX_BSS
];
2024 struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd
;
2025 struct iwlagn_wowlan_tkip_params_cmd tkip_cmd
= {};
2026 struct wowlan_key_data key_data
= {
2028 .bssid
= ctx
->active
.bssid_addr
,
2029 .use_rsc_tsc
= false,
2036 if (WARN_ON(!wowlan
))
2039 IWL_DEBUG_MAC80211(priv
, "enter\n");
2040 mutex_lock(&priv
->shrd
->mutex
);
2042 /* Don't attempt WoWLAN when not associated, tear down instead. */
2043 if (!ctx
->vif
|| ctx
->vif
->type
!= NL80211_IFTYPE_STATION
||
2044 !iwl_is_associated_ctx(ctx
)) {
2049 key_data
.rsc_tsc
= kzalloc(sizeof(*key_data
.rsc_tsc
), GFP_KERNEL
);
2050 if (!key_data
.rsc_tsc
) {
2055 memset(&wakeup_filter_cmd
, 0, sizeof(wakeup_filter_cmd
));
2058 * We know the last used seqno, and the uCode expects to know that
2059 * one, it will increment before TX.
2061 seq
= le16_to_cpu(priv
->last_seq_ctl
) & IEEE80211_SCTL_SEQ
;
2062 wakeup_filter_cmd
.non_qos_seq
= cpu_to_le16(seq
);
2065 * For QoS counters, we store the one to use next, so subtract 0x10
2066 * since the uCode will add 0x10 before using the value.
2068 for (i
= 0; i
< 8; i
++) {
2069 seq
= priv
->shrd
->tid_data
[IWL_AP_ID
][i
].seq_number
;
2071 wakeup_filter_cmd
.qos_seq
[i
] = cpu_to_le16(seq
);
2074 if (wowlan
->disconnect
)
2075 wakeup_filter_cmd
.enabled
|=
2076 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS
|
2077 IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE
);
2078 if (wowlan
->magic_pkt
)
2079 wakeup_filter_cmd
.enabled
|=
2080 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET
);
2081 if (wowlan
->gtk_rekey_failure
)
2082 wakeup_filter_cmd
.enabled
|=
2083 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL
);
2084 if (wowlan
->eap_identity_req
)
2085 wakeup_filter_cmd
.enabled
|=
2086 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ
);
2087 if (wowlan
->four_way_handshake
)
2088 wakeup_filter_cmd
.enabled
|=
2089 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE
);
2090 if (wowlan
->rfkill_release
)
2091 wakeup_filter_cmd
.enabled
|=
2092 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_RFKILL
);
2093 if (wowlan
->n_patterns
)
2094 wakeup_filter_cmd
.enabled
|=
2095 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH
);
2097 iwl_scan_cancel_timeout(priv
, 200);
2099 memcpy(&rxon
, &ctx
->active
, sizeof(rxon
));
2101 iwl_trans_stop_device(trans(priv
));
2103 priv
->shrd
->wowlan
= true;
2105 ret
= iwlagn_load_ucode_wait_alive(priv
, &priv
->ucode_wowlan
,
2110 /* now configure WoWLAN ucode */
2111 ret
= iwl_alive_start(priv
);
2115 memcpy(&ctx
->staging
, &rxon
, sizeof(rxon
));
2116 ret
= iwlagn_commit_rxon(priv
, ctx
);
2120 ret
= iwl_power_update_mode(priv
, true);
2124 if (!iwlagn_mod_params
.sw_crypto
) {
2125 /* mark all keys clear */
2126 priv
->ucode_key_table
= 0;
2127 ctx
->key_mapping_keys
= 0;
2130 * This needs to be unlocked due to lock ordering
2131 * constraints. Since we're in the suspend path
2132 * that isn't really a problem though.
2134 mutex_unlock(&priv
->shrd
->mutex
);
2135 ieee80211_iter_keys(priv
->hw
, ctx
->vif
,
2136 iwlagn_wowlan_program_keys
,
2138 mutex_lock(&priv
->shrd
->mutex
);
2139 if (key_data
.error
) {
2144 if (key_data
.use_rsc_tsc
) {
2145 struct iwl_host_cmd rsc_tsc_cmd
= {
2146 .id
= REPLY_WOWLAN_TSC_RSC_PARAMS
,
2148 .data
[0] = key_data
.rsc_tsc
,
2149 .dataflags
[0] = IWL_HCMD_DFL_NOCOPY
,
2150 .len
[0] = sizeof(*key_data
.rsc_tsc
),
2153 ret
= iwl_trans_send_cmd(trans(priv
), &rsc_tsc_cmd
);
2158 if (key_data
.use_tkip
) {
2159 ret
= iwl_trans_send_cmd_pdu(trans(priv
),
2160 REPLY_WOWLAN_TKIP_PARAMS
,
2161 CMD_SYNC
, sizeof(tkip_cmd
),
2167 if (priv
->have_rekey_data
) {
2168 memset(&kek_kck_cmd
, 0, sizeof(kek_kck_cmd
));
2169 memcpy(kek_kck_cmd
.kck
, priv
->kck
, NL80211_KCK_LEN
);
2170 kek_kck_cmd
.kck_len
= cpu_to_le16(NL80211_KCK_LEN
);
2171 memcpy(kek_kck_cmd
.kek
, priv
->kek
, NL80211_KEK_LEN
);
2172 kek_kck_cmd
.kek_len
= cpu_to_le16(NL80211_KEK_LEN
);
2173 kek_kck_cmd
.replay_ctr
= priv
->replay_ctr
;
2175 ret
= iwl_trans_send_cmd_pdu(trans(priv
),
2176 REPLY_WOWLAN_KEK_KCK_MATERIAL
,
2177 CMD_SYNC
, sizeof(kek_kck_cmd
),
2184 ret
= iwl_trans_send_cmd_pdu(trans(priv
), REPLY_WOWLAN_WAKEUP_FILTER
,
2185 CMD_SYNC
, sizeof(wakeup_filter_cmd
),
2186 &wakeup_filter_cmd
);
2190 ret
= iwlagn_send_patterns(priv
, wowlan
);
2194 device_set_wakeup_enable(bus(priv
)->dev
, true);
2196 /* Now let the ucode operate on its own */
2197 iwl_write32(bus(priv
), CSR_UCODE_DRV_GP1_SET
,
2198 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE
);
2203 priv
->shrd
->wowlan
= false;
2204 iwlagn_prepare_restart(priv
);
2205 ieee80211_restart_hw(priv
->hw
);
2207 mutex_unlock(&priv
->shrd
->mutex
);
2208 kfree(key_data
.rsc_tsc
);
2209 IWL_DEBUG_MAC80211(priv
, "leave\n");
2214 static int iwlagn_mac_resume(struct ieee80211_hw
*hw
)
2216 struct iwl_priv
*priv
= hw
->priv
;
2217 struct iwl_rxon_context
*ctx
= &priv
->contexts
[IWL_RXON_CTX_BSS
];
2218 struct ieee80211_vif
*vif
;
2219 unsigned long flags
;
2220 u32 base
, status
= 0xffffffff;
2223 IWL_DEBUG_MAC80211(priv
, "enter\n");
2224 mutex_lock(&priv
->shrd
->mutex
);
2226 iwl_write32(bus(priv
), CSR_UCODE_DRV_GP1_CLR
,
2227 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE
);
2229 base
= priv
->device_pointers
.error_event_table
;
2230 if (iwlagn_hw_valid_rtc_data_addr(base
)) {
2231 spin_lock_irqsave(&bus(priv
)->reg_lock
, flags
);
2232 ret
= iwl_grab_nic_access_silent(bus(priv
));
2234 iwl_write32(bus(priv
), HBUS_TARG_MEM_RADDR
, base
);
2235 status
= iwl_read32(bus(priv
), HBUS_TARG_MEM_RDAT
);
2236 iwl_release_nic_access(bus(priv
));
2238 spin_unlock_irqrestore(&bus(priv
)->reg_lock
, flags
);
2240 #ifdef CONFIG_IWLWIFI_DEBUGFS
2242 if (!priv
->wowlan_sram
)
2244 kzalloc(priv
->ucode_wowlan
.data
.len
,
2247 if (priv
->wowlan_sram
)
2248 _iwl_read_targ_mem_words(
2249 bus(priv
), 0x800000, priv
->wowlan_sram
,
2250 priv
->ucode_wowlan
.data
.len
/ 4);
2255 /* we'll clear ctx->vif during iwlagn_prepare_restart() */
2258 priv
->shrd
->wowlan
= false;
2260 device_set_wakeup_enable(bus(priv
)->dev
, false);
2262 iwlagn_prepare_restart(priv
);
2264 memset((void *)&ctx
->active
, 0, sizeof(ctx
->active
));
2265 iwl_connection_init_rx_config(priv
, ctx
);
2266 iwlagn_set_rxon_chain(priv
, ctx
);
2268 mutex_unlock(&priv
->shrd
->mutex
);
2269 IWL_DEBUG_MAC80211(priv
, "leave\n");
2271 ieee80211_resume_disconnect(vif
);
2277 static void iwlagn_mac_tx(struct ieee80211_hw
*hw
, struct sk_buff
*skb
)
2279 struct iwl_priv
*priv
= hw
->priv
;
2281 IWL_DEBUG_MACDUMP(priv
, "enter\n");
2283 IWL_DEBUG_TX(priv
, "dev->xmit(%d bytes) at rate 0x%02x\n", skb
->len
,
2284 ieee80211_get_tx_rate(hw
, IEEE80211_SKB_CB(skb
))->bitrate
);
2286 if (iwlagn_tx_skb(priv
, skb
))
2287 dev_kfree_skb_any(skb
);
2289 IWL_DEBUG_MACDUMP(priv
, "leave\n");
2292 static void iwlagn_mac_update_tkip_key(struct ieee80211_hw
*hw
,
2293 struct ieee80211_vif
*vif
,
2294 struct ieee80211_key_conf
*keyconf
,
2295 struct ieee80211_sta
*sta
,
2296 u32 iv32
, u16
*phase1key
)
2298 struct iwl_priv
*priv
= hw
->priv
;
2300 iwl_update_tkip_key(priv
, vif
, keyconf
, sta
, iv32
, phase1key
);
2303 static int iwlagn_mac_set_key(struct ieee80211_hw
*hw
, enum set_key_cmd cmd
,
2304 struct ieee80211_vif
*vif
,
2305 struct ieee80211_sta
*sta
,
2306 struct ieee80211_key_conf
*key
)
2308 struct iwl_priv
*priv
= hw
->priv
;
2309 struct iwl_vif_priv
*vif_priv
= (void *)vif
->drv_priv
;
2310 struct iwl_rxon_context
*ctx
= vif_priv
->ctx
;
2312 bool is_default_wep_key
= false;
2314 IWL_DEBUG_MAC80211(priv
, "enter\n");
2316 if (iwlagn_mod_params
.sw_crypto
) {
2317 IWL_DEBUG_MAC80211(priv
, "leave - hwcrypto disabled\n");
2322 * We could program these keys into the hardware as well, but we
2323 * don't expect much multicast traffic in IBSS and having keys
2324 * for more stations is probably more useful.
2326 * Mark key TX-only and return 0.
2328 if (vif
->type
== NL80211_IFTYPE_ADHOC
&&
2329 !(key
->flags
& IEEE80211_KEY_FLAG_PAIRWISE
)) {
2330 key
->hw_key_idx
= WEP_INVALID_OFFSET
;
2334 /* If they key was TX-only, accept deletion */
2335 if (cmd
== DISABLE_KEY
&& key
->hw_key_idx
== WEP_INVALID_OFFSET
)
2338 mutex_lock(&priv
->shrd
->mutex
);
2339 iwl_scan_cancel_timeout(priv
, 100);
2341 BUILD_BUG_ON(WEP_INVALID_OFFSET
== IWLAGN_HW_KEY_DEFAULT
);
2344 * If we are getting WEP group key and we didn't receive any key mapping
2345 * so far, we are in legacy wep mode (group key only), otherwise we are
2347 * In legacy wep mode, we use another host command to the uCode.
2349 if ((key
->cipher
== WLAN_CIPHER_SUITE_WEP40
||
2350 key
->cipher
== WLAN_CIPHER_SUITE_WEP104
) && !sta
) {
2352 is_default_wep_key
= !ctx
->key_mapping_keys
;
2354 is_default_wep_key
=
2355 key
->hw_key_idx
== IWLAGN_HW_KEY_DEFAULT
;
2361 if (is_default_wep_key
) {
2362 ret
= iwl_set_default_wep_key(priv
, vif_priv
->ctx
, key
);
2365 ret
= iwl_set_dynamic_key(priv
, vif_priv
->ctx
, key
, sta
);
2368 * can't add key for RX, but we don't need it
2369 * in the device for TX so still return 0
2372 key
->hw_key_idx
= WEP_INVALID_OFFSET
;
2375 IWL_DEBUG_MAC80211(priv
, "enable hwcrypto key\n");
2378 if (is_default_wep_key
)
2379 ret
= iwl_remove_default_wep_key(priv
, ctx
, key
);
2381 ret
= iwl_remove_dynamic_key(priv
, ctx
, key
, sta
);
2383 IWL_DEBUG_MAC80211(priv
, "disable hwcrypto key\n");
2389 mutex_unlock(&priv
->shrd
->mutex
);
2390 IWL_DEBUG_MAC80211(priv
, "leave\n");
2395 static int iwlagn_mac_ampdu_action(struct ieee80211_hw
*hw
,
2396 struct ieee80211_vif
*vif
,
2397 enum ieee80211_ampdu_mlme_action action
,
2398 struct ieee80211_sta
*sta
, u16 tid
, u16
*ssn
,
2401 struct iwl_priv
*priv
= hw
->priv
;
2403 struct iwl_station_priv
*sta_priv
= (void *) sta
->drv_priv
;
2404 struct iwl_rxon_context
*ctx
= iwl_rxon_ctx_from_vif(vif
);
2406 IWL_DEBUG_HT(priv
, "A-MPDU action on addr %pM tid %d\n",
2409 if (!(priv
->cfg
->sku
& EEPROM_SKU_CAP_11N_ENABLE
))
2412 IWL_DEBUG_MAC80211(priv
, "enter\n");
2413 mutex_lock(&priv
->shrd
->mutex
);
2416 case IEEE80211_AMPDU_RX_START
:
2417 IWL_DEBUG_HT(priv
, "start Rx\n");
2418 ret
= iwl_sta_rx_agg_start(priv
, sta
, tid
, *ssn
);
2420 case IEEE80211_AMPDU_RX_STOP
:
2421 IWL_DEBUG_HT(priv
, "stop Rx\n");
2422 ret
= iwl_sta_rx_agg_stop(priv
, sta
, tid
);
2423 if (test_bit(STATUS_EXIT_PENDING
, &priv
->shrd
->status
))
2426 case IEEE80211_AMPDU_TX_START
:
2427 IWL_DEBUG_HT(priv
, "start Tx\n");
2428 ret
= iwlagn_tx_agg_start(priv
, vif
, sta
, tid
, ssn
);
2430 case IEEE80211_AMPDU_TX_STOP
:
2431 IWL_DEBUG_HT(priv
, "stop Tx\n");
2432 ret
= iwlagn_tx_agg_stop(priv
, vif
, sta
, tid
);
2433 if ((ret
== 0) && (priv
->agg_tids_count
> 0)) {
2434 priv
->agg_tids_count
--;
2435 IWL_DEBUG_HT(priv
, "priv->agg_tids_count = %u\n",
2436 priv
->agg_tids_count
);
2438 if (test_bit(STATUS_EXIT_PENDING
, &priv
->shrd
->status
))
2440 if (!priv
->agg_tids_count
&& priv
->cfg
->ht_params
&&
2441 priv
->cfg
->ht_params
->use_rts_for_aggregation
) {
2443 * switch off RTS/CTS if it was previously enabled
2445 sta_priv
->lq_sta
.lq
.general_params
.flags
&=
2446 ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK
;
2447 iwl_send_lq_cmd(priv
, iwl_rxon_ctx_from_vif(vif
),
2448 &sta_priv
->lq_sta
.lq
, CMD_ASYNC
, false);
2451 case IEEE80211_AMPDU_TX_OPERATIONAL
:
2452 buf_size
= min_t(int, buf_size
, LINK_QUAL_AGG_FRAME_LIMIT_DEF
);
2454 iwl_trans_tx_agg_setup(trans(priv
), ctx
->ctxid
, iwl_sta_id(sta
),
2458 * If the limit is 0, then it wasn't initialised yet,
2459 * use the default. We can do that since we take the
2460 * minimum below, and we don't want to go above our
2461 * default due to hardware restrictions.
2463 if (sta_priv
->max_agg_bufsize
== 0)
2464 sta_priv
->max_agg_bufsize
=
2465 LINK_QUAL_AGG_FRAME_LIMIT_DEF
;
2468 * Even though in theory the peer could have different
2469 * aggregation reorder buffer sizes for different sessions,
2470 * our ucode doesn't allow for that and has a global limit
2471 * for each station. Therefore, use the minimum of all the
2472 * aggregation sessions and our default value.
2474 sta_priv
->max_agg_bufsize
=
2475 min(sta_priv
->max_agg_bufsize
, buf_size
);
2477 if (priv
->cfg
->ht_params
&&
2478 priv
->cfg
->ht_params
->use_rts_for_aggregation
) {
2480 * switch to RTS/CTS if it is the prefer protection
2481 * method for HT traffic
2484 sta_priv
->lq_sta
.lq
.general_params
.flags
|=
2485 LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK
;
2487 priv
->agg_tids_count
++;
2488 IWL_DEBUG_HT(priv
, "priv->agg_tids_count = %u\n",
2489 priv
->agg_tids_count
);
2491 sta_priv
->lq_sta
.lq
.agg_params
.agg_frame_cnt_limit
=
2492 sta_priv
->max_agg_bufsize
;
2494 iwl_send_lq_cmd(priv
, iwl_rxon_ctx_from_vif(vif
),
2495 &sta_priv
->lq_sta
.lq
, CMD_ASYNC
, false);
2497 IWL_INFO(priv
, "Tx aggregation enabled on ra = %pM tid = %d\n",
2502 mutex_unlock(&priv
->shrd
->mutex
);
2503 IWL_DEBUG_MAC80211(priv
, "leave\n");
2507 static int iwlagn_mac_sta_add(struct ieee80211_hw
*hw
,
2508 struct ieee80211_vif
*vif
,
2509 struct ieee80211_sta
*sta
)
2511 struct iwl_priv
*priv
= hw
->priv
;
2512 struct iwl_station_priv
*sta_priv
= (void *)sta
->drv_priv
;
2513 struct iwl_vif_priv
*vif_priv
= (void *)vif
->drv_priv
;
2514 bool is_ap
= vif
->type
== NL80211_IFTYPE_STATION
;
2518 IWL_DEBUG_MAC80211(priv
, "received request to add station %pM\n",
2520 mutex_lock(&priv
->shrd
->mutex
);
2521 IWL_DEBUG_INFO(priv
, "proceeding to add station %pM\n",
2523 sta_priv
->sta_id
= IWL_INVALID_STATION
;
2525 atomic_set(&sta_priv
->pending_frames
, 0);
2526 if (vif
->type
== NL80211_IFTYPE_AP
)
2527 sta_priv
->client
= true;
2529 ret
= iwl_add_station_common(priv
, vif_priv
->ctx
, sta
->addr
,
2530 is_ap
, sta
, &sta_id
);
2532 IWL_ERR(priv
, "Unable to add station %pM (%d)\n",
2534 /* Should we return success if return code is EEXIST ? */
2538 sta_priv
->sta_id
= sta_id
;
2540 /* Initialize rate scaling */
2541 IWL_DEBUG_INFO(priv
, "Initializing rate scaling for station %pM\n",
2543 iwl_rs_rate_init(priv
, sta
, sta_id
);
2545 mutex_unlock(&priv
->shrd
->mutex
);
2546 IWL_DEBUG_MAC80211(priv
, "leave\n");
2551 static void iwlagn_mac_channel_switch(struct ieee80211_hw
*hw
,
2552 struct ieee80211_channel_switch
*ch_switch
)
2554 struct iwl_priv
*priv
= hw
->priv
;
2555 const struct iwl_channel_info
*ch_info
;
2556 struct ieee80211_conf
*conf
= &hw
->conf
;
2557 struct ieee80211_channel
*channel
= ch_switch
->channel
;
2558 struct iwl_ht_config
*ht_conf
= &priv
->current_ht_config
;
2561 * When we add support for multiple interfaces, we need to
2562 * revisit this. The channel switch command in the device
2563 * only affects the BSS context, but what does that really
2564 * mean? And what if we get a CSA on the second interface?
2565 * This needs a lot of work.
2567 struct iwl_rxon_context
*ctx
= &priv
->contexts
[IWL_RXON_CTX_BSS
];
2570 IWL_DEBUG_MAC80211(priv
, "enter\n");
2572 mutex_lock(&priv
->shrd
->mutex
);
2574 if (iwl_is_rfkill(priv
->shrd
))
2577 if (test_bit(STATUS_EXIT_PENDING
, &priv
->shrd
->status
) ||
2578 test_bit(STATUS_SCANNING
, &priv
->shrd
->status
) ||
2579 test_bit(STATUS_CHANNEL_SWITCH_PENDING
, &priv
->shrd
->status
))
2582 if (!iwl_is_associated_ctx(ctx
))
2585 if (!priv
->cfg
->lib
->set_channel_switch
)
2588 ch
= channel
->hw_value
;
2589 if (le16_to_cpu(ctx
->active
.channel
) == ch
)
2592 ch_info
= iwl_get_channel_info(priv
, channel
->band
, ch
);
2593 if (!is_channel_valid(ch_info
)) {
2594 IWL_DEBUG_MAC80211(priv
, "invalid channel\n");
2598 spin_lock_irq(&priv
->shrd
->lock
);
2600 priv
->current_ht_config
.smps
= conf
->smps_mode
;
2602 /* Configure HT40 channels */
2603 ctx
->ht
.enabled
= conf_is_ht(conf
);
2604 if (ctx
->ht
.enabled
) {
2605 if (conf_is_ht40_minus(conf
)) {
2606 ctx
->ht
.extension_chan_offset
=
2607 IEEE80211_HT_PARAM_CHA_SEC_BELOW
;
2608 ctx
->ht
.is_40mhz
= true;
2609 } else if (conf_is_ht40_plus(conf
)) {
2610 ctx
->ht
.extension_chan_offset
=
2611 IEEE80211_HT_PARAM_CHA_SEC_ABOVE
;
2612 ctx
->ht
.is_40mhz
= true;
2614 ctx
->ht
.extension_chan_offset
=
2615 IEEE80211_HT_PARAM_CHA_SEC_NONE
;
2616 ctx
->ht
.is_40mhz
= false;
2619 ctx
->ht
.is_40mhz
= false;
2621 if ((le16_to_cpu(ctx
->staging
.channel
) != ch
))
2622 ctx
->staging
.flags
= 0;
2624 iwl_set_rxon_channel(priv
, channel
, ctx
);
2625 iwl_set_rxon_ht(priv
, ht_conf
);
2626 iwl_set_flags_for_band(priv
, ctx
, channel
->band
, ctx
->vif
);
2628 spin_unlock_irq(&priv
->shrd
->lock
);
2632 * at this point, staging_rxon has the
2633 * configuration for channel switch
2635 set_bit(STATUS_CHANNEL_SWITCH_PENDING
, &priv
->shrd
->status
);
2636 priv
->switch_channel
= cpu_to_le16(ch
);
2637 if (priv
->cfg
->lib
->set_channel_switch(priv
, ch_switch
)) {
2638 clear_bit(STATUS_CHANNEL_SWITCH_PENDING
, &priv
->shrd
->status
);
2639 priv
->switch_channel
= 0;
2640 ieee80211_chswitch_done(ctx
->vif
, false);
2644 mutex_unlock(&priv
->shrd
->mutex
);
2645 IWL_DEBUG_MAC80211(priv
, "leave\n");
2648 static void iwlagn_configure_filter(struct ieee80211_hw
*hw
,
2649 unsigned int changed_flags
,
2650 unsigned int *total_flags
,
2653 struct iwl_priv
*priv
= hw
->priv
;
2654 __le32 filter_or
= 0, filter_nand
= 0;
2655 struct iwl_rxon_context
*ctx
;
2657 #define CHK(test, flag) do { \
2658 if (*total_flags & (test)) \
2659 filter_or |= (flag); \
2661 filter_nand |= (flag); \
2664 IWL_DEBUG_MAC80211(priv
, "Enter: changed: 0x%x, total: 0x%x\n",
2665 changed_flags
, *total_flags
);
2667 CHK(FIF_OTHER_BSS
| FIF_PROMISC_IN_BSS
, RXON_FILTER_PROMISC_MSK
);
2668 /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
2669 CHK(FIF_CONTROL
, RXON_FILTER_CTL2HOST_MSK
| RXON_FILTER_PROMISC_MSK
);
2670 CHK(FIF_BCN_PRBRESP_PROMISC
, RXON_FILTER_BCON_AWARE_MSK
);
2674 mutex_lock(&priv
->shrd
->mutex
);
2676 for_each_context(priv
, ctx
) {
2677 ctx
->staging
.filter_flags
&= ~filter_nand
;
2678 ctx
->staging
.filter_flags
|= filter_or
;
2681 * Not committing directly because hardware can perform a scan,
2682 * but we'll eventually commit the filter flags change anyway.
2686 mutex_unlock(&priv
->shrd
->mutex
);
2689 * Receiving all multicast frames is always enabled by the
2690 * default flags setup in iwl_connection_init_rx_config()
2691 * since we currently do not support programming multicast
2692 * filters into the device.
2694 *total_flags
&= FIF_OTHER_BSS
| FIF_ALLMULTI
| FIF_PROMISC_IN_BSS
|
2695 FIF_BCN_PRBRESP_PROMISC
| FIF_CONTROL
;
2698 static void iwlagn_mac_flush(struct ieee80211_hw
*hw
, bool drop
)
2700 struct iwl_priv
*priv
= hw
->priv
;
2702 mutex_lock(&priv
->shrd
->mutex
);
2703 IWL_DEBUG_MAC80211(priv
, "enter\n");
2705 if (test_bit(STATUS_EXIT_PENDING
, &priv
->shrd
->status
)) {
2706 IWL_DEBUG_TX(priv
, "Aborting flush due to device shutdown\n");
2709 if (iwl_is_rfkill(priv
->shrd
)) {
2710 IWL_DEBUG_TX(priv
, "Aborting flush due to RF Kill\n");
2715 * mac80211 will not push any more frames for transmit
2716 * until the flush is completed
2719 IWL_DEBUG_MAC80211(priv
, "send flush command\n");
2720 if (iwlagn_txfifo_flush(priv
, IWL_DROP_ALL
)) {
2721 IWL_ERR(priv
, "flush request fail\n");
2725 IWL_DEBUG_MAC80211(priv
, "wait transmit/flush all frames\n");
2726 iwl_trans_wait_tx_queue_empty(trans(priv
));
2728 mutex_unlock(&priv
->shrd
->mutex
);
2729 IWL_DEBUG_MAC80211(priv
, "leave\n");
2732 void iwlagn_disable_roc(struct iwl_priv
*priv
)
2734 struct iwl_rxon_context
*ctx
= &priv
->contexts
[IWL_RXON_CTX_PAN
];
2736 lockdep_assert_held(&priv
->shrd
->mutex
);
2738 if (!priv
->hw_roc_setup
)
2741 ctx
->staging
.dev_type
= RXON_DEV_TYPE_P2P
;
2742 ctx
->staging
.filter_flags
&= ~RXON_FILTER_ASSOC_MSK
;
2744 priv
->hw_roc_channel
= NULL
;
2746 memset(ctx
->staging
.node_addr
, 0, ETH_ALEN
);
2748 iwlagn_commit_rxon(priv
, ctx
);
2750 ctx
->is_active
= false;
2751 priv
->hw_roc_setup
= false;
2754 static void iwlagn_disable_roc_work(struct work_struct
*work
)
2756 struct iwl_priv
*priv
= container_of(work
, struct iwl_priv
,
2757 hw_roc_disable_work
.work
);
2759 mutex_lock(&priv
->shrd
->mutex
);
2760 iwlagn_disable_roc(priv
);
2761 mutex_unlock(&priv
->shrd
->mutex
);
2764 static int iwlagn_mac_remain_on_channel(struct ieee80211_hw
*hw
,
2765 struct ieee80211_channel
*channel
,
2766 enum nl80211_channel_type channel_type
,
2769 struct iwl_priv
*priv
= hw
->priv
;
2770 struct iwl_rxon_context
*ctx
= &priv
->contexts
[IWL_RXON_CTX_PAN
];
2773 if (!(priv
->shrd
->valid_contexts
& BIT(IWL_RXON_CTX_PAN
)))
2776 if (!(ctx
->interface_modes
& BIT(NL80211_IFTYPE_P2P_CLIENT
)))
2779 IWL_DEBUG_MAC80211(priv
, "enter\n");
2780 mutex_lock(&priv
->shrd
->mutex
);
2782 if (test_bit(STATUS_SCAN_HW
, &priv
->shrd
->status
)) {
2787 priv
->hw_roc_channel
= channel
;
2788 priv
->hw_roc_chantype
= channel_type
;
2789 priv
->hw_roc_duration
= duration
;
2790 priv
->hw_roc_start_notified
= false;
2791 cancel_delayed_work(&priv
->hw_roc_disable_work
);
2793 if (!ctx
->is_active
) {
2794 ctx
->is_active
= true;
2795 ctx
->staging
.dev_type
= RXON_DEV_TYPE_P2P
;
2796 memcpy(ctx
->staging
.node_addr
,
2797 priv
->contexts
[IWL_RXON_CTX_BSS
].staging
.node_addr
,
2799 memcpy(ctx
->staging
.bssid_addr
,
2800 priv
->contexts
[IWL_RXON_CTX_BSS
].staging
.node_addr
,
2802 err
= iwlagn_commit_rxon(priv
, ctx
);
2805 ctx
->staging
.filter_flags
|= RXON_FILTER_ASSOC_MSK
|
2806 RXON_FILTER_PROMISC_MSK
|
2807 RXON_FILTER_CTL2HOST_MSK
;
2809 err
= iwlagn_commit_rxon(priv
, ctx
);
2811 iwlagn_disable_roc(priv
);
2814 priv
->hw_roc_setup
= true;
2817 err
= iwl_scan_initiate(priv
, ctx
->vif
, IWL_SCAN_ROC
, channel
->band
);
2819 iwlagn_disable_roc(priv
);
2822 mutex_unlock(&priv
->shrd
->mutex
);
2823 IWL_DEBUG_MAC80211(priv
, "leave\n");
2828 static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw
*hw
)
2830 struct iwl_priv
*priv
= hw
->priv
;
2832 if (!(priv
->shrd
->valid_contexts
& BIT(IWL_RXON_CTX_PAN
)))
2835 IWL_DEBUG_MAC80211(priv
, "enter\n");
2836 mutex_lock(&priv
->shrd
->mutex
);
2837 iwl_scan_cancel_timeout(priv
, priv
->hw_roc_duration
);
2838 iwlagn_disable_roc(priv
);
2839 mutex_unlock(&priv
->shrd
->mutex
);
2840 IWL_DEBUG_MAC80211(priv
, "leave\n");
2845 static int iwlagn_mac_tx_sync(struct ieee80211_hw
*hw
,
2846 struct ieee80211_vif
*vif
,
2848 enum ieee80211_tx_sync_type type
)
2850 struct iwl_priv
*priv
= hw
->priv
;
2851 struct iwl_vif_priv
*vif_priv
= (void *)vif
->drv_priv
;
2852 struct iwl_rxon_context
*ctx
= vif_priv
->ctx
;
2856 IWL_DEBUG_MAC80211(priv
, "enter\n");
2857 mutex_lock(&priv
->shrd
->mutex
);
2859 if (iwl_is_associated_ctx(ctx
)) {
2864 if (ctx
->preauth_bssid
|| test_bit(STATUS_SCAN_HW
, &priv
->shrd
->status
)) {
2869 ret
= iwl_add_station_common(priv
, ctx
, bssid
, true, NULL
, &sta_id
);
2873 if (WARN_ON(sta_id
!= ctx
->ap_sta_id
)) {
2875 goto out_remove_sta
;
2878 memcpy(ctx
->bssid
, bssid
, ETH_ALEN
);
2879 ctx
->preauth_bssid
= true;
2881 ret
= iwlagn_commit_rxon(priv
, ctx
);
2887 iwl_remove_station(priv
, sta_id
, bssid
);
2889 mutex_unlock(&priv
->shrd
->mutex
);
2890 IWL_DEBUG_MAC80211(priv
, "leave\n");
2895 static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw
*hw
,
2896 struct ieee80211_vif
*vif
,
2898 enum ieee80211_tx_sync_type type
)
2900 struct iwl_priv
*priv
= hw
->priv
;
2901 struct iwl_vif_priv
*vif_priv
= (void *)vif
->drv_priv
;
2902 struct iwl_rxon_context
*ctx
= vif_priv
->ctx
;
2904 IWL_DEBUG_MAC80211(priv
, "enter\n");
2905 mutex_lock(&priv
->shrd
->mutex
);
2907 if (iwl_is_associated_ctx(ctx
))
2910 iwl_remove_station(priv
, ctx
->ap_sta_id
, bssid
);
2911 ctx
->preauth_bssid
= false;
2912 /* no need to commit */
2914 mutex_unlock(&priv
->shrd
->mutex
);
2915 IWL_DEBUG_MAC80211(priv
, "leave\n");
2918 /*****************************************************************************
2920 * driver setup and teardown
2922 *****************************************************************************/
2924 static void iwl_setup_deferred_work(struct iwl_priv
*priv
)
2926 priv
->shrd
->workqueue
= create_singlethread_workqueue(DRV_NAME
);
2928 init_waitqueue_head(&priv
->shrd
->wait_command_queue
);
2930 INIT_WORK(&priv
->restart
, iwl_bg_restart
);
2931 INIT_WORK(&priv
->beacon_update
, iwl_bg_beacon_update
);
2932 INIT_WORK(&priv
->run_time_calib_work
, iwl_bg_run_time_calib_work
);
2933 INIT_WORK(&priv
->tx_flush
, iwl_bg_tx_flush
);
2934 INIT_WORK(&priv
->bt_full_concurrency
, iwl_bg_bt_full_concurrency
);
2935 INIT_WORK(&priv
->bt_runtime_config
, iwl_bg_bt_runtime_config
);
2936 INIT_DELAYED_WORK(&priv
->hw_roc_disable_work
,
2937 iwlagn_disable_roc_work
);
2939 iwl_setup_scan_deferred_work(priv
);
2941 if (priv
->cfg
->lib
->bt_setup_deferred_work
)
2942 priv
->cfg
->lib
->bt_setup_deferred_work(priv
);
2944 init_timer(&priv
->statistics_periodic
);
2945 priv
->statistics_periodic
.data
= (unsigned long)priv
;
2946 priv
->statistics_periodic
.function
= iwl_bg_statistics_periodic
;
2948 init_timer(&priv
->ucode_trace
);
2949 priv
->ucode_trace
.data
= (unsigned long)priv
;
2950 priv
->ucode_trace
.function
= iwl_bg_ucode_trace
;
2952 init_timer(&priv
->watchdog
);
2953 priv
->watchdog
.data
= (unsigned long)priv
;
2954 priv
->watchdog
.function
= iwl_bg_watchdog
;
2957 static void iwl_cancel_deferred_work(struct iwl_priv
*priv
)
2959 if (priv
->cfg
->lib
->cancel_deferred_work
)
2960 priv
->cfg
->lib
->cancel_deferred_work(priv
);
2962 cancel_work_sync(&priv
->run_time_calib_work
);
2963 cancel_work_sync(&priv
->beacon_update
);
2965 iwl_cancel_scan_deferred_work(priv
);
2967 cancel_work_sync(&priv
->bt_full_concurrency
);
2968 cancel_work_sync(&priv
->bt_runtime_config
);
2969 cancel_delayed_work_sync(&priv
->hw_roc_disable_work
);
2971 del_timer_sync(&priv
->statistics_periodic
);
2972 del_timer_sync(&priv
->ucode_trace
);
2975 static void iwl_init_hw_rates(struct iwl_priv
*priv
,
2976 struct ieee80211_rate
*rates
)
2980 for (i
= 0; i
< IWL_RATE_COUNT_LEGACY
; i
++) {
2981 rates
[i
].bitrate
= iwl_rates
[i
].ieee
* 5;
2982 rates
[i
].hw_value
= i
; /* Rate scaling will work on indexes */
2983 rates
[i
].hw_value_short
= i
;
2985 if ((i
>= IWL_FIRST_CCK_RATE
) && (i
<= IWL_LAST_CCK_RATE
)) {
2987 * If CCK != 1M then set short preamble rate flag.
2990 (iwl_rates
[i
].plcp
== IWL_RATE_1M_PLCP
) ?
2991 0 : IEEE80211_RATE_SHORT_PREAMBLE
;
2996 static int iwl_init_drv(struct iwl_priv
*priv
)
3000 spin_lock_init(&priv
->shrd
->sta_lock
);
3002 mutex_init(&priv
->shrd
->mutex
);
3004 priv
->ieee_channels
= NULL
;
3005 priv
->ieee_rates
= NULL
;
3006 priv
->band
= IEEE80211_BAND_2GHZ
;
3008 priv
->iw_mode
= NL80211_IFTYPE_STATION
;
3009 priv
->current_ht_config
.smps
= IEEE80211_SMPS_STATIC
;
3010 priv
->missed_beacon_threshold
= IWL_MISSED_BEACON_THRESHOLD_DEF
;
3011 priv
->agg_tids_count
= 0;
3013 /* initialize force reset */
3014 priv
->force_reset
[IWL_RF_RESET
].reset_duration
=
3015 IWL_DELAY_NEXT_FORCE_RF_RESET
;
3016 priv
->force_reset
[IWL_FW_RESET
].reset_duration
=
3017 IWL_DELAY_NEXT_FORCE_FW_RELOAD
;
3019 priv
->rx_statistics_jiffies
= jiffies
;
3021 /* Choose which receivers/antennas to use */
3022 iwlagn_set_rxon_chain(priv
, &priv
->contexts
[IWL_RXON_CTX_BSS
]);
3024 iwl_init_scan_params(priv
);
3027 if (priv
->cfg
->bt_params
&&
3028 priv
->cfg
->bt_params
->advanced_bt_coexist
) {
3029 priv
->kill_ack_mask
= IWLAGN_BT_KILL_ACK_MASK_DEFAULT
;
3030 priv
->kill_cts_mask
= IWLAGN_BT_KILL_CTS_MASK_DEFAULT
;
3031 priv
->bt_valid
= IWLAGN_BT_ALL_VALID_MSK
;
3032 priv
->bt_on_thresh
= BT_ON_THRESHOLD_DEF
;
3033 priv
->bt_duration
= BT_DURATION_LIMIT_DEF
;
3034 priv
->dynamic_frag_thresh
= BT_FRAG_THRESHOLD_DEF
;
3037 ret
= iwl_init_channel_map(priv
);
3039 IWL_ERR(priv
, "initializing regulatory failed: %d\n", ret
);
3043 ret
= iwl_init_geos(priv
);
3045 IWL_ERR(priv
, "initializing geos failed: %d\n", ret
);
3046 goto err_free_channel_map
;
3048 iwl_init_hw_rates(priv
, priv
->ieee_rates
);
3052 err_free_channel_map
:
3053 iwl_free_channel_map(priv
);
3058 static void iwl_uninit_drv(struct iwl_priv
*priv
)
3060 iwl_calib_free_results(priv
);
3061 iwl_free_geos(priv
);
3062 iwl_free_channel_map(priv
);
3063 if (priv
->tx_cmd_pool
)
3064 kmem_cache_destroy(priv
->tx_cmd_pool
);
3065 kfree(priv
->scan_cmd
);
3066 kfree(priv
->beacon_cmd
);
3067 #ifdef CONFIG_IWLWIFI_DEBUGFS
3068 kfree(priv
->wowlan_sram
);
3072 static void iwlagn_mac_rssi_callback(struct ieee80211_hw
*hw
,
3073 enum ieee80211_rssi_event rssi_event
)
3075 struct iwl_priv
*priv
= hw
->priv
;
3077 IWL_DEBUG_MAC80211(priv
, "enter\n");
3078 mutex_lock(&priv
->shrd
->mutex
);
3080 if (priv
->cfg
->bt_params
&&
3081 priv
->cfg
->bt_params
->advanced_bt_coexist
) {
3082 if (rssi_event
== RSSI_EVENT_LOW
)
3083 priv
->bt_enable_pspoll
= true;
3084 else if (rssi_event
== RSSI_EVENT_HIGH
)
3085 priv
->bt_enable_pspoll
= false;
3087 iwlagn_send_advance_bt_config(priv
);
3089 IWL_DEBUG_MAC80211(priv
, "Advanced BT coex disabled,"
3090 "ignoring RSSI callback\n");
3093 mutex_unlock(&priv
->shrd
->mutex
);
3094 IWL_DEBUG_MAC80211(priv
, "leave\n");
3097 static int iwlagn_mac_set_tim(struct ieee80211_hw
*hw
,
3098 struct ieee80211_sta
*sta
, bool set
)
3100 struct iwl_priv
*priv
= hw
->priv
;
3102 queue_work(priv
->shrd
->workqueue
, &priv
->beacon_update
);
3107 struct ieee80211_ops iwlagn_hw_ops
= {
3108 .tx
= iwlagn_mac_tx
,
3109 .start
= iwlagn_mac_start
,
3110 .stop
= iwlagn_mac_stop
,
3111 #ifdef CONFIG_PM_SLEEP
3112 .suspend
= iwlagn_mac_suspend
,
3113 .resume
= iwlagn_mac_resume
,
3115 .add_interface
= iwlagn_mac_add_interface
,
3116 .remove_interface
= iwlagn_mac_remove_interface
,
3117 .change_interface
= iwlagn_mac_change_interface
,
3118 .config
= iwlagn_mac_config
,
3119 .configure_filter
= iwlagn_configure_filter
,
3120 .set_key
= iwlagn_mac_set_key
,
3121 .update_tkip_key
= iwlagn_mac_update_tkip_key
,
3122 .set_rekey_data
= iwlagn_mac_set_rekey_data
,
3123 .conf_tx
= iwlagn_mac_conf_tx
,
3124 .bss_info_changed
= iwlagn_bss_info_changed
,
3125 .ampdu_action
= iwlagn_mac_ampdu_action
,
3126 .hw_scan
= iwlagn_mac_hw_scan
,
3127 .sta_notify
= iwlagn_mac_sta_notify
,
3128 .sta_add
= iwlagn_mac_sta_add
,
3129 .sta_remove
= iwlagn_mac_sta_remove
,
3130 .channel_switch
= iwlagn_mac_channel_switch
,
3131 .flush
= iwlagn_mac_flush
,
3132 .tx_last_beacon
= iwlagn_mac_tx_last_beacon
,
3133 .remain_on_channel
= iwlagn_mac_remain_on_channel
,
3134 .cancel_remain_on_channel
= iwlagn_mac_cancel_remain_on_channel
,
3135 .rssi_callback
= iwlagn_mac_rssi_callback
,
3136 CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd
)
3137 CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump
)
3138 .tx_sync
= iwlagn_mac_tx_sync
,
3139 .finish_tx_sync
= iwlagn_mac_finish_tx_sync
,
3140 .set_tim
= iwlagn_mac_set_tim
,
3143 static u32
iwl_hw_detect(struct iwl_priv
*priv
)
3145 return iwl_read32(bus(priv
), CSR_HW_REV
);
3148 /* Size of one Rx buffer in host DRAM */
3149 #define IWL_RX_BUF_SIZE_4K (4 * 1024)
3150 #define IWL_RX_BUF_SIZE_8K (8 * 1024)
3152 static int iwl_set_hw_params(struct iwl_priv
*priv
)
3154 if (iwlagn_mod_params
.amsdu_size_8K
)
3155 hw_params(priv
).rx_page_order
=
3156 get_order(IWL_RX_BUF_SIZE_8K
);
3158 hw_params(priv
).rx_page_order
=
3159 get_order(IWL_RX_BUF_SIZE_4K
);
3161 if (iwlagn_mod_params
.disable_11n
)
3162 priv
->cfg
->sku
&= ~EEPROM_SKU_CAP_11N_ENABLE
;
3164 hw_params(priv
).num_ampdu_queues
=
3165 priv
->cfg
->base_params
->num_of_ampdu_queues
;
3166 hw_params(priv
).shadow_reg_enable
=
3167 priv
->cfg
->base_params
->shadow_reg_enable
;
3168 hw_params(priv
).sku
= priv
->cfg
->sku
;
3169 hw_params(priv
).wd_timeout
= priv
->cfg
->base_params
->wd_timeout
;
3171 /* Device-specific setup */
3172 return priv
->cfg
->lib
->set_hw_params(priv
);
3175 /* This function both allocates and initializes hw and priv. */
3176 static struct ieee80211_hw
*iwl_alloc_all(struct iwl_cfg
*cfg
)
3178 struct iwl_priv
*priv
;
3179 /* mac80211 allocates memory for this device instance, including
3180 * space for this driver's private structure */
3181 struct ieee80211_hw
*hw
;
3183 hw
= ieee80211_alloc_hw(sizeof(struct iwl_priv
), &iwlagn_hw_ops
);
3185 pr_err("%s: Can not allocate network device\n",
3197 int iwl_probe(struct iwl_bus
*bus
, const struct iwl_trans_ops
*trans_ops
,
3198 struct iwl_cfg
*cfg
)
3201 struct iwl_priv
*priv
;
3202 struct ieee80211_hw
*hw
;
3206 /************************
3207 * 1. Allocating HW data
3208 ************************/
3209 hw
= iwl_alloc_all(cfg
);
3216 priv
->shrd
= &priv
->_shrd
;
3217 bus
->shrd
= priv
->shrd
;
3218 priv
->shrd
->bus
= bus
;
3219 priv
->shrd
->priv
= priv
;
3221 priv
->shrd
->trans
= trans_ops
->alloc(priv
->shrd
);
3222 if (priv
->shrd
->trans
== NULL
) {
3224 goto out_free_traffic_mem
;
3227 /* At this point both hw and priv are allocated. */
3229 SET_IEEE80211_DEV(hw
, bus(priv
)->dev
);
3231 IWL_DEBUG_INFO(priv
, "*** LOAD DRIVER ***\n");
3234 /* is antenna coupling more than 35dB ? */
3235 priv
->bt_ant_couple_ok
=
3236 (iwlagn_mod_params
.ant_coupling
>
3237 IWL_BT_ANTENNA_COUPLING_THRESHOLD
) ?
3240 /* enable/disable bt channel inhibition */
3241 priv
->bt_ch_announce
= iwlagn_mod_params
.bt_ch_announce
;
3242 IWL_DEBUG_INFO(priv
, "BT channel inhibition is %s\n",
3243 (priv
->bt_ch_announce
) ? "On" : "Off");
3245 if (iwl_alloc_traffic_mem(priv
))
3246 IWL_ERR(priv
, "Not enough memory to generate traffic log\n");
3248 /* these spin locks will be used in apm_ops.init and EEPROM access
3249 * we should init now
3251 spin_lock_init(&bus(priv
)->reg_lock
);
3252 spin_lock_init(&priv
->shrd
->lock
);
3255 * stop and reset the on-board processor just in case it is in a
3256 * strange state ... like being left stranded by a primary kernel
3257 * and this is now the kdump kernel trying to start up
3259 iwl_write32(bus(priv
), CSR_RESET
, CSR_RESET_REG_FLAG_NEVO_RESET
);
3261 /***********************
3262 * 3. Read REV register
3263 ***********************/
3264 hw_rev
= iwl_hw_detect(priv
);
3265 IWL_INFO(priv
, "Detected %s, REV=0x%X\n",
3266 priv
->cfg
->name
, hw_rev
);
3268 err
= iwl_trans_request_irq(trans(priv
));
3270 goto out_free_trans
;
3272 if (iwl_trans_prepare_card_hw(trans(priv
))) {
3274 IWL_WARN(priv
, "Failed, HW not ready\n");
3275 goto out_free_trans
;
3281 /* Read the EEPROM */
3282 err
= iwl_eeprom_init(priv
, hw_rev
);
3284 IWL_ERR(priv
, "Unable to init EEPROM\n");
3285 goto out_free_trans
;
3287 err
= iwl_eeprom_check_version(priv
);
3289 goto out_free_eeprom
;
3291 err
= iwl_eeprom_check_sku(priv
);
3293 goto out_free_eeprom
;
3295 /* extract MAC Address */
3296 iwl_eeprom_get_mac(priv
, priv
->addresses
[0].addr
);
3297 IWL_DEBUG_INFO(priv
, "MAC address: %pM\n", priv
->addresses
[0].addr
);
3298 priv
->hw
->wiphy
->addresses
= priv
->addresses
;
3299 priv
->hw
->wiphy
->n_addresses
= 1;
3300 num_mac
= iwl_eeprom_query16(priv
, EEPROM_NUM_MAC_ADDRESS
);
3302 memcpy(priv
->addresses
[1].addr
, priv
->addresses
[0].addr
,
3304 priv
->addresses
[1].addr
[5]++;
3305 priv
->hw
->wiphy
->n_addresses
++;
3308 /************************
3309 * 5. Setup HW constants
3310 ************************/
3311 if (iwl_set_hw_params(priv
)) {
3313 IWL_ERR(priv
, "failed to set hw parameters\n");
3314 goto out_free_eeprom
;
3317 /*******************
3319 *******************/
3321 err
= iwl_init_drv(priv
);
3323 goto out_free_eeprom
;
3324 /* At this point both hw and priv are initialized. */
3326 /********************
3328 ********************/
3329 iwl_setup_deferred_work(priv
);
3330 iwl_setup_rx_handlers(priv
);
3331 iwl_testmode_init(priv
);
3333 /*********************************************
3334 * 8. Enable interrupts
3335 *********************************************/
3337 iwl_enable_rfkill_int(priv
);
3339 /* If platform's RF_KILL switch is NOT set to KILL */
3340 if (iwl_read32(bus(priv
),
3341 CSR_GP_CNTRL
) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW
)
3342 clear_bit(STATUS_RF_KILL_HW
, &priv
->shrd
->status
);
3344 set_bit(STATUS_RF_KILL_HW
, &priv
->shrd
->status
);
3346 wiphy_rfkill_set_hw_state(priv
->hw
->wiphy
,
3347 test_bit(STATUS_RF_KILL_HW
, &priv
->shrd
->status
));
3349 iwl_power_initialize(priv
);
3350 iwl_tt_initialize(priv
);
3352 init_completion(&priv
->firmware_loading_complete
);
3354 err
= iwl_request_firmware(priv
, true);
3356 goto out_destroy_workqueue
;
3360 out_destroy_workqueue
:
3361 destroy_workqueue(priv
->shrd
->workqueue
);
3362 priv
->shrd
->workqueue
= NULL
;
3363 iwl_uninit_drv(priv
);
3365 iwl_eeprom_free(priv
);
3367 iwl_trans_free(trans(priv
));
3368 out_free_traffic_mem
:
3369 iwl_free_traffic_mem(priv
);
3370 ieee80211_free_hw(priv
->hw
);
3375 void __devexit
iwl_remove(struct iwl_priv
* priv
)
3377 wait_for_completion(&priv
->firmware_loading_complete
);
3379 IWL_DEBUG_INFO(priv
, "*** UNLOAD DRIVER ***\n");
3381 iwl_dbgfs_unregister(priv
);
3383 /* ieee80211_unregister_hw call wil cause iwlagn_mac_stop to
3384 * to be called and iwl_down since we are removing the device
3385 * we need to set STATUS_EXIT_PENDING bit.
3387 set_bit(STATUS_EXIT_PENDING
, &priv
->shrd
->status
);
3389 iwl_testmode_cleanup(priv
);
3390 iwl_leds_exit(priv
);
3392 if (priv
->mac80211_registered
) {
3393 ieee80211_unregister_hw(priv
->hw
);
3394 priv
->mac80211_registered
= 0;
3399 /*This will stop the queues, move the device to low power state */
3400 iwl_trans_stop_device(trans(priv
));
3402 iwl_dealloc_ucode(priv
);
3404 iwl_eeprom_free(priv
);
3406 /*netif_stop_queue(dev); */
3407 flush_workqueue(priv
->shrd
->workqueue
);
3409 /* ieee80211_unregister_hw calls iwlagn_mac_stop, which flushes
3410 * priv->shrd->workqueue... so we can't take down the workqueue
3412 destroy_workqueue(priv
->shrd
->workqueue
);
3413 priv
->shrd
->workqueue
= NULL
;
3414 iwl_free_traffic_mem(priv
);
3416 iwl_trans_free(trans(priv
));
3418 iwl_uninit_drv(priv
);
3420 dev_kfree_skb(priv
->beacon_skb
);
3422 ieee80211_free_hw(priv
->hw
);
3426 /*****************************************************************************
3428 * driver and module entry point
3430 *****************************************************************************/
3431 static int __init
iwl_init(void)
3435 pr_info(DRV_DESCRIPTION
", " DRV_VERSION
"\n");
3436 pr_info(DRV_COPYRIGHT
"\n");
3438 ret
= iwlagn_rate_control_register();
3440 pr_err("Unable to register rate control algorithm: %d\n", ret
);
3444 ret
= iwl_pci_register_driver();
3447 goto error_register
;
3451 iwlagn_rate_control_unregister();
3455 static void __exit
iwl_exit(void)
3457 iwl_pci_unregister_driver();
3458 iwlagn_rate_control_unregister();
module_exit(iwl_exit);
module_init(iwl_init);
3464 #ifdef CONFIG_IWLWIFI_DEBUG
3465 module_param_named(debug
, iwlagn_mod_params
.debug_level
, uint
,
3467 MODULE_PARM_DESC(debug
, "debug output mask");
3470 module_param_named(swcrypto
, iwlagn_mod_params
.sw_crypto
, int, S_IRUGO
);
3471 MODULE_PARM_DESC(swcrypto
, "using crypto in software (default 0 [hardware])");
3472 module_param_named(queues_num
, iwlagn_mod_params
.num_of_queues
, int, S_IRUGO
);
3473 MODULE_PARM_DESC(queues_num
, "number of hw queues.");
3474 module_param_named(11n_disable
, iwlagn_mod_params
.disable_11n
, int, S_IRUGO
);
3475 MODULE_PARM_DESC(11n_disable
, "disable 11n functionality");
3476 module_param_named(amsdu_size_8K
, iwlagn_mod_params
.amsdu_size_8K
,
3478 MODULE_PARM_DESC(amsdu_size_8K
, "enable 8K amsdu size");
3479 module_param_named(fw_restart
, iwlagn_mod_params
.restart_fw
, int, S_IRUGO
);
3480 MODULE_PARM_DESC(fw_restart
, "restart firmware in case of error");
3482 module_param_named(ucode_alternative
,
3483 iwlagn_mod_params
.wanted_ucode_alternative
,
3485 MODULE_PARM_DESC(ucode_alternative
,
3486 "specify ucode alternative to use from ucode file");
3488 module_param_named(antenna_coupling
, iwlagn_mod_params
.ant_coupling
,
3490 MODULE_PARM_DESC(antenna_coupling
,
3491 "specify antenna coupling in dB (defualt: 0 dB)");
3493 module_param_named(bt_ch_inhibition
, iwlagn_mod_params
.bt_ch_announce
,
3495 MODULE_PARM_DESC(bt_ch_inhibition
,
3496 "Enable BT channel inhibition (default: enable)");
3498 module_param_named(plcp_check
, iwlagn_mod_params
.plcp_check
, bool, S_IRUGO
);
3499 MODULE_PARM_DESC(plcp_check
, "Check plcp health (default: 1 [enabled])");
3501 module_param_named(ack_check
, iwlagn_mod_params
.ack_check
, bool, S_IRUGO
);
3502 MODULE_PARM_DESC(ack_check
, "Check ack health (default: 0 [disabled])");
3504 module_param_named(wd_disable
, iwlagn_mod_params
.wd_disable
, bool, S_IRUGO
);
3505 MODULE_PARM_DESC(wd_disable
,
3506 "Disable stuck queue watchdog timer (default: 0 [enabled])");
3509 * set bt_coex_active to true, uCode will do kill/defer
3510 * every time the priority line is asserted (BT is sending signals on the
3511 * priority line in the PCIx).
3512 * set bt_coex_active to false, uCode will ignore the BT activity and
3513 * perform the normal operation
3515 * User might experience transmit issue on some platform due to WiFi/BT
3516 * co-exist problem. The possible behaviors are:
3517 * Able to scan and finding all the available AP
3518 * Not able to associate with any AP
3519 * On those platforms, WiFi communication can be restored by set
3520 * "bt_coex_active" module parameter to "false"
3522 * default: bt_coex_active = true (BT_COEX_ENABLE)
3524 module_param_named(bt_coex_active
, iwlagn_mod_params
.bt_coex_active
,
3526 MODULE_PARM_DESC(bt_coex_active
, "enable wifi/bt co-exist (default: enable)");
3528 module_param_named(led_mode
, iwlagn_mod_params
.led_mode
, int, S_IRUGO
);
3529 MODULE_PARM_DESC(led_mode
, "0=system default, "
3530 "1=On(RF On)/Off(RF Off), 2=blinking (default: 0)");
3532 module_param_named(power_save
, iwlagn_mod_params
.power_save
,
3534 MODULE_PARM_DESC(power_save
,
3535 "enable WiFi power management (default: disable)");
3537 module_param_named(power_level
, iwlagn_mod_params
.power_level
,
3539 MODULE_PARM_DESC(power_level
,
3540 "default power save level (range from 1 - 5, default: 1)");
3542 module_param_named(auto_agg
, iwlagn_mod_params
.auto_agg
,
3544 MODULE_PARM_DESC(auto_agg
,
3545 "enable agg w/o check traffic load (default: enable)");
3548 * For now, keep using power level 1 instead of automatically
3551 module_param_named(no_sleep_autoadjust
, iwlagn_mod_params
.no_sleep_autoadjust
,
3553 MODULE_PARM_DESC(no_sleep_autoadjust
,
3554 "don't automatically adjust sleep level "
3555 "according to maximum network latency (default: true)");