iwlagn: move the tx allocation funcs to the transport layer
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
1 /******************************************************************************
3 * GPL LICENSE SUMMARY
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28 *****************************************************************************/
29 #include <linux/etherdevice.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/init.h>
33 #include <linux/sched.h>
35 #include "iwl-dev.h"
36 #include "iwl-core.h"
37 #include "iwl-io.h"
38 #include "iwl-helpers.h"
39 #include "iwl-agn-hw.h"
40 #include "iwl-agn.h"
41 #include "iwl-sta.h"
43 static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
45 return le32_to_cpup((__le32 *)&tx_resp->status +
46 tx_resp->frame_count) & MAX_SN;
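/*
 * Layout note (an assumption read off the expression above, not spelled
 * out here): each per-frame agg_tx_status entry occupies one 32-bit word,
 * so casting &tx_resp->status to __le32 * and stepping frame_count entries
 * forward lands on the scheduler SSN word, which is then masked with
 * MAX_SN.
 */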
49 static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
51 status &= TX_STATUS_MSK;
53 switch (status) {
54 case TX_STATUS_POSTPONE_DELAY:
55 priv->_agn.reply_tx_stats.pp_delay++;
56 break;
57 case TX_STATUS_POSTPONE_FEW_BYTES:
58 priv->_agn.reply_tx_stats.pp_few_bytes++;
59 break;
60 case TX_STATUS_POSTPONE_BT_PRIO:
61 priv->_agn.reply_tx_stats.pp_bt_prio++;
62 break;
63 case TX_STATUS_POSTPONE_QUIET_PERIOD:
64 priv->_agn.reply_tx_stats.pp_quiet_period++;
65 break;
66 case TX_STATUS_POSTPONE_CALC_TTAK:
67 priv->_agn.reply_tx_stats.pp_calc_ttak++;
68 break;
69 case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
70 priv->_agn.reply_tx_stats.int_crossed_retry++;
71 break;
72 case TX_STATUS_FAIL_SHORT_LIMIT:
73 priv->_agn.reply_tx_stats.short_limit++;
74 break;
75 case TX_STATUS_FAIL_LONG_LIMIT:
76 priv->_agn.reply_tx_stats.long_limit++;
77 break;
78 case TX_STATUS_FAIL_FIFO_UNDERRUN:
79 priv->_agn.reply_tx_stats.fifo_underrun++;
80 break;
81 case TX_STATUS_FAIL_DRAIN_FLOW:
82 priv->_agn.reply_tx_stats.drain_flow++;
83 break;
84 case TX_STATUS_FAIL_RFKILL_FLUSH:
85 priv->_agn.reply_tx_stats.rfkill_flush++;
86 break;
87 case TX_STATUS_FAIL_LIFE_EXPIRE:
88 priv->_agn.reply_tx_stats.life_expire++;
89 break;
90 case TX_STATUS_FAIL_DEST_PS:
91 priv->_agn.reply_tx_stats.dest_ps++;
92 break;
93 case TX_STATUS_FAIL_HOST_ABORTED:
94 priv->_agn.reply_tx_stats.host_abort++;
95 break;
96 case TX_STATUS_FAIL_BT_RETRY:
97 priv->_agn.reply_tx_stats.bt_retry++;
98 break;
99 case TX_STATUS_FAIL_STA_INVALID:
100 priv->_agn.reply_tx_stats.sta_invalid++;
101 break;
102 case TX_STATUS_FAIL_FRAG_DROPPED:
103 priv->_agn.reply_tx_stats.frag_drop++;
104 break;
105 case TX_STATUS_FAIL_TID_DISABLE:
106 priv->_agn.reply_tx_stats.tid_disable++;
107 break;
108 case TX_STATUS_FAIL_FIFO_FLUSHED:
109 priv->_agn.reply_tx_stats.fifo_flush++;
110 break;
111 case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
112 priv->_agn.reply_tx_stats.insuff_cf_poll++;
113 break;
114 case TX_STATUS_FAIL_PASSIVE_NO_RX:
115 priv->_agn.reply_tx_stats.fail_hw_drop++;
116 break;
117 case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
118 priv->_agn.reply_tx_stats.sta_color_mismatch++;
119 break;
120 default:
121 priv->_agn.reply_tx_stats.unknown++;
122 break;
126 static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
128 status &= AGG_TX_STATUS_MSK;
130 switch (status) {
131 case AGG_TX_STATE_UNDERRUN_MSK:
132 priv->_agn.reply_agg_tx_stats.underrun++;
133 break;
134 case AGG_TX_STATE_BT_PRIO_MSK:
135 priv->_agn.reply_agg_tx_stats.bt_prio++;
136 break;
137 case AGG_TX_STATE_FEW_BYTES_MSK:
138 priv->_agn.reply_agg_tx_stats.few_bytes++;
139 break;
140 case AGG_TX_STATE_ABORT_MSK:
141 priv->_agn.reply_agg_tx_stats.abort++;
142 break;
143 case AGG_TX_STATE_LAST_SENT_TTL_MSK:
144 priv->_agn.reply_agg_tx_stats.last_sent_ttl++;
145 break;
146 case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
147 priv->_agn.reply_agg_tx_stats.last_sent_try++;
148 break;
149 case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
150 priv->_agn.reply_agg_tx_stats.last_sent_bt_kill++;
151 break;
152 case AGG_TX_STATE_SCD_QUERY_MSK:
153 priv->_agn.reply_agg_tx_stats.scd_query++;
154 break;
155 case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
156 priv->_agn.reply_agg_tx_stats.bad_crc32++;
157 break;
158 case AGG_TX_STATE_RESPONSE_MSK:
159 priv->_agn.reply_agg_tx_stats.response++;
160 break;
161 case AGG_TX_STATE_DUMP_TX_MSK:
162 priv->_agn.reply_agg_tx_stats.dump_tx++;
163 break;
164 case AGG_TX_STATE_DELAY_TX_MSK:
165 priv->_agn.reply_agg_tx_stats.delay_tx++;
166 break;
167 default:
168 priv->_agn.reply_agg_tx_stats.unknown++;
169 break;
173 static void iwlagn_set_tx_status(struct iwl_priv *priv,
174 struct ieee80211_tx_info *info,
175 struct iwl_rxon_context *ctx,
176 struct iwlagn_tx_resp *tx_resp,
177 int txq_id, bool is_agg)
179 u16 status = le16_to_cpu(tx_resp->status.status);
181 info->status.rates[0].count = tx_resp->failure_frame + 1;
182 if (is_agg)
183 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
184 info->flags |= iwl_tx_status_to_mac80211(status);
185 iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
186 info);
187 if (!iwl_is_tx_success(status))
188 iwlagn_count_tx_err_status(priv, status);
190 if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
191 iwl_is_associated_ctx(ctx) && ctx->vif &&
192 ctx->vif->type == NL80211_IFTYPE_STATION) {
193 ctx->last_tx_rejected = true;
194 iwl_stop_queue(priv, &priv->txq[txq_id]);
197 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
198 "0x%x retries %d\n",
199 txq_id,
200 iwl_get_tx_fail_reason(status), status,
201 le32_to_cpu(tx_resp->rate_n_flags),
202 tx_resp->failure_frame);
205 #ifdef CONFIG_IWLWIFI_DEBUG
206 #define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x
208 const char *iwl_get_agg_tx_fail_reason(u16 status)
210 status &= AGG_TX_STATUS_MSK;
211 switch (status) {
212 case AGG_TX_STATE_TRANSMITTED:
213 return "SUCCESS";
214 AGG_TX_STATE_FAIL(UNDERRUN_MSK);
215 AGG_TX_STATE_FAIL(BT_PRIO_MSK);
216 AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
217 AGG_TX_STATE_FAIL(ABORT_MSK);
218 AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
219 AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
220 AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
221 AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
222 AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
223 AGG_TX_STATE_FAIL(RESPONSE_MSK);
224 AGG_TX_STATE_FAIL(DUMP_TX_MSK);
225 AGG_TX_STATE_FAIL(DELAY_TX_MSK);
228 return "UNKNOWN";
230 #endif /* CONFIG_IWLWIFI_DEBUG */
232 static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
233 struct iwl_ht_agg *agg,
234 struct iwlagn_tx_resp *tx_resp,
235 int txq_id, u16 start_idx)
237 u16 status;
238 struct agg_tx_status *frame_status = &tx_resp->status;
239 struct ieee80211_hdr *hdr = NULL;
240 int i, sh, idx;
241 u16 seq;
243 if (agg->wait_for_ba)
244 IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
246 agg->frame_count = tx_resp->frame_count;
247 agg->start_idx = start_idx;
248 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
249 agg->bitmap = 0;
251 /* # frames attempted by Tx command */
252 if (agg->frame_count == 1) {
253 struct iwl_tx_info *txb;
255 /* Only one frame was attempted; no block-ack will arrive */
256 idx = start_idx;
258 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
259 agg->frame_count, agg->start_idx, idx);
260 txb = &priv->txq[txq_id].txb[idx];
261 iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(txb->skb),
262 txb->ctx, tx_resp, txq_id, true);
263 agg->wait_for_ba = 0;
264 } else {
265 /* Two or more frames were attempted; expect block-ack */
266 u64 bitmap = 0;
269 * Start is the lowest frame sent. It may not be the first
270 * frame in the batch; we figure this out dynamically during
271 * the following loop.
273 int start = agg->start_idx;
275 /* Construct bit-map of pending frames within Tx window */
276 for (i = 0; i < agg->frame_count; i++) {
277 u16 sc;
278 status = le16_to_cpu(frame_status[i].status);
279 seq = le16_to_cpu(frame_status[i].sequence);
280 idx = SEQ_TO_INDEX(seq);
281 txq_id = SEQ_TO_QUEUE(seq);
283 if (status & AGG_TX_STATUS_MSK)
284 iwlagn_count_agg_tx_err_status(priv, status);
286 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
287 AGG_TX_STATE_ABORT_MSK))
288 continue;
290 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
291 agg->frame_count, txq_id, idx);
292 IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
293 "try-count (0x%08x)\n",
294 iwl_get_agg_tx_fail_reason(status),
295 status & AGG_TX_STATUS_MSK,
296 status & AGG_TX_TRY_MSK);
298 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
299 if (!hdr) {
300 IWL_ERR(priv,
301 "BUG_ON idx doesn't point to valid skb"
302 " idx=%d, txq_id=%d\n", idx, txq_id);
303 return -1;
306 sc = le16_to_cpu(hdr->seq_ctrl);
307 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
308 IWL_ERR(priv,
309 "BUG_ON idx doesn't match seq control"
310 " idx=%d, seq_idx=%d, seq=%d\n",
311 idx, SEQ_TO_SN(sc),
312 hdr->seq_ctrl);
313 return -1;
316 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
317 i, idx, SEQ_TO_SN(sc));
320 * sh -> how many frames ahead of the starting frame is
321 * the current one?
323 * Note that all frames sent in the batch must be in a
324 * 64-frame window, so this number should be in [0,63].
325 * If outside of this window, then we've found a new
326 * "first" frame in the batch and need to change start.
328 sh = idx - start;
331 * If >= 64, out of window. start must be at the front
332 * of the circular buffer, idx must be near the end of
333 * the buffer, and idx is the new "first" frame. Shift
334 * the indices around.
336 if (sh >= 64) {
337 /* Shift bitmap by start - idx, wrapped */
338 sh = 0x100 - idx + start;
339 bitmap = bitmap << sh;
340 /* Now idx is the new start so sh = 0 */
341 sh = 0;
342 start = idx;
344 * If <= -64 then wraps the 256-pkt circular buffer
345 * (e.g., start = 255 and idx = 0, sh should be 1)
347 } else if (sh <= -64) {
348 sh = 0x100 - start + idx;
350 * If < 0 but > -64, out of window. idx is before start
351 * but not wrapped. Shift the indices around.
353 } else if (sh < 0) {
354 /* Shift by how far start is ahead of idx */
355 sh = start - idx;
356 bitmap = bitmap << sh;
357 /* Now idx is the new start so sh = 0 */
358 start = idx;
359 sh = 0;
361 /* Sequence number start + sh was sent in this batch */
362 bitmap |= 1ULL << sh;
363 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
364 start, (unsigned long long)bitmap);
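/*
 * Worked example of the shifting above (illustrative indices): with
 * start = 62 and idx = 3 on the 256-entry ring, sh = -59 falls in
 * (-64, 0), so the bitmap is shifted left by start - idx = 59 and idx
 * becomes the new start; a later frame at idx = 64 then gives sh = 61
 * and simply sets bit 61. All frames of one aggregate fit in a
 * 64-frame window, so the u64 bitmap suffices.
 */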
368 * Store the bitmap and possibly the new start, if we wrapped
369 * the buffer above
371 agg->bitmap = bitmap;
372 agg->start_idx = start;
373 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
374 agg->frame_count, agg->start_idx,
375 (unsigned long long)agg->bitmap);
377 if (bitmap)
378 agg->wait_for_ba = 1;
380 return 0;
383 void iwl_check_abort_status(struct iwl_priv *priv,
384 u8 frame_count, u32 status)
386 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
387 IWL_ERR(priv, "Tx flush command to flush out all frames\n");
388 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
389 queue_work(priv->workqueue, &priv->tx_flush);
393 static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
394 struct iwl_rx_mem_buffer *rxb)
396 struct iwl_rx_packet *pkt = rxb_addr(rxb);
397 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
398 int txq_id = SEQ_TO_QUEUE(sequence);
399 int index = SEQ_TO_INDEX(sequence);
400 struct iwl_tx_queue *txq = &priv->txq[txq_id];
401 struct ieee80211_tx_info *info;
402 struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
403 struct iwl_tx_info *txb;
404 u32 status = le16_to_cpu(tx_resp->status.status);
405 int tid;
406 int sta_id;
407 int freed;
408 unsigned long flags;
410 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
411 IWL_ERR(priv, "%s: Read index for DMA queue txq_id (%d) "
412 "index %d is out of range [0-%d] %d %d\n", __func__,
413 txq_id, index, txq->q.n_bd, txq->q.write_ptr,
414 txq->q.read_ptr);
415 return;
418 txq->time_stamp = jiffies;
419 txb = &txq->txb[txq->q.read_ptr];
420 info = IEEE80211_SKB_CB(txb->skb);
421 memset(&info->status, 0, sizeof(info->status));
423 tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
424 IWLAGN_TX_RES_TID_POS;
425 sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
426 IWLAGN_TX_RES_RA_POS;
428 spin_lock_irqsave(&priv->sta_lock, flags);
429 if (txq->sched_retry) {
430 const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
431 struct iwl_ht_agg *agg;
433 agg = &priv->stations[sta_id].tid[tid].agg;
435 * If the BT kill count is non-zero, we'll get this
436 * notification again.
438 if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
439 priv->cfg->bt_params &&
440 priv->cfg->bt_params->advanced_bt_coexist) {
441 IWL_DEBUG_COEX(priv, "receive reply tx with bt_kill\n");
443 iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
445 /* check if BAR is needed */
446 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
447 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
449 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
450 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
451 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
452 "scd_ssn=%d idx=%d txq=%d swq=%d\n",
453 scd_ssn , index, txq_id, txq->swq_id);
455 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
456 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
458 if (priv->mac80211_registered &&
459 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
460 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
461 iwl_wake_queue(priv, txq);
463 } else {
464 iwlagn_set_tx_status(priv, info, txb->ctx, tx_resp,
465 txq_id, false);
466 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
467 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
469 if (priv->mac80211_registered &&
470 iwl_queue_space(&txq->q) > txq->q.low_mark &&
471 status != TX_STATUS_FAIL_PASSIVE_NO_RX)
472 iwl_wake_queue(priv, txq);
475 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
477 iwl_check_abort_status(priv, tx_resp->frame_count, status);
478 spin_unlock_irqrestore(&priv->sta_lock, flags);
481 void iwlagn_rx_handler_setup(struct iwl_priv *priv)
483 /* init calibration handlers */
484 priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
485 iwlagn_rx_calib_result;
486 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
488 /* set up notification wait support */
489 spin_lock_init(&priv->_agn.notif_wait_lock);
490 INIT_LIST_HEAD(&priv->_agn.notif_waits);
491 init_waitqueue_head(&priv->_agn.notif_waitq);
494 void iwlagn_setup_deferred_work(struct iwl_priv *priv)
* nothing needs to be done here anymore;
* kept as a stub for future use if needed
502 int iwlagn_hw_valid_rtc_data_addr(u32 addr)
504 return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
505 (addr < IWLAGN_RTC_DATA_UPPER_BOUND);
508 int iwlagn_send_tx_power(struct iwl_priv *priv)
510 struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
511 u8 tx_ant_cfg_cmd;
513 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
514 "TX Power requested while scanning!\n"))
515 return -EAGAIN;
/* command wants half-dBm units: multiply the dBm user limit by 2 */
518 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
520 if (priv->tx_power_lmt_in_half_dbm &&
521 priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
* For newer devices that use the enhanced/extended tx power
* table in EEPROM, the format is in half dBm. The driver needs to
* convert to dBm before reporting to mac80211, which can lose
* 1/2 dBm of resolution. The driver rounds up before reporting,
* but that can push the tx power 1/2 dBm over the regulatory
* limit. Check here: if "tx_power_user_lmt" is higher than the
* EEPROM value (in half-dBm format), lower the tx power to the
* EEPROM limit.
533 tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
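/*
 * Illustrative arithmetic for the clamping above: a user limit of
 * 15 dBm becomes global_lmt = 30 (half-dBm units); if the EEPROM
 * enhanced-txpower limit were, say, 29 half-dBm (14.5 dBm), the check
 * lowers global_lmt to 29 so the rounded-up value cannot exceed the
 * regulatory limit.
 */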
535 tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
536 tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;
538 if (IWL_UCODE_API(priv->ucode_ver) == 1)
539 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
540 else
541 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
543 return iwl_send_cmd_pdu(priv, tx_ant_cfg_cmd, sizeof(tx_power_cmd),
544 &tx_power_cmd);
547 void iwlagn_temperature(struct iwl_priv *priv)
549 /* store temperature from correct statistics (in Celsius) */
550 priv->temperature = le32_to_cpu(priv->statistics.common.temperature);
551 iwl_tt_handler(priv);
554 u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
556 struct iwl_eeprom_calib_hdr {
557 u8 version;
558 u8 pa_type;
559 u16 voltage;
560 } *hdr;
562 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
563 EEPROM_CALIB_ALL);
564 return hdr->version;
569 * EEPROM
571 static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
573 u16 offset = 0;
575 if ((address & INDIRECT_ADDRESS) == 0)
576 return address;
578 switch (address & INDIRECT_TYPE_MSK) {
579 case INDIRECT_HOST:
580 offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
581 break;
582 case INDIRECT_GENERAL:
583 offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
584 break;
585 case INDIRECT_REGULATORY:
586 offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
587 break;
588 case INDIRECT_TXP_LIMIT:
589 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
590 break;
591 case INDIRECT_TXP_LIMIT_SIZE:
592 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
593 break;
594 case INDIRECT_CALIBRATION:
595 offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
596 break;
597 case INDIRECT_PROCESS_ADJST:
598 offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
599 break;
600 case INDIRECT_OTHERS:
601 offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
602 break;
603 default:
604 IWL_ERR(priv, "illegal indirect type: 0x%X\n",
605 address & INDIRECT_TYPE_MSK);
606 break;
/* translate the offset from words to bytes */
610 return (address & ADDRESS_MSK) + (offset << 1);
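/*
 * Illustrative example of the indirection above: for an address with
 * INDIRECT_ADDRESS set and type INDIRECT_REGULATORY, the link word at
 * EEPROM_LINK_REGULATORY is read, converted from words to bytes
 * (offset << 1) and added to the ADDRESS_MSK part, so a link value of
 * 0x40 words contributes a 0x80 byte displacement.
 */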
613 const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
614 size_t offset)
616 u32 address = eeprom_indirect_address(priv, offset);
617 BUG_ON(address >= priv->cfg->base_params->eeprom_size);
618 return &priv->eeprom[address];
621 struct iwl_mod_params iwlagn_mod_params = {
622 .amsdu_size_8K = 1,
623 .restart_fw = 1,
624 .plcp_check = true,
625 .bt_coex_active = true,
626 .no_sleep_autoadjust = true,
627 .power_level = IWL_POWER_INDEX_1,
628 /* the rest are 0 by default */
631 int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
633 u32 rb_size;
634 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
635 u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
637 rb_timeout = RX_RB_TIMEOUT;
639 if (iwlagn_mod_params.amsdu_size_8K)
640 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
641 else
642 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
644 /* Stop Rx DMA */
645 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
647 /* Reset driver's Rx queue write index */
648 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
650 /* Tell device where to find RBD circular buffer in DRAM */
651 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
652 (u32)(rxq->bd_dma >> 8));
654 /* Tell device where in DRAM to update its Rx status */
655 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
656 rxq->rb_stts_dma >> 4);
658 /* Enable Rx DMA
659 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
660 * the credit mechanism in 5000 HW RX FIFO
661 * Direct rx interrupts to hosts
662 * Rx buffer size 4 or 8k
663 * RB timeout 0x10
664 * 256 RBDs
666 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
667 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
668 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
669 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
670 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
671 rb_size|
672 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
673 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
675 /* Set interrupt coalescing timer to default (2048 usecs) */
676 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
678 return 0;
681 static void iwlagn_set_pwr_vmain(struct iwl_priv *priv)
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:
 *
 *	if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
 *		iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
 *				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
 *				       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */
693 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
694 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
695 ~APMG_PS_CTRL_MSK_PWR_SRC);
698 int iwlagn_hw_nic_init(struct iwl_priv *priv)
700 unsigned long flags;
701 struct iwl_rx_queue *rxq = &priv->rxq;
703 /* nic_init */
704 spin_lock_irqsave(&priv->lock, flags);
705 priv->cfg->ops->lib->apm_ops.init(priv);
707 /* Set interrupt coalescing calibration timer to default (512 usecs) */
708 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
710 spin_unlock_irqrestore(&priv->lock, flags);
712 iwlagn_set_pwr_vmain(priv);
714 priv->cfg->ops->lib->apm_ops.config(priv);
716 /* Allocate the RX queue, or reset if it is already allocated */
717 priv->trans.ops->rx_init(priv);
719 iwlagn_rx_replenish(priv);
721 iwlagn_rx_init(priv, rxq);
723 spin_lock_irqsave(&priv->lock, flags);
725 rxq->need_update = 1;
726 iwl_rx_queue_update_write_ptr(priv, rxq);
728 spin_unlock_irqrestore(&priv->lock, flags);
730 /* Allocate or reset and init all Tx and Command queues */
731 if (priv->trans.ops->tx_init(priv))
732 return -ENOMEM;
734 if (priv->cfg->base_params->shadow_reg_enable) {
735 /* enable shadow regs in HW */
736 iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
737 0x800FFFFF);
740 set_bit(STATUS_INIT, &priv->status);
742 return 0;
746 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
748 static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
749 dma_addr_t dma_addr)
751 return cpu_to_le32((u32)(dma_addr >> 8));
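/*
 * Note on the shift above: the device addresses receive buffers in
 * 256-byte units (dma_addr >> 8), which is why the allocation path in
 * iwlagn_rx_allocate() checks that each page's DMA address is 256-byte
 * aligned and also that it fits within 36 bits.
 */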
755 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
757 * If there are slots in the RX queue that need to be restocked,
758 * and we have free pre-allocated buffers, fill the ranks as much
759 * as we can, pulling from rx_free.
761 * This moves the 'write' index forward to catch up with 'processed', and
762 * also updates the memory address in the firmware to reference the new
763 * target buffer.
765 void iwlagn_rx_queue_restock(struct iwl_priv *priv)
767 struct iwl_rx_queue *rxq = &priv->rxq;
768 struct list_head *element;
769 struct iwl_rx_mem_buffer *rxb;
770 unsigned long flags;
772 spin_lock_irqsave(&rxq->lock, flags);
773 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
774 /* The overwritten rxb must be a used one */
775 rxb = rxq->queue[rxq->write];
776 BUG_ON(rxb && rxb->page);
778 /* Get next free Rx buffer, remove from free list */
779 element = rxq->rx_free.next;
780 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
781 list_del(element);
783 /* Point to Rx buffer via next RBD in circular buffer */
784 rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
785 rxb->page_dma);
786 rxq->queue[rxq->write] = rxb;
787 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
788 rxq->free_count--;
790 spin_unlock_irqrestore(&rxq->lock, flags);
791 /* If the pre-allocated buffer pool is dropping low, schedule to
792 * refill it */
793 if (rxq->free_count <= RX_LOW_WATERMARK)
794 queue_work(priv->workqueue, &priv->rx_replenish);
797 /* If we've added more space for the firmware to place data, tell it.
798 * Increment device's write pointer in multiples of 8. */
799 if (rxq->write_actual != (rxq->write & ~0x7)) {
800 spin_lock_irqsave(&rxq->lock, flags);
801 rxq->need_update = 1;
802 spin_unlock_irqrestore(&rxq->lock, flags);
803 iwl_rx_queue_update_write_ptr(priv, rxq);
* iwlagn_rx_replenish - Move all used packets from rx_used to rx_free
* When moving to rx_free, a page is allocated for the slot.
* Also restock the Rx queue via iwl_rx_queue_restock.
* This is called as a scheduled work item (except during initialization)
815 void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
817 struct iwl_rx_queue *rxq = &priv->rxq;
818 struct list_head *element;
819 struct iwl_rx_mem_buffer *rxb;
820 struct page *page;
821 unsigned long flags;
822 gfp_t gfp_mask = priority;
824 while (1) {
825 spin_lock_irqsave(&rxq->lock, flags);
826 if (list_empty(&rxq->rx_used)) {
827 spin_unlock_irqrestore(&rxq->lock, flags);
828 return;
830 spin_unlock_irqrestore(&rxq->lock, flags);
832 if (rxq->free_count > RX_LOW_WATERMARK)
833 gfp_mask |= __GFP_NOWARN;
835 if (priv->hw_params.rx_page_order > 0)
836 gfp_mask |= __GFP_COMP;
838 /* Alloc a new receive buffer */
839 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
840 if (!page) {
841 if (net_ratelimit())
842 IWL_DEBUG_INFO(priv, "alloc_pages failed, "
843 "order: %d\n",
844 priv->hw_params.rx_page_order);
846 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
847 net_ratelimit())
848 IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
849 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
850 rxq->free_count);
851 /* We don't reschedule replenish work here -- we will
852 * call the restock method and if it still needs
853 * more buffers it will schedule replenish */
854 return;
857 spin_lock_irqsave(&rxq->lock, flags);
859 if (list_empty(&rxq->rx_used)) {
860 spin_unlock_irqrestore(&rxq->lock, flags);
861 __free_pages(page, priv->hw_params.rx_page_order);
862 return;
864 element = rxq->rx_used.next;
865 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
866 list_del(element);
868 spin_unlock_irqrestore(&rxq->lock, flags);
870 BUG_ON(rxb->page);
871 rxb->page = page;
872 /* Get physical address of the RB */
873 rxb->page_dma = dma_map_page(priv->bus.dev, page, 0,
874 PAGE_SIZE << priv->hw_params.rx_page_order,
875 DMA_FROM_DEVICE);
876 /* dma address must be no more than 36 bits */
877 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
878 /* and also 256 byte aligned! */
879 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
881 spin_lock_irqsave(&rxq->lock, flags);
883 list_add_tail(&rxb->list, &rxq->rx_free);
884 rxq->free_count++;
886 spin_unlock_irqrestore(&rxq->lock, flags);
890 void iwlagn_rx_replenish(struct iwl_priv *priv)
892 unsigned long flags;
894 iwlagn_rx_allocate(priv, GFP_KERNEL);
896 spin_lock_irqsave(&priv->lock, flags);
897 iwlagn_rx_queue_restock(priv);
898 spin_unlock_irqrestore(&priv->lock, flags);
901 void iwlagn_rx_replenish_now(struct iwl_priv *priv)
903 iwlagn_rx_allocate(priv, GFP_ATOMIC);
905 iwlagn_rx_queue_restock(priv);
908 int iwlagn_rxq_stop(struct iwl_priv *priv)
911 /* stop Rx DMA */
912 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
913 iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
914 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
916 return 0;
919 int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
921 int idx = 0;
922 int band_offset = 0;
924 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
925 if (rate_n_flags & RATE_MCS_HT_MSK) {
926 idx = (rate_n_flags & 0xff);
927 return idx;
928 /* Legacy rate format, search for match in table */
929 } else {
930 if (band == IEEE80211_BAND_5GHZ)
931 band_offset = IWL_FIRST_OFDM_RATE;
932 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
933 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
934 return idx - band_offset;
937 return -1;
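/*
 * Illustrative decoding for the lookup above: when RATE_MCS_HT_MSK is
 * set, the low byte of rate_n_flags is already the MCS index mac80211
 * expects (e.g. 0x07 -> MCS 7); otherwise the low byte is a PLCP value
 * matched against iwl_rates[], and on 5 GHz the result is made
 * band-relative by subtracting IWL_FIRST_OFDM_RATE since CCK rates do
 * not exist there.
 */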
940 static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
941 struct ieee80211_vif *vif,
942 enum ieee80211_band band,
943 struct iwl_scan_channel *scan_ch)
945 const struct ieee80211_supported_band *sband;
946 u16 passive_dwell = 0;
947 u16 active_dwell = 0;
948 int added = 0;
949 u16 channel = 0;
951 sband = iwl_get_hw_mode(priv, band);
952 if (!sband) {
953 IWL_ERR(priv, "invalid band\n");
954 return added;
957 active_dwell = iwl_get_active_dwell_time(priv, band, 0);
958 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
960 if (passive_dwell <= active_dwell)
961 passive_dwell = active_dwell + 1;
963 channel = iwl_get_single_channel_number(priv, band);
964 if (channel) {
965 scan_ch->channel = cpu_to_le16(channel);
966 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
967 scan_ch->active_dwell = cpu_to_le16(active_dwell);
968 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
969 /* Set txpower levels to defaults */
970 scan_ch->dsp_atten = 110;
971 if (band == IEEE80211_BAND_5GHZ)
972 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
973 else
974 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
975 added++;
976 } else
977 IWL_ERR(priv, "no valid channel found\n");
978 return added;
981 static int iwl_get_channels_for_scan(struct iwl_priv *priv,
982 struct ieee80211_vif *vif,
983 enum ieee80211_band band,
984 u8 is_active, u8 n_probes,
985 struct iwl_scan_channel *scan_ch)
987 struct ieee80211_channel *chan;
988 const struct ieee80211_supported_band *sband;
989 const struct iwl_channel_info *ch_info;
990 u16 passive_dwell = 0;
991 u16 active_dwell = 0;
992 int added, i;
993 u16 channel;
995 sband = iwl_get_hw_mode(priv, band);
996 if (!sband)
997 return 0;
999 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
1000 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
1002 if (passive_dwell <= active_dwell)
1003 passive_dwell = active_dwell + 1;
1005 for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
1006 chan = priv->scan_request->channels[i];
1008 if (chan->band != band)
1009 continue;
1011 channel = chan->hw_value;
1012 scan_ch->channel = cpu_to_le16(channel);
1014 ch_info = iwl_get_channel_info(priv, band, channel);
1015 if (!is_channel_valid(ch_info)) {
1016 IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
1017 channel);
1018 continue;
1021 if (!is_active || is_channel_passive(ch_info) ||
1022 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
1023 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
1024 else
1025 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
1027 if (n_probes)
1028 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
1030 scan_ch->active_dwell = cpu_to_le16(active_dwell);
1031 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
1033 /* Set txpower levels to defaults */
1034 scan_ch->dsp_atten = 110;
1036 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
1037 * power level:
1038 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
1040 if (band == IEEE80211_BAND_5GHZ)
1041 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
1042 else
1043 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
1045 IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
1046 channel, le32_to_cpu(scan_ch->type),
1047 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
1048 "ACTIVE" : "PASSIVE",
1049 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
1050 active_dwell : passive_dwell);
1052 scan_ch++;
1053 added++;
1056 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
1057 return added;
1060 static int iwl_fill_offch_tx(struct iwl_priv *priv, void *data, size_t maxlen)
1062 struct sk_buff *skb = priv->_agn.offchan_tx_skb;
1064 if (skb->len < maxlen)
1065 maxlen = skb->len;
1067 memcpy(data, skb->data, maxlen);
1069 return maxlen;
1072 int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1074 struct iwl_host_cmd cmd = {
1075 .id = REPLY_SCAN_CMD,
1076 .len = { sizeof(struct iwl_scan_cmd), },
1078 struct iwl_scan_cmd *scan;
1079 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1080 u32 rate_flags = 0;
1081 u16 cmd_len;
1082 u16 rx_chain = 0;
1083 enum ieee80211_band band;
1084 u8 n_probes = 0;
1085 u8 rx_ant = priv->hw_params.valid_rx_ant;
1086 u8 rate;
1087 bool is_active = false;
1088 int chan_mod;
1089 u8 active_chains;
1090 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
1091 int ret;
1093 lockdep_assert_held(&priv->mutex);
1095 if (vif)
1096 ctx = iwl_rxon_ctx_from_vif(vif);
1098 if (!priv->scan_cmd) {
1099 priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
1100 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
1101 if (!priv->scan_cmd) {
1102 IWL_DEBUG_SCAN(priv,
1103 "fail to allocate memory for scan\n");
1104 return -ENOMEM;
1107 scan = priv->scan_cmd;
1108 memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
1110 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
1111 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
1113 if (priv->scan_type != IWL_SCAN_OFFCH_TX &&
1114 iwl_is_any_associated(priv)) {
1115 u16 interval = 0;
1116 u32 extra;
1117 u32 suspend_time = 100;
1118 u32 scan_suspend_time = 100;
1120 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
1121 switch (priv->scan_type) {
1122 case IWL_SCAN_OFFCH_TX:
1123 WARN_ON(1);
1124 break;
1125 case IWL_SCAN_RADIO_RESET:
1126 interval = 0;
1127 break;
1128 case IWL_SCAN_NORMAL:
1129 interval = vif->bss_conf.beacon_int;
1130 break;
1133 scan->suspend_time = 0;
1134 scan->max_out_time = cpu_to_le32(200 * 1024);
1135 if (!interval)
1136 interval = suspend_time;
1138 extra = (suspend_time / interval) << 22;
1139 scan_suspend_time = (extra |
1140 ((suspend_time % interval) * 1024));
1141 scan->suspend_time = cpu_to_le32(scan_suspend_time);
1142 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
1143 scan_suspend_time, interval);
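/*
 * Worked example of the packing above (illustrative numbers): with
 * suspend_time = 100 and a beacon interval of 100, extra = 1 << 22 and
 * the remainder is 0, so scan_suspend_time = 0x400000; with an
 * interval of 300 the quotient is 0 and the remainder of 100 is
 * carried in the low bits as 100 * 1024.
 */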
1144 } else if (priv->scan_type == IWL_SCAN_OFFCH_TX) {
1145 scan->suspend_time = 0;
1146 scan->max_out_time =
1147 cpu_to_le32(1024 * priv->_agn.offchan_tx_timeout);
1150 switch (priv->scan_type) {
1151 case IWL_SCAN_RADIO_RESET:
1152 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
1153 break;
1154 case IWL_SCAN_NORMAL:
1155 if (priv->scan_request->n_ssids) {
1156 int i, p = 0;
1157 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
1158 for (i = 0; i < priv->scan_request->n_ssids; i++) {
1159 /* always does wildcard anyway */
1160 if (!priv->scan_request->ssids[i].ssid_len)
1161 continue;
1162 scan->direct_scan[p].id = WLAN_EID_SSID;
1163 scan->direct_scan[p].len =
1164 priv->scan_request->ssids[i].ssid_len;
1165 memcpy(scan->direct_scan[p].ssid,
1166 priv->scan_request->ssids[i].ssid,
1167 priv->scan_request->ssids[i].ssid_len);
1168 n_probes++;
1169 p++;
1171 is_active = true;
1172 } else
1173 IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
1174 break;
1175 case IWL_SCAN_OFFCH_TX:
1176 IWL_DEBUG_SCAN(priv, "Start offchannel TX scan.\n");
1177 break;
1180 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
1181 scan->tx_cmd.sta_id = ctx->bcast_sta_id;
1182 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
1184 switch (priv->scan_band) {
1185 case IEEE80211_BAND_2GHZ:
1186 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
1187 chan_mod = le32_to_cpu(
1188 priv->contexts[IWL_RXON_CTX_BSS].active.flags &
1189 RXON_FLG_CHANNEL_MODE_MSK)
1190 >> RXON_FLG_CHANNEL_MODE_POS;
1191 if (chan_mod == CHANNEL_MODE_PURE_40) {
1192 rate = IWL_RATE_6M_PLCP;
1193 } else {
1194 rate = IWL_RATE_1M_PLCP;
1195 rate_flags = RATE_MCS_CCK_MSK;
1198 * Internal scans are passive, so we can indiscriminately set
1199 * the BT ignore flag on 2.4 GHz since it applies to TX only.
1201 if (priv->cfg->bt_params &&
1202 priv->cfg->bt_params->advanced_bt_coexist)
1203 scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
1204 break;
1205 case IEEE80211_BAND_5GHZ:
1206 rate = IWL_RATE_6M_PLCP;
1207 break;
1208 default:
1209 IWL_WARN(priv, "Invalid scan band\n");
1210 return -EIO;
1214 * If active scanning is requested but a certain channel is
1215 * marked passive, we can do active scanning if we detect
1216 * transmissions.
1218 * There is an issue with some firmware versions that triggers
1219 * a sysassert on a "good CRC threshold" of zero (== disabled),
1220 * on a radar channel even though this means that we should NOT
1221 * send probes.
1223 * The "good CRC threshold" is the number of frames that we
1224 * need to receive during our dwell time on a channel before
1225 * sending out probes -- setting this to a huge value will
1226 * mean we never reach it, but at the same time work around
1227 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
1228 * here instead of IWL_GOOD_CRC_TH_DISABLED.
1230 * This was fixed in later versions along with some other
1231 * scan changes, and the threshold behaves as a flag in those
1232 * versions.
1234 if (priv->new_scan_threshold_behaviour)
1235 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
1236 IWL_GOOD_CRC_TH_DISABLED;
1237 else
1238 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
1239 IWL_GOOD_CRC_TH_NEVER;
1241 band = priv->scan_band;
1243 if (priv->cfg->scan_rx_antennas[band])
1244 rx_ant = priv->cfg->scan_rx_antennas[band];
1246 if (band == IEEE80211_BAND_2GHZ &&
1247 priv->cfg->bt_params &&
1248 priv->cfg->bt_params->advanced_bt_coexist) {
1249 /* transmit 2.4 GHz probes only on first antenna */
1250 scan_tx_antennas = first_antenna(scan_tx_antennas);
1253 priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
1254 scan_tx_antennas);
1255 rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
1256 scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
1258 /* In power save mode use one chain, otherwise use all chains */
1259 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
1260 /* rx_ant has been set to all valid chains previously */
1261 active_chains = rx_ant &
1262 ((u8)(priv->chain_noise_data.active_chains));
1263 if (!active_chains)
1264 active_chains = rx_ant;
1266 IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
1267 priv->chain_noise_data.active_chains);
1269 rx_ant = first_antenna(active_chains);
1271 if (priv->cfg->bt_params &&
1272 priv->cfg->bt_params->advanced_bt_coexist &&
1273 priv->bt_full_concurrent) {
1274 /* operated as 1x1 in full concurrency mode */
1275 rx_ant = first_antenna(rx_ant);
1278 /* MIMO is not used here, but value is required */
1279 rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
1280 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
1281 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
1282 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
1283 scan->rx_chain = cpu_to_le16(rx_chain);
1284 switch (priv->scan_type) {
1285 case IWL_SCAN_NORMAL:
1286 cmd_len = iwl_fill_probe_req(priv,
1287 (struct ieee80211_mgmt *)scan->data,
1288 vif->addr,
1289 priv->scan_request->ie,
1290 priv->scan_request->ie_len,
1291 IWL_MAX_SCAN_SIZE - sizeof(*scan));
1292 break;
1293 case IWL_SCAN_RADIO_RESET:
1294 /* use bcast addr, will not be transmitted but must be valid */
1295 cmd_len = iwl_fill_probe_req(priv,
1296 (struct ieee80211_mgmt *)scan->data,
1297 iwl_bcast_addr, NULL, 0,
1298 IWL_MAX_SCAN_SIZE - sizeof(*scan));
1299 break;
1300 case IWL_SCAN_OFFCH_TX:
1301 cmd_len = iwl_fill_offch_tx(priv, scan->data,
1302 IWL_MAX_SCAN_SIZE
1303 - sizeof(*scan)
1304 - sizeof(struct iwl_scan_channel));
1305 scan->scan_flags |= IWL_SCAN_FLAGS_ACTION_FRAME_TX;
1306 break;
1307 default:
1308 BUG();
1310 scan->tx_cmd.len = cpu_to_le16(cmd_len);
1312 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
1313 RXON_FILTER_BCON_AWARE_MSK);
1315 switch (priv->scan_type) {
1316 case IWL_SCAN_RADIO_RESET:
1317 scan->channel_count =
1318 iwl_get_single_channel_for_scan(priv, vif, band,
1319 (void *)&scan->data[cmd_len]);
1320 break;
1321 case IWL_SCAN_NORMAL:
1322 scan->channel_count =
1323 iwl_get_channels_for_scan(priv, vif, band,
1324 is_active, n_probes,
1325 (void *)&scan->data[cmd_len]);
1326 break;
1327 case IWL_SCAN_OFFCH_TX: {
1328 struct iwl_scan_channel *scan_ch;
1330 scan->channel_count = 1;
1332 scan_ch = (void *)&scan->data[cmd_len];
1333 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
1334 scan_ch->channel =
1335 cpu_to_le16(priv->_agn.offchan_tx_chan->hw_value);
1336 scan_ch->active_dwell =
1337 cpu_to_le16(priv->_agn.offchan_tx_timeout);
1338 scan_ch->passive_dwell = 0;
1340 /* Set txpower levels to defaults */
1341 scan_ch->dsp_atten = 110;
1343 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
1344 * power level:
1345 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
1347 if (priv->_agn.offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
1348 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
1349 else
1350 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
1352 break;
1355 if (scan->channel_count == 0) {
1356 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
1357 return -EIO;
1360 cmd.len[0] += le16_to_cpu(scan->tx_cmd.len) +
1361 scan->channel_count * sizeof(struct iwl_scan_channel);
1362 cmd.data[0] = scan;
1363 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
1364 scan->len = cpu_to_le16(cmd.len[0]);
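/*
 * Size bookkeeping note: cmd.len[0] started as sizeof(struct
 * iwl_scan_cmd) and now also covers the probe request placed in
 * scan->data plus channel_count scan-channel entries appended after
 * it; the same total is echoed into scan->len for the uCode.
 */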
1366 /* set scan bit here for PAN params */
1367 set_bit(STATUS_SCAN_HW, &priv->status);
1369 if (priv->cfg->ops->hcmd->set_pan_params) {
1370 ret = priv->cfg->ops->hcmd->set_pan_params(priv);
1371 if (ret)
1372 return ret;
1375 ret = iwl_send_cmd_sync(priv, &cmd);
1376 if (ret) {
1377 clear_bit(STATUS_SCAN_HW, &priv->status);
1378 if (priv->cfg->ops->hcmd->set_pan_params)
1379 priv->cfg->ops->hcmd->set_pan_params(priv);
1382 return ret;
1385 int iwlagn_manage_ibss_station(struct iwl_priv *priv,
1386 struct ieee80211_vif *vif, bool add)
1388 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1390 if (add)
1391 return iwlagn_add_bssid_station(priv, vif_priv->ctx,
1392 vif->bss_conf.bssid,
1393 &vif_priv->ibss_bssid_sta_id);
1394 return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
1395 vif->bss_conf.bssid);
1398 void iwl_free_tfds_in_queue(struct iwl_priv *priv,
1399 int sta_id, int tid, int freed)
1401 lockdep_assert_held(&priv->sta_lock);
1403 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1404 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1405 else {
1406 IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
1407 priv->stations[sta_id].tid[tid].tfds_in_queue,
1408 freed);
1409 priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
1413 #define IWL_FLUSH_WAIT_MS 2000
1415 int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv)
1417 struct iwl_tx_queue *txq;
1418 struct iwl_queue *q;
1419 int cnt;
1420 unsigned long now = jiffies;
1421 int ret = 0;
/* waiting for all the tx frames to complete might take a while */
1424 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1425 if (cnt == priv->cmd_queue)
1426 continue;
1427 txq = &priv->txq[cnt];
1428 q = &txq->q;
1429 while (q->read_ptr != q->write_ptr && !time_after(jiffies,
1430 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
1431 msleep(1);
1433 if (q->read_ptr != q->write_ptr) {
1434 IWL_ERR(priv, "fail to flush all tx fifo queues\n");
1435 ret = -ETIMEDOUT;
1436 break;
1439 return ret;
1442 #define IWL_TX_QUEUE_MSK 0xfffff
1445 * iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
1447 * pre-requirements:
1448 * 1. acquire mutex before calling
1449 * 2. make sure rf is on and not in exit state
1451 int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
1453 struct iwl_txfifo_flush_cmd flush_cmd;
1454 struct iwl_host_cmd cmd = {
1455 .id = REPLY_TXFIFO_FLUSH,
1456 .len = { sizeof(struct iwl_txfifo_flush_cmd), },
1457 .flags = CMD_SYNC,
1458 .data = { &flush_cmd, },
1461 might_sleep();
1463 memset(&flush_cmd, 0, sizeof(flush_cmd));
1464 if (flush_control & BIT(IWL_RXON_CTX_BSS))
1465 flush_cmd.fifo_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
1466 IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
1467 IWL_SCD_MGMT_MSK;
1468 if ((flush_control & BIT(IWL_RXON_CTX_PAN)) &&
1469 (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
1470 flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK |
1471 IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK |
1472 IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
1473 IWL_PAN_SCD_MULTICAST_MSK;
1475 if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
1476 flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
1478 IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
1479 flush_cmd.fifo_control);
1480 flush_cmd.flush_control = cpu_to_le16(flush_control);
1482 return iwl_send_cmd(priv, &cmd);
1485 void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
1487 mutex_lock(&priv->mutex);
1488 ieee80211_stop_queues(priv->hw);
1489 if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
1490 IWL_ERR(priv, "flush request fail\n");
1491 goto done;
1493 IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
1494 iwlagn_wait_tx_queue_empty(priv);
1495 done:
1496 ieee80211_wake_queues(priv->hw);
1497 mutex_unlock(&priv->mutex);
1501 * BT coex
1504 * Macros to access the lookup table.
1506 * The lookup table has 7 inputs: bt3_prio, bt3_txrx, bt_rf_act, wifi_req,
1507 * wifi_prio, wifi_txrx and wifi_sh_ant_req.
1509 * It has three outputs: WLAN_ACTIVE, WLAN_KILL and ANT_SWITCH
1511 * The format is that "registers" 8 through 11 contain the WLAN_ACTIVE bits
1512 * one after another in 32-bit registers, and "registers" 0 through 7 contain
1513 * the WLAN_KILL and ANT_SWITCH bits interleaved (in that order).
1515 * These macros encode that format.
1517 #define LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, wifi_req, wifi_prio, \
1518 wifi_txrx, wifi_sh_ant_req) \
1519 (bt3_prio | (bt3_txrx << 1) | (bt_rf_act << 2) | (wifi_req << 3) | \
1520 (wifi_prio << 4) | (wifi_txrx << 5) | (wifi_sh_ant_req << 6))
1522 #define LUT_PTA_WLAN_ACTIVE_OP(lut, op, val) \
1523 lut[8 + ((val) >> 5)] op (cpu_to_le32(BIT((val) & 0x1f)))
1524 #define LUT_TEST_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1525 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1526 (!!(LUT_PTA_WLAN_ACTIVE_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, \
1527 bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
1528 wifi_sh_ant_req))))
1529 #define LUT_SET_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1530 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1531 LUT_PTA_WLAN_ACTIVE_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, \
1532 bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
1533 wifi_sh_ant_req))
1534 #define LUT_CLEAR_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, \
1535 wifi_req, wifi_prio, wifi_txrx, \
1536 wifi_sh_ant_req) \
1537 LUT_PTA_WLAN_ACTIVE_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, \
1538 bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
1539 wifi_sh_ant_req))
1541 #define LUT_WLAN_KILL_OP(lut, op, val) \
1542 lut[(val) >> 4] op (cpu_to_le32(BIT(((val) << 1) & 0x1e)))
1543 #define LUT_TEST_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1544 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1545 (!!(LUT_WLAN_KILL_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1546 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))))
1547 #define LUT_SET_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1548 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1549 LUT_WLAN_KILL_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1550 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
1551 #define LUT_CLEAR_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1552 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1553 LUT_WLAN_KILL_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1554 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
1556 #define LUT_ANT_SWITCH_OP(lut, op, val) \
1557 lut[(val) >> 4] op (cpu_to_le32(BIT((((val) << 1) & 0x1e) + 1)))
1558 #define LUT_TEST_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1559 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1560 (!!(LUT_ANT_SWITCH_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1561 wifi_req, wifi_prio, wifi_txrx, \
1562 wifi_sh_ant_req))))
1563 #define LUT_SET_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1564 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1565 LUT_ANT_SWITCH_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1566 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
1567 #define LUT_CLEAR_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1568 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1569 LUT_ANT_SWITCH_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1570 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
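/*
 * Illustrative indexing, derived from the macros above: LUT_VALUE()
 * packs the seven inputs into a value in 0..127. For WLAN_ACTIVE that
 * value selects 32-bit word 8 + (val >> 5) and bit (val & 0x1f); for
 * WLAN_KILL and ANT_SWITCH it selects word (val >> 4), where the two
 * outputs share a word as even/odd bit pairs: bit ((val << 1) & 0x1e)
 * for WLAN_KILL and the next bit up for ANT_SWITCH. For example,
 * val = 0x23 puts WLAN_ACTIVE in lut[9], bit 3.
 */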
1572 static const __le32 iwlagn_def_3w_lookup[12] = {
1573 cpu_to_le32(0xaaaaaaaa),
1574 cpu_to_le32(0xaaaaaaaa),
1575 cpu_to_le32(0xaeaaaaaa),
1576 cpu_to_le32(0xaaaaaaaa),
1577 cpu_to_le32(0xcc00ff28),
1578 cpu_to_le32(0x0000aaaa),
1579 cpu_to_le32(0xcc00aaaa),
1580 cpu_to_le32(0x0000aaaa),
1581 cpu_to_le32(0xc0004000),
1582 cpu_to_le32(0x00004000),
1583 cpu_to_le32(0xf0005000),
1584 cpu_to_le32(0xf0005000),
1587 static const __le32 iwlagn_concurrent_lookup[12] = {
1588 cpu_to_le32(0xaaaaaaaa),
1589 cpu_to_le32(0xaaaaaaaa),
1590 cpu_to_le32(0xaaaaaaaa),
1591 cpu_to_le32(0xaaaaaaaa),
1592 cpu_to_le32(0xaaaaaaaa),
1593 cpu_to_le32(0xaaaaaaaa),
1594 cpu_to_le32(0xaaaaaaaa),
1595 cpu_to_le32(0xaaaaaaaa),
1596 cpu_to_le32(0x00000000),
1597 cpu_to_le32(0x00000000),
1598 cpu_to_le32(0x00000000),
1599 cpu_to_le32(0x00000000),
1602 void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
1604 struct iwl_basic_bt_cmd basic = {
1605 .max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
1606 .bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
1607 .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
1608 .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
1610 struct iwl6000_bt_cmd bt_cmd_6000;
1611 struct iwl2000_bt_cmd bt_cmd_2000;
1612 int ret;
1614 BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
1615 sizeof(basic.bt3_lookup_table));
1617 if (priv->cfg->bt_params) {
1618 if (priv->cfg->bt_params->bt_session_2) {
1619 bt_cmd_2000.prio_boost = cpu_to_le32(
1620 priv->cfg->bt_params->bt_prio_boost);
1621 bt_cmd_2000.tx_prio_boost = 0;
1622 bt_cmd_2000.rx_prio_boost = 0;
1623 } else {
1624 bt_cmd_6000.prio_boost =
1625 priv->cfg->bt_params->bt_prio_boost;
1626 bt_cmd_6000.tx_prio_boost = 0;
1627 bt_cmd_6000.rx_prio_boost = 0;
1629 } else {
1630 IWL_ERR(priv, "failed to construct BT Coex Config\n");
1631 return;
1634 basic.kill_ack_mask = priv->kill_ack_mask;
1635 basic.kill_cts_mask = priv->kill_cts_mask;
1636 basic.valid = priv->bt_valid;
1639 * Configure BT coex mode to "no coexistence" when the
1640 * user disabled BT coexistence, we have no interface
1641 * (might be in monitor mode), or the interface is in
1642 * IBSS mode (no proper uCode support for coex then).
1644 if (!iwlagn_mod_params.bt_coex_active ||
1645 priv->iw_mode == NL80211_IFTYPE_ADHOC) {
1646 basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
1647 } else {
1648 basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
1649 IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
1650 if (priv->cfg->bt_params &&
1651 priv->cfg->bt_params->bt_sco_disable)
1652 basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
1654 if (priv->bt_ch_announce)
1655 basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
1656 IWL_DEBUG_COEX(priv, "BT coex flag: 0X%x\n", basic.flags);
1658 priv->bt_enable_flag = basic.flags;
1659 if (priv->bt_full_concurrent)
1660 memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup,
1661 sizeof(iwlagn_concurrent_lookup));
1662 else
1663 memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
1664 sizeof(iwlagn_def_3w_lookup));
1666 IWL_DEBUG_COEX(priv, "BT coex %s in %s mode\n",
1667 basic.flags ? "active" : "disabled",
1668 priv->bt_full_concurrent ?
1669 "full concurrency" : "3-wire");
1671 if (priv->cfg->bt_params->bt_session_2) {
1672 memcpy(&bt_cmd_2000.basic, &basic,
1673 sizeof(basic));
1674 ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1675 sizeof(bt_cmd_2000), &bt_cmd_2000);
1676 } else {
1677 memcpy(&bt_cmd_6000.basic, &basic,
1678 sizeof(basic));
1679 ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1680 sizeof(bt_cmd_6000), &bt_cmd_6000);
1682 if (ret)
1683 IWL_ERR(priv, "failed to send BT Coex Config\n");
1687 static void iwlagn_bt_traffic_change_work(struct work_struct *work)
1689 struct iwl_priv *priv =
1690 container_of(work, struct iwl_priv, bt_traffic_change_work);
1691 struct iwl_rxon_context *ctx;
1692 int smps_request = -1;
1694 if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
1695 /* bt coex disabled */
1696 return;
* Note: bt_traffic_load can be overridden by scan complete and
* coex profile notifications. Ignore that, since the only bad
* consequence is a debug print that does not match the actual state.
1704 IWL_DEBUG_COEX(priv, "BT traffic load changes: %d\n",
1705 priv->bt_traffic_load);
1707 switch (priv->bt_traffic_load) {
1708 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1709 if (priv->bt_status)
1710 smps_request = IEEE80211_SMPS_DYNAMIC;
1711 else
1712 smps_request = IEEE80211_SMPS_AUTOMATIC;
1713 break;
1714 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1715 smps_request = IEEE80211_SMPS_DYNAMIC;
1716 break;
1717 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1718 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1719 smps_request = IEEE80211_SMPS_STATIC;
1720 break;
1721 default:
1722 IWL_ERR(priv, "Invalid BT traffic load: %d\n",
1723 priv->bt_traffic_load);
1724 break;
1727 mutex_lock(&priv->mutex);
* We cannot send commands to the firmware while scanning. When the
* scan completes, this work is scheduled again. The check is done with
* the mutex held to prevent a new scan request from arriving. We do
* not check STATUS_SCANNING, to avoid a race in which work queued
* twice from different notifications would quit without performing
* any work at all.
1736 if (test_bit(STATUS_SCAN_HW, &priv->status))
1737 goto out;
1739 if (priv->cfg->ops->lib->update_chain_flags)
1740 priv->cfg->ops->lib->update_chain_flags(priv);
1742 if (smps_request != -1) {
1743 priv->current_ht_config.smps = smps_request;
1744 for_each_context(priv, ctx) {
1745 if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
1746 ieee80211_request_smps(ctx->vif, smps_request);
1749 out:
1750 mutex_unlock(&priv->mutex);
1753 static void iwlagn_print_uartmsg(struct iwl_priv *priv,
1754 struct iwl_bt_uart_msg *uart_msg)
1756 IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, "
1757 "Update Req = 0x%X",
1758 (BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
1759 BT_UART_MSG_FRAME1MSGTYPE_POS,
1760 (BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
1761 BT_UART_MSG_FRAME1SSN_POS,
1762 (BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >>
1763 BT_UART_MSG_FRAME1UPDATEREQ_POS);
1765 IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
1766 "Chl_SeqN = 0x%X, In band = 0x%X",
1767 (BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
1768 BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
1769 (BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
1770 BT_UART_MSG_FRAME2TRAFFICLOAD_POS,
1771 (BT_UART_MSG_FRAME2CHLSEQN_MSK & uart_msg->frame2) >>
1772 BT_UART_MSG_FRAME2CHLSEQN_POS,
1773 (BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >>
1774 BT_UART_MSG_FRAME2INBAND_POS);
1776 IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
1777 "ACL = 0x%X, Master = 0x%X, OBEX = 0x%X",
1778 (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
1779 BT_UART_MSG_FRAME3SCOESCO_POS,
1780 (BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
1781 BT_UART_MSG_FRAME3SNIFF_POS,
1782 (BT_UART_MSG_FRAME3A2DP_MSK & uart_msg->frame3) >>
1783 BT_UART_MSG_FRAME3A2DP_POS,
1784 (BT_UART_MSG_FRAME3ACL_MSK & uart_msg->frame3) >>
1785 BT_UART_MSG_FRAME3ACL_POS,
1786 (BT_UART_MSG_FRAME3MASTER_MSK & uart_msg->frame3) >>
1787 BT_UART_MSG_FRAME3MASTER_POS,
1788 (BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
1789 BT_UART_MSG_FRAME3OBEX_POS);
1791 IWL_DEBUG_COEX(priv, "Idle duration = 0x%X",
1792 (BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
1793 BT_UART_MSG_FRAME4IDLEDURATION_POS);
1795 IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
1796 "eSCO Retransmissions = 0x%X",
1797 (BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
1798 BT_UART_MSG_FRAME5TXACTIVITY_POS,
1799 (BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
1800 BT_UART_MSG_FRAME5RXACTIVITY_POS,
1801 (BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
1802 BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);
1804 IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X",
1805 (BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
1806 BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
1807 (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
1808 BT_UART_MSG_FRAME6DISCOVERABLE_POS);
1810 IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = "
1811 "0x%X, Inquiry = 0x%X, Connectable = 0x%X",
1812 (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
1813 BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
1814 (BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
1815 BT_UART_MSG_FRAME7PAGE_POS,
1816 (BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >>
1817 BT_UART_MSG_FRAME7INQUIRY_POS,
1818 (BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
1819 BT_UART_MSG_FRAME7CONNECTABLE_POS);
1822 static void iwlagn_set_kill_msk(struct iwl_priv *priv,
1823 struct iwl_bt_uart_msg *uart_msg)
1825 u8 kill_msk;
1826 static const __le32 bt_kill_ack_msg[2] = {
1827 IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
1828 IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };
1829 static const __le32 bt_kill_cts_msg[2] = {
1830 IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
1831 IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };
1833 kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3)
1834 ? 1 : 0;
1835 if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] ||
1836 priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) {
1837 priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
1838 priv->kill_ack_mask = bt_kill_ack_msg[kill_msk];
1839 priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK;
1840 priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];
1842 /* schedule to send runtime bt_config */
1843 queue_work(priv->workqueue, &priv->bt_runtime_config);
1847 void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
1848 struct iwl_rx_mem_buffer *rxb)
1850 unsigned long flags;
1851 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1852 struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
1853 struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;
1855 if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
1856 /* bt coex disabled */
1857 return;
1860 IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
1861 IWL_DEBUG_COEX(priv, " status: %d\n", coex->bt_status);
1862 IWL_DEBUG_COEX(priv, " traffic load: %d\n", coex->bt_traffic_load);
1863 IWL_DEBUG_COEX(priv, " CI compliance: %d\n",
1864 coex->bt_ci_compliance);
1865 iwlagn_print_uartmsg(priv, uart_msg);
1867 priv->last_bt_traffic_load = priv->bt_traffic_load;
1868 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
1869 if (priv->bt_status != coex->bt_status ||
1870 priv->last_bt_traffic_load != coex->bt_traffic_load) {
1871 if (coex->bt_status) {
1872 /* BT on */
1873 if (!priv->bt_ch_announce)
1874 priv->bt_traffic_load =
1875 IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
1876 else
1877 priv->bt_traffic_load =
1878 coex->bt_traffic_load;
1879 } else {
1880 /* BT off */
1881 priv->bt_traffic_load =
1882 IWL_BT_COEX_TRAFFIC_LOAD_NONE;
1884 priv->bt_status = coex->bt_status;
1885 queue_work(priv->workqueue,
1886 &priv->bt_traffic_change_work);
1890 iwlagn_set_kill_msk(priv, uart_msg);
1892 /* FIXME: based on notification, adjust the prio_boost */
1894 spin_lock_irqsave(&priv->lock, flags);
1895 priv->bt_ci_compliance = coex->bt_ci_compliance;
1896 spin_unlock_irqrestore(&priv->lock, flags);
1899 void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
1901 iwlagn_rx_handler_setup(priv);
1902 priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
1903 iwlagn_bt_coex_profile_notif;
1906 void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
1908 iwlagn_setup_deferred_work(priv);
1910 INIT_WORK(&priv->bt_traffic_change_work,
1911 iwlagn_bt_traffic_change_work);
1914 void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv)
1916 cancel_work_sync(&priv->bt_traffic_change_work);
1919 static bool is_single_rx_stream(struct iwl_priv *priv)
1921 return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1922 priv->current_ht_config.single_chain_sufficient;
1925 #define IWL_NUM_RX_CHAINS_MULTIPLE 3
1926 #define IWL_NUM_RX_CHAINS_SINGLE 2
1927 #define IWL_NUM_IDLE_CHAINS_DUAL 2
1928 #define IWL_NUM_IDLE_CHAINS_SINGLE 1
1931 * Determine how many receiver/antenna chains to use.
1933 * More provides better reception via diversity. Fewer saves power
1934 * at the expense of throughput, but only when the device is not in
1935 * power-save mode to begin with.
1937 * MIMO (dual stream) requires at least 2, but works better with 3.
1938 * This does not determine *which* chains to use, just how many.
1940 static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
1942 if (priv->cfg->bt_params &&
1943 priv->cfg->bt_params->advanced_bt_coexist &&
1944 (priv->bt_full_concurrent ||
1945 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
1947 * only use chain 'A' under high BT traffic load or in
1948 * full concurrency mode
1949 */
1950 return IWL_NUM_RX_CHAINS_SINGLE;
1952 /* # of Rx chains to use when expecting MIMO. */
1953 if (is_single_rx_stream(priv))
1954 return IWL_NUM_RX_CHAINS_SINGLE;
1955 else
1956 return IWL_NUM_RX_CHAINS_MULTIPLE;
1960 * When we are in power saving mode, unless the device supports spatial
1961 * multiplexing power save, use the active count for the rx chain count.
1962 */
1963 static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
1965 /* # Rx chains when idling, depending on SMPS mode */
1966 switch (priv->current_ht_config.smps) {
1967 case IEEE80211_SMPS_STATIC:
1968 case IEEE80211_SMPS_DYNAMIC:
1969 return IWL_NUM_IDLE_CHAINS_SINGLE;
1970 case IEEE80211_SMPS_OFF:
1971 return active_cnt;
1972 default:
1973 WARN(1, "invalid SMPS mode %d",
1974 priv->current_ht_config.smps);
1975 return active_cnt;
1979 /* up to 4 chains */
1980 static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
1982 u8 res;
1983 res = (chain_bitmap & BIT(0)) >> 0;
1984 res += (chain_bitmap & BIT(1)) >> 1;
1985 res += (chain_bitmap & BIT(2)) >> 2;
1986 res += (chain_bitmap & BIT(3)) >> 3;
1987 return res;
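/*
 * Illustrative note, not part of the original file: the open-coded sum
 * above is a population count over the low four bits of the chain
 * bitmap, e.g. 0x5 (chains A and C) yields 2.  A minimal equivalent
 * sketch, assuming hweight8() from <linux/bitops.h>:
 *
 *	static u8 example_count_chain_bitmap(u32 chain_bitmap)
 *	{
 *		return hweight8(chain_bitmap & 0xf);
 *	}
 */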
1991 * iwlagn_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
1993 * Selects how many and which Rx receivers/antennas/chains to use.
1994 * This should not be used for the scan command ... it puts data in the wrong place.
1995 */
1996 void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1998 bool is_single = is_single_rx_stream(priv);
1999 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
2000 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
2001 u32 active_chains;
2002 u16 rx_chain;
2004 /* Tell uCode which antennas are actually connected.
2005 * Before first association, we assume all antennas are connected.
2006 * Just after first association, iwl_chain_noise_calibration()
2007 * checks which antennas actually *are* connected. */
2008 if (priv->chain_noise_data.active_chains)
2009 active_chains = priv->chain_noise_data.active_chains;
2010 else
2011 active_chains = priv->hw_params.valid_rx_ant;
2013 if (priv->cfg->bt_params &&
2014 priv->cfg->bt_params->advanced_bt_coexist &&
2015 (priv->bt_full_concurrent ||
2016 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
2018 * only use chain 'A' under high BT traffic load or in
2019 * full concurrency mode
2020 */
2021 active_chains = first_antenna(active_chains);
2024 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
2026 /* How many receivers should we use? */
2027 active_rx_cnt = iwl_get_active_rx_chain_count(priv);
2028 idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);
2031 /* correct the rx chain count according to hw settings
2032 * and chain noise calibration
2033 */
2034 valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
2035 if (valid_rx_cnt < active_rx_cnt)
2036 active_rx_cnt = valid_rx_cnt;
2038 if (valid_rx_cnt < idle_rx_cnt)
2039 idle_rx_cnt = valid_rx_cnt;
2041 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
2042 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
2044 ctx->staging.rx_chain = cpu_to_le16(rx_chain);
2046 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
2047 ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
2048 else
2049 ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
2051 IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
2052 ctx->staging.rx_chain,
2053 active_rx_cnt, idle_rx_cnt);
2055 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
2056 active_rx_cnt < idle_rx_cnt);
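/*
 * Illustrative worked example, not part of the original file: with all
 * three chains reported valid by chain-noise calibration
 * (active_chains == 0x7), MIMO reception in use
 * (active_rx_cnt == IWL_NUM_RX_CHAINS_MULTIPLE == 3) and a single idle
 * chain (idle_rx_cnt == IWL_NUM_IDLE_CHAINS_SINGLE == 1), the staging
 * field above is composed as:
 *
 *	rx_chain  = 0x7 << RXON_RX_CHAIN_VALID_POS;
 *	rx_chain |= 3 << RXON_RX_CHAIN_MIMO_CNT_POS;
 *	rx_chain |= 1 << RXON_RX_CHAIN_CNT_POS;
 *	ctx->staging.rx_chain = cpu_to_le16(rx_chain);
 */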
2059 u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
2061 int i;
2062 u8 ind = ant;
2064 if (priv->band == IEEE80211_BAND_2GHZ &&
2065 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
2066 return 0;
2068 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
2069 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
2070 if (valid & BIT(ind))
2071 return ind;
2073 return ant;
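/*
 * Illustrative usage sketch (hypothetical caller, field name assumed):
 * rate-scaling code can walk the transmit antenna across the valid
 * mask on successive frames, e.g. assuming priv->hw_params.valid_tx_ant
 * holds the valid-antenna bitmap:
 *
 *	u8 ant = 0;
 *	ant = iwl_toggle_tx_ant(priv, ant, priv->hw_params.valid_tx_ant);
 *
 * The return value is the next index whose bit is set in the mask,
 * 0 (the first antenna) while BT traffic load is high on 2.4 GHz, or
 * the input index unchanged when no other valid antenna exists.
 */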
2076 static const char *get_csr_string(int cmd)
2078 switch (cmd) {
2079 IWL_CMD(CSR_HW_IF_CONFIG_REG);
2080 IWL_CMD(CSR_INT_COALESCING);
2081 IWL_CMD(CSR_INT);
2082 IWL_CMD(CSR_INT_MASK);
2083 IWL_CMD(CSR_FH_INT_STATUS);
2084 IWL_CMD(CSR_GPIO_IN);
2085 IWL_CMD(CSR_RESET);
2086 IWL_CMD(CSR_GP_CNTRL);
2087 IWL_CMD(CSR_HW_REV);
2088 IWL_CMD(CSR_EEPROM_REG);
2089 IWL_CMD(CSR_EEPROM_GP);
2090 IWL_CMD(CSR_OTP_GP_REG);
2091 IWL_CMD(CSR_GIO_REG);
2092 IWL_CMD(CSR_GP_UCODE_REG);
2093 IWL_CMD(CSR_GP_DRIVER_REG);
2094 IWL_CMD(CSR_UCODE_DRV_GP1);
2095 IWL_CMD(CSR_UCODE_DRV_GP2);
2096 IWL_CMD(CSR_LED_REG);
2097 IWL_CMD(CSR_DRAM_INT_TBL_REG);
2098 IWL_CMD(CSR_GIO_CHICKEN_BITS);
2099 IWL_CMD(CSR_ANA_PLL_CFG);
2100 IWL_CMD(CSR_HW_REV_WA_REG);
2101 IWL_CMD(CSR_DBG_HPET_MEM_REG);
2102 default:
2103 return "UNKNOWN";
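/*
 * Illustrative note, not part of the original file: IWL_CMD() is
 * defined elsewhere in the driver and, roughly, stringifies its
 * argument, along the lines of:
 *
 *	#define IWL_CMD(x) case x: return #x
 *
 * so each case above maps a register constant to its own name.
 */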
2107 void iwl_dump_csr(struct iwl_priv *priv)
2109 int i;
2110 static const u32 csr_tbl[] = {
2111 CSR_HW_IF_CONFIG_REG,
2112 CSR_INT_COALESCING,
2113 CSR_INT,
2114 CSR_INT_MASK,
2115 CSR_FH_INT_STATUS,
2116 CSR_GPIO_IN,
2117 CSR_RESET,
2118 CSR_GP_CNTRL,
2119 CSR_HW_REV,
2120 CSR_EEPROM_REG,
2121 CSR_EEPROM_GP,
2122 CSR_OTP_GP_REG,
2123 CSR_GIO_REG,
2124 CSR_GP_UCODE_REG,
2125 CSR_GP_DRIVER_REG,
2126 CSR_UCODE_DRV_GP1,
2127 CSR_UCODE_DRV_GP2,
2128 CSR_LED_REG,
2129 CSR_DRAM_INT_TBL_REG,
2130 CSR_GIO_CHICKEN_BITS,
2131 CSR_ANA_PLL_CFG,
2132 CSR_HW_REV_WA_REG,
2133 CSR_DBG_HPET_MEM_REG
2135 IWL_ERR(priv, "CSR values:\n");
2136 IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
2137 "CSR_INT_PERIODIC_REG)\n");
2138 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
2139 IWL_ERR(priv, " %25s: 0X%08x\n",
2140 get_csr_string(csr_tbl[i]),
2141 iwl_read32(priv, csr_tbl[i]));
2145 static const char *get_fh_string(int cmd)
2147 switch (cmd) {
2148 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
2149 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
2150 IWL_CMD(FH_RSCSR_CHNL0_WPTR);
2151 IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
2152 IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
2153 IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
2154 IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
2155 IWL_CMD(FH_TSSR_TX_STATUS_REG);
2156 IWL_CMD(FH_TSSR_TX_ERROR_REG);
2157 default:
2158 return "UNKNOWN";
2162 int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
2164 int i;
2165 #ifdef CONFIG_IWLWIFI_DEBUG
2166 int pos = 0;
2167 size_t bufsz = 0;
2168 #endif
2169 static const u32 fh_tbl[] = {
2170 FH_RSCSR_CHNL0_STTS_WPTR_REG,
2171 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
2172 FH_RSCSR_CHNL0_WPTR,
2173 FH_MEM_RCSR_CHNL0_CONFIG_REG,
2174 FH_MEM_RSSR_SHARED_CTRL_REG,
2175 FH_MEM_RSSR_RX_STATUS_REG,
2176 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
2177 FH_TSSR_TX_STATUS_REG,
2178 FH_TSSR_TX_ERROR_REG
2180 #ifdef CONFIG_IWLWIFI_DEBUG
2181 if (display) {
2182 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
2183 *buf = kmalloc(bufsz, GFP_KERNEL);
2184 if (!*buf)
2185 return -ENOMEM;
2186 pos += scnprintf(*buf + pos, bufsz - pos,
2187 "FH register values:\n");
2188 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
2189 pos += scnprintf(*buf + pos, bufsz - pos,
2190 " %34s: 0X%08x\n",
2191 get_fh_string(fh_tbl[i]),
2192 iwl_read_direct32(priv, fh_tbl[i]));
2194 return pos;
2196 #endif
2197 IWL_ERR(priv, "FH register values:\n");
2198 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
2199 IWL_ERR(priv, " %34s: 0X%08x\n",
2200 get_fh_string(fh_tbl[i]),
2201 iwl_read_direct32(priv, fh_tbl[i]));
2203 return 0;
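/*
 * Illustrative usage sketch (hypothetical caller): when built with
 * CONFIG_IWLWIFI_DEBUG, a debugfs read handler can request the
 * formatted buffer and is responsible for freeing it:
 *
 *	char *buf = NULL;
 *	int len = iwl_dump_fh(priv, &buf, true);
 *
 *	if (len > 0) {
 *		... hand buf[0..len) to userspace ...
 *		kfree(buf);
 *	}
 */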
2206 /* notification wait support */
2207 void iwlagn_init_notification_wait(struct iwl_priv *priv,
2208 struct iwl_notification_wait *wait_entry,
2209 u8 cmd,
2210 void (*fn)(struct iwl_priv *priv,
2211 struct iwl_rx_packet *pkt,
2212 void *data),
2213 void *fn_data)
2215 wait_entry->fn = fn;
2216 wait_entry->fn_data = fn_data;
2217 wait_entry->cmd = cmd;
2218 wait_entry->triggered = false;
2219 wait_entry->aborted = false;
2221 spin_lock_bh(&priv->_agn.notif_wait_lock);
2222 list_add(&wait_entry->list, &priv->_agn.notif_waits);
2223 spin_unlock_bh(&priv->_agn.notif_wait_lock);
2226 int iwlagn_wait_notification(struct iwl_priv *priv,
2227 struct iwl_notification_wait *wait_entry,
2228 unsigned long timeout)
2230 int ret;
2232 ret = wait_event_timeout(priv->_agn.notif_waitq,
2233 wait_entry->triggered || wait_entry->aborted,
2234 timeout);
2236 spin_lock_bh(&priv->_agn.notif_wait_lock);
2237 list_del(&wait_entry->list);
2238 spin_unlock_bh(&priv->_agn.notif_wait_lock);
2240 if (wait_entry->aborted)
2241 return -EIO;
2243 /* wait_event_timeout() never returns a negative value; 0 means timeout */
2244 if (ret <= 0)
2245 return -ETIMEDOUT;
2246 return 0;
2249 void iwlagn_remove_notification(struct iwl_priv *priv,
2250 struct iwl_notification_wait *wait_entry)
2252 spin_lock_bh(&priv->_agn.notif_wait_lock);
2253 list_del(&wait_entry->list);
2254 spin_unlock_bh(&priv->_agn.notif_wait_lock);
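/*
 * Illustrative usage sketch (REPLY_EXAMPLE_CMD, len and data are
 * hypothetical): register the wait entry before sending the command so
 * the notification cannot race with the registration, then block with
 * a timeout; if the send fails the entry must be removed by hand:
 *
 *	struct iwl_notification_wait wait;
 *	int ret;
 *
 *	iwlagn_init_notification_wait(priv, &wait,
 *				      CALIBRATION_COMPLETE_NOTIFICATION,
 *				      NULL, NULL);
 *	ret = iwl_send_cmd_pdu(priv, REPLY_EXAMPLE_CMD, len, data);
 *	if (ret)
 *		iwlagn_remove_notification(priv, &wait);
 *	else
 *		ret = iwlagn_wait_notification(priv, &wait, 2 * HZ);
 */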
2257 int iwlagn_start_device(struct iwl_priv *priv)
2259 int ret;
2261 if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
2262 iwl_prepare_card_hw(priv)) {
2263 IWL_WARN(priv, "Exit HW not ready\n");
2264 return -EIO;
2267 /* If platform's RF_KILL switch is NOT set to KILL */
2268 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2269 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2270 else
2271 set_bit(STATUS_RF_KILL_HW, &priv->status);
2273 if (iwl_is_rfkill(priv)) {
2274 wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
2275 iwl_enable_interrupts(priv);
2276 return -ERFKILL;
2279 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2281 ret = iwlagn_hw_nic_init(priv);
2282 if (ret) {
2283 IWL_ERR(priv, "Unable to init nic\n");
2284 return ret;
2287 /* make sure rfkill handshake bits are cleared */
2288 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2289 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
2290 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2292 /* clear (again), then enable host interrupts */
2293 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2294 iwl_enable_interrupts(priv);
2296 /* really make sure rfkill handshake bits are cleared */
2297 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2298 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2300 return 0;
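/*
 * Illustrative usage sketch (hypothetical caller): the "up" path is
 * expected to treat -ERFKILL as non-fatal (interrupts remain enabled
 * above, so an rfkill change can still be noticed) and any other error
 * as fatal:
 *
 *	ret = iwlagn_start_device(priv);
 *	if (ret == -ERFKILL)
 *		... wait for the RF kill switch to be released ...
 *	else if (ret)
 *		goto error;
 */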
2303 void iwlagn_stop_device(struct iwl_priv *priv)
2305 unsigned long flags;
2307 /* stop and reset the on-board processor */
2308 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
2310 /* tell the device to stop sending interrupts */
2311 spin_lock_irqsave(&priv->lock, flags);
2312 iwl_disable_interrupts(priv);
2313 spin_unlock_irqrestore(&priv->lock, flags);
2314 iwl_synchronize_irq(priv);
2316 /* device going down, stop using ICT table */
2317 iwl_disable_ict(priv);
2320 * If a HW restart happens during firmware loading,
2321 * then the firmware loading might call this function
2322 * and later it might be called again due to the
2323 * restart. So don't process again if the device is
2324 * already dead.
2326 if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
2327 iwlagn_txq_ctx_stop(priv);
2328 iwlagn_rxq_stop(priv);
2330 /* Power-down device's busmaster DMA clocks */
2331 iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
2332 udelay(5);
2335 /* Make sure (redundant) we've released our request to stay awake */
2336 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2338 /* Stop the device, and put it in low power state */
2339 iwl_apm_stop(priv);