iwlagn: provide helpers to access the transport ops
drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
#include "iwl-agn-calib.h"
#include "iwl-trans.h"

#define IWL_AC_UNSET -1

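/*
 * Added summary: the two tables below map each HW TX queue index to a
 * TX FIFO and a mac80211 access category. IWL_AC_UNSET marks queues with
 * no AC (e.g. the command queue), IWL_TX_FIFO_UNUSED marks queues that
 * are not mapped to any FIFO. The second table is used when a context
 * other than the BSS context (PAN) is valid.
 */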
struct queue_to_fifo_ac {
	s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_BE_IPAN, 2, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
};

static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
	{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
	 0, COEX_UNASSOC_IDLE_FLAGS},
	{COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
	 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
	{COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
	 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
	{COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
	 0, COEX_CALIBRATION_FLAGS},
	{COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
	 0, COEX_PERIODIC_CALIBRATION_FLAGS},
	{COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
	 0, COEX_CONNECTION_ESTAB_FLAGS},
	{COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
	 0, COEX_ASSOCIATED_IDLE_FLAGS},
	{COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
	 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
	{COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
	 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
	{COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
	 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
	{COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
	{COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
	{COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
	 0, COEX_STAND_ALONE_DEBUG_FLAGS},
	{COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
	 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
	{COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
	{COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
};

/*
 * ucode
 */

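/*
 * Added summary: iwlagn_load_section DMAs one uCode section (INST or DATA)
 * to its SRAM destination over the FH service channel, then waits up to
 * five seconds for the "uCode write complete" interrupt.
 */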
static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
				struct fw_desc *image, u32 dst_addr)
{
	dma_addr_t phy_addr = image->p_addr;
	u32 byte_cnt = image->len;
	int ret;

	priv->ucode_write_complete = 0;

	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(priv,
		FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);

	iwl_write_direct32(priv,
		FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(priv,
		FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		(iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
		FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	IWL_DEBUG_FW(priv, "%s uCode section being loaded...\n", name);
	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
					priv->ucode_write_complete, 5 * HZ);
	if (ret == -ERESTARTSYS) {
		IWL_ERR(priv, "Could not load the %s uCode section due "
			"to interrupt\n", name);
		return ret;
	}
	if (!ret) {
		IWL_ERR(priv, "Could not load the %s uCode section\n",
			name);
		return -ETIMEDOUT;
	}

	return 0;
}

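/*
 * Added summary: iwlagn_load_given_ucode loads the instruction and data
 * sections of the given firmware image to their runtime SRAM addresses.
 */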
static int iwlagn_load_given_ucode(struct iwl_priv *priv,
				   struct fw_img *image)
{
	int ret = 0;

	ret = iwlagn_load_section(priv, "INST", &image->code,
				  IWLAGN_RTC_INST_LOWER_BOUND);
	if (ret)
		return ret;

	return iwlagn_load_section(priv, "DATA", &image->data,
				   IWLAGN_RTC_DATA_LOWER_BOUND);
}

/*
 * Calibration
 */

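/*
 * Added summary: iwlagn_set_Xtal_calib builds the crystal frequency
 * calibration command from the EEPROM XTAL words and stores it in the
 * driver's calibration results for later delivery to the runtime uCode.
 */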
static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
{
	struct iwl_calib_xtal_freq_cmd cmd;
	__le16 *xtal_calib =
		(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);

	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
	cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
	cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
	return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
			     (u8 *)&cmd, sizeof(cmd));
}

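/*
 * Added summary: iwlagn_set_temperature_offset_calib prepares the
 * temperature offset calibration command from EEPROM, falling back to
 * the default radio sensor offset when the EEPROM value is zero.
 */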
static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
{
	struct iwl_calib_temperature_offset_cmd cmd;
	__le16 *offset_calib =
		(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_TEMPERATURE);

	memset(&cmd, 0, sizeof(cmd));
	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
	cmd.radio_sensor_offset = le16_to_cpu(offset_calib[1]);
	if (!(cmd.radio_sensor_offset))
		cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;

	IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
			cmd.radio_sensor_offset);
	return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
			     (u8 *)&cmd, sizeof(cmd));
}

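/*
 * Added summary: iwlagn_send_calib_cfg tells the init uCode to run all
 * calibrations and to send the results back to the driver.
 */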
static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
{
	struct iwl_calib_cfg_cmd calib_cfg_cmd;
	struct iwl_host_cmd cmd = {
		.id = CALIBRATION_CFG_CMD,
		.len = { sizeof(struct iwl_calib_cfg_cmd), },
		.data = { &calib_cfg_cmd, },
	};

	memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
	calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
	calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
	calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
	calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;

	return trans_send_cmd(priv, &cmd);
}

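/*
 * Added summary: iwlagn_rx_calib_result handles a calibration result
 * notification from the init uCode and files it in priv->calib_results
 * in the order expected by the runtime uCode.
 */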
void iwlagn_rx_calib_result(struct iwl_priv *priv,
			    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
	int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
	int index;

	/* reduce the size of the length field itself */
	len -= 4;

	/* Define the order in which the results will be sent to the runtime
	 * uCode. iwl_send_calib_results sends them in a row according to
	 * their index. We sort them here.
	 */
	switch (hdr->op_code) {
	case IWL_PHY_CALIBRATE_DC_CMD:
		index = IWL_CALIB_DC;
		break;
	case IWL_PHY_CALIBRATE_LO_CMD:
		index = IWL_CALIB_LO;
		break;
	case IWL_PHY_CALIBRATE_TX_IQ_CMD:
		index = IWL_CALIB_TX_IQ;
		break;
	case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
		index = IWL_CALIB_TX_IQ_PERD;
		break;
	case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
		index = IWL_CALIB_BASE_BAND;
		break;
	default:
		IWL_ERR(priv, "Unknown calibration notification %d\n",
			hdr->op_code);
		return;
	}
	iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
}

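/*
 * Added summary: iwlagn_init_alive_start runs after the init uCode reports
 * ALIVE; it opens the BT coex calibration envelope if needed, requests the
 * calibrations, and precomputes the temperature offset for the runtime uCode.
 */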
int iwlagn_init_alive_start(struct iwl_priv *priv)
{
	int ret;

	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist) {
		/*
		 * Tell uCode we are ready to perform calibration;
		 * need to perform this before any calibration.
		 * No need to close the envelope since we are going
		 * to load the runtime uCode later.
		 */
		ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
			BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
		if (ret)
			return ret;
	}

	ret = iwlagn_send_calib_cfg(priv);
	if (ret)
		return ret;

	/*
	 * temperature offset calibration is only needed for runtime ucode,
	 * so prepare the value now.
	 */
	if (priv->cfg->need_temp_offset_calib)
		return iwlagn_set_temperature_offset_calib(priv);

	return 0;
}

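/*
 * Added summary: iwlagn_send_wimax_coex sends the WiMAX coexistence
 * priority table, or an all-zero (disabled) command on devices without
 * WiMAX coexistence support.
 */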
static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
{
	struct iwl_wimax_coex_cmd coex_cmd;

	if (priv->cfg->base_params->support_wimax_coexist) {
		/* UnMask wake up src at associated sleep */
		coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;

		/* UnMask wake up src at unassociated sleep */
		coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
		memcpy(coex_cmd.sta_prio, cu_priorities,
			sizeof(struct iwl_wimax_coex_event_entry) *
			COEX_NUM_OF_EVENTS);

		/* enabling the coexistence feature */
		coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;

		/* enabling the priorities tables */
		coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
	} else {
		/* coexistence is disabled */
		memset(&coex_cmd, 0, sizeof(coex_cmd));
	}
	return trans_send_cmd_pdu(priv,
				COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
				sizeof(coex_cmd), &coex_cmd);
}

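/*
 * Added summary: BT coexistence priority table, one entry per
 * BT_COEX_PRIO_TBL event, encoding the priority level and whether the
 * antenna is shared.
 */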
static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_COEX_OFF << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_COEX_ON << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	0, 0, 0, 0, 0, 0, 0
};

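/*
 * Added summary: iwlagn_send_prio_tbl sends the BT coexistence priority
 * table above to the uCode.
 */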
void iwlagn_send_prio_tbl(struct iwl_priv *priv)
{
	struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;

	memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
		sizeof(iwlagn_bt_prio_tbl));
	if (trans_send_cmd_pdu(priv,
				REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
				sizeof(prio_tbl_cmd), &prio_tbl_cmd))
		IWL_ERR(priv, "failed to send BT prio tbl command\n");
}

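/*
 * Added summary: iwlagn_send_bt_env opens or closes a BT coexistence
 * protection envelope of the given type.
 */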
int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
{
	struct iwl_bt_coex_prot_env_cmd env_cmd;
	int ret;

	env_cmd.action = action;
	env_cmd.type = type;
	ret = trans_send_cmd_pdu(priv,
			REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
			sizeof(env_cmd), &env_cmd);
	if (ret)
		IWL_ERR(priv, "failed to send BT env command\n");
	return ret;
}

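/*
 * Added summary: iwlagn_alive_notify finishes bringing the device up after
 * a uCode image reports ALIVE; it resets the scheduler memory, enables the
 * TX DMA channels, maps TX queues to FIFOs, and sends the WiMAX coex
 * command and the stored calibration results.
 */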
static int iwlagn_alive_notify(struct iwl_priv *priv)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	struct iwl_rxon_context *ctx;
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);

	priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < priv->scd_base_addr + IWLAGN_SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	/* reset tx status memory */
	for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
		iwl_write_targ_mem(priv, a, 0);

	iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
		IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv));
	iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {
		iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK,
			IWL_MASK(0, priv->hw_params.max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	iwlagn_txq_set_sched(priv, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);
	for_each_context(priv, ctx)
		ctx->last_tx_rejected = false;

	/* reset to 0 to enable all the queues first */
	priv->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);

	for (i = 0; i < 10; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(priv, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&priv->txq[i], ac, i);
		iwlagn_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	ret = iwlagn_send_wimax_coex(priv);
	if (ret)
		return ret;

	ret = iwlagn_set_Xtal_calib(priv);
	if (ret)
		return ret;

	return iwl_send_calib_results(priv);
}

/**
 * iwlcore_verify_inst_sparse - verify runtime uCode image in card vs. host,
 *   using sample data 100 bytes apart. If these sample points are good,
 *   it's a pretty good bet that everything between them is good, too.
 */
static int iwlcore_verify_inst_sparse(struct iwl_priv *priv,
				      struct fw_desc *fw_desc)
{
	__le32 *image = (__le32 *)fw_desc->v_addr;
	u32 len = fw_desc->len;
	u32 val;
	u32 i;

	IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);

	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
			i + IWLAGN_RTC_INST_LOWER_BOUND);
		val = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image))
			return -EIO;
	}

	return 0;
}

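/*
 * Added summary: iwl_print_mismatch_inst dumps up to 20 words of the
 * instruction SRAM that do not match the firmware image on the host,
 * for debugging a failed verification.
 */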
static void iwl_print_mismatch_inst(struct iwl_priv *priv,
				    struct fw_desc *fw_desc)
{
	__le32 *image = (__le32 *)fw_desc->v_addr;
	u32 len = fw_desc->len;
	u32 val;
	u32 offs;
	int errors = 0;

	IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);

	iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
			   IWLAGN_RTC_INST_LOWER_BOUND);

	for (offs = 0;
	     offs < len && errors < 20;
	     offs += sizeof(u32), image++) {
		/* read data comes through single port, auto-incr addr */
		val = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(priv, "uCode INST section at "
				"offset 0x%x, is 0x%x, s/b 0x%x\n",
				offs, val, le32_to_cpu(*image));
			errors++;
		}
	}
}

/**
 * iwl_verify_ucode - determine which instruction image is in SRAM,
 *    and verify its contents
 */
static int iwl_verify_ucode(struct iwl_priv *priv, struct fw_img *img)
{
	if (!iwlcore_verify_inst_sparse(priv, &img->code)) {
		IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
		return 0;
	}

	IWL_ERR(priv, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");

	iwl_print_mismatch_inst(priv, &img->code);
	return -EIO;
}

struct iwlagn_alive_data {
	bool valid;
	u8 subtype;
};

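/*
 * Added summary: iwlagn_alive_fn is the notification-wait callback for
 * REPLY_ALIVE; it saves the error/event log pointers and records whether
 * the uCode declared itself valid.
 */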
static void iwlagn_alive_fn(struct iwl_priv *priv,
			    struct iwl_rx_packet *pkt,
			    void *data)
{
	struct iwlagn_alive_data *alive_data = data;
	struct iwl_alive_resp *palive;

	palive = &pkt->u.alive_frame;

	IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision "
		       "0x%01X 0x%01X\n",
		       palive->is_valid, palive->ver_type,
		       palive->ver_subtype);

	priv->device_pointers.error_event_table =
		le32_to_cpu(palive->error_event_table_ptr);
	priv->device_pointers.log_event_table =
		le32_to_cpu(palive->log_event_table_ptr);

	alive_data->subtype = palive->ver_subtype;
	alive_data->valid = palive->is_valid == UCODE_VALID_OK;
}

#define UCODE_ALIVE_TIMEOUT	HZ
#define UCODE_CALIB_TIMEOUT	(2*HZ)

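/*
 * Added summary: iwlagn_load_ucode_wait_alive starts the device, loads the
 * given uCode image, waits for the ALIVE notification, verifies the image
 * in SRAM and completes the ALIVE transition. On any failure the previous
 * ucode_type is restored.
 */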
int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
				 struct fw_img *image,
				 enum iwlagn_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwlagn_alive_data alive_data;
	int ret;
	enum iwlagn_ucode_type old_type;

	ret = iwlagn_start_device(priv);
	if (ret)
		return ret;

	iwlagn_init_notification_wait(priv, &alive_wait, REPLY_ALIVE,
				      iwlagn_alive_fn, &alive_data);

	old_type = priv->ucode_type;
	priv->ucode_type = ucode_type;

	ret = iwlagn_load_given_ucode(priv, image);
	if (ret) {
		priv->ucode_type = old_type;
		iwlagn_remove_notification(priv, &alive_wait);
		return ret;
	}

	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwlagn_wait_notification(priv, &alive_wait, UCODE_ALIVE_TIMEOUT);
	if (ret) {
		priv->ucode_type = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(priv, "Loaded ucode is not valid!\n");
		priv->ucode_type = old_type;
		return -EIO;
	}

	ret = iwl_verify_ucode(priv, image);
	if (ret) {
		priv->ucode_type = old_type;
		return ret;
	}

	/* delay a bit to give rfkill time to run */
	msleep(5);

	ret = iwlagn_alive_notify(priv);
	if (ret) {
		IWL_WARN(priv,
			"Could not complete ALIVE transition: %d\n", ret);
		priv->ucode_type = old_type;
		return ret;
	}

	return 0;
}

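/*
 * Added summary: iwlagn_run_init_ucode runs the init (calibration) uCode,
 * if the firmware provides one, and waits for the calibration complete
 * notification; the device is stopped again before returning.
 */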
int iwlagn_run_init_ucode(struct iwl_priv *priv)
{
	struct iwl_notification_wait calib_wait;
	int ret;

	lockdep_assert_held(&priv->mutex);

	/* No init ucode required? Curious, but maybe ok */
	if (!priv->ucode_init.code.len)
		return 0;

	if (priv->ucode_type != IWL_UCODE_NONE)
		return 0;

	iwlagn_init_notification_wait(priv, &calib_wait,
				      CALIBRATION_COMPLETE_NOTIFICATION,
				      NULL, NULL);

	/* Will also start the device */
	ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init,
					   IWL_UCODE_INIT);
	if (ret)
		goto error;

	ret = iwlagn_init_alive_start(priv);
	if (ret)
		goto error;

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwlagn_wait_notification(priv, &calib_wait, UCODE_CALIB_TIMEOUT);

	goto out;

 error:
	iwlagn_remove_notification(priv, &calib_wait);
 out:
	/* Whatever happened, stop the device */
	iwlagn_stop_device(priv);
	return ret;
}