/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>
#include <linux/netdevice.h>
#include <linux/acpi.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-fw.h"
#include "iwl-debug.h"
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "iwl-eeprom-parse.h"

#include "mvm.h"
#include "fw-dbg.h"
#include "iwl-phy-db.h"
#define MVM_UCODE_ALIVE_TIMEOUT	HZ
#define MVM_UCODE_CALIB_TIMEOUT	(2*HZ)

#define UCODE_VALID_OK	cpu_to_le32(0x1)
struct iwl_mvm_alive_data {
	bool valid;
	u32 scd_base_addr;
};
static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = cpu_to_le32(valid_tx_ant),
	};

	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
				    sizeof(tx_ant_cmd), &tx_ant_cmd);
}
static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
{
	int i;
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
			     IWL_RSS_HASH_TYPE_IPV4_UDP |
			     IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
			     IWL_RSS_HASH_TYPE_IPV6_TCP |
			     IWL_RSS_HASH_TYPE_IPV6_UDP |
			     IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
	};

	if (mvm->trans->num_rx_queues == 1)
		return 0;

	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] =
			1 + (i % (mvm->trans->num_rx_queues - 1));
	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}
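
/*
 * Illustrative example: with num_rx_queues == 4, the indirection table
 * built above cycles through queues 1, 2, 3, 1, 2, 3, ... so RSS never
 * selects queue 0, keeping it free as the fallback queue.
 */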
static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
{
	struct iwl_dqa_enable_cmd dqa_cmd = {
		.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
	};
	u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
	else
		IWL_DEBUG_FW(mvm, "Working in DQA mode\n");

	return ret;
}
void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
	int i;

	if (!mvm->fw_paging_db[0].fw_paging_block)
		return;

	for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
		struct iwl_fw_paging *paging = &mvm->fw_paging_db[i];

		if (!paging->fw_paging_block) {
			IWL_DEBUG_FW(mvm,
				     "Paging: block %d already freed, continue to next page\n",
				     i);

			continue;
		}
		dma_unmap_page(mvm->trans->dev, paging->fw_paging_phys,
			       paging->fw_paging_size, DMA_BIDIRECTIONAL);

		__free_pages(paging->fw_paging_block,
			     get_order(paging->fw_paging_size));
		paging->fw_paging_block = NULL;
	}
	kfree(mvm->trans->paging_download_buf);
	mvm->trans->paging_download_buf = NULL;
	mvm->trans->paging_db = NULL;

	memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}
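
/*
 * Note: the early return on fw_paging_db[0].fw_paging_block makes
 * iwl_free_fw_paging() safe to call even when paging was never
 * allocated, so error paths may invoke it unconditionally.
 */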
static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
{
	int sec_idx, idx;
	u32 offset = 0;

	/*
	 * find where the paging image starts:
	 * if CPU2 exists and it's in paging format, then the image looks like:
	 * CPU1 sections (2 or more)
	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2
	 * CPU2 sections (not paged)
	 * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2
	 * non paged to CPU2 paging sec
	 * CPU2 paging CSS
	 * CPU2 paging image (including instruction and data)
	 */
	for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
			sec_idx++;
			break;
		}
	}

	/*
	 * If paging is enabled there should be at least 2 more sections left
	 * (one for CSS and one for Paging data)
	 */
	if (sec_idx >= image->num_sec - 1) {
		IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
		iwl_free_fw_paging(mvm);
		return -EINVAL;
	}

	/* copy the CSS block to the dram */
	IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
		     sec_idx);

	memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
	       image->sec[sec_idx].data,
	       mvm->fw_paging_db[0].fw_paging_size);
	dma_sync_single_for_device(mvm->trans->dev,
				   mvm->fw_paging_db[0].fw_paging_phys,
				   mvm->fw_paging_db[0].fw_paging_size,
				   DMA_BIDIRECTIONAL);

	IWL_DEBUG_FW(mvm,
		     "Paging: copied %d CSS bytes to first block\n",
		     mvm->fw_paging_db[0].fw_paging_size);

	sec_idx++;

	/*
	 * copy the paging blocks to the dram
	 * loop index starts from 1 since the CSS block was already copied to
	 * dram and CSS index is 0.
	 * loop stops at num_of_paging_blk since that last block is not full.
	 */
	for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
		struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];

		memcpy(page_address(block->fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       block->fw_paging_size);
		dma_sync_single_for_device(mvm->trans->dev,
					   block->fw_paging_phys,
					   block->fw_paging_size,
					   DMA_BIDIRECTIONAL);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d paging bytes to block %d\n",
			     mvm->fw_paging_db[idx].fw_paging_size, idx);

		offset += mvm->fw_paging_db[idx].fw_paging_size;
	}

	/* copy the last paging block */
	if (mvm->num_of_pages_in_last_blk > 0) {
		struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];

		memcpy(page_address(block->fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
		dma_sync_single_for_device(mvm->trans->dev,
					   block->fw_paging_phys,
					   block->fw_paging_size,
					   DMA_BIDIRECTIONAL);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d pages in the last block %d\n",
			     mvm->num_of_pages_in_last_blk, idx);
	}

	return 0;
}
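
/*
 * Note: the dma_sync_single_for_device() calls above hand ownership of
 * each block back to the device after the CPU filled it with memcpy();
 * on non-coherent platforms the device could otherwise read stale data.
 */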
static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
				   const struct fw_img *image)
{
	struct page *block;
	dma_addr_t phys = 0;
	int blk_idx, order, num_of_pages, size, dma_enabled;

	if (mvm->fw_paging_db[0].fw_paging_block)
		return 0;

	dma_enabled = is_device_dma_capable(mvm->trans->dev);

	/* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */
	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);

	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
	mvm->num_of_paging_blk =
		DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
	mvm->num_of_pages_in_last_blk =
		num_of_pages -
		NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);

	IWL_DEBUG_FW(mvm,
		     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
		     mvm->num_of_paging_blk,
		     mvm->num_of_pages_in_last_blk);

	/*
	 * Allocate CSS and paging blocks in dram.
	 */
	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		/* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
		size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
		order = get_order(size);
		block = alloc_pages(GFP_KERNEL, order);
		if (!block) {
			/* free all the previous pages since we failed */
			iwl_free_fw_paging(mvm);
			return -ENOMEM;
		}

		mvm->fw_paging_db[blk_idx].fw_paging_block = block;
		mvm->fw_paging_db[blk_idx].fw_paging_size = size;

		if (dma_enabled) {
			phys = dma_map_page(mvm->trans->dev, block, 0,
					    PAGE_SIZE << order,
					    DMA_BIDIRECTIONAL);
			if (dma_mapping_error(mvm->trans->dev, phys)) {
				/*
				 * free the previous pages and the current one
				 * since we failed to map_page.
				 */
				iwl_free_fw_paging(mvm);
				return -ENOMEM;
			}
			mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
		} else {
			mvm->fw_paging_db[blk_idx].fw_paging_phys =
				PAGING_ADDR_SIG |
				blk_idx << BLOCK_2_EXP_SIZE;
		}

		if (!blk_idx)
			IWL_DEBUG_FW(mvm,
				     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
				     order);
		else
			IWL_DEBUG_FW(mvm,
				     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
				     order);
	}

	return 0;
}
static int iwl_save_fw_paging(struct iwl_mvm *mvm,
			      const struct fw_img *fw)
{
	int ret;

	ret = iwl_alloc_fw_paging_mem(mvm, fw);
	if (ret)
		return ret;

	return iwl_fill_paging_mem(mvm, fw);
}
/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
	struct iwl_fw_paging_cmd paging_cmd = {
		.flags =
			cpu_to_le32(PAGING_CMD_IS_SECURED |
				    PAGING_CMD_IS_ENABLED |
				    (mvm->num_of_pages_in_last_blk <<
				    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
	};
	int blk_idx, size = sizeof(paging_cmd);

	/* A bit hard coded - but this is the old API and will be deprecated */
	if (!iwl_mvm_has_new_tx_api(mvm))
		size -= NUM_OF_FW_PAGING_BLOCKS * 4;

	/* loop for all paging blocks + CSS block */
	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys;

		addr = addr >> PAGE_2_EXP_SIZE;

		if (iwl_mvm_has_new_tx_api(mvm)) {
			__le64 phy_addr = cpu_to_le64(addr);

			paging_cmd.device_phy_addr.addr64[blk_idx] = phy_addr;
		} else {
			__le32 phy_addr = cpu_to_le32(addr);

			paging_cmd.device_phy_addr.addr32[blk_idx] = phy_addr;
		}
	}

	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
						    IWL_ALWAYS_LONG_GROUP, 0),
				    0, size, &paging_cmd);
}
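
/*
 * Note: the block addresses are sent in page units (the
 * >> PAGE_2_EXP_SIZE shift above), as 64-bit entries on the new TX API
 * and 32-bit entries on the old one - which is why the old-API command
 * is shrunk by NUM_OF_FW_PAGING_BLOCKS * 4 bytes.
 */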
/*
 * Send paging item cmd to FW in case CPU2 has paging image
 */
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
	int ret;
	struct iwl_fw_get_item_cmd fw_get_item_cmd = {
		.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
	};

	struct iwl_fw_get_item_resp *item_resp;
	struct iwl_host_cmd cmd = {
		.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &fw_get_item_cmd, },
	};

	cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm,
			"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
			ret);
		return ret;
	}

	item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
	if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
		IWL_ERR(mvm,
			"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
			le32_to_cpu(item_resp->item_id));
		ret = -EIO;
		goto exit;
	}

	/* Add an extra page for headers */
	mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
						  FW_PAGING_SIZE,
						  GFP_KERNEL);
	if (!mvm->trans->paging_download_buf) {
		ret = -ENOMEM;
		goto exit;
	}
	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
	mvm->trans->paging_db = mvm->fw_paging_db;
	IWL_DEBUG_FW(mvm,
		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
		     mvm->trans->paging_req_addr);

exit:
	iwl_free_resp(&cmd);

	return ret;
}
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct mvm_alive_resp_v3 *palive3;
	struct mvm_alive_resp *palive;
	struct iwl_umac_alive *umac;
	struct iwl_lmac_alive *lmac1;
	struct iwl_lmac_alive *lmac2 = NULL;
	u16 status;

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;
		umac = &palive->umac_data;
		lmac1 = &palive->lmac_data[0];
		lmac2 = &palive->lmac_data[1];
		status = le16_to_cpu(palive->status);
	} else {
		palive3 = (void *)pkt->data;
		umac = &palive3->umac_data;
		lmac1 = &palive3->lmac_data;
		status = le16_to_cpu(palive3->status);
	}

	mvm->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr);
	if (lmac2)
		mvm->error_event_table[1] =
			le32_to_cpu(lmac2->error_event_table_ptr);
	mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr);
	mvm->sf_space.addr = le32_to_cpu(lmac1->st_fwrd_addr);
	mvm->sf_space.size = le32_to_cpu(lmac1->st_fwrd_size);

	mvm->umac_error_event_table = le32_to_cpu(umac->error_info_addr);

	alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr);
	alive_data->valid = status == IWL_ALIVE_STATUS_OK;
	if (mvm->umac_error_event_table)
		mvm->support_umac_log = true;

	IWL_DEBUG_FW(mvm,
		     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
		     status, lmac1->ver_type, lmac1->ver_subtype);

	if (lmac2)
		IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");

	IWL_DEBUG_FW(mvm,
		     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
		     le32_to_cpu(umac->umac_major),
		     le32_to_cpu(umac->umac_minor));

	return true;
}
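
/*
 * Note: the two alive layouts are told apart purely by payload length;
 * the full struct mvm_alive_resp carries a second LMAC (lmac2), which
 * the debug print above appears to associate with CDB firmware.
 */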
static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
				   struct iwl_rx_packet *pkt, void *data)
{
	WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);

	return true;
}
static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_phy_db *phy_db = data;

	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
		return true;
	}

	WARN_ON(iwl_phy_db_set_section(phy_db, pkt));

	return false;
}
static int iwl_mvm_init_paging(struct iwl_mvm *mvm)
{
	const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode];
	int ret;

	/*
	 * Configure and operate fw paging mechanism.
	 * The driver configures the paging flow only once.
	 * The CPU2 paging image is included in the IWL_UCODE_INIT image.
	 */
	if (!fw->paging_mem_size)
		return 0;

	/*
	 * When dma is not enabled, the driver needs to copy / write
	 * the downloaded / uploaded page to / from the smem.
	 * This gets the location of the place where the pages are
	 * stored.
	 */
	if (!is_device_dma_capable(mvm->trans->dev)) {
		ret = iwl_trans_get_paging_item(mvm);
		if (ret) {
			IWL_ERR(mvm, "failed to get FW paging item\n");
			return ret;
		}
	}

	ret = iwl_save_fw_paging(mvm, fw);
	if (ret) {
		IWL_ERR(mvm, "failed to save the FW paging image\n");
		return ret;
	}

	ret = iwl_send_paging_cmd(mvm, fw);
	if (ret) {
		IWL_ERR(mvm, "failed to send the paging cmd\n");
		iwl_free_fw_paging(mvm);
		return ret;
	}

	return 0;
}
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data;
	const struct fw_img *fw;
	int ret, i;
	enum iwl_ucode_type old_type = mvm->cur_ucode;
	static const u16 alive_cmd[] = { MVM_ALIVE };
	struct iwl_sf_region st_fwrd_space;

	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
	    !(fw_has_capa(&mvm->fw->ucode_capa,
			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
		fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm->fw, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	mvm->cur_ucode = ucode_type;
	mvm->ucode_loaded = false;

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
	if (ret) {
		mvm->cur_ucode = old_type;
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret) {
		if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
				iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
		mvm->cur_ucode = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		mvm->cur_ucode = old_type;
		return -EIO;
	}

	/*
	 * update the sdio allocation according to the pointer we get in the
	 * alive notification.
	 */
	st_fwrd_space.addr = mvm->sf_space.addr;
	st_fwrd_space.size = mvm->sf_space.size;
	ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
	if (ret) {
		IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
		return ret;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */

	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	if (iwl_mvm_is_dqa_supported(mvm))
		mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
	else
		mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

	mvm->ucode_loaded = true;

	return 0;
}
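
/*
 * Note: iwl_init_notification_wait() is deliberately called before
 * iwl_trans_start_fw() - the ALIVE notification can arrive as soon as
 * the firmware starts, and registering the waiter first ensures it is
 * not missed.
 */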
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
	struct iwl_phy_cfg_cmd phy_cfg_cmd;
	enum iwl_ucode_type ucode_type = mvm->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
		mvm->fw->default_calib[ucode_type].flow_trigger;

	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
		       phy_cfg_cmd.phy_cfg);

	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->calibrating))
		return 0;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto error;
	}

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		ret = iwl_nvm_init(mvm, true);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
	WARN_ON(ret);

	/*
	 * abort after reading the nvm in case RF Kill is on, we will complete
	 * the init seq later when RF kill will switch to off
	 */
	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
		ret = 1;
		goto out;
	}

	mvm->calibrating = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
				    MVM_UCODE_CALIB_TIMEOUT);

	if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 1;
	}
	goto out;

error:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->calibrating = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)mvm->nvm_data->channels + 1;
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}
int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait init_wait;
	struct iwl_nvm_access_complete_cmd nvm_complete = {};
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_init_notification_wait(&mvm->notif_wait,
				   &init_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_init_complete,
				   NULL);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	/* TODO: remove when integrating context info */
	ret = iwl_mvm_init_paging(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to init paging: %d\n",
			ret);
		goto error;
	}

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		ret = iwl_nvm_init(mvm, true);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
	if (WARN_ON(ret))
		goto error;

	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
						NVM_ACCESS_COMPLETE), 0,
				   sizeof(nvm_complete), &nvm_complete);
	if (ret) {
		IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
			ret);
		goto error;
	}

	/* We wait for the INIT complete notification */
	return iwl_wait_notification(&mvm->notif_wait, &init_wait,
				     MVM_UCODE_ALIVE_TIMEOUT);

error:
	iwl_remove_notification(&mvm->notif_wait, &init_wait);
	return ret;
}
static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
					  struct iwl_rx_packet *pkt)
{
	struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
	int i;

	mvm->shared_mem_cfg.num_txfifo_entries =
		ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
	for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
		mvm->shared_mem_cfg.txfifo_size[i] =
			le32_to_cpu(mem_cfg->txfifo_size[i]);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
		mvm->shared_mem_cfg.rxfifo_size[i] =
			le32_to_cpu(mem_cfg->rxfifo_size[i]);

	BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
		     sizeof(mem_cfg->internal_txfifo_size));

	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
	     i++)
		mvm->shared_mem_cfg.internal_txfifo_size[i] =
			le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
}
static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
				     struct iwl_rx_packet *pkt)
{
	struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data;
	int i;

	mvm->shared_mem_cfg.num_txfifo_entries =
		ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
	for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
		mvm->shared_mem_cfg.txfifo_size[i] =
			le32_to_cpu(mem_cfg->txfifo_size[i]);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
		mvm->shared_mem_cfg.rxfifo_size[i] =
			le32_to_cpu(mem_cfg->rxfifo_size[i]);

	/* new API has more data, from rxfifo_addr field and on */
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
		BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
			     sizeof(mem_cfg->internal_txfifo_size));

		for (i = 0;
		     i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
		     i++)
			mvm->shared_mem_cfg.internal_txfifo_size[i] =
				le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
	}
}
static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
	struct iwl_host_cmd cmd = {
		.flags = CMD_WANT_SKB,
		.data = { NULL, },
		.len = { 0, },
	};
	struct iwl_rx_packet *pkt;

	lockdep_assert_held(&mvm->mutex);

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
		cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
	else
		cmd.id = SHARED_MEM_CFG;

	if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
		return;

	pkt = cmd.resp_pkt;
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_parse_shared_mem_a000(mvm, pkt);
	else
		iwl_mvm_parse_shared_mem(mvm, pkt);

	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");

	iwl_free_resp(&cmd);
}
static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
	struct iwl_ltr_config_cmd cmd = {
		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
	};

	if (!mvm->trans->ltr_enabled)
		return 0;

	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
				    sizeof(cmd), &cmd);
}
#define ACPI_WRDS_METHOD	"WRDS"
#define ACPI_WRDS_WIFI		(0x07)
#define ACPI_WRDS_TABLE_SIZE	10

struct iwl_mvm_sar_table {
	bool enabled;
	u8 values[ACPI_WRDS_TABLE_SIZE];
};
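
/*
 * Expected WRDS layout, inferred from the parser below: a package whose
 * first element is an integer revision (0), followed by one package per
 * domain, each holding the domain id, an enabled flag and
 * ACPI_WRDS_TABLE_SIZE power-limit values.
 */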
#ifdef CONFIG_ACPI
static int iwl_mvm_sar_get_wrds(struct iwl_mvm *mvm, union acpi_object *wrds,
				struct iwl_mvm_sar_table *sar_table)
{
	union acpi_object *data_pkg;
	u32 i;

	/* We need at least two packages, one for the revision and one
	 * for the data itself.  Also check that the revision is valid
	 * (i.e. it is an integer set to 0).
	 */
	if (wrds->type != ACPI_TYPE_PACKAGE ||
	    wrds->package.count < 2 ||
	    wrds->package.elements[0].type != ACPI_TYPE_INTEGER ||
	    wrds->package.elements[0].integer.value != 0) {
		IWL_DEBUG_RADIO(mvm, "Unsupported wrds structure\n");
		return -EINVAL;
	}

	/* loop through all the packages to find the one for WiFi */
	for (i = 1; i < wrds->package.count; i++) {
		union acpi_object *domain;

		data_pkg = &wrds->package.elements[i];

		/* Skip anything that is not a package with the right
		 * amount of elements (i.e. domain_type,
		 * enabled/disabled plus the sar table size).
		 */
		if (data_pkg->type != ACPI_TYPE_PACKAGE ||
		    data_pkg->package.count != ACPI_WRDS_TABLE_SIZE + 2)
			continue;

		domain = &data_pkg->package.elements[0];
		if (domain->type == ACPI_TYPE_INTEGER &&
		    domain->integer.value == ACPI_WRDS_WIFI)
			break;

		data_pkg = NULL;
	}

	if (!data_pkg)
		return -ENOENT;

	if (data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
		return -EINVAL;

	sar_table->enabled = !!(data_pkg->package.elements[1].integer.value);

	for (i = 0; i < ACPI_WRDS_TABLE_SIZE; i++) {
		union acpi_object *entry;

		entry = &data_pkg->package.elements[i + 2];
		if ((entry->type != ACPI_TYPE_INTEGER) ||
		    (entry->integer.value > U8_MAX))
			return -EINVAL;

		sar_table->values[i] = entry->integer.value;
	}

	return 0;
}
static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
				 struct iwl_mvm_sar_table *sar_table)
{
	acpi_handle root_handle;
	acpi_handle handle;
	struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_status status;
	int ret;

	root_handle = ACPI_HANDLE(mvm->dev);
	if (!root_handle) {
		IWL_DEBUG_RADIO(mvm,
				"Could not retrieve root port ACPI handle\n");
		return -ENOENT;
	}

	/* Get the method's handle */
	status = acpi_get_handle(root_handle, (acpi_string)ACPI_WRDS_METHOD,
				 &handle);
	if (ACPI_FAILURE(status)) {
		IWL_DEBUG_RADIO(mvm, "WRDS method not found\n");
		return -ENOENT;
	}

	/* Call WRDS with no arguments */
	status = acpi_evaluate_object(handle, NULL, NULL, &wrds);
	if (ACPI_FAILURE(status)) {
		IWL_DEBUG_RADIO(mvm, "WRDS invocation failed (0x%x)\n", status);
		return -ENOENT;
	}

	ret = iwl_mvm_sar_get_wrds(mvm, wrds.pointer, sar_table);
	kfree(wrds.pointer);

	return ret;
}
#else /* CONFIG_ACPI */
static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
				 struct iwl_mvm_sar_table *sar_table)
{
	return -ENOENT;
}
#endif /* CONFIG_ACPI */
static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
	struct iwl_mvm_sar_table sar_table;
	struct iwl_dev_tx_power_cmd cmd = {
		.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
	};
	int ret, i, j, idx;
	int len = sizeof(cmd);

	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
		len = sizeof(cmd.v3);

	ret = iwl_mvm_sar_get_table(mvm, &sar_table);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"SAR BIOS table invalid or unavailable. (%d)\n",
				ret);
		/* we don't fail if the table is not available */
		return 0;
	}

	if (!sar_table.enabled)
		return 0;

	IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");

	BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
		     ACPI_WRDS_TABLE_SIZE);

	for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
		IWL_DEBUG_RADIO(mvm, "  Chain[%d]:\n", i);
		for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
			idx = (i * IWL_NUM_SUB_BANDS) + j;
			cmd.v3.per_chain_restriction[i][j] =
				cpu_to_le16(sar_table.values[idx]);
			IWL_DEBUG_RADIO(mvm, "    Band[%d] = %d * .125dBm\n",
					j, sar_table.values[idx]);
		}
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
	if (ret)
		IWL_ERR(mvm, "failed to set per-chain TX power: %d\n", ret);

	return ret;
}
static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
{
	int ret;

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_run_unified_mvm_ucode(mvm, false);

	ret = iwl_run_init_mvm_ucode(mvm, false);

	if (iwlmvm_mod_params.init_dbg)
		return 0;

	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
		/* this can't happen */
		if (WARN_ON(ret > 0))
			ret = -ERFKILL;
		return ret;
	}

	/*
	 * Stop and start the transport without entering low power
	 * mode. This will save the state of other components on the
	 * device that are triggered by the INIT firmware (MFUART).
	 */
	_iwl_trans_stop_device(mvm->trans, false);
	ret = _iwl_trans_start_hw(mvm->trans, false);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret)
		return ret;

	return iwl_mvm_init_paging(mvm);
}
int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_channel *chan;
	struct cfg80211_chan_def chandef;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_rt_fw(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	iwl_mvm_get_shared_mem_conf(mvm);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	mvm->fw_dbg_conf = FW_DBG_INVALID;
	/* if we have a destination, assume EARLY START */
	if (mvm->fw->dbg_dest_tlv)
		mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
	iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_send_phy_db_data(mvm->phy_db);
		if (ret)
			goto error;

		ret = iwl_send_phy_cfg_cmd(mvm);
		if (ret)
			goto error;
	}

	/* Init RSS configuration */
	if (iwl_mvm_has_new_rx_api(mvm)) {
		ret = iwl_send_rss_cfg_cmd(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
				ret);
			goto error;
		}
	}

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	/* Enable DQA-mode if required */
	if (iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_send_dqa_cmd(mvm);
		if (ret)
			goto error;
	} else {
		IWL_DEBUG_FW(mvm, "Working in non-DQA mode\n");
	}

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	/* Add all the PHY contexts */
	chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
	for (i = 0; i < NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
					   &chandef, 1, 1);
		if (ret)
			goto error;
	}

#ifdef CONFIG_THERMAL
	if (iwl_mvm_is_tt_in_fw(mvm)) {
		/* in order to give the responsibility of ct-kill and
		 * TX backoff to FW we need to send empty temperature reporting
		 * cmd during init time
		 */
		iwl_mvm_send_temp_report_ths_cmd(mvm);
	} else {
		/* Initialize tx backoffs to the minimal possible */
		iwl_mvm_tt_tx_backoff(mvm, 0);
	}

	/* TODO: read the budget from BIOS / Platform NVM */
	if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) {
		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
					   mvm->cooling_dev.cur_state);
		if (ret)
			goto error;
	}
#else
	/* Initialize tx backoffs to the minimal possible */
	iwl_mvm_tt_tx_backoff(mvm, 0);
#endif

	WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

	if (iwl_mvm_is_csum_supported(mvm) &&
	    mvm->cfg->features & NETIF_F_RXCSUM)
		iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);

	/* allow FW/transport low power modes if not during restart */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

	ret = iwl_mvm_sar_init(mvm);
	if (ret)
		goto error;

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
 error:
	iwl_mvm_stop_device(mvm);
	return ret;
}
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
	if (ret) {
		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
		goto error;
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	return 0;
 error:
	iwl_mvm_stop_device(mvm);
	return ret;
}
void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
	u32 flags = le32_to_cpu(card_state_notif->flags);

	IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_KILL_CARD_DISABLED) ?
			  "Reached" : "Not reached");
}
void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

	IWL_DEBUG_INFO(mvm,
		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
		       le32_to_cpu(mfuart_notif->installed_ver),
		       le32_to_cpu(mfuart_notif->external_ver),
		       le32_to_cpu(mfuart_notif->status),
		       le32_to_cpu(mfuart_notif->duration));

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
		IWL_DEBUG_INFO(mvm,
			       "MFUART: image size: 0x%08x\n",
			       le32_to_cpu(mfuart_notif->image_size));
}