/*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * DragonFly work
 *
 * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
 *	 changes to remove per-device network interface (DragonFly has not
 *	 caught up to that yet on the WLAN side).
 *
 * Comprehensive list of adjustments for DragonFly not #ifdef'd:
 *	malloc -> kmalloc	(in particular, changing improper M_NOWAIT
 *				specifications to M_INTWAIT.  We still don't
 *				understand why FreeBSD uses M_NOWAIT for
 *				critical must-not-fail kmalloc()s).
 *	free -> kfree
 *	printf -> kprintf
 *	(bug fix) memset in iwm_reset_rx_ring.
 *	(debug)   added several kprintf()s on error
 *
 *	header file paths (DFly allows localized path specifications).
 *	minor header file differences.
 *
 * Comprehensive list of adjustments for DragonFly #ifdef'd:
 *	(safety)  added register read-back serialization in iwm_reset_rx_ring().
 *	packet counters
 *	msleep -> lksleep
 *	mtx -> lk  (mtx functions -> lockmgr functions)
 *	callout differences
 *	taskqueue differences
 *	MSI differences
 *	bus_setup_intr() differences
 *	minor PCI config register naming differences
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/endian.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#include <netproto/802_11/ieee80211_ratectl.h>
#include <netproto/802_11/ieee80211_radiotap.h>

#include "if_iwmreg.h"
#include "if_iwmvar.h"
#include "if_iwm_config.h"
#include "if_iwm_debug.h"
#include "if_iwm_notif_wait.h"
#include "if_iwm_util.h"
#include "if_iwm_binding.h"
#include "if_iwm_phy_db.h"
#include "if_iwm_mac_ctxt.h"
#include "if_iwm_phy_ctxt.h"
#include "if_iwm_time_event.h"
#include "if_iwm_power.h"
#include "if_iwm_scan.h"
#include "if_iwm_sf.h"
#include "if_iwm_sta.h"
#include "if_iwm_pcie_trans.h"
#include "if_iwm_led.h"
#include "if_iwm_fw.h"
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");
#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_)	((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_)	((_i_) >= IWM_RIDX_OFDM)

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

struct iwm_mvm_alive_data {
	int valid;
	uint32_t scd_base_addr;
};
static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
		    enum iwm_ucode_type,
		    const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *);
#if !defined(__DragonFly__)
static void	iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
#endif
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
		    int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_mvm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
		    uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
		    uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
		    const uint16_t *, const uint16_t *,
		    const uint16_t *, const uint16_t *,
		    const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
		    struct iwm_nvm_data *,
		    const uint16_t *,
		    const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
		    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
		    const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
		    const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
		    struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
		    const struct iwm_fw_desc *);
static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
		    bus_addr_t, uint32_t);
static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
		    const struct iwm_fw_sects *,
		    int, int *);
static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
		    const struct iwm_fw_sects *,
		    int, int *);
static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
		    const struct iwm_fw_sects *);
static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
		    const struct iwm_fw_sects *);
static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
		    enum iwm_ucode_type);
static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
		    struct iwm_rx_phy_info *);
static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
		    struct iwm_rx_packet *);
static int	iwm_get_noise(struct iwm_softc *,
		    const struct iwm_mvm_statistics_rx_non_phy *);
static void	iwm_mvm_handle_rx_statistics(struct iwm_softc *,
		    struct iwm_rx_packet *);
static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
		    uint32_t, boolean_t);
static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
		    struct iwm_rx_packet *,
		    struct iwm_node *);
static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
		    uint16_t);
#endif
static uint8_t	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
		    struct mbuf *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
		    struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
		    const struct ieee80211_bpf_params *);
static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static int	iwm_release(struct iwm_softc *, struct iwm_node *);
static struct ieee80211_node *
	iwm_node_alloc(struct ieee80211vap *,
		    const uint8_t[IEEE80211_ADDR_LEN]);
static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_media_change(struct ifnet *);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
	iwm_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int,
		    enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_xmit_queue_drain(struct iwm_softc *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

#if defined(__DragonFly__)
static int	iwm_msi_enable = 1;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
#endif

static int	iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

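/*
 * Store a firmware section for the given ucode image type.  The first
 * 32 bits of the section blob are the device load offset; the rest is
 * the section payload.
 */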
static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_desc *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->offset, data, sizeof(uint32_t));

	/* rest is data */
	fwone->data = data + sizeof(uint32_t);
	fwone->len = dlen - sizeof(uint32_t);

	fws->fw_count++;

	return 0;
}

#define IWM_DEFAULT_SCAN_CHANNELS 40

struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default "
		    "calibration.\n", ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

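/*
 * Each IWM_UCODE_TLV_API_CHANGES_SET TLV carries one 32-bit word of API
 * flags plus the index of that word within the overall API bitmap; the
 * set bits are recorded in capa->enabled_api.
 */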
static int
iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
    struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_api *ucode_api = (const void *)data;
	uint32_t api_index = le32toh(ucode_api->api_index);
	uint32_t api_flags = le32toh(ucode_api->api_flags);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
		device_printf(sc->sc_dev,
		    "api flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_api, i + 32 * api_index);
	}

	return 0;
}

static int
iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
    struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
	uint32_t api_index = le32toh(ucode_capa->api_index);
	uint32_t api_flags = le32toh(ucode_capa->api_capa);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
		device_printf(sc->sc_dev,
		    "capa flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_capa, i + 32 * api_index);
	}

	return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

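/*
 * Load the firmware image via firmware(9) and parse its TLV header:
 * verify the IWM_TLV_UCODE_MAGIC signature, then walk the TLV list,
 * storing ucode sections and capability/API bits as they are found.
 */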
static int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	const struct iwm_ucode_tlv *tlv;
	struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	uint32_t tlv_len;
	uint32_t usniffer_img;
	const uint8_t *tlv_data;
	uint32_t paging_mem_size;
	int num_of_cpus;
	int error = 0;
	size_t len;

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	fwp = firmware_get(sc->cfg->fw_name);
	if (fwp == NULL) {
		device_printf(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->cfg->fw_name, error);
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	capa->flags = 0;
	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->cfg->fw_name);
		error = EINVAL;
		goto out;
	}

	ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);
		tlv_data = tlv->data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}
		len -= roundup2(tlv_len, 4);
		data += sizeof(tlv) + roundup2(tlv_len, 4);

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->max_probe_length =
			    le32_to_cpup((const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (capa->max_probe_length >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
			if (num_of_cpus == 2) {
				fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
					TRUE;
				fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
					TRUE;
				fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
					TRUE;
			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%u) != sizeof(iwm_tlv_calib_data) (%zu)\n",
				    __func__, tlv_len,
				    sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				goto parse_out;
			}
			sc->sc_fw.phy_config =
			    le32_to_cpup((const uint32_t *)tlv_data);
			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_TX_CHAIN) >>
						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_RX_CHAIN) >>
						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			if (tlv_len != sizeof(struct iwm_ucode_api)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case 48: /* undocumented TLV */
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);

			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
			    "%s: Paging: paging enabled (size = %u bytes)\n",
			    __func__, paging_mem_size);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				device_printf(sc->sc_dev,
				    "%s: Paging: driver supports up to %u bytes for paging image\n",
				    __func__, IWM_MAX_PAGING_IMAGE_SIZE);
				error = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				device_printf(sc->sc_dev,
				    "%s: Paging: image isn't multiple %u\n",
				    __func__, IWM_FW_PAGING_SIZE);
				error = EINVAL;
				goto out;
			}

			sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
			sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			capa->n_scan_channels =
			    le32_to_cpup((const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}
	}

	KASSERT(error == 0, ("unhandled error"));

 parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

 out:
	if (error) {
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	}

	return error;
}

/*
 * DMA resource routines
 */

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}

/* tx scheduler rings.  not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

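/*
 * Allocate the RX ring: the 256-byte-aligned descriptor array and the
 * 16-byte-aligned status area, plus one DMA map per RX slot and a
 * spare map used by iwm_rx_addbuf() when swapping buffers.
 */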
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

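/*
 * Allocate a TX ring: 256-byte-aligned descriptors and, for rings up to
 * and including the command queue, the per-slot command buffers.  The
 * command queue maps one large segment per slot; data queues map
 * scattered mbuf chains.
 */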
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   maxsize, nsegments, maxsize,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(1000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwm_disable_interrupts(sc);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_mvm_get_phy_config(sc);

	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}

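/*
 * Activate a TX queue in the scheduler.  The command queue is
 * configured directly through periphery registers; other queues are
 * configured with an IWM_SCD_QUEUE_CFG command sent to the firmware.
 */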
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* unactivate before configuration */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}

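/*
 * Post-"alive" bring-up: reset the ICT table, check the scheduler base
 * address reported by the firmware against the periphery register,
 * clear the scheduler context memory, and enable the command queue and
 * the FH TX DMA channels.
 */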
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	iwm_nic_unlock(sc);

	/* reset context data, TX status and translation data */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	return error;
}
/*
 * NVM read access and content parsing.  We do not support
 * external NVM or writing NVM.
 * iwlwifi/mvm/nvm.c
 */

/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)

#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* load nvm chunk response */
enum {
	IWM_READ_NVM_CHUNK_SUCCEED = 0,
	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};

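/*
 * Read one chunk of an NVM section via an IWM_NVM_ACCESS_CMD host
 * command and copy the returned bytes into the caller's buffer at the
 * requested offset; *len receives the number of bytes actually read.
 */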
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
    uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret) {
		device_printf(sc->sc_dev,
		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
		return ret;
	}

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * meaning of NOT_VALID_ADDRESS:
			 * the driver tried to read a chunk from an address
			 * that is a multiple of 2K and got an error since
			 * the addr is empty.
			 * meaning of (offset != 0): the driver already
			 * read valid data from another chunk so this case
			 * is not an error.
			 */
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
			    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
			    offset);
			*len = 0;
			ret = 0;
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
			    "NVM access command failed with status %d\n", ret);
			ret = EIO;
		}
		goto exit;
	}

	if (offset_read != offset) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with invalid offset %d\n",
		    offset_read);
		ret = EINVAL;
		goto exit;
	}

	if (bytes_read > length) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with too much data "
		    "(%d bytes requested, %d bytes received)\n",
		    length, bytes_read);
		ret = EINVAL;
		goto exit;
	}

	/* Write data to NVM */
	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

 exit:
	iwm_free_resp(sc, &cmd);
	return ret;
}
1817 * Reads an NVM section completely.
1818 * NICs prior to 7000 family don't have a real NVM, but just read
1819 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1820 * by uCode, we need to manually check in this case that we don't
1821 * overflow and try to read more than the EEPROM size.
1822 * For 7000 family NICs, we supply the maximal size we can read, and
1823 * the uCode fills the response with as much data as we can,
1824 * without overflowing, so no check is needed.
1826 static int
1827 iwm_nvm_read_section(struct iwm_softc *sc,
1828 uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1830 uint16_t seglen, length, offset = 0;
1831 int ret;
1833 /* Set nvm section read length */
1834 length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1836 seglen = length;
1838 /* Read the NVM until exhausted (reading less than requested) */
1839 while (seglen == length) {
1840 /* Check no memory assumptions fail and cause an overflow */
1841 if ((size_read + offset + length) >
1842 sc->cfg->eeprom_size) {
1843 device_printf(sc->sc_dev,
1844 "EEPROM size is too small for NVM\n");
1845 return ENOBUFS;
1848 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1849 if (ret) {
1850 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1851 "Cannot read NVM from section %d offset %d, length %d\n",
1852 section, offset, length);
1853 return ret;
1855 offset += seglen;
1858 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1859 "NVM section %d read completed\n", section);
1860 *len = offset;
1861 return 0;
1862 }
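/*
 * Illustrative sketch (not part of the driver): how the chunked reads
 * above compose into a full section read. The 2KB chunk size comes from
 * IWM_NVM_DEFAULT_CHUNK_SIZE; the buffer size here is hypothetical.
 */
#if 0
uint8_t buf[8192];
uint16_t seclen = 0;

/* iwm_nvm_read_section() pulls 2KB chunks until the device returns a
 * short chunk, then reports the total number of bytes in seclen. */
if (iwm_nvm_read_section(sc, IWM_NVM_SECTION_TYPE_SW, buf, &seclen, 0) == 0)
	device_printf(sc->sc_dev, "SW section: %u bytes\n", seclen);
#endif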
1864 /* NVM offsets (in words) definitions */
1865 enum iwm_nvm_offsets {
1866 /* NVM HW-Section offset (in words) definitions */
1867 IWM_HW_ADDR = 0x15,
1869 /* NVM SW-Section offset (in words) definitions */
1870 IWM_NVM_SW_SECTION = 0x1C0,
1871 IWM_NVM_VERSION = 0,
1872 IWM_RADIO_CFG = 1,
1873 IWM_SKU = 2,
1874 IWM_N_HW_ADDRS = 3,
1875 IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1877 /* NVM calibration section offset (in words) definitions */
1878 IWM_NVM_CALIB_SECTION = 0x2B8,
1879 IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1880 };
1882 enum iwm_8000_nvm_offsets {
1883 /* NVM HW-Section offset (in words) definitions */
1884 IWM_HW_ADDR0_WFPM_8000 = 0x12,
1885 IWM_HW_ADDR1_WFPM_8000 = 0x16,
1886 IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1887 IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1888 IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1890 /* NVM SW-Section offset (in words) definitions */
1891 IWM_NVM_SW_SECTION_8000 = 0x1C0,
1892 IWM_NVM_VERSION_8000 = 0,
1893 IWM_RADIO_CFG_8000 = 0,
1894 IWM_SKU_8000 = 2,
1895 IWM_N_HW_ADDRS_8000 = 3,
1897 /* NVM REGULATORY -Section offset (in words) definitions */
1898 IWM_NVM_CHANNELS_8000 = 0,
1899 IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1900 IWM_NVM_LAR_OFFSET_8000 = 0x507,
1901 IWM_NVM_LAR_ENABLED_8000 = 0x7,
1903 /* NVM calibration section offset (in words) definitions */
1904 IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1905 IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1906 };
1908 /* SKU Capabilities (actual values from NVM definition) */
1909 enum nvm_sku_bits {
1910 IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),
1911 IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1),
1912 IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2),
1913 IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3),
1914 };
1916 /* radio config bits (actual values from NVM definition) */
1917 #define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
1918 #define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
1919 #define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
1920 #define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
1921 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
1922 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1924 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x) (x & 0xF)
1925 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x) ((x >> 4) & 0xF)
1926 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x) ((x >> 8) & 0xF)
1927 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x) ((x >> 12) & 0xFFF)
1928 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x) ((x >> 24) & 0xF)
1929 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x) ((x >> 28) & 0xF)
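/*
 * Worked example (illustrative, not from the NVM spec): decoding a
 * 7000-family radio_cfg word with the masks above, for radio_cfg = 0x1234:
 *   DASH   = 0x1234 & 0x3         = 0   (bits 0-1)
 *   STEP   = (0x1234 >> 2) & 0x3  = 1   (bits 2-3)
 *   TYPE   = (0x1234 >> 4) & 0x3  = 3   (bits 4-5)
 *   PNUM   = (0x1234 >> 6) & 0x3  = 0   (bits 6-7)
 *   TX_ANT = (0x1234 >> 8) & 0xF  = 0x2 (bits 8-11)
 *   RX_ANT = (0x1234 >> 12) & 0xF = 0x1 (bits 12-15)
 * The 8000-family macros follow the same pattern with wider fields.
 */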
1931 /*
1932 * enum iwm_nvm_channel_flags - channel flags in NVM
1933 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1934 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1935 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1936 * @IWM_NVM_CHANNEL_RADAR: radar detection required
1937 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1938 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1939 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1940 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1941 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1942 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1943 */
1944 enum iwm_nvm_channel_flags {
1945 IWM_NVM_CHANNEL_VALID = (1 << 0),
1946 IWM_NVM_CHANNEL_IBSS = (1 << 1),
1947 IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1948 IWM_NVM_CHANNEL_RADAR = (1 << 4),
1949 IWM_NVM_CHANNEL_DFS = (1 << 7),
1950 IWM_NVM_CHANNEL_WIDE = (1 << 8),
1951 IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1952 IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1953 IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1954 };
1956 /*
1957 * Translate EEPROM flags to net80211.
1958 */
1959 static uint32_t
1960 iwm_eeprom_channel_flags(uint16_t ch_flags)
1961 {
1962 uint32_t nflags;
1964 nflags = 0;
1965 if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1966 nflags |= IEEE80211_CHAN_PASSIVE;
1967 if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1968 nflags |= IEEE80211_CHAN_NOADHOC;
1969 if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1970 nflags |= IEEE80211_CHAN_DFS;
1971 /* Just in case. */
1972 nflags |= IEEE80211_CHAN_NOADHOC;
1973 }
1975 return (nflags);
1976 }
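/*
 * Worked example (illustrative): a channel word with IWM_NVM_CHANNEL_VALID
 * and IWM_NVM_CHANNEL_RADAR set, but neither IWM_NVM_CHANNEL_ACTIVE nor
 * IWM_NVM_CHANNEL_IBSS, maps to
 * IEEE80211_CHAN_PASSIVE | IEEE80211_CHAN_NOADHOC | IEEE80211_CHAN_DFS:
 * passive scan only, no ad-hoc use, radar detection required.
 */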
1978 static void
1979 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1980 int maxchans, int *nchans, int ch_idx, size_t ch_num,
1981 const uint8_t bands[])
1982 {
1983 const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1984 uint32_t nflags;
1985 uint16_t ch_flags;
1986 uint8_t ieee;
1987 int error;
1989 for (; ch_idx < ch_num; ch_idx++) {
1990 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1991 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1992 ieee = iwm_nvm_channels[ch_idx];
1993 else
1994 ieee = iwm_nvm_channels_8000[ch_idx];
1996 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1997 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1998 "Ch. %d Flags %x [%sGHz] - No traffic\n",
1999 ieee, ch_flags,
2000 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2001 "5.2" : "2.4");
2002 continue;
2003 }
2005 nflags = iwm_eeprom_channel_flags(ch_flags);
2006 error = ieee80211_add_channel(chans, maxchans, nchans,
2007 ieee, 0, 0, nflags, bands);
2008 if (error != 0)
2009 break;
2011 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2012 "Ch. %d Flags %x [%sGHz] - Added\n",
2013 ieee, ch_flags,
2014 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2015 "5.2" : "2.4");
2016 }
2017 }
2019 static void
2020 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2021 struct ieee80211_channel chans[])
2022 {
2023 struct iwm_softc *sc = ic->ic_softc;
2024 struct iwm_nvm_data *data = sc->nvm_data;
2025 uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
2026 size_t ch_num;
2028 memset(bands, 0, sizeof(bands));
2029 /* 1-13: 11b/g channels. */
2030 setbit(bands, IEEE80211_MODE_11B);
2031 setbit(bands, IEEE80211_MODE_11G);
2032 iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2033 IWM_NUM_2GHZ_CHANNELS - 1, bands);
2035 /* 14: 11b channel only. */
2036 clrbit(bands, IEEE80211_MODE_11G);
2037 iwm_add_channel_band(sc, chans, maxchans, nchans,
2038 IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2040 if (data->sku_cap_band_52GHz_enable) {
2041 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2042 ch_num = nitems(iwm_nvm_channels);
2043 else
2044 ch_num = nitems(iwm_nvm_channels_8000);
2045 memset(bands, 0, sizeof(bands));
2046 setbit(bands, IEEE80211_MODE_11A);
2047 iwm_add_channel_band(sc, chans, maxchans, nchans,
2048 IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2049 }
2050 }
2052 static void
2053 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2054 const uint16_t *mac_override, const uint16_t *nvm_hw)
2055 {
2056 const uint8_t *hw_addr;
2058 if (mac_override) {
2059 static const uint8_t reserved_mac[] = {
2060 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2061 };
2063 hw_addr = (const uint8_t *)(mac_override +
2064 IWM_MAC_ADDRESS_OVERRIDE_8000);
2066 /*
2067 * Store the MAC address from the MAO section.
2068 * No byte swapping is required in the MAO section.
2069 */
2070 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2072 /*
2073 * Force the use of the OTP MAC address in case of a reserved MAC
2074 * address in the NVM, or if the given address is invalid.
2075 */
2076 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2077 !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2078 iwm_is_valid_ether_addr(data->hw_addr) &&
2079 !IEEE80211_IS_MULTICAST(data->hw_addr))
2080 return;
2082 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2083 "%s: mac address from nvm override section invalid\n",
2084 __func__);
2085 }
2087 if (nvm_hw) {
2088 /* read the mac address from WFMP registers */
2089 uint32_t mac_addr0 =
2090 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2091 uint32_t mac_addr1 =
2092 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2094 hw_addr = (const uint8_t *)&mac_addr0;
2095 data->hw_addr[0] = hw_addr[3];
2096 data->hw_addr[1] = hw_addr[2];
2097 data->hw_addr[2] = hw_addr[1];
2098 data->hw_addr[3] = hw_addr[0];
2100 hw_addr = (const uint8_t *)&mac_addr1;
2101 data->hw_addr[4] = hw_addr[1];
2102 data->hw_addr[5] = hw_addr[0];
2104 return;
2105 }
2107 device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2108 memset(data->hw_addr, 0, sizeof(data->hw_addr));
2109 }
2111 static int
2112 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2113 const uint16_t *phy_sku)
2114 {
2115 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2116 return le16_to_cpup(nvm_sw + IWM_SKU);
2118 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2119 }
2121 static int
2122 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2123 {
2124 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2125 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2126 else
2127 return le32_to_cpup((const uint32_t *)(nvm_sw +
2128 IWM_NVM_VERSION_8000));
2129 }
2131 static int
2132 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2133 const uint16_t *phy_sku)
2134 {
2135 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2136 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2138 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2139 }
2141 static int
2142 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2143 {
2144 int n_hw_addr;
2146 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2147 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2149 n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2151 return n_hw_addr & IWM_N_HW_ADDR_MASK;
2152 }
2154 static void
2155 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2156 uint32_t radio_cfg)
2157 {
2158 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2159 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2160 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2161 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2162 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2163 return;
2164 }
2166 /* set the radio configuration for family 8000 */
2167 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2168 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2169 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2170 data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2171 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2172 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2173 }
2175 static int
2176 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2177 const uint16_t *nvm_hw, const uint16_t *mac_override)
2178 {
2179 #ifdef notyet /* for FAMILY 9000 */
2180 if (cfg->mac_addr_from_csr) {
2181 iwm_set_hw_address_from_csr(sc, data);
2182 } else
2183 #endif
2184 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2185 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2187 /* The byte order is little-endian 16 bit: the address bytes are stored in order 2,1,4,3,6,5 */
2188 data->hw_addr[0] = hw_addr[1];
2189 data->hw_addr[1] = hw_addr[0];
2190 data->hw_addr[2] = hw_addr[3];
2191 data->hw_addr[3] = hw_addr[2];
2192 data->hw_addr[4] = hw_addr[5];
2193 data->hw_addr[5] = hw_addr[4];
2194 } else {
2195 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2196 }
2198 if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2199 device_printf(sc->sc_dev, "no valid mac address was found\n");
2200 return EINVAL;
2201 }
2203 return 0;
2204 }
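/*
 * Illustrative example of the "2,1,4,3,6,5" order above: if the NVM stores
 * the raw bytes 11 00 33 22 55 44 at IWM_HW_ADDR, the swaps yield the MAC
 * address 00:11:22:33:44:55.
 */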
2206 static struct iwm_nvm_data *
2207 iwm_parse_nvm_data(struct iwm_softc *sc,
2208 const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2209 const uint16_t *nvm_calib, const uint16_t *mac_override,
2210 const uint16_t *phy_sku, const uint16_t *regulatory)
2211 {
2212 struct iwm_nvm_data *data;
2213 uint32_t sku, radio_cfg;
2214 uint16_t lar_config;
2216 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2217 data = kmalloc(sizeof(*data) +
2218 IWM_NUM_CHANNELS * sizeof(uint16_t),
2219 M_DEVBUF, M_WAITOK | M_ZERO);
2220 } else {
2221 data = kmalloc(sizeof(*data) +
2222 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2223 M_DEVBUF, M_WAITOK | M_ZERO);
2224 }
2225 if (!data)
2226 return NULL;
2228 data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2230 radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2231 iwm_set_radio_cfg(sc, data, radio_cfg);
2233 sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2234 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2235 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2236 data->sku_cap_11n_enable = 0;
2238 data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2240 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2241 uint16_t lar_offset = data->nvm_version < 0xE39 ?
2242 IWM_NVM_LAR_OFFSET_8000_OLD :
2243 IWM_NVM_LAR_OFFSET_8000;
2245 lar_config = le16_to_cpup(regulatory + lar_offset);
2246 data->lar_enabled = !!(lar_config &
2247 IWM_NVM_LAR_ENABLED_8000);
2248 }
2250 /* If no valid mac address was found - bail out */
2251 if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2252 kfree(data, M_DEVBUF);
2253 return NULL;
2254 }
2256 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2257 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2258 IWM_NUM_CHANNELS * sizeof(uint16_t));
2259 } else {
2260 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2261 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2262 }
2264 return data;
2265 }
2267 static void
2268 iwm_free_nvm_data(struct iwm_nvm_data *data)
2269 {
2270 if (data != NULL)
2271 kfree(data, M_DEVBUF);
2272 }
2274 static struct iwm_nvm_data *
2275 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2276 {
2277 const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2279 /* Checking for required sections */
2280 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2281 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2282 !sections[sc->cfg->nvm_hw_section_num].data) {
2283 device_printf(sc->sc_dev,
2284 "Can't parse empty OTP/NVM sections\n");
2285 return NULL;
2286 }
2287 } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2288 /* SW and REGULATORY sections are mandatory */
2289 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2290 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2291 device_printf(sc->sc_dev,
2292 "Can't parse empty OTP/NVM sections\n");
2293 return NULL;
2294 }
2295 /* MAC_OVERRIDE or at least HW section must exist */
2296 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2297 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2298 device_printf(sc->sc_dev,
2299 "Can't parse mac_address, empty sections\n");
2300 return NULL;
2301 }
2303 /* PHY_SKU section is mandatory in B0 */
2304 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2305 device_printf(sc->sc_dev,
2306 "Can't parse phy_sku in B0, empty sections\n");
2307 return NULL;
2308 }
2309 } else {
2310 panic("unknown device family %d\n", sc->cfg->device_family);
2311 }
2313 hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2314 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2315 calib = (const uint16_t *)
2316 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2317 regulatory = (const uint16_t *)
2318 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2319 mac_override = (const uint16_t *)
2320 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2321 phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2323 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2324 phy_sku, regulatory);
2325 }
2327 static int
2328 iwm_nvm_init(struct iwm_softc *sc)
2329 {
2330 struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2331 int i, ret, section;
2332 uint32_t size_read = 0;
2333 uint8_t *nvm_buffer, *temp;
2334 uint16_t len;
2336 memset(nvm_sections, 0, sizeof(nvm_sections));
2338 if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2339 return EINVAL;
2341 /* load NVM values from nic */
2342 /* Read From FW NVM */
2343 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2345 nvm_buffer = kmalloc(sc->cfg->eeprom_size, M_DEVBUF,
2346 M_INTWAIT | M_ZERO);
2347 if (!nvm_buffer)
2348 return ENOMEM;
2349 for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2350 /* we override the constness for initial read */
2351 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2352 &len, size_read);
2353 if (ret)
2354 continue;
2355 size_read += len;
2356 temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
2357 if (!temp) {
2358 ret = ENOMEM;
2359 break;
2360 }
2361 memcpy(temp, nvm_buffer, len);
2363 nvm_sections[section].data = temp;
2364 nvm_sections[section].length = len;
2365 }
2366 if (!size_read)
2367 device_printf(sc->sc_dev, "OTP is blank\n");
2368 kfree(nvm_buffer, M_DEVBUF);
2370 sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2371 if (!sc->nvm_data)
2372 return EINVAL;
2373 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2374 "nvm version = %x\n", sc->nvm_data->nvm_version);
2376 for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2377 if (nvm_sections[i].data != NULL)
2378 kfree(nvm_sections[i].data, M_DEVBUF);
2379 }
2381 return 0;
2382 }
2384 static int
2385 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2386 const struct iwm_fw_desc *section)
2387 {
2388 struct iwm_dma_info *dma = &sc->fw_dma;
2389 uint8_t *v_addr;
2390 bus_addr_t p_addr;
2391 uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2392 int ret = 0;
2394 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2395 "%s: [%d] uCode section being loaded...\n",
2396 __func__, section_num);
2398 v_addr = dma->vaddr;
2399 p_addr = dma->paddr;
2401 for (offset = 0; offset < section->len; offset += chunk_sz) {
2402 uint32_t copy_size, dst_addr;
2403 int extended_addr = FALSE;
2405 copy_size = MIN(chunk_sz, section->len - offset);
2406 dst_addr = section->offset + offset;
2408 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2409 dst_addr <= IWM_FW_MEM_EXTENDED_END)
2410 extended_addr = TRUE;
2412 if (extended_addr)
2413 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2414 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2416 memcpy(v_addr, (const uint8_t *)section->data + offset,
2417 copy_size);
2418 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2419 ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2420 copy_size);
2422 if (extended_addr)
2423 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2424 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2426 if (ret) {
2427 device_printf(sc->sc_dev,
2428 "%s: Could not load the [%d] uCode section\n",
2429 __func__, section_num);
2430 break;
2431 }
2432 }
2434 return ret;
2435 }
2437 /*
2438 * ucode
2439 */
2440 static int
2441 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2442 bus_addr_t phy_addr, uint32_t byte_cnt)
2443 {
2444 int ret;
2446 sc->sc_fw_chunk_done = 0;
2448 if (!iwm_nic_lock(sc))
2449 return EBUSY;
2451 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2452 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2454 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2455 dst_addr);
2457 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2458 phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2460 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2461 (iwm_get_dma_hi_addr(phy_addr)
2462 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2464 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2465 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2466 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2467 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2469 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2470 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2471 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2472 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2474 iwm_nic_unlock(sc);
2476 /* wait up to 5s for this segment to load */
2477 ret = 0;
2478 while (!sc->sc_fw_chunk_done) {
2479 #if defined(__DragonFly__)
2480 ret = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", 5 * hz);
2481 #else
2482 ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", 5 * hz);
2483 #endif
2484 if (ret)
2485 break;
2486 }
2488 if (ret != 0) {
2489 device_printf(sc->sc_dev,
2490 "fw chunk addr 0x%x len %d failed to load\n",
2491 dst_addr, byte_cnt);
2492 return ETIMEDOUT;
2493 }
2495 return 0;
2496 }
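/*
 * A minimal sketch (an assumption based on the handshake above, not a
 * quote of the interrupt code): the FH_TX interrupt path is expected to
 * complete the chunk like this, waking the lksleep()/msleep() above.
 */
#if 0
/* in the interrupt handler, once the DMA channel raises FH_TX: */
sc->sc_fw_chunk_done = 1;
wakeup(&sc->sc_fw);
#endif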
2498 static int
2499 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2500 const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2501 {
2502 int shift_param;
2503 int i, ret = 0, sec_num = 0x1;
2504 uint32_t val, last_read_idx = 0;
2506 if (cpu == 1) {
2507 shift_param = 0;
2508 *first_ucode_section = 0;
2509 } else {
2510 shift_param = 16;
2511 (*first_ucode_section)++;
2512 }
2514 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2515 last_read_idx = i;
2517 /*
2518 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates
2519 * CPU1 sections from CPU2 sections.
2520 * PAGING_SEPARATOR_SECTION delimiter - separates
2521 * CPU2 non-paged sections from CPU2 paging sections.
2522 */
2523 if (!image->fw_sect[i].data ||
2524 image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2525 image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2526 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2527 "Break since Data not valid or Empty section, sec = %d\n",
2528 i);
2529 break;
2530 }
2531 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2532 if (ret)
2533 return ret;
2535 /* Notify the ucode of the loaded section number and status */
2536 if (iwm_nic_lock(sc)) {
2537 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2538 val = val | (sec_num << shift_param);
2539 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2540 sec_num = (sec_num << 1) | 0x1;
2541 iwm_nic_unlock(sc);
2542 }
2543 }
2545 *first_ucode_section = last_read_idx;
2547 iwm_enable_interrupts(sc);
2549 if (iwm_nic_lock(sc)) {
2550 if (cpu == 1)
2551 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2552 else
2553 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2554 iwm_nic_unlock(sc);
2555 }
2557 return 0;
2558 }
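/*
 * Worked example (illustrative, assuming the load-status register starts
 * at zero): for CPU1 (shift_param = 0), the cumulative sec_num handshake
 * above writes 0x1, then 0x3, then 0x7 to IWM_FH_UCODE_LOAD_STATUS as
 * sections 1, 2 and 3 are loaded; the final 0xFFFF (0xFFFFFFFF for CPU2)
 * tells the uCode that every section for that CPU is in place.
 */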
2560 static int
2561 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2562 const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2563 {
2564 int shift_param;
2565 int i, ret = 0;
2566 uint32_t last_read_idx = 0;
2568 if (cpu == 1) {
2569 shift_param = 0;
2570 *first_ucode_section = 0;
2571 } else {
2572 shift_param = 16;
2573 (*first_ucode_section)++;
2574 }
2576 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2577 last_read_idx = i;
2579 /*
2580 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates
2581 * CPU1 sections from CPU2 sections.
2582 * PAGING_SEPARATOR_SECTION delimiter - separates
2583 * CPU2 non-paged sections from CPU2 paging sections.
2584 */
2585 if (!image->fw_sect[i].data ||
2586 image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2587 image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2588 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2589 "Break since Data not valid or Empty section, sec = %d\n",
2590 i);
2591 break;
2592 }
2594 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2595 if (ret)
2596 return ret;
2597 }
2599 *first_ucode_section = last_read_idx;
2601 return 0;
2602 }
2605 static int
2606 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2607 const struct iwm_fw_sects *image)
2608 {
2609 int ret = 0;
2610 int first_ucode_section;
2612 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2613 image->is_dual_cpus ? "Dual" : "Single");
2615 /* load to FW the binary non secured sections of CPU1 */
2616 ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2617 if (ret)
2618 return ret;
2620 if (image->is_dual_cpus) {
2621 /* set CPU2 header address */
2622 if (iwm_nic_lock(sc)) {
2623 iwm_write_prph(sc,
2624 IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2625 IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2626 iwm_nic_unlock(sc);
2627 }
2629 /* load to FW the binary sections of CPU2 */
2630 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2631 &first_ucode_section);
2632 if (ret)
2633 return ret;
2634 }
2636 iwm_enable_interrupts(sc);
2638 /* release CPU reset */
2639 IWM_WRITE(sc, IWM_CSR_RESET, 0);
2641 return 0;
2642 }
2644 static int
2645 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2646 const struct iwm_fw_sects *image)
2647 {
2648 int ret = 0;
2649 int first_ucode_section;
2651 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2652 image->is_dual_cpus ? "Dual" : "Single");
2654 /* configure the ucode to be ready to get the secured image */
2655 /* release CPU reset */
2656 if (iwm_nic_lock(sc)) {
2657 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2658 IWM_RELEASE_CPU_RESET_BIT);
2659 iwm_nic_unlock(sc);
2660 }
2662 /* load to FW the binary Secured sections of CPU1 */
2663 ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2664 &first_ucode_section);
2665 if (ret)
2666 return ret;
2668 /* load to FW the binary sections of CPU2 */
2669 return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2670 &first_ucode_section);
2671 }
2673 /* XXX Get rid of this definition */
2674 static inline void
2675 iwm_enable_fw_load_int(struct iwm_softc *sc)
2676 {
2677 IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2678 sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2679 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2680 }
2682 /* XXX Add proper rfkill support code */
2683 static int
2684 iwm_start_fw(struct iwm_softc *sc,
2685 const struct iwm_fw_sects *fw)
2686 {
2687 int ret;
2689 /* This may fail if AMT took ownership of the device */
2690 if (iwm_prepare_card_hw(sc)) {
2691 device_printf(sc->sc_dev,
2692 "%s: Exit HW not ready\n", __func__);
2693 ret = EIO;
2694 goto out;
2695 }
2697 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2699 iwm_disable_interrupts(sc);
2701 /* make sure rfkill handshake bits are cleared */
2702 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2703 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2704 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2706 /* clear (again), then enable host interrupts */
2707 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2709 ret = iwm_nic_init(sc);
2710 if (ret) {
2711 device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2712 goto out;
2713 }
2715 /*
2716 * Now we load the firmware and don't want to be interrupted, even
2717 * by the RF-Kill interrupt (hence mask all interrupts besides the
2718 * FH_TX interrupt, which is needed to load the firmware). If the
2719 * RF-Kill switch is toggled, we will find out after having loaded
2720 * the firmware and return the proper value to the caller.
2721 */
2722 iwm_enable_fw_load_int(sc);
2724 /* really make sure rfkill handshake bits are cleared */
2725 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2726 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2728 /* Load the given image to the HW */
2729 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2730 ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2731 else
2732 ret = iwm_pcie_load_given_ucode(sc, fw);
2734 /* XXX re-check RF-Kill state */
2736 out:
2737 return ret;
2738 }
2740 static int
2741 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2742 {
2743 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2744 .valid = htole32(valid_tx_ant),
2745 };
2747 return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2748 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2749 }
2751 static int
2752 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2753 {
2754 struct iwm_phy_cfg_cmd phy_cfg_cmd;
2755 enum iwm_ucode_type ucode_type = sc->cur_ucode;
2757 /* Set parameters */
2758 phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2759 phy_cfg_cmd.calib_control.event_trigger =
2760 sc->sc_default_calib[ucode_type].event_trigger;
2761 phy_cfg_cmd.calib_control.flow_trigger =
2762 sc->sc_default_calib[ucode_type].flow_trigger;
2764 IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2765 "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2766 return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2767 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2768 }
2770 static int
2771 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2772 {
2773 struct iwm_mvm_alive_data *alive_data = data;
2774 struct iwm_mvm_alive_resp_ver1 *palive1;
2775 struct iwm_mvm_alive_resp_ver2 *palive2;
2776 struct iwm_mvm_alive_resp *palive;
2778 if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
2779 palive1 = (void *)pkt->data;
2781 sc->support_umac_log = FALSE;
2782 sc->error_event_table =
2783 le32toh(palive1->error_event_table_ptr);
2784 sc->log_event_table =
2785 le32toh(palive1->log_event_table_ptr);
2786 alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);
2788 alive_data->valid = le16toh(palive1->status) ==
2789 IWM_ALIVE_STATUS_OK;
2790 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2791 "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2792 le16toh(palive1->status), palive1->ver_type,
2793 palive1->ver_subtype, palive1->flags);
2794 } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
2795 palive2 = (void *)pkt->data;
2796 sc->error_event_table =
2797 le32toh(palive2->error_event_table_ptr);
2798 sc->log_event_table =
2799 le32toh(palive2->log_event_table_ptr);
2800 alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
2801 sc->umac_error_event_table =
2802 le32toh(palive2->error_info_addr);
2804 alive_data->valid = le16toh(palive2->status) ==
2805 IWM_ALIVE_STATUS_OK;
2806 if (sc->umac_error_event_table)
2807 sc->support_umac_log = TRUE;
2809 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2810 "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2811 le16toh(palive2->status), palive2->ver_type,
2812 palive2->ver_subtype, palive2->flags);
2814 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2815 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2816 palive2->umac_major, palive2->umac_minor);
2817 } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2818 palive = (void *)pkt->data;
2820 sc->error_event_table =
2821 le32toh(palive->error_event_table_ptr);
2822 sc->log_event_table =
2823 le32toh(palive->log_event_table_ptr);
2824 alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
2825 sc->umac_error_event_table =
2826 le32toh(palive->error_info_addr);
2828 alive_data->valid = le16toh(palive->status) ==
2829 IWM_ALIVE_STATUS_OK;
2830 if (sc->umac_error_event_table)
2831 sc->support_umac_log = TRUE;
2833 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2834 "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2835 le16toh(palive->status), palive->ver_type,
2836 palive->ver_subtype, palive->flags);
2838 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2839 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2840 le32toh(palive->umac_major),
2841 le32toh(palive->umac_minor));
2842 }
2844 return TRUE;
2845 }
2847 static int
2848 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2849 struct iwm_rx_packet *pkt, void *data)
2850 {
2851 struct iwm_phy_db *phy_db = data;
2853 if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2854 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2855 device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2856 __func__, pkt->hdr.code);
2857 }
2858 return TRUE;
2859 }
2861 if (iwm_phy_db_set_section(phy_db, pkt)) {
2862 device_printf(sc->sc_dev,
2863 "%s: iwm_phy_db_set_section failed\n", __func__);
2864 }
2866 return FALSE;
2867 }
2869 static int
2870 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2871 enum iwm_ucode_type ucode_type)
2872 {
2873 struct iwm_notification_wait alive_wait;
2874 struct iwm_mvm_alive_data alive_data;
2875 const struct iwm_fw_sects *fw;
2876 enum iwm_ucode_type old_type = sc->cur_ucode;
2877 int error;
2878 static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
2880 fw = &sc->sc_fw.fw_sects[ucode_type];
2881 sc->cur_ucode = ucode_type;
2882 sc->ucode_loaded = FALSE;
2884 memset(&alive_data, 0, sizeof(alive_data));
2885 iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2886 alive_cmd, NELEM(alive_cmd),
2887 iwm_alive_fn, &alive_data);
2889 error = iwm_start_fw(sc, fw);
2890 if (error) {
2891 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2892 sc->cur_ucode = old_type;
2893 iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2894 return error;
2895 }
2897 /*
2898 * Some things may run in the background now, but we
2899 * just wait for the ALIVE notification here.
2900 */
2901 IWM_UNLOCK(sc);
2902 error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2903 IWM_MVM_UCODE_ALIVE_TIMEOUT);
2904 IWM_LOCK(sc);
2905 if (error) {
2906 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2907 uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2908 if (iwm_nic_lock(sc)) {
2909 a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2910 b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2911 iwm_nic_unlock(sc);
2912 }
2913 device_printf(sc->sc_dev,
2914 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2915 a, b);
2916 }
2917 sc->cur_ucode = old_type;
2918 return error;
2919 }
2921 if (!alive_data.valid) {
2922 device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2923 __func__);
2924 sc->cur_ucode = old_type;
2925 return EIO;
2926 }
2928 iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2930 /*
2931 * Configure and operate the fw paging mechanism.
2932 * The driver configures the paging flow only once; the CPU2 paging
2933 * image is included in the IWM_UCODE_INIT image.
2934 */
2935 if (fw->paging_mem_size) {
2936 error = iwm_save_fw_paging(sc, fw);
2937 if (error) {
2938 device_printf(sc->sc_dev,
2939 "%s: failed to save the FW paging image\n",
2940 __func__);
2941 return error;
2942 }
2944 error = iwm_send_paging_cmd(sc, fw);
2945 if (error) {
2946 device_printf(sc->sc_dev,
2947 "%s: failed to send the paging cmd\n", __func__);
2948 iwm_free_fw_paging(sc);
2949 return error;
2950 }
2951 }
2953 if (!error)
2954 sc->ucode_loaded = TRUE;
2955 return error;
2956 }
2958 /*
2959 * mvm misc bits
2960 */
2962 static int
2963 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2964 {
2965 struct iwm_notification_wait calib_wait;
2966 static const uint16_t init_complete[] = {
2967 IWM_INIT_COMPLETE_NOTIF,
2968 IWM_CALIB_RES_NOTIF_PHY_DB
2969 };
2970 int ret;
2972 /* do not operate with rfkill switch turned on */
2973 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2974 device_printf(sc->sc_dev,
2975 "radio is disabled by hardware switch\n");
2976 return EPERM;
2977 }
2979 iwm_init_notification_wait(sc->sc_notif_wait,
2980 &calib_wait,
2981 init_complete,
2982 NELEM(init_complete),
2983 iwm_wait_phy_db_entry,
2984 sc->sc_phy_db);
2986 /* Will also start the device */
2987 ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2988 if (ret) {
2989 device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2990 ret);
2991 goto error;
2992 }
2994 if (justnvm) {
2995 /* Read nvm */
2996 ret = iwm_nvm_init(sc);
2997 if (ret) {
2998 device_printf(sc->sc_dev, "failed to read nvm\n");
2999 goto error;
3000 }
3001 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
3002 goto error;
3003 }
3005 ret = iwm_send_bt_init_conf(sc);
3006 if (ret) {
3007 device_printf(sc->sc_dev,
3008 "failed to send bt coex configuration: %d\n", ret);
3009 goto error;
3010 }
3012 /* Send TX valid antennas before triggering calibrations */
3013 ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
3014 if (ret) {
3015 device_printf(sc->sc_dev,
3016 "failed to send antennas before calibration: %d\n", ret);
3017 goto error;
3018 }
3020 /*
3021 * Send the phy configuration command to the init uCode
3022 * to start the 16.0 uCode init image internal calibrations.
3023 */
3024 ret = iwm_send_phy_cfg_cmd(sc);
3025 if (ret) {
3026 device_printf(sc->sc_dev,
3027 "%s: Failed to run INIT calibrations: %d\n",
3028 __func__, ret);
3029 goto error;
3030 }
3032 /*
3033 * Nothing to do but wait for the init complete notification
3034 * from the firmware.
3035 */
3036 IWM_UNLOCK(sc);
3037 ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3038 IWM_MVM_UCODE_CALIB_TIMEOUT);
3039 IWM_LOCK(sc);
3042 goto out;
3044 error:
3045 iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3046 out:
3047 return ret;
3048 }
3050 /*
3051 * receive side
3052 */
3054 /* (re)stock rx ring, called at init-time and at runtime */
3055 static int
3056 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3057 {
3058 struct iwm_rx_ring *ring = &sc->rxq;
3059 struct iwm_rx_data *data = &ring->data[idx];
3060 struct mbuf *m;
3061 bus_dmamap_t dmamap;
3062 bus_dma_segment_t seg;
3063 int nsegs, error;
3065 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3066 if (m == NULL)
3067 return ENOBUFS;
3069 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3070 #if defined(__DragonFly__)
3071 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
3072 m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
3073 #else
3074 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3075 &seg, &nsegs, BUS_DMA_NOWAIT);
3076 #endif
3077 if (error != 0) {
3078 device_printf(sc->sc_dev,
3079 "%s: can't map mbuf, error %d\n", __func__, error);
3080 m_freem(m);
3081 return error;
3082 }
3084 if (data->m != NULL)
3085 bus_dmamap_unload(ring->data_dmat, data->map);
3087 /* Swap ring->spare_map with data->map */
3088 dmamap = data->map;
3089 data->map = ring->spare_map;
3090 ring->spare_map = dmamap;
3092 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3093 data->m = m;
3095 /* Update RX descriptor. */
3096 KKASSERT((seg.ds_addr & 255) == 0);
3097 ring->desc[idx] = htole32(seg.ds_addr >> 8);
3098 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3099 BUS_DMASYNC_PREWRITE);
3101 return 0;
3102 }
3104 /*
3105 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3106 * values are reported by the fw as positive values - need to negate
3107 * to obtain their dBm. Account for missing antennas by replacing 0
3108 * values by -256 dBm: practically 0 power and a non-feasible 8 bit value.
3109 */
3110 static int
3111 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3112 {
3113 int energy_a, energy_b, energy_c, max_energy;
3114 uint32_t val;
3116 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3117 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3118 IWM_RX_INFO_ENERGY_ANT_A_POS;
3119 energy_a = energy_a ? -energy_a : -256;
3120 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3121 IWM_RX_INFO_ENERGY_ANT_B_POS;
3122 energy_b = energy_b ? -energy_b : -256;
3123 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3124 IWM_RX_INFO_ENERGY_ANT_C_POS;
3125 energy_c = energy_c ? -energy_c : -256;
3126 max_energy = MAX(energy_a, energy_b);
3127 max_energy = MAX(max_energy, energy_c);
3129 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3130 "energy In A %d B %d C %d , and max %d\n",
3131 energy_a, energy_b, energy_c, max_energy);
3133 return max_energy;
3134 }
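/*
 * Worked example (illustrative): if the PHY info word decodes to
 * energy_a = 40, energy_b = 0 (missing antenna) and energy_c = 50, the
 * per-antenna values become -40, -256 and -50 dBm, so the reported
 * signal strength is max(-40, -256, -50) = -40 dBm.
 */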
3136 static void
3137 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3138 {
3139 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3141 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3143 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3144 }
3146 /*
3147 * Retrieve the average noise (in dBm) among receivers.
3148 */
3149 static int
3150 iwm_get_noise(struct iwm_softc *sc,
3151 const struct iwm_mvm_statistics_rx_non_phy *stats)
3152 {
3153 int i, total, nbant, noise;
3155 total = nbant = noise = 0;
3156 for (i = 0; i < 3; i++) {
3157 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3158 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3159 __func__, i, noise);
3161 if (noise) {
3162 total += noise;
3163 nbant++;
3164 }
3165 }
3167 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3168 __func__, nbant, total);
3169 #if 0
3170 /* There should be at least one antenna but check anyway. */
3171 return (nbant == 0) ? -127 : (total / nbant) - 107;
3172 #else
3173 /* For now, just hard-code it to -96 to be safe */
3174 return (-96);
3175 #endif
3176 }
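/*
 * Worked example (illustrative) for the disabled computation above: with
 * beacon_silence_rssi values 60, 62 and 0, only two antennas count, so
 * the average would be (60 + 62) / 2 - 107 = -46 dBm; the function
 * currently sidesteps this and returns a conservative -96 dBm instead.
 */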
3178 static void
3179 iwm_mvm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3180 {
3181 struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3183 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3184 sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3185 }
3187 /*
3188 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3189 *
3190 * Handles the actual data of the Rx packet from the fw
3191 */
3192 static boolean_t
3193 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3194 boolean_t stolen)
3195 {
3196 struct ieee80211com *ic = &sc->sc_ic;
3197 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3198 struct ieee80211_frame *wh;
3199 struct ieee80211_node *ni;
3200 struct ieee80211_rx_stats rxs;
3201 struct iwm_rx_phy_info *phy_info;
3202 struct iwm_rx_mpdu_res_start *rx_res;
3203 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3204 uint32_t len;
3205 uint32_t rx_pkt_status;
3206 int rssi;
3208 phy_info = &sc->sc_last_phy_info;
3209 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3210 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3211 len = le16toh(rx_res->byte_count);
3212 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3214 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3215 device_printf(sc->sc_dev,
3216 "dsp size out of range [0,20]: %d\n",
3217 phy_info->cfg_phy_cnt);
3218 return FALSE;
3219 }
3221 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3222 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3223 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3224 "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3225 return FALSE; /* drop */
3226 }
3228 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3229 /* Note: RSSI is absolute (ie a -ve value) */
3230 if (rssi < IWM_MIN_DBM)
3231 rssi = IWM_MIN_DBM;
3232 else if (rssi > IWM_MAX_DBM)
3233 rssi = IWM_MAX_DBM;
3235 /* Map it to relative value */
3236 rssi = rssi - sc->sc_noise;
3238 /* replenish ring for the buffer we're going to feed to the sharks */
3239 if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3240 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3241 __func__);
3242 return FALSE;
3243 }
3245 m->m_data = pkt->data + sizeof(*rx_res);
3246 m->m_pkthdr.len = m->m_len = len;
3248 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3249 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3251 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3253 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3254 "%s: phy_info: channel=%d, flags=0x%08x\n",
3255 __func__,
3256 le16toh(phy_info->channel),
3257 le16toh(phy_info->phy_flags));
3259 /*
3260 * Populate an RX state struct with the provided information.
3261 */
3262 bzero(&rxs, sizeof(rxs));
3263 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3264 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3265 rxs.c_ieee = le16toh(phy_info->channel);
3266 if (le16toh(phy_info->phy_flags) & IWM_RX_RES_PHY_FLAGS_BAND_24) {
3267 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3268 } else {
3269 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3270 }
3271 /* rssi is in 1/2db units */
3272 rxs.rssi = rssi * 2;
3273 rxs.nf = sc->sc_noise;
3275 if (ieee80211_radiotap_active_vap(vap)) {
3276 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3278 tap->wr_flags = 0;
3279 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3280 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3281 tap->wr_chan_freq = htole16(rxs.c_freq);
3282 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3283 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3284 tap->wr_dbm_antsignal = (int8_t)rssi;
3285 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3286 tap->wr_tsft = phy_info->system_timestamp;
3287 switch (phy_info->rate) {
3288 /* CCK rates. */
3289 case 10: tap->wr_rate = 2; break;
3290 case 20: tap->wr_rate = 4; break;
3291 case 55: tap->wr_rate = 11; break;
3292 case 110: tap->wr_rate = 22; break;
3293 /* OFDM rates. */
3294 case 0xd: tap->wr_rate = 12; break;
3295 case 0xf: tap->wr_rate = 18; break;
3296 case 0x5: tap->wr_rate = 24; break;
3297 case 0x7: tap->wr_rate = 36; break;
3298 case 0x9: tap->wr_rate = 48; break;
3299 case 0xb: tap->wr_rate = 72; break;
3300 case 0x1: tap->wr_rate = 96; break;
3301 case 0x3: tap->wr_rate = 108; break;
3302 /* Unknown rate: should not happen. */
3303 default: tap->wr_rate = 0;
3304 }
3305 }
3307 IWM_UNLOCK(sc);
3308 if (ni != NULL) {
3309 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3310 ieee80211_input_mimo(ni, m, &rxs);
3311 ieee80211_free_node(ni);
3312 } else {
3313 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3314 ieee80211_input_mimo_all(ic, m, &rxs);
3315 }
3316 IWM_LOCK(sc);
3318 return TRUE;
3319 }
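/*
 * Note on the radiotap rate mapping above (illustrative): wr_rate is in
 * 500 kb/s units, so the CCK PLCP value 10 (1 Mb/s) maps to 2 and the
 * OFDM PLCP signal 0xd (6 Mb/s) maps to 12; an unknown PLCP value is
 * reported as rate 0 rather than dropping the frame.
 */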
3321 static int
3322 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3323 struct iwm_node *in)
3324 {
3325 struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3326 struct ieee80211_node *ni = &in->in_ni;
3327 struct ieee80211vap *vap = ni->ni_vap;
3328 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3329 int failack = tx_resp->failure_frame;
3330 int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3331 boolean_t rate_matched;
3332 uint8_t tx_resp_rate;
3333 int ret;
3335 KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3337 /* Update rate control statistics. */
3338 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3339 __func__,
3340 (int) le16toh(tx_resp->status.status),
3341 (int) le16toh(tx_resp->status.sequence),
3342 tx_resp->frame_count,
3343 tx_resp->bt_kill_count,
3344 tx_resp->failure_rts,
3345 tx_resp->failure_frame,
3346 le32toh(tx_resp->initial_rate),
3347 (int) le16toh(tx_resp->wireless_media_time));
3349 tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3351 /* For rate control, ignore frames sent at different initial rate */
3352 rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3354 if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3355 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3356 "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3357 "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3358 }
3360 if (status != IWM_TX_STATUS_SUCCESS &&
3361 status != IWM_TX_STATUS_DIRECT_DONE) {
3362 if (rate_matched) {
3363 ieee80211_ratectl_tx_complete(vap, ni,
3364 IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3365 }
3366 ret = 1;
3367 } else {
3368 if (rate_matched) {
3369 ieee80211_ratectl_tx_complete(vap, ni,
3370 IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3371 }
3372 ret = 0;
3373 }
3375 if (rate_matched) {
3376 int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3377 new_rate = vap->iv_bss->ni_txrate;
3378 if (new_rate != 0 && new_rate != cur_rate) {
3379 struct iwm_node *in = IWM_NODE(vap->iv_bss);
3380 iwm_setrates(sc, in, rix);
3381 iwm_mvm_send_lq_cmd(sc, &in->in_lq, FALSE);
3382 }
3383 }
3385 return ret;
3386 }
3388 static void
3389 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3390 {
3391 struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3392 int idx = cmd_hdr->idx;
3393 int qid = cmd_hdr->qid;
3394 struct iwm_tx_ring *ring = &sc->txq[qid];
3395 struct iwm_tx_data *txd = &ring->data[idx];
3396 struct iwm_node *in = txd->in;
3397 struct mbuf *m = txd->m;
3398 int status;
3400 KASSERT(txd->done == 0, ("txd not done"));
3401 KASSERT(txd->in != NULL, ("txd without node"));
3402 KASSERT(txd->m != NULL, ("txd without mbuf"));
3404 sc->sc_tx_timer = 0;
3406 status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3408 /* Unmap and free mbuf. */
3409 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3410 bus_dmamap_unload(ring->data_dmat, txd->map);
3412 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3413 "free txd %p, in %p\n", txd, txd->in);
3414 txd->done = 1;
3415 txd->m = NULL;
3416 txd->in = NULL;
3418 ieee80211_tx_complete(&in->in_ni, m, status);
3420 if (--ring->queued < IWM_TX_RING_LOMARK) {
3421 sc->qfullmsk &= ~(1 << ring->qid);
3422 if (sc->qfullmsk == 0) {
3423 iwm_start(sc);
3424 }
3425 }
3426 }
3428 /*
3429 * transmit side
3430 */
3432 /*
3433 * Process a "command done" firmware notification. This is where we wake up
3434 * processes waiting for a synchronous command completion.
3435 * from if_iwn
3436 */
3437 static void
3438 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3439 {
3440 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3441 struct iwm_tx_data *data;
3443 if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3444 return; /* Not a command ack. */
3445 }
3447 data = &ring->data[pkt->hdr.idx];
3449 /* If the command was mapped in an mbuf, free it. */
3450 if (data->m != NULL) {
3451 bus_dmamap_sync(ring->data_dmat, data->map,
3452 BUS_DMASYNC_POSTWRITE);
3453 bus_dmamap_unload(ring->data_dmat, data->map);
3454 m_freem(data->m);
3455 data->m = NULL;
3456 }
3457 wakeup(&ring->desc[pkt->hdr.idx]);
3459 if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3460 device_printf(sc->sc_dev,
3461 "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3462 __func__, pkt->hdr.idx, ring->queued, ring->cur);
3463 /* XXX call iwm_force_nmi() */
3464 }
3466 KKASSERT(ring->queued > 0);
3467 ring->queued--;
3468 if (ring->queued == 0)
3469 iwm_pcie_clear_cmd_in_flight(sc);
3470 }
3472 #if 0
3473 /*
3474 * necessary only for block ack mode
3475 */
3476 void
3477 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3478 uint16_t len)
3479 {
3480 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3481 uint16_t w_val;
3483 scd_bc_tbl = sc->sched_dma.vaddr;
3485 len += 8; /* magic numbers came naturally from paris */
3486 len = roundup(len, 4) / 4;
3488 w_val = htole16(sta_id << 12 | len);
3490 /* Update TX scheduler. */
3491 scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3492 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3493 BUS_DMASYNC_PREWRITE);
3495 /* I really wonder what this is ?!? */
3496 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3497 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3498 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3499 BUS_DMASYNC_PREWRITE);
3500 }
3501 }
3502 #endif
3504 /*
3505 * Fill in the rate related information for a transmit command.
3506 */
3507 static uint8_t
3508 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3509 struct mbuf *m, struct iwm_tx_cmd *tx)
3510 {
3511 struct ieee80211com *ic = &sc->sc_ic;
3512 struct ieee80211_node *ni = &in->in_ni;
3513 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
3514 const struct ieee80211_txparam *tp = ni->ni_txparms;
3515 const struct iwm_rate *rinfo;
3516 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3517 int ridx, rate_flags;
3519 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3520 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3522 if (type == IEEE80211_FC0_TYPE_MGT) {
3523 ridx = iwm_rate2ridx(sc, tp->mgmtrate);
3524 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3525 "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3526 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3527 ridx = iwm_rate2ridx(sc, tp->mcastrate);
3528 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3529 "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3530 } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3531 ridx = iwm_rate2ridx(sc, tp->ucastrate);
3532 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3533 "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3534 } else if (m->m_flags & M_EAPOL) {
3535 ridx = iwm_rate2ridx(sc, tp->mgmtrate);
3536 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3537 "%s: EAPOL (%d)\n", __func__, tp->mgmtrate);
3538 } else if (type == IEEE80211_FC0_TYPE_DATA) {
3539 /* This is the index into the programmed table */
3540 tx->initial_rate_index = 0;
3541 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3542 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA (%d)\n",
3543 __func__, ni->ni_txrate);
3544 return ni->ni_txrate;
3545 } else {
3546 ridx = iwm_rate2ridx(sc, tp->mgmtrate);
3547 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3548 "%s: DEFAULT (%d)\n", __func__, tp->mgmtrate);
3549 }
3551 /*
3552 * Sanity check ridx, and provide a fallback. If the rate lookup
3553 * ever fails, iwm_rate2ridx() will already have printed an error message.
3554 */
3555 if (ridx < 0 || ridx > IWM_RIDX_MAX) {
3556 if (ic->ic_curmode == IEEE80211_MODE_11A) {
3557 /*
3558 * XXX this assumes the mode is either 11a or not 11a;
3559 * definitely won't work for 11n.
3560 */
3561 ridx = IWM_RIDX_OFDM;
3562 } else {
3563 ridx = IWM_RIDX_CCK;
3564 }
3565 }
3567 rinfo = &iwm_rates[ridx];
3569 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3570 "%s: frame type=%d, ridx=%d, rate=%d, CCK=%d\n",
3571 __func__, type, ridx, rinfo->rate, !! (IWM_RIDX_IS_CCK(ridx)));
3573 /* XXX TODO: hard-coded TX antenna? */
3574 rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3575 if (IWM_RIDX_IS_CCK(ridx))
3576 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3577 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3579 return rinfo->rate;
3580 }
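/*
 * A minimal sketch (assuming iwm_rates[] holds the firmware PLCP codes,
 * as used above) of composing rate_n_flags for a fixed CCK rate:
 */
#if 0
uint32_t rnf;

rnf = 1 << IWM_RATE_MCS_ANT_POS;	/* TX antenna A, as above */
rnf |= IWM_RATE_MCS_CCK_MSK;		/* CCK modulation */
rnf |= iwm_rates[IWM_RIDX_CCK].plcp;	/* PLCP code of the CCK fallback */
tx->rate_n_flags = htole32(rnf);
#endif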
3582 #define TB0_SIZE 16
3583 static int
3584 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3585 {
3586 struct ieee80211com *ic = &sc->sc_ic;
3587 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3588 struct iwm_node *in = IWM_NODE(ni);
3589 struct iwm_tx_ring *ring;
3590 struct iwm_tx_data *data;
3591 struct iwm_tfd *desc;
3592 struct iwm_device_cmd *cmd;
3593 struct iwm_tx_cmd *tx;
3594 struct ieee80211_frame *wh;
3595 struct ieee80211_key *k = NULL;
3596 #if !defined(__DragonFly__)
3597 struct mbuf *m1;
3598 #endif
3599 uint32_t flags;
3600 u_int hdrlen;
3601 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3602 int nsegs;
3603 uint8_t rate, tid, type;
3604 int i, totlen, error, pad;
3606 wh = mtod(m, struct ieee80211_frame *);
3607 hdrlen = ieee80211_anyhdrsize(wh);
3608 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3609 tid = 0;
3610 ring = &sc->txq[ac];
3611 desc = &ring->desc[ring->cur];
3612 memset(desc, 0, sizeof(*desc));
3613 data = &ring->data[ring->cur];
3615 /* Fill out iwm_tx_cmd to send to the firmware */
3616 cmd = &ring->cmd[ring->cur];
3617 cmd->hdr.code = IWM_TX_CMD;
3618 cmd->hdr.flags = 0;
3619 cmd->hdr.qid = ring->qid;
3620 cmd->hdr.idx = ring->cur;
3622 tx = (void *)cmd->data;
3623 memset(tx, 0, sizeof(*tx));
3625 rate = iwm_tx_fill_cmd(sc, in, m, tx);
3627 /* Encrypt the frame if need be. */
3628 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3629 /* Retrieve key for TX && do software encryption. */
3630 k = ieee80211_crypto_encap(ni, m);
3631 if (k == NULL) {
3632 m_freem(m);
3633 return (ENOBUFS);
3634 }
3635 /* 802.11 header may have moved. */
3636 wh = mtod(m, struct ieee80211_frame *);
3637 }
3639 if (ieee80211_radiotap_active_vap(vap)) {
3640 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3642 tap->wt_flags = 0;
3643 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3644 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3645 tap->wt_rate = rate;
3646 if (k != NULL)
3647 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3648 ieee80211_radiotap_tx(vap, m);
3649 }
3652 totlen = m->m_pkthdr.len;
3654 flags = 0;
3655 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3656 flags |= IWM_TX_CMD_FLG_ACK;
3657 }
3659 if (type == IEEE80211_FC0_TYPE_DATA
3660 && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3661 && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3662 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3663 }
3665 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3666 type != IEEE80211_FC0_TYPE_DATA)
3667 tx->sta_id = sc->sc_aux_sta.sta_id;
3668 else
3669 tx->sta_id = IWM_STATION_ID;
3671 if (type == IEEE80211_FC0_TYPE_MGT) {
3672 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3674 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3675 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3676 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3677 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3678 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3679 } else {
3680 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3681 }
3682 } else {
3683 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3684 }
3686 if (hdrlen & 3) {
3687 /* First segment length must be a multiple of 4. */
3688 flags |= IWM_TX_CMD_FLG_MH_PAD;
3689 pad = 4 - (hdrlen & 3);
3690 } else
3691 pad = 0;
3693 tx->driver_txop = 0;
3694 tx->next_frame_len = 0;
3696 tx->len = htole16(totlen);
3697 tx->tid_tspec = tid;
3698 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3700 /* Set physical address of "scratch area". */
3701 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3702 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3704 /* Copy 802.11 header in TX command. */
3705 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3707 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3709 tx->sec_ctl = 0;
3710 tx->tx_flags |= htole32(flags);
3712 /* Trim 802.11 header. */
3713 m_adj(m, hdrlen);
3714 #if defined(__DragonFly__)
3715 error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
3716 segs, IWM_MAX_SCATTER - 2,
3717 &nsegs, BUS_DMA_NOWAIT);
3718 #else
3719 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3720 segs, &nsegs, BUS_DMA_NOWAIT);
3721 #endif
3722 if (error != 0) {
3723 #if defined(__DragonFly__)
3724 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3725 error);
3726 m_freem(m);
3727 return error;
3728 #else
3729 if (error != EFBIG) {
3730 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3731 error);
3732 m_freem(m);
3733 return error;
3734 }
3735 /* Too many DMA segments, linearize mbuf. */
3736 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3737 if (m1 == NULL) {
3738 device_printf(sc->sc_dev,
3739 "%s: could not defrag mbuf\n", __func__);
3740 m_freem(m);
3741 return (ENOBUFS);
3742 }
3743 m = m1;
3745 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3746 segs, &nsegs, BUS_DMA_NOWAIT);
3747 if (error != 0) {
3748 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3749 error);
3750 m_freem(m);
3751 return error;
3752 }
3753 #endif
3754 }
3755 data->m = m;
3756 data->in = in;
3757 data->done = 0;
3759 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3760 "sending txd %p, in %p\n", data, data->in);
3761 KASSERT(data->in != NULL, ("node is NULL"));
3763 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3764 "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3765 ring->qid, ring->cur, totlen, nsegs,
3766 le32toh(tx->tx_flags),
3767 le32toh(tx->rate_n_flags),
3768 tx->initial_rate_index
3771 /* Fill TX descriptor. */
3772 desc->num_tbs = 2 + nsegs;
3774 desc->tbs[0].lo = htole32(data->cmd_paddr);
3775 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3776 (TB0_SIZE << 4);
3777 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3778 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3779 ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3780 + hdrlen + pad - TB0_SIZE) << 4);
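/*
 * To recap the layout set up above: the command header, the TX command
 * and the (padded) 802.11 header sit contiguously at cmd_paddr; TB0
 * covers their first TB0_SIZE bytes, TB1 covers the remainder, and the
 * mbuf's data segments are appended as TB2..TBn below.
 */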
3782 /* Other DMA segments are for data payload. */
3783 for (i = 0; i < nsegs; i++) {
3784 seg = &segs[i];
3785 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3786 desc->tbs[i+2].hi_n_len = \
3787 htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3788 | ((seg->ds_len) << 4);
3791 bus_dmamap_sync(ring->data_dmat, data->map,
3792 BUS_DMASYNC_PREWRITE);
3793 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3794 BUS_DMASYNC_PREWRITE);
3795 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3796 BUS_DMASYNC_PREWRITE);
3798 #if 0
3799 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3800 #endif
3802 /* Kick TX ring. */
3803 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3804 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3806 /* Mark TX ring as full if we reach a certain threshold. */
3807 if (++ring->queued > IWM_TX_RING_HIMARK) {
3808 sc->qfullmsk |= 1 << ring->qid;
3811 return 0;
3814 static int
3815 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3816 const struct ieee80211_bpf_params *params)
3818 struct ieee80211com *ic = ni->ni_ic;
3819 struct iwm_softc *sc = ic->ic_softc;
3820 int error = 0;
3822 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3823 "->%s begin\n", __func__);
3825 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3826 m_freem(m);
3827 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3828 "<-%s not RUNNING\n", __func__);
3829 return (ENETDOWN);
3832 IWM_LOCK(sc);
3833 /* XXX fix this */
3834 if (params == NULL) {
3835 error = iwm_tx(sc, m, ni, 0);
3836 } else {
3837 error = iwm_tx(sc, m, ni, 0);
3839 sc->sc_tx_timer = 5;
3840 IWM_UNLOCK(sc);
3842 return (error);
3846 * mvm/tx.c
3850 * Note that there are transports that buffer frames before they reach
3851 * the firmware. This means that after flush_tx_path is called, the
3852 * queue might not be empty. The race-free way to handle this is to:
3853 * 1) set the station as draining
3854 * 2) flush the Tx path
3855 * 3) wait for the transport queues to be empty (an illustrative sketch of this sequence follows the function below)
3858 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3860 int ret;
3861 struct iwm_tx_path_flush_cmd flush_cmd = {
3862 .queues_ctl = htole32(tfd_msk),
3863 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3866 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3867 sizeof(flush_cmd), &flush_cmd);
3868 if (ret)
3869 device_printf(sc->sc_dev,
3870 "Flushing tx queue failed: %d\n", ret);
3871 return ret;
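/*
 * Illustrative sketch only (not compiled in): the race-free drain
 * sequence described above.  The step-1 helper is hypothetical in this
 * driver, and step 3's iwm_trans_wait_tx_queue_empty() is referenced
 * in iwm_release() below but likewise not wired up.
 */
#if 0
static int
iwm_mvm_drain_tx_path(struct iwm_softc *sc, struct iwm_node *in)
{
	int ret;

	/* 1) Mark the station as draining so no new frames are queued. */
	if ((ret = iwm_mvm_drain_sta(sc, in, TRUE)) != 0)	/* hypothetical */
		return ret;
	/* 2) Flush the Tx path for the queues feeding this station. */
	if ((ret = iwm_mvm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC)) != 0)
		return ret;
	/* 3) Wait until the transport queues are actually empty. */
	return iwm_trans_wait_tx_queue_empty(sc, 0xf);
}
#endif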
3874 static int
3875 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3877 struct iwm_time_quota_cmd cmd;
3878 int i, idx, ret, num_active_macs, quota, quota_rem;
3879 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3880 int n_ifs[IWM_MAX_BINDINGS] = {0, };
3881 uint16_t id;
3883 memset(&cmd, 0, sizeof(cmd));
3885 /* currently, PHY ID == binding ID */
3886 if (ivp) {
3887 id = ivp->phy_ctxt->id;
3888 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3889 colors[id] = ivp->phy_ctxt->color;
3891 if (1)
3892 n_ifs[id] = 1;
3896 * The FW's scheduling session consists of
3897 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3898 * equally between all the bindings that require quota.
3900 num_active_macs = 0;
3901 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3902 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3903 num_active_macs += n_ifs[i];
3906 quota = 0;
3907 quota_rem = 0;
3908 if (num_active_macs) {
3909 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3910 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
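/*
 * For illustration, assuming IWM_MVM_MAX_QUOTA is 128 as in the Linux
 * driver: with three active MACs each binding gets quota == 42
 * fragments, and quota_rem == 2 is credited to the first binding below.
 */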
3913 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3914 if (colors[i] < 0)
3915 continue;
3917 cmd.quotas[idx].id_and_color =
3918 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3920 if (n_ifs[i] <= 0) {
3921 cmd.quotas[idx].quota = htole32(0);
3922 cmd.quotas[idx].max_duration = htole32(0);
3923 } else {
3924 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3925 cmd.quotas[idx].max_duration = htole32(0);
3927 idx++;
3930 /* Give the remainder of the session to the first binding */
3931 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3933 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3934 sizeof(cmd), &cmd);
3935 if (ret)
3936 device_printf(sc->sc_dev,
3937 "%s: Failed to send quota: %d\n", __func__, ret);
3938 return ret;
3942 * ieee80211 routines
3946 * Change to AUTH state in 80211 state machine. Roughly matches what
3947 * Linux does in bss_info_changed().
3949 static int
3950 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3952 struct ieee80211_node *ni;
3953 struct iwm_node *in;
3954 struct iwm_vap *iv = IWM_VAP(vap);
3955 uint32_t duration;
3956 int error;
3959 * XXX I have a feeling that the vap node is being
3960 * freed from underneath us. Grr.
3962 ni = ieee80211_ref_node(vap->iv_bss);
3963 in = IWM_NODE(ni);
3964 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3965 "%s: called; vap=%p, bss ni=%p\n",
3966 __func__,
3967 vap,
3968 ni);
3970 in->in_assoc = 0;
3973 * Firmware bug - it'll crash if the beacon interval is less
3974 * than 16. We can't avoid connecting at all, so refuse the
3975 * station state change; this will cause net80211 to abandon
3976 * attempts to connect to this AP, and eventually wpa_s will
3977 * blacklist the AP...
3979 if (ni->ni_intval < 16) {
3980 device_printf(sc->sc_dev,
3981 "AP %s beacon interval is %d, refusing due to firmware bug!\n",
3982 ether_sprintf(ni->ni_bssid), ni->ni_intval);
3983 error = EINVAL;
3984 goto out;
3987 error = iwm_allow_mcast(vap, sc);
3988 if (error) {
3989 device_printf(sc->sc_dev,
3990 "%s: failed to set multicast\n", __func__);
3991 goto out;
3995 * This is where it deviates from what Linux does.
3997 * Linux iwlwifi doesn't reset the nic each time, nor does it
3998 * call ctxt_add() here. Instead, it adds it during vap creation,
3999 * and always does a mac_ctx_changed().
4001 * The openbsd port doesn't attempt to do that - it resets things
4002 * at odd states and does the add here.
4004 * So, until the state handling is fixed (ie, we never reset
4005 * the NIC except for a firmware failure, which should drag
4006 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4007 * contexts that are required), let's do a dirty hack here.
4009 if (iv->is_uploaded) {
4010 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4011 device_printf(sc->sc_dev,
4012 "%s: failed to update MAC\n", __func__);
4013 goto out;
4015 } else {
4016 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4017 device_printf(sc->sc_dev,
4018 "%s: failed to add MAC\n", __func__);
4019 goto out;
4023 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4024 in->in_ni.ni_chan, 1, 1)) != 0) {
4025 device_printf(sc->sc_dev,
4026 "%s: failed update phy ctxt\n", __func__);
4027 goto out;
4029 iv->phy_ctxt = &sc->sc_phyctxt[0];
4031 if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4032 device_printf(sc->sc_dev,
4033 "%s: binding update cmd\n", __func__);
4034 goto out;
4037 * Authentication becomes unreliable when powersaving is left enabled
4038 * here. Powersaving will be activated again when association has
4039 * finished or is aborted.
4041 iv->ps_disabled = TRUE;
4042 error = iwm_mvm_power_update_mac(sc);
4043 iv->ps_disabled = FALSE;
4044 if (error != 0) {
4045 device_printf(sc->sc_dev,
4046 "%s: failed to update power management\n",
4047 __func__);
4048 goto out;
4050 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4051 device_printf(sc->sc_dev,
4052 "%s: failed to add sta\n", __func__);
4053 goto out;
4057 * Prevent the FW from wandering off channel during association
4058 * by "protecting" the session with a time event.
4060 /* XXX duration is in units of TU, not MS */
4061 duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4062 iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
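/* A TU is 1024us, so passing the MS value here overshoots by ~2.4%. */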
4063 DELAY(100);
4065 error = 0;
4066 out:
4067 ieee80211_free_node(ni);
4068 return (error);
4071 static int
4072 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4074 uint32_t tfd_msk;
4077 * Ok, so *technically* the proper set of calls for going
4078 * from RUN back to SCAN is:
4080 * iwm_mvm_power_mac_disable(sc, in);
4081 * iwm_mvm_mac_ctxt_changed(sc, vap);
4082 * iwm_mvm_rm_sta(sc, in);
4083 * iwm_mvm_update_quotas(sc, NULL);
4084 * iwm_mvm_mac_ctxt_changed(sc, in);
4085 * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4086 * iwm_mvm_mac_ctxt_remove(sc, in);
4088 * However, that freezes the device no matter which permutations
4089 * and modifications are attempted. Obviously, this driver is missing
4090 * something since it works in the Linux driver, but figuring out what
4091 * is missing is a little more complicated. Now, since we're going
4092 * back to nothing anyway, we'll just do a complete device reset.
4093 * Up yours, device!
4096 * Just using 0xf for the queues mask (the four AC queues) is fine
4097 * as long as we only get here from RUN state.
4099 tfd_msk = 0xf;
4100 iwm_xmit_queue_drain(sc);
4101 iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4103 * We seem to get away with just synchronously sending the
4104 * IWM_TXPATH_FLUSH command.
4106 // iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4107 iwm_stop_device(sc);
4108 iwm_init_hw(sc);
4109 if (in)
4110 in->in_assoc = 0;
4111 return 0;
4113 #if 0
4114 int error;
4116 iwm_mvm_power_mac_disable(sc, in);
4118 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4119 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4120 return error;
4123 if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4124 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4125 return error;
4127 error = iwm_mvm_rm_sta(sc, in);
4128 in->in_assoc = 0;
4129 iwm_mvm_update_quotas(sc, NULL);
4130 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4131 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4132 return error;
4134 iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4136 iwm_mvm_mac_ctxt_remove(sc, in);
4138 return error;
4139 #endif
4142 static struct ieee80211_node *
4143 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4145 return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4146 M_INTWAIT | M_ZERO);
4149 static uint8_t
4150 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4152 uint8_t plcp = rate_n_flags & 0xff;
4153 int i;
4155 for (i = 0; i <= IWM_RIDX_MAX; i++) {
4156 if (iwm_rates[i].plcp == plcp)
4157 return iwm_rates[i].rate;
4159 return 0;
4162 uint8_t
4163 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4165 int i;
4166 uint8_t rval;
4168 for (i = 0; i < rs->rs_nrates; i++) {
4169 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4170 if (rval == iwm_rates[ridx].rate)
4171 return rs->rs_rates[i];
4174 return 0;
4177 static int
4178 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4180 int i;
4182 for (i = 0; i <= IWM_RIDX_MAX; i++) {
4183 if (iwm_rates[i].rate == rate)
4184 return i;
4187 device_printf(sc->sc_dev,
4188 "%s: WARNING: device rate for %u not found!\n",
4189 __func__, rate);
4191 return -1;
4194 static void
4195 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4197 struct ieee80211_node *ni = &in->in_ni;
4198 struct iwm_lq_cmd *lq = &in->in_lq;
4199 struct ieee80211_rateset *rs = &ni->ni_rates;
4200 int nrates = rs->rs_nrates;
4201 int i, ridx, tab = 0;
4202 int txant = 0;
4204 KKASSERT(rix >= 0 && rix < nrates);
4206 if (nrates > nitems(lq->rs_table)) {
4207 device_printf(sc->sc_dev,
4208 "%s: node supports %d rates, driver handles "
4209 "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4210 return;
4212 if (nrates == 0) {
4213 device_printf(sc->sc_dev,
4214 "%s: node supports 0 rates, odd!\n", __func__);
4215 return;
4217 nrates = imin(rix + 1, nrates);
4219 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4220 "%s: nrates=%d\n", __func__, nrates);
4222 /* then construct a lq_cmd based on those */
4223 memset(lq, 0, sizeof(*lq));
4224 lq->sta_id = IWM_STATION_ID;
4226 /* For HT, always enable RTS/CTS to avoid excessive retries. */
4227 if (ni->ni_flags & IEEE80211_NODE_HT)
4228 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4231 * Are these used? (We don't do SISO or MIMO.) They need
4232 * to be set to non-zero, though, or we get an error.
4234 lq->single_stream_ant_msk = 1;
4235 lq->dual_stream_ant_msk = 1;
4238 * Build the actual rate selection table.
4239 * The lowest bits are the rates. Additionally,
4240 * CCK needs bit 9 to be set. The rest of the bits
4241 * we add to the table select the tx antenna.
4242 * Note that we add the rates highest rate first
4243 * (opposite of ni_rates).
4245 for (i = 0; i < nrates; i++) {
4246 int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4247 int nextant;
4249 /* Map 802.11 rate to HW rate index. */
4250 ridx = iwm_rate2ridx(sc, rate);
4251 if (ridx == -1)
4252 continue;
4254 if (txant == 0)
4255 txant = iwm_mvm_get_valid_tx_ant(sc);
4256 nextant = 1<<(ffs(txant)-1);
4257 txant &= ~nextant;
4259 tab = iwm_rates[ridx].plcp;
4260 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4261 if (IWM_RIDX_IS_CCK(ridx))
4262 tab |= IWM_RATE_MCS_CCK_MSK;
4263 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4264 "station rate i=%d, rate=%d, hw=%x\n",
4265 i, iwm_rates[ridx].rate, tab);
4266 lq->rs_table[i] = htole32(tab);
4268 /* then fill the rest with the lowest possible rate */
4269 for (i = nrates; i < nitems(lq->rs_table); i++) {
4270 KASSERT(tab != 0, ("invalid tab"));
4271 lq->rs_table[i] = htole32(tab);
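/*
 * Worked example (PLCP and flag values as in iwlwifi, so treat them as
 * an assumption): 11 Mb/s CCK on antenna A would be encoded as
 * 0x6e | (1 << IWM_RATE_MCS_ANT_POS) | IWM_RATE_MCS_CCK_MSK, i.e.
 * 0x6e | 0x4000 | 0x200 == 0x426e, before the htole32() above.
 */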
4275 static int
4276 iwm_media_change(struct ifnet *ifp)
4278 struct ieee80211vap *vap = ifp->if_softc;
4279 struct ieee80211com *ic = vap->iv_ic;
4280 struct iwm_softc *sc = ic->ic_softc;
4281 int error;
4283 error = ieee80211_media_change(ifp);
4284 if (error != ENETRESET)
4285 return error;
4287 IWM_LOCK(sc);
4288 if (ic->ic_nrunning > 0) {
4289 iwm_stop(sc);
4290 iwm_init(sc);
4292 IWM_UNLOCK(sc);
4293 return error;
4297 static int
4298 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4300 struct iwm_vap *ivp = IWM_VAP(vap);
4301 struct ieee80211com *ic = vap->iv_ic;
4302 struct iwm_softc *sc = ic->ic_softc;
4303 struct iwm_node *in;
4304 int error;
4306 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4307 "switching state %s -> %s\n",
4308 ieee80211_state_name[vap->iv_state],
4309 ieee80211_state_name[nstate]);
4310 IEEE80211_UNLOCK(ic);
4311 IWM_LOCK(sc);
4313 if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4314 iwm_led_blink_stop(sc);
4316 /* disable beacon filtering if we're hopping out of RUN */
4317 if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4318 iwm_mvm_disable_beacon_filter(sc);
4320 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4321 in->in_assoc = 0;
4323 if (nstate == IEEE80211_S_INIT) {
4324 IWM_UNLOCK(sc);
4325 IEEE80211_LOCK(ic);
4326 error = ivp->iv_newstate(vap, nstate, arg);
4327 IEEE80211_UNLOCK(ic);
4328 IWM_LOCK(sc);
4329 iwm_release(sc, NULL);
4330 IWM_UNLOCK(sc);
4331 IEEE80211_LOCK(ic);
4332 return error;
4336 * It's impossible to go directly RUN->SCAN. If we call iwm_release()
4337 * above, the card will be completely reinitialized,
4338 * so the driver must do everything necessary to bring the card
4339 * from INIT to SCAN.
4341 * Additionally, upon receiving deauth frame from AP,
4342 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4343 * state. This will also fail with this driver, so bring the FSM
4344 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4346 * XXX TODO: fix this for FreeBSD!
4348 if (nstate == IEEE80211_S_SCAN ||
4349 nstate == IEEE80211_S_AUTH ||
4350 nstate == IEEE80211_S_ASSOC) {
4351 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4352 "Force transition to INIT; MGT=%d\n", arg);
4353 IWM_UNLOCK(sc);
4354 IEEE80211_LOCK(ic);
4355 /* Always pass arg as -1 since we can't Tx right now. */
4357 * XXX arg is just ignored anyway when transitioning
4358 * to IEEE80211_S_INIT.
4360 vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4361 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4362 "Going INIT->SCAN\n");
4363 nstate = IEEE80211_S_SCAN;
4364 IEEE80211_UNLOCK(ic);
4365 IWM_LOCK(sc);
4369 switch (nstate) {
4370 case IEEE80211_S_INIT:
4371 case IEEE80211_S_SCAN:
4372 if (vap->iv_state == IEEE80211_S_AUTH ||
4373 vap->iv_state == IEEE80211_S_ASSOC) {
4374 int myerr;
4375 IWM_UNLOCK(sc);
4376 IEEE80211_LOCK(ic);
4377 myerr = ivp->iv_newstate(vap, nstate, arg);
4378 IEEE80211_UNLOCK(ic);
4379 IWM_LOCK(sc);
4380 error = iwm_mvm_rm_sta(sc, vap, FALSE);
4381 if (error) {
4382 device_printf(sc->sc_dev,
4383 "%s: Failed to remove station: %d\n",
4384 __func__, error);
4386 error = iwm_mvm_mac_ctxt_changed(sc, vap);
4387 if (error) {
4388 device_printf(sc->sc_dev,
4389 "%s: Failed to change mac context: %d\n",
4390 __func__, error);
4392 error = iwm_mvm_binding_remove_vif(sc, ivp);
4393 if (error) {
4394 device_printf(sc->sc_dev,
4395 "%s: Failed to remove channel ctx: %d\n",
4396 __func__, error);
4398 ivp->phy_ctxt = NULL;
4399 error = iwm_mvm_power_update_mac(sc);
4400 if (error != 0) {
4401 device_printf(sc->sc_dev,
4402 "%s: failed to update power management\n",
4403 __func__);
4405 IWM_UNLOCK(sc);
4406 IEEE80211_LOCK(ic);
4407 return myerr;
4409 break;
4411 case IEEE80211_S_AUTH:
4412 if ((error = iwm_auth(vap, sc)) != 0) {
4413 device_printf(sc->sc_dev,
4414 "%s: could not move to auth state: %d\n",
4415 __func__, error);
4417 break;
4419 case IEEE80211_S_ASSOC:
4421 * EBS may be disabled due to previous failures reported by FW.
4422 * Reset EBS status here assuming environment has been changed.
4424 sc->last_ebs_successful = TRUE;
4425 break;
4427 case IEEE80211_S_RUN:
4428 in = IWM_NODE(vap->iv_bss);
4429 /* Update the association state, now that we have it all */
4430 /* (e.g. associd comes in at this point) */
4431 error = iwm_mvm_update_sta(sc, in);
4432 if (error != 0) {
4433 device_printf(sc->sc_dev,
4434 "%s: failed to update STA\n", __func__);
4435 IWM_UNLOCK(sc);
4436 IEEE80211_LOCK(ic);
4437 return error;
4439 in->in_assoc = 1;
4440 error = iwm_mvm_mac_ctxt_changed(sc, vap);
4441 if (error != 0) {
4442 device_printf(sc->sc_dev,
4443 "%s: failed to update MAC: %d\n", __func__, error);
4446 iwm_mvm_sf_update(sc, vap, FALSE);
4447 iwm_mvm_enable_beacon_filter(sc, ivp);
4448 iwm_mvm_power_update_mac(sc);
4449 iwm_mvm_update_quotas(sc, ivp);
4450 int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4451 iwm_setrates(sc, in, rix);
4453 if ((error = iwm_mvm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4454 device_printf(sc->sc_dev,
4455 "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4458 iwm_mvm_led_enable(sc);
4459 break;
4461 default:
4462 break;
4464 IWM_UNLOCK(sc);
4465 IEEE80211_LOCK(ic);
4467 return (ivp->iv_newstate(vap, nstate, arg));
4470 void
4471 iwm_endscan_cb(void *arg, int pending)
4473 struct iwm_softc *sc = arg;
4474 struct ieee80211com *ic = &sc->sc_ic;
4476 IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4477 "%s: scan ended\n",
4478 __func__);
4480 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4483 static int
4484 iwm_send_bt_init_conf(struct iwm_softc *sc)
4486 struct iwm_bt_coex_cmd bt_cmd;
4488 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4489 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4491 return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4492 &bt_cmd);
4495 static boolean_t
4496 iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4498 boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4499 boolean_t tlv_lar = fw_has_capa(&sc->ucode_capa,
4500 IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4502 if (iwm_lar_disable)
4503 return FALSE;
4506 * Enable LAR only if it is supported by the FW (TLV) &&
4507 * enabled in the NVM
4509 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4510 return nvm_lar && tlv_lar;
4511 else
4512 return tlv_lar;
4515 static boolean_t
4516 iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4518 return fw_has_api(&sc->ucode_capa,
4519 IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4520 fw_has_capa(&sc->ucode_capa,
4521 IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4524 static int
4525 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4527 struct iwm_mcc_update_cmd mcc_cmd;
4528 struct iwm_host_cmd hcmd = {
4529 .id = IWM_MCC_UPDATE_CMD,
4530 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4531 .data = { &mcc_cmd },
4533 int ret;
4534 #ifdef IWM_DEBUG
4535 struct iwm_rx_packet *pkt;
4536 struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4537 struct iwm_mcc_update_resp *mcc_resp;
4538 int n_channels;
4539 uint16_t mcc;
4540 #endif
4541 int resp_v2 = fw_has_capa(&sc->ucode_capa,
4542 IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4544 if (!iwm_mvm_is_lar_supported(sc)) {
4545 IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4546 __func__);
4547 return 0;
4550 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4551 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4552 if (iwm_mvm_is_wifi_mcc_supported(sc))
4553 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4554 else
4555 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4557 if (resp_v2)
4558 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4559 else
4560 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4562 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4563 "send MCC update to FW with '%c%c' src = %d\n",
4564 alpha2[0], alpha2[1], mcc_cmd.source_id);
4566 ret = iwm_send_cmd(sc, &hcmd);
4567 if (ret)
4568 return ret;
4570 #ifdef IWM_DEBUG
4571 pkt = hcmd.resp_pkt;
4573 /* Extract MCC response */
4574 if (resp_v2) {
4575 mcc_resp = (void *)pkt->data;
4576 mcc = mcc_resp->mcc;
4577 n_channels = le32toh(mcc_resp->n_channels);
4578 } else {
4579 mcc_resp_v1 = (void *)pkt->data;
4580 mcc = mcc_resp_v1->mcc;
4581 n_channels = le32toh(mcc_resp_v1->n_channels);
4584 /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4585 if (mcc == 0)
4586 mcc = 0x3030; /* "00" - world */
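/* 0x3030 is ('0' << 8) | '0', mirroring the alpha2 packing above. */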
4588 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4589 "regulatory domain '%c%c' (%d channels available)\n",
4590 mcc >> 8, mcc & 0xff, n_channels);
4591 #endif
4592 iwm_free_resp(sc, &hcmd);
4594 return 0;
4597 static void
4598 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4600 struct iwm_host_cmd cmd = {
4601 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4602 .len = { sizeof(uint32_t), },
4603 .data = { &backoff, },
4606 if (iwm_send_cmd(sc, &cmd) != 0) {
4607 device_printf(sc->sc_dev,
4608 "failed to change thermal tx backoff\n");
4612 static int
4613 iwm_init_hw(struct iwm_softc *sc)
4615 struct ieee80211com *ic = &sc->sc_ic;
4616 int error, i, ac;
4618 sc->sf_state = IWM_SF_UNINIT;
4620 if ((error = iwm_start_hw(sc)) != 0) {
4621 kprintf("iwm_start_hw: failed %d\n", error);
4622 return error;
4625 if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4626 kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
4627 return error;
4631 * We should stop and restart the HW since the INIT
4632 * image has just been loaded.
4634 iwm_stop_device(sc);
4635 sc->sc_ps_disabled = FALSE;
4636 if ((error = iwm_start_hw(sc)) != 0) {
4637 device_printf(sc->sc_dev, "could not initialize hardware\n");
4638 return error;
4641 /* Restart, this time with the regular firmware */
4642 error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4643 if (error) {
4644 device_printf(sc->sc_dev, "could not load firmware\n");
4645 goto error;
4648 error = iwm_mvm_sf_update(sc, NULL, FALSE);
4649 if (error)
4650 device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4652 if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4653 device_printf(sc->sc_dev, "bt init conf failed\n");
4654 goto error;
4657 error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4658 if (error != 0) {
4659 device_printf(sc->sc_dev, "antenna config failed\n");
4660 goto error;
4663 /* Send phy db control command and then phy db calibration */
4664 if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4665 goto error;
4667 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4668 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4669 goto error;
4672 /* Add auxiliary station for scanning */
4673 if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4674 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4675 goto error;
4678 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4680 * The channel used here isn't relevant as it's
4681 * going to be overwritten in the other flows.
4682 * For now use the first channel we have.
4684 if ((error = iwm_mvm_phy_ctxt_add(sc,
4685 &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4686 goto error;
4689 /* Initialize tx backoffs to the minimum. */
4690 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4691 iwm_mvm_tt_tx_backoff(sc, 0);
4693 error = iwm_mvm_power_update_device(sc);
4694 if (error)
4695 goto error;
4697 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4698 goto error;
4700 if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4701 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4702 goto error;
4705 /* Enable Tx queues. */
4706 for (ac = 0; ac < WME_NUM_AC; ac++) {
4707 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4708 iwm_mvm_ac_to_tx_fifo[ac]);
4709 if (error)
4710 goto error;
4713 if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4714 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4715 goto error;
4718 return 0;
4720 error:
4721 iwm_stop_device(sc);
4722 return error;
4725 /* Allow multicast from our BSSID. */
4726 static int
4727 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4729 struct ieee80211_node *ni = vap->iv_bss;
4730 struct iwm_mcast_filter_cmd *cmd;
4731 size_t size;
4732 int error;
4734 size = roundup(sizeof(*cmd), 4);
4735 cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4736 if (cmd == NULL)
4737 return ENOMEM;
4738 cmd->filter_own = 1;
4739 cmd->port_id = 0;
4740 cmd->count = 0;
4741 cmd->pass_all = 1;
4742 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4744 error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4745 IWM_CMD_SYNC, size, cmd);
4746 kfree(cmd, M_DEVBUF);
4748 return (error);
4752 * ifnet interfaces
4755 static void
4756 iwm_init(struct iwm_softc *sc)
4758 int error;
4760 if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4761 return;
4763 sc->sc_generation++;
4764 sc->sc_flags &= ~IWM_FLAG_STOPPED;
4766 if ((error = iwm_init_hw(sc)) != 0) {
4767 kprintf("iwm_init_hw failed %d\n", error);
4768 iwm_stop(sc);
4769 return;
4773 * Ok, firmware loaded and we are jogging
4775 sc->sc_flags |= IWM_FLAG_HW_INITED;
4776 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4779 static int
4780 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4782 struct iwm_softc *sc;
4783 int error;
4785 sc = ic->ic_softc;
4787 IWM_LOCK(sc);
4788 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4789 IWM_UNLOCK(sc);
4790 return (ENXIO);
4792 error = mbufq_enqueue(&sc->sc_snd, m);
4793 if (error) {
4794 IWM_UNLOCK(sc);
4795 return (error);
4797 iwm_start(sc);
4798 IWM_UNLOCK(sc);
4799 return (0);
4803 * Dequeue packets from sendq and call send.
4805 static void
4806 iwm_start(struct iwm_softc *sc)
4808 struct ieee80211_node *ni;
4809 struct mbuf *m;
4810 int ac = 0;
4812 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4813 while (sc->qfullmsk == 0 &&
4814 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4815 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4816 if (iwm_tx(sc, m, ni, ac) != 0) {
4817 if_inc_counter(ni->ni_vap->iv_ifp,
4818 IFCOUNTER_OERRORS, 1);
4819 ieee80211_free_node(ni);
4820 continue;
4822 sc->sc_tx_timer = 15;
4824 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4827 static void
4828 iwm_stop(struct iwm_softc *sc)
4831 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4832 sc->sc_flags |= IWM_FLAG_STOPPED;
4833 sc->sc_generation++;
4834 iwm_led_blink_stop(sc);
4835 sc->sc_tx_timer = 0;
4836 iwm_stop_device(sc);
4837 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4840 static void
4841 iwm_watchdog(void *arg)
4843 struct iwm_softc *sc = arg;
4845 if (sc->sc_tx_timer > 0) {
4846 if (--sc->sc_tx_timer == 0) {
4847 device_printf(sc->sc_dev, "device timeout\n");
4848 #ifdef IWM_DEBUG
4849 iwm_nic_error(sc);
4850 #endif
4851 iwm_stop(sc);
4852 #if defined(__DragonFly__)
4853 ++sc->sc_ic.ic_oerrors;
4854 #else
4855 counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4856 #endif
4857 return;
4860 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4863 static void
4864 iwm_parent(struct ieee80211com *ic)
4866 struct iwm_softc *sc = ic->ic_softc;
4867 int startall = 0;
4869 IWM_LOCK(sc);
4870 if (ic->ic_nrunning > 0) {
4871 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4872 iwm_init(sc);
4873 startall = 1;
4875 } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4876 iwm_stop(sc);
4877 IWM_UNLOCK(sc);
4878 if (startall)
4879 ieee80211_start_all(ic);
4883 * The interrupt side of things
4887 * error dumping routines are from iwlwifi/mvm/utils.c
4891 * Note: This structure is read from the device with IO accesses,
4892 * and the reading already does the endian conversion. As it is
4893 * read with uint32_t-sized accesses, any members with a different size
4894 * need to be ordered correctly though!
4896 struct iwm_error_event_table {
4897 uint32_t valid; /* (nonzero) valid, (0) log is empty */
4898 uint32_t error_id; /* type of error */
4899 uint32_t trm_hw_status0; /* TRM HW status */
4900 uint32_t trm_hw_status1; /* TRM HW status */
4901 uint32_t blink2; /* branch link */
4902 uint32_t ilink1; /* interrupt link */
4903 uint32_t ilink2; /* interrupt link */
4904 uint32_t data1; /* error-specific data */
4905 uint32_t data2; /* error-specific data */
4906 uint32_t data3; /* error-specific data */
4907 uint32_t bcon_time; /* beacon timer */
4908 uint32_t tsf_low; /* network timestamp function timer */
4909 uint32_t tsf_hi; /* network timestamp function timer */
4910 uint32_t gp1; /* GP1 timer register */
4911 uint32_t gp2; /* GP2 timer register */
4912 uint32_t fw_rev_type; /* firmware revision type */
4913 uint32_t major; /* uCode version major */
4914 uint32_t minor; /* uCode version minor */
4915 uint32_t hw_ver; /* HW Silicon version */
4916 uint32_t brd_ver; /* HW board version */
4917 uint32_t log_pc; /* log program counter */
4918 uint32_t frame_ptr; /* frame pointer */
4919 uint32_t stack_ptr; /* stack pointer */
4920 uint32_t hcmd; /* last host command header */
4921 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
4922 * rxtx_flag */
4923 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
4924 * host_flag */
4925 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
4926 * enc_flag */
4927 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
4928 * time_flag */
4929 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
4930 * wico interrupt */
4931 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
4932 uint32_t wait_event; /* wait event() caller address */
4933 uint32_t l2p_control; /* L2pControlField */
4934 uint32_t l2p_duration; /* L2pDurationField */
4935 uint32_t l2p_mhvalid; /* L2pMhValidBits */
4936 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
4937 uint32_t lmpm_pmg_sel; /* indicates which clocks are turned on
4938 * (LMPM_PMG_SEL) */
4939 uint32_t u_timestamp; /* date and time of the
4940 * compilation */
4941 uint32_t flow_handler; /* FH read/write pointers, RX credit */
4942 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4945 * UMAC error struct - relevant starting from family 8000 chip.
4946 * Note: This structure is read from the device with IO accesses,
4947 * and the reading already does the endian conversion. As it is
4948 * read with u32-sized accesses, any members with a different size
4949 * need to be ordered correctly though!
4951 struct iwm_umac_error_event_table {
4952 uint32_t valid; /* (nonzero) valid, (0) log is empty */
4953 uint32_t error_id; /* type of error */
4954 uint32_t blink1; /* branch link */
4955 uint32_t blink2; /* branch link */
4956 uint32_t ilink1; /* interrupt link */
4957 uint32_t ilink2; /* interrupt link */
4958 uint32_t data1; /* error-specific data */
4959 uint32_t data2; /* error-specific data */
4960 uint32_t data3; /* error-specific data */
4961 uint32_t umac_major;
4962 uint32_t umac_minor;
4963 uint32_t frame_pointer; /* core register 27*/
4964 uint32_t stack_pointer; /* core register 28 */
4965 uint32_t cmd_header; /* latest host cmd sent to UMAC */
4966 uint32_t nic_isr_pref; /* ISR status register */
4967 } __packed;
4969 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
4970 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
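/*
 * table.valid below counts logged entries of ERROR_ELEM_SIZE (28)
 * bytes each, laid out after ERROR_START_OFFSET (4) bytes; hence the
 * "Start ... Log Dump" checks fire for any nonzero entry count.
 */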
4972 #ifdef IWM_DEBUG
4973 struct {
4974 const char *name;
4975 uint8_t num;
4976 } advanced_lookup[] = {
4977 { "NMI_INTERRUPT_WDG", 0x34 },
4978 { "SYSASSERT", 0x35 },
4979 { "UCODE_VERSION_MISMATCH", 0x37 },
4980 { "BAD_COMMAND", 0x38 },
4981 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4982 { "FATAL_ERROR", 0x3D },
4983 { "NMI_TRM_HW_ERR", 0x46 },
4984 { "NMI_INTERRUPT_TRM", 0x4C },
4985 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4986 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4987 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4988 { "NMI_INTERRUPT_HOST", 0x66 },
4989 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
4990 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
4991 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4992 { "ADVANCED_SYSASSERT", 0 },
4995 static const char *
4996 iwm_desc_lookup(uint32_t num)
4998 int i;
5000 for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5001 if (advanced_lookup[i].num == num)
5002 return advanced_lookup[i].name;
5004 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5005 return advanced_lookup[i].name;
5008 static void
5009 iwm_nic_umac_error(struct iwm_softc *sc)
5011 struct iwm_umac_error_event_table table;
5012 uint32_t base;
5014 base = sc->umac_error_event_table;
5016 if (base < 0x800000) {
5017 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5018 base);
5019 return;
5022 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5023 device_printf(sc->sc_dev, "reading errlog failed\n");
5024 return;
5027 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5028 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5029 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5030 sc->sc_flags, table.valid);
5033 device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5034 iwm_desc_lookup(table.error_id));
5035 device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5036 device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5037 device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5038 table.ilink1);
5039 device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5040 table.ilink2);
5041 device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5042 device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5043 device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5044 device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5045 device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5046 device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5047 table.frame_pointer);
5048 device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5049 table.stack_pointer);
5050 device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5051 device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5052 table.nic_isr_pref);
5056 * Support for dumping the error log seemed like a good idea ...
5057 * but it's mostly hex junk and the only sensible thing is the
5058 * hw/ucode revision (which we know anyway). Since it's here,
5059 * I'll just leave it in, just in case e.g. the Intel guys want to
5060 * help us decipher some "ADVANCED_SYSASSERT" later.
5062 static void
5063 iwm_nic_error(struct iwm_softc *sc)
5065 struct iwm_error_event_table table;
5066 uint32_t base;
5068 device_printf(sc->sc_dev, "dumping device error log\n");
5069 base = sc->error_event_table;
5070 if (base < 0x800000) {
5071 device_printf(sc->sc_dev,
5072 "Invalid error log pointer 0x%08x\n", base);
5073 return;
5076 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5077 device_printf(sc->sc_dev, "reading errlog failed\n");
5078 return;
5081 if (!table.valid) {
5082 device_printf(sc->sc_dev, "errlog not found, skipping\n");
5083 return;
5086 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5087 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5088 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5089 sc->sc_flags, table.valid);
5092 device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5093 iwm_desc_lookup(table.error_id));
5094 device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5095 table.trm_hw_status0);
5096 device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5097 table.trm_hw_status1);
5098 device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5099 device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5100 device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5101 device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5102 device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5103 device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5104 device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5105 device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5106 device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5107 device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5108 device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5109 device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5110 table.fw_rev_type);
5111 device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5112 device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5113 device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5114 device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5115 device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5116 device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5117 device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5118 device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5119 device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5120 device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5121 device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5122 device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5123 device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5124 device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5125 device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5126 device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5127 device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5128 device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5129 device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5131 if (sc->umac_error_event_table)
5132 iwm_nic_umac_error(sc);
5134 #endif
5136 static void
5137 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5139 struct ieee80211com *ic = &sc->sc_ic;
5140 struct iwm_cmd_response *cresp;
5141 struct mbuf *m1;
5142 uint32_t offset = 0;
5143 uint32_t maxoff = IWM_RBUF_SIZE;
5144 uint32_t nextoff;
5145 boolean_t stolen = FALSE;
5147 #define HAVEROOM(a) \
5148 ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5150 while (HAVEROOM(offset)) {
5151 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5152 offset);
5153 int qid, idx, code, len;
5155 qid = pkt->hdr.qid;
5156 idx = pkt->hdr.idx;
5158 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5161 * We randomly get these from the firmware; no idea why.
5162 * They at least seem harmless, so just ignore them for now.
5164 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5165 pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5166 break;
5169 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5170 "rx packet qid=%d idx=%d type=%x\n",
5171 qid & ~0x80, pkt->hdr.idx, code);
5173 len = iwm_rx_packet_len(pkt);
5174 len += sizeof(uint32_t); /* account for status word */
5175 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
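/*
 * Frames are packed at IWM_FH_RSCSR_FRAME_ALIGN boundaries; assuming
 * the usual 0x40 alignment, a packet of len 100 (including the status
 * word) advances offset by 128 bytes to reach the next frame.
 */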
5177 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5179 switch (code) {
5180 case IWM_REPLY_RX_PHY_CMD:
5181 iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5182 break;
5184 case IWM_REPLY_RX_MPDU_CMD: {
5186 * If this is the last frame in the RX buffer, we
5187 * can directly feed the mbuf to the sharks here.
5189 struct iwm_rx_packet *nextpkt = mtodoff(m,
5190 struct iwm_rx_packet *, nextoff);
5191 if (!HAVEROOM(nextoff) ||
5192 (nextpkt->hdr.code == 0 &&
5193 (nextpkt->hdr.qid & ~0x80) == 0 &&
5194 nextpkt->hdr.idx == 0) ||
5195 (nextpkt->len_n_flags ==
5196 htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5197 if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5198 stolen = FALSE;
5199 /* Make sure we abort the loop */
5200 nextoff = maxoff;
5202 break;
5206 * Use m_copym instead of m_split, because that
5207 * makes it easier to keep a valid rx buffer in
5208 * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5210 * We need to start m_copym() at offset 0, to get the
5211 * M_PKTHDR flag preserved.
5213 m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5214 if (m1) {
5215 if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5216 stolen = TRUE;
5217 else
5218 m_freem(m1);
5220 break;
5223 case IWM_TX_CMD:
5224 iwm_mvm_rx_tx_cmd(sc, pkt);
5225 break;
5227 case IWM_MISSED_BEACONS_NOTIFICATION: {
5228 struct iwm_missed_beacons_notif *resp;
5229 int missed;
5231 /* XXX look at mac_id to determine interface ID */
5232 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5234 resp = (void *)pkt->data;
5235 missed = le32toh(resp->consec_missed_beacons);
5237 IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5238 "%s: MISSED_BEACON: mac_id=%d, "
5239 "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5240 "num_rx=%d\n",
5241 __func__,
5242 le32toh(resp->mac_id),
5243 le32toh(resp->consec_missed_beacons_since_last_rx),
5244 le32toh(resp->consec_missed_beacons),
5245 le32toh(resp->num_expected_beacons),
5246 le32toh(resp->num_recvd_beacons));
5248 /* Be paranoid */
5249 if (vap == NULL)
5250 break;
5252 /* XXX no net80211 locking? */
5253 if (vap->iv_state == IEEE80211_S_RUN &&
5254 (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5255 if (missed > vap->iv_bmissthreshold) {
5256 /* XXX bad locking; turn into task */
5257 IWM_UNLOCK(sc);
5258 ieee80211_beacon_miss(ic);
5259 IWM_LOCK(sc);
5263 break; }
5265 case IWM_MFUART_LOAD_NOTIFICATION:
5266 break;
5268 case IWM_MVM_ALIVE:
5269 break;
5271 case IWM_CALIB_RES_NOTIF_PHY_DB:
5272 break;
5274 case IWM_STATISTICS_NOTIFICATION:
5275 iwm_mvm_handle_rx_statistics(sc, pkt);
5276 break;
5278 case IWM_NVM_ACCESS_CMD:
5279 case IWM_MCC_UPDATE_CMD:
5280 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5281 memcpy(sc->sc_cmd_resp,
5282 pkt, sizeof(sc->sc_cmd_resp));
5284 break;
5286 case IWM_MCC_CHUB_UPDATE_CMD: {
5287 struct iwm_mcc_chub_notif *notif;
5288 notif = (void *)pkt->data;
5290 sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5291 sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5292 sc->sc_fw_mcc[2] = '\0';
5293 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5294 "fw source %d sent CC '%s'\n",
5295 notif->source_id, sc->sc_fw_mcc);
5296 break;
5299 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5300 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5301 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5302 struct iwm_dts_measurement_notif_v1 *notif;
5304 if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5305 device_printf(sc->sc_dev,
5306 "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5307 break;
5309 notif = (void *)pkt->data;
5310 IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5311 "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5312 notif->temp);
5313 break;
5316 case IWM_PHY_CONFIGURATION_CMD:
5317 case IWM_TX_ANT_CONFIGURATION_CMD:
5318 case IWM_ADD_STA:
5319 case IWM_MAC_CONTEXT_CMD:
5320 case IWM_REPLY_SF_CFG_CMD:
5321 case IWM_POWER_TABLE_CMD:
5322 case IWM_PHY_CONTEXT_CMD:
5323 case IWM_BINDING_CONTEXT_CMD:
5324 case IWM_TIME_EVENT_CMD:
5325 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5326 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5327 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5328 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5329 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5330 case IWM_REPLY_BEACON_FILTERING_CMD:
5331 case IWM_MAC_PM_POWER_TABLE:
5332 case IWM_TIME_QUOTA_CMD:
5333 case IWM_REMOVE_STA:
5334 case IWM_TXPATH_FLUSH:
5335 case IWM_LQ_CMD:
5336 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5337 IWM_FW_PAGING_BLOCK_CMD):
5338 case IWM_BT_CONFIG:
5339 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5340 cresp = (void *)pkt->data;
5341 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5342 memcpy(sc->sc_cmd_resp,
5343 pkt, sizeof(*pkt)+sizeof(*cresp));
5345 break;
5347 /* ignore */
5348 case IWM_PHY_DB_CMD:
5349 break;
5351 case IWM_INIT_COMPLETE_NOTIF:
5352 break;
5354 case IWM_SCAN_OFFLOAD_COMPLETE:
5355 iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5356 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5357 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5358 ieee80211_runtask(ic, &sc->sc_es_task);
5360 break;
5362 case IWM_SCAN_ITERATION_COMPLETE: {
5363 struct iwm_lmac_scan_complete_notif *notif;
5364 notif = (void *)pkt->data;
5365 break;
5368 case IWM_SCAN_COMPLETE_UMAC:
5369 iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5370 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5371 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5372 ieee80211_runtask(ic, &sc->sc_es_task);
5374 break;
5376 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5377 struct iwm_umac_scan_iter_complete_notif *notif;
5378 notif = (void *)pkt->data;
5380 IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5381 "complete, status=0x%x, %d channels scanned\n",
5382 notif->status, notif->scanned_channels);
5383 break;
5386 case IWM_REPLY_ERROR: {
5387 struct iwm_error_resp *resp;
5388 resp = (void *)pkt->data;
5390 device_printf(sc->sc_dev,
5391 "firmware error 0x%x, cmd 0x%x\n",
5392 le32toh(resp->error_type),
5393 resp->cmd_id);
5394 break;
5397 case IWM_TIME_EVENT_NOTIFICATION: {
5398 struct iwm_time_event_notif *notif;
5399 notif = (void *)pkt->data;
5401 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5402 "TE notif status = 0x%x action = 0x%x\n",
5403 notif->status, notif->action);
5404 break;
5408 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5409 * messages. Just ignore them for now.
5411 case IWM_DEBUG_LOG_MSG:
5412 break;
5414 case IWM_MCAST_FILTER_CMD:
5415 break;
5417 case IWM_SCD_QUEUE_CFG: {
5418 struct iwm_scd_txq_cfg_rsp *rsp;
5419 rsp = (void *)pkt->data;
5421 IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5422 "queue cfg token=0x%x sta_id=%d "
5423 "tid=%d scd_queue=%d\n",
5424 rsp->token, rsp->sta_id, rsp->tid,
5425 rsp->scd_queue);
5426 break;
5429 default:
5430 device_printf(sc->sc_dev,
5431 "frame %d/%d %x UNHANDLED (this should "
5432 "not happen)\n", qid & ~0x80, idx,
5433 pkt->len_n_flags);
5434 break;
5438 * Why test bit 0x80? The Linux driver:
5440 * There is one exception: uCode sets bit 15 when it
5441 * originates the response/notification, i.e. when the
5442 * response/notification is not a direct response to a
5443 * command sent by the driver. For example, uCode issues
5444 * IWM_REPLY_RX when it sends a received frame to the driver;
5445 * it is not a direct response to any driver command.
5447 * Ok, so since when is 7 == 15? Well, the Linux driver
5448 * uses a slightly different format for pkt->hdr, and "qid"
5449 * is actually the upper byte of a two-byte field.
5451 if (!(qid & (1 << 7)))
5452 iwm_cmd_done(sc, pkt);
5454 offset = nextoff;
5456 if (stolen)
5457 m_freem(m);
5458 #undef HAVEROOM
5462 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5463 * Basic structure from if_iwn
5465 static void
5466 iwm_notif_intr(struct iwm_softc *sc)
5468 uint16_t hw;
5470 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5471 BUS_DMASYNC_POSTREAD);
5473 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5476 * Process responses
5478 while (sc->rxq.cur != hw) {
5479 struct iwm_rx_ring *ring = &sc->rxq;
5480 struct iwm_rx_data *data = &ring->data[ring->cur];
5482 bus_dmamap_sync(ring->data_dmat, data->map,
5483 BUS_DMASYNC_POSTREAD);
5485 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5486 "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5487 iwm_handle_rxb(sc, data->m);
5489 ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5493 * Tell the firmware that it can reuse the ring entries that
5494 * we have just processed.
5495 * Seems like the hardware gets upset unless we align
5496 * the write to a multiple of 8.
5498 hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5499 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
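/* E.g. a closed_rb_num of 83 becomes hw == 82, and the FW is told 80. */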
5502 static void
5503 iwm_intr(void *arg)
5505 struct iwm_softc *sc = arg;
5506 int handled = 0;
5507 int r1, r2, rv = 0;
5508 int isperiodic = 0;
5510 #if defined(__DragonFly__)
5511 if (sc->sc_mem == NULL) {
5512 kprintf("iwm_intr: detached\n");
5513 return;
5515 #endif
5516 IWM_LOCK(sc);
5517 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5519 if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5520 uint32_t *ict = sc->ict_dma.vaddr;
5521 int tmp;
5523 tmp = htole32(ict[sc->ict_cur]);
5524 if (!tmp)
5525 goto out_ena;
5528 * OK, there was something; keep plowing until we have it all.
5530 r1 = r2 = 0;
5531 while (tmp) {
5532 r1 |= tmp;
5533 ict[sc->ict_cur] = 0;
5534 sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5535 tmp = htole32(ict[sc->ict_cur]);
5538 /* this is where the fun begins. don't ask */
5539 if (r1 == 0xffffffff)
5540 r1 = 0;
5542 /* i am not expected to understand this */
5543 if (r1 & 0xc0000)
5544 r1 |= 0x8000;
5545 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
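/*
 * Per the Linux driver, this works around a h/w bug: with interrupt
 * coalescing the Rx bit (bit 15 before shifting to 31) can clear while
 * bits 18/19 stay set, so those are used to reconstruct it; the two
 * ICT bytes are then unfolded into the CSR_INT bit layout.
 */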
5546 } else {
5547 r1 = IWM_READ(sc, IWM_CSR_INT);
5548 /* "hardware gone" (where, fishing?) */
5549 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5550 goto out;
5551 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5553 if (r1 == 0 && r2 == 0) {
5554 goto out_ena;
5557 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5559 /* Safely ignore these bits for debug checks below */
5560 r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5562 if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5563 int i;
5564 struct ieee80211com *ic = &sc->sc_ic;
5565 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5567 #ifdef IWM_DEBUG
5568 iwm_nic_error(sc);
5569 #endif
5570 /* Dump driver status (TX and RX rings) while we're here. */
5571 device_printf(sc->sc_dev, "driver status:\n");
5572 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5573 struct iwm_tx_ring *ring = &sc->txq[i];
5574 device_printf(sc->sc_dev,
5575 " tx ring %2d: qid=%-2d cur=%-3d "
5576 "queued=%-3d\n",
5577 i, ring->qid, ring->cur, ring->queued);
5579 device_printf(sc->sc_dev,
5580 " rx ring: cur=%d\n", sc->rxq.cur);
5581 device_printf(sc->sc_dev,
5582 " 802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5584 /* Don't stop the device; just do a VAP restart */
5585 IWM_UNLOCK(sc);
5587 if (vap == NULL) {
5588 kprintf("%s: null vap\n", __func__);
5589 return;
5592 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5593 "restarting\n", __func__, vap->iv_state);
5595 ieee80211_restart_all(ic);
5596 return;
5599 if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5600 handled |= IWM_CSR_INT_BIT_HW_ERR;
5601 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5602 iwm_stop(sc);
5603 rv = 1;
5604 goto out;
5607 /* firmware chunk loaded */
5608 if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5609 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5610 handled |= IWM_CSR_INT_BIT_FH_TX;
5611 sc->sc_fw_chunk_done = 1;
5612 wakeup(&sc->sc_fw);
5615 if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5616 handled |= IWM_CSR_INT_BIT_RF_KILL;
5617 if (iwm_check_rfkill(sc)) {
5618 device_printf(sc->sc_dev,
5619 "%s: rfkill switch, disabling interface\n",
5620 __func__);
5621 iwm_stop(sc);
5626 * The Linux driver uses periodic interrupts to avoid races.
5627 * We cargo-cult like it's going out of fashion.
5629 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5630 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5631 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5632 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5633 IWM_WRITE_1(sc,
5634 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5635 isperiodic = 1;
5638 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5639 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5640 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5642 iwm_notif_intr(sc);
5644 /* enable periodic interrupt, see above */
5645 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5646 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5647 IWM_CSR_INT_PERIODIC_ENA);
5650 if (__predict_false(r1 & ~handled))
5651 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5652 "%s: unhandled interrupts: %x\n", __func__, r1);
5653 rv = 1;
5655 out_ena:
5656 iwm_restore_interrupts(sc);
5657 out:
5658 IWM_UNLOCK(sc);
5659 return;
5663 * Autoconf glue-sniffing
5665 #define PCI_VENDOR_INTEL 0x8086
5666 #define PCI_PRODUCT_INTEL_WL_3160_1 0x08b3
5667 #define PCI_PRODUCT_INTEL_WL_3160_2 0x08b4
5668 #define PCI_PRODUCT_INTEL_WL_3165_1 0x3165
5669 #define PCI_PRODUCT_INTEL_WL_3165_2 0x3166
5670 #define PCI_PRODUCT_INTEL_WL_3168 0x24fb
5671 #define PCI_PRODUCT_INTEL_WL_7260_1 0x08b1
5672 #define PCI_PRODUCT_INTEL_WL_7260_2 0x08b2
5673 #define PCI_PRODUCT_INTEL_WL_7265_1 0x095a
5674 #define PCI_PRODUCT_INTEL_WL_7265_2 0x095b
5675 #define PCI_PRODUCT_INTEL_WL_8260_1 0x24f3
5676 #define PCI_PRODUCT_INTEL_WL_8260_2 0x24f4
5677 #define PCI_PRODUCT_INTEL_WL_8265 0x24fd
5679 static const struct iwm_devices {
5680 uint16_t device;
5681 const struct iwm_cfg *cfg;
5682 } iwm_devices[] = {
5683 { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5684 { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5685 { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5686 { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5687 { PCI_PRODUCT_INTEL_WL_3168, &iwm3168_cfg },
5688 { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5689 { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5690 { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5691 { PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5692 { PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5693 { PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5694 { PCI_PRODUCT_INTEL_WL_8265, &iwm8265_cfg },
5697 static int
5698 iwm_probe(device_t dev)
5700 int i;
5702 for (i = 0; i < nitems(iwm_devices); i++) {
5703 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5704 pci_get_device(dev) == iwm_devices[i].device) {
5705 device_set_desc(dev, iwm_devices[i].cfg->name);
5706 return (BUS_PROBE_DEFAULT);
5710 return (ENXIO);
5713 static int
5714 iwm_dev_check(device_t dev)
5716 struct iwm_softc *sc;
5717 uint16_t devid;
5718 int i;
5720 sc = device_get_softc(dev);
5722 devid = pci_get_device(dev);
5723 for (i = 0; i < NELEM(iwm_devices); i++) {
5724 if (iwm_devices[i].device == devid) {
5725 sc->cfg = iwm_devices[i].cfg;
5726 return (0);
5729 device_printf(dev, "unknown adapter type\n");
5730 return ENXIO;
5733 /* PCI registers */
5734 #define PCI_CFG_RETRY_TIMEOUT 0x041
static int
iwm_pci_attach(device_t dev)
{
	struct iwm_softc *sc;
	int count, error, rid;
	uint16_t reg;
#if defined(__DragonFly__)
	int irq_flags;
#endif

	sc = device_get_softc(dev);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);

	/* Enable bus-mastering and hardware bug workaround. */
	pci_enable_busmaster(dev);
	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
	/* if !MSI */
	if (reg & PCIM_STATUS_INTxSTATE) {
		reg &= ~PCIM_STATUS_INTxSTATE;
	}
	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));

	rid = PCIR_BAR(0);
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(sc->sc_dev, "can't map mem space\n");
		return (ENXIO);
	}
	sc->sc_st = rman_get_bustag(sc->sc_mem);
	sc->sc_sh = rman_get_bushandle(sc->sc_mem);

	/* Install interrupt handler. */
	count = 1;
	rid = 0;
#if defined(__DragonFly__)
	pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
#else
	if (pci_alloc_msi(dev, &count) == 0)
		rid = 1;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
#endif
	if (sc->sc_irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
		return (ENXIO);
	}
#if defined(__DragonFly__)
	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
	    iwm_intr, sc, &sc->sc_ih,
	    &wlan_global_serializer);
#else
	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwm_intr, sc, &sc->sc_ih);
#endif
	if (sc->sc_ih == NULL) {
		device_printf(dev, "can't establish interrupt\n");
#if defined(__DragonFly__)
		pci_release_msi(dev);
#endif
		return (ENXIO);
	}
	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);

	return (0);
}

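/*
 * Undo iwm_pci_attach(): tear down the interrupt handler and release
 * the IRQ and memory-mapped register resources.
 */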
static void
iwm_pci_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
#if defined(__DragonFly__)
		sc->sc_irq = NULL;
#endif
	}
	if (sc->sc_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);
#if defined(__DragonFly__)
		sc->sc_mem = NULL;
#endif
	}
}

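/*
 * Main attach: initialize locks, callouts and notification waiters,
 * attach to PCI, normalize the hardware revision, allocate the DMA
 * rings, and defer firmware-dependent setup to iwm_preinit() via a
 * config_intrhook, which runs once interrupts are enabled.
 */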
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
#if defined(__DragonFly__)
	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
#else
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
#endif
	callout_init(&sc->sc_led_blink_to);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	sc->sf_state = IWM_SF_UNINIT;

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	sc->last_ebs_successful = TRUE;

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	sc->sc_wantresp = -1;

	/* Match device id */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed, and the revision step now also includes bits 0-1 (there
	 * is no more "dash" value).  To keep hw_rev backwards compatible, we
	 * store it in the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

		if (iwm_prepare_card_hw(sc) != 0) {
			device_printf(dev, "could not initialize hardware\n");
			goto fail;
		}

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
				    (IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case the 7265D, which has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Advertise full-offload scanning */
	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	error = iwm_read_firmware(sc);
	if (error) {
		goto fail;
	} else if (sc->sc_fw.fw_fp == NULL) {
		/*
		 * XXX Add a solution for properly deferring firmware load
		 *     during bootup.
		 */
		goto fail;
	} else {
		sc->sc_preinit_hook.ich_func = iwm_preinit;
		sc->sc_preinit_hook.ich_arg = sc;
		sc->sc_preinit_hook.ich_desc = "iwm";
		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
			device_printf(dev,
			    "config_intrhook_establish failed\n");
			goto fail;
		}
	}

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}

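/*
 * A MAC address is unusable if the multicast bit is set or if it is
 * all zeroes.
 */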
static int
iwm_is_valid_ether_addr(uint8_t *addr)
{
	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
		return (FALSE);

	return (TRUE);
}

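/*
 * Push updated WME/EDCA parameters to the firmware.  The parameters
 * are snapshotted under the net80211 lock and converted via IWM_EXP2
 * (e.g. ECWmin = 4 yields CWmin = 2^4 - 1 = 15), then applied under
 * the driver lock through a MAC context update once associated.
 */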
static int
iwm_wme_update(struct ieee80211com *ic)
{
#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct iwm_softc *sc = ic->ic_softc;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct iwm_node *in;
	struct wmeParams tmp[WME_NUM_AC];
	int aci, error;

	if (vap == NULL)
		return (0);

	IEEE80211_LOCK(ic);
	for (aci = 0; aci < WME_NUM_AC; aci++)
		tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
	IEEE80211_UNLOCK(ic);

	IWM_LOCK(sc);
	for (aci = 0; aci < WME_NUM_AC; aci++) {
		const struct wmeParams *ac = &tmp[aci];
		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
		ivp->queue_params[aci].edca_txop =
		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
	}
	ivp->have_wme = TRUE;
	if (ivp->is_uploaded && vap->iv_bss != NULL) {
		in = IWM_NODE(vap->iv_bss);
		if (in->in_assoc) {
			if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: failed to update MAC\n", __func__);
			}
		}
	}
	IWM_UNLOCK(sc);

	return (0);
#undef IWM_EXP2
}

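/*
 * Deferred attach stage, run from the config_intrhook established in
 * iwm_attach(): start the hardware once to run the init firmware and
 * obtain NVM data, then attach net80211 and install driver callbacks.
 */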
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_wme_update;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}

/*
 * Attach the interface to 802.11 radiotap.
 */
static void
iwm_radiotap_attach(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s begin\n", __func__);
	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWM_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWM_RX_RADIOTAP_PRESENT);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s end\n", __func__);
}

static struct ieee80211vap *
iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwm_vap *ivp;
	struct ieee80211vap *vap;

	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
		return NULL;
	ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
	vap = &ivp->iv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	vap->iv_bmissthreshold = 10;		/* override default */
	/* Override with driver methods. */
	ivp->iv_newstate = vap->iv_newstate;
	vap->iv_newstate = iwm_newstate;

	ivp->id = IWM_DEFAULT_MACID;
	ivp->color = IWM_DEFAULT_COLOR;

	ivp->have_wme = FALSE;
	ivp->ps_disabled = FALSE;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
	    mac);
	ic->ic_opmode = opmode;

	return vap;
}

static void
iwm_vap_delete(struct ieee80211vap *vap)
{
	struct iwm_vap *ivp = IWM_VAP(vap);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	kfree(ivp, M_80211_VAP);
}

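/*
 * Free any frames still queued for transmit, dropping the node
 * reference that each queued mbuf holds.
 */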
static void
iwm_xmit_queue_drain(struct iwm_softc *sc)
{
	struct mbuf *m;
	struct ieee80211_node *ni;

	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		ieee80211_free_node(ni);
		m_freem(m);
	}
}

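/*
 * Start a firmware-offloaded scan, preferring the UMAC scan API when
 * the firmware advertises it and falling back to the LMAC scan
 * otherwise.
 */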
static void
iwm_scan_start(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;
	int error;

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/* This should not be possible */
		device_printf(sc->sc_dev,
		    "%s: Previous scan not completed yet\n", __func__);
	}
	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
		error = iwm_mvm_umac_scan(sc);
	else
		error = iwm_mvm_lmac_scan(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not initiate scan\n");
		IWM_UNLOCK(sc);
		ieee80211_cancel_scan(vap);
	} else {
		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
		iwm_led_blink_start(sc);
		IWM_UNLOCK(sc);
	}
}

static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}

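/*
 * With full-offload scanning (IEEE80211_FEXT_SCAN_OFFLOAD) the
 * firmware steps through the channel list and handles dwell times
 * itself, so these per-channel net80211 callbacks are no-ops.
 */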
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}

static void
iwm_set_channel(struct ieee80211com *ic)
{
}

static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}

static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
	return;
}

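/*
 * Restart helper: wait for any concurrent restart to finish
 * (IWM_FLAG_BUSY), then stop the device and reinitialize it if the
 * interface is still marked running.
 */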
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	while (sc->sc_flags & IWM_FLAG_BUSY) {
#if defined(__DragonFly__)
		lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
#else
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
#endif
	}
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}

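/*
 * Power-management entry points.  Both bail out early unless attach
 * completed (sc->sc_attached), since a half-attached device has no
 * driver state to save or restore.
 */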
static int
iwm_resume(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	int do_reinit = 0;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);

	if (!sc->sc_attached)
		return 0;

	iwm_init_task(device_get_softc(dev));

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCANNING) {
		sc->sc_flags &= ~IWM_FLAG_SCANNING;
		do_reinit = 1;
	}
	IWM_UNLOCK(sc);

	if (do_reinit)
		ieee80211_resume_all(&sc->sc_ic);

	return 0;
}

static int
iwm_suspend(device_t dev)
{
	int do_stop = 0;
	struct iwm_softc *sc = device_get_softc(dev);

	do_stop = !!(sc->sc_ic.ic_nrunning > 0);

	if (!sc->sc_attached)
		return (0);

	ieee80211_suspend_all(&sc->sc_ic);

	if (do_stop) {
		IWM_LOCK(sc);
		iwm_stop(sc);
		sc->sc_flags |= IWM_FLAG_SCANNING;
		IWM_UNLOCK(sc);
	}

	return (0);
}

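/*
 * Common teardown, shared by detach proper and the attach failure
 * paths; do_net80211 indicates whether ieee80211_ifattach() has run
 * and net80211 state must also be torn down.
 */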
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;
	if (do_net80211) {
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
	}
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		IWM_LOCK(sc);
		iwm_xmit_queue_drain(sc);
		IWM_UNLOCK(sc);
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	IWM_LOCK_DESTROY(sc);

	return (0);
}

static int
iwm_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	return (iwm_detach_local(sc, 1));
}

static device_method_t iwm_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwm_probe),
	DEVMETHOD(device_attach,	iwm_attach),
	DEVMETHOD(device_detach,	iwm_detach),
	DEVMETHOD(device_suspend,	iwm_suspend),
	DEVMETHOD(device_resume,	iwm_resume),

	DEVMETHOD_END
};

static driver_t iwm_pci_driver = {
	"iwm",
	iwm_pci_methods,
	sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);