if_iwm - Partly sync if_iwm_binding.c to Linux iwlwifi code.
[dragonfly.git] / sys / dev / netif / iwm / if_iwm.c
blob 64862756267e5f7660a2e507338a3efaef3dbd3a
1 /* $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $ */
3 /*
4 * Copyright (c) 2014 genua mbh <info@genua.de>
5 * Copyright (c) 2014 Fixup Software Ltd.
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 /*-
21 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22 * which were used as the reference documentation for this implementation.
24 * The driver version we are currently based on is
25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
27 ***********************************************************************
29 * This file is provided under a dual BSD/GPLv2 license. When using or
30 * redistributing this file, you may do so under either license.
32 * GPL LICENSE SUMMARY
34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
43 * General Public License for more details.
45 * You should have received a copy of the GNU General Public License
46 * along with this program; if not, write to the Free Software
47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48 * USA
50 * The full GNU General Public License is included in this distribution
51 * in the file called COPYING.
53 * Contact Information:
54 * Intel Linux Wireless <ilw@linux.intel.com>
55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
58 * BSD LICENSE
60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61 * All rights reserved.
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
65 * are met:
67 * * Redistributions of source code must retain the above copyright
68 * notice, this list of conditions and the following disclaimer.
69 * * Redistributions in binary form must reproduce the above copyright
70 * notice, this list of conditions and the following disclaimer in
71 * the documentation and/or other materials provided with the
72 * distribution.
73 * * Neither the name Intel Corporation nor the names of its
74 * contributors may be used to endorse or promote products derived
75 * from this software without specific prior written permission.
77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
90 /*-
91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
93 * Permission to use, copy, modify, and distribute this software for any
94 * purpose with or without fee is hereby granted, provided that the above
95 * copyright notice and this permission notice appear in all copies.
97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
106 * DragonFly work
108 * NOTE: Relative to roughly August 8th sources, this does not include
109 * the FreeBSD changes to remove the per-device network interface
110 * (DragonFly has not caught up to that yet on the WLAN side).
112 * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113 * malloc -> kmalloc (in particular, changing improper M_NOWAIT
114 * specifications to M_INTWAIT. We still don't
115 * understand why FreeBSD uses M_NOWAIT for
116 * critical must-not-fail kmalloc()s).
117 * free -> kfree
118 * printf -> kprintf
119 * (bug fix) memset in iwm_reset_rx_ring.
120 * (debug) added several kprintf()s on error
122 * header file paths (DFly allows localized path specifications).
123 * minor header file differences.
125 * Comprehensive list of adjustments for DragonFly #ifdef'd:
126 * (safety) added register read-back serialization in iwm_reset_rx_ring().
127 * packet counters
128 * msleep -> lksleep
129 * mtx -> lk (mtx functions -> lockmgr functions)
130 * callout differences
131 * taskqueue differences
132 * MSI differences
133 * bus_setup_intr() differences
134 * minor PCI config register naming differences
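 * (a sketch of the msleep -> lksleep adjustment follows the
 * #include block below)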
136 #include <sys/cdefs.h>
137 __FBSDID("$FreeBSD$");
139 #include <sys/param.h>
140 #include <sys/bus.h>
141 #include <sys/endian.h>
142 #include <sys/firmware.h>
143 #include <sys/kernel.h>
144 #include <sys/malloc.h>
145 #include <sys/mbuf.h>
146 #include <sys/module.h>
147 #include <sys/rman.h>
148 #include <sys/sysctl.h>
149 #include <sys/linker.h>
151 #include <machine/endian.h>
153 #include <bus/pci/pcivar.h>
154 #include <bus/pci/pcireg.h>
156 #include <net/bpf.h>
158 #include <net/if.h>
159 #include <net/if_var.h>
160 #include <net/if_arp.h>
161 #include <net/if_dl.h>
162 #include <net/if_media.h>
163 #include <net/if_types.h>
165 #include <netinet/in.h>
166 #include <netinet/in_systm.h>
167 #include <netinet/if_ether.h>
168 #include <netinet/ip.h>
170 #include <netproto/802_11/ieee80211_var.h>
171 #include <netproto/802_11/ieee80211_regdomain.h>
172 #include <netproto/802_11/ieee80211_ratectl.h>
173 #include <netproto/802_11/ieee80211_radiotap.h>
175 #include "if_iwmreg.h"
176 #include "if_iwmvar.h"
177 #include "if_iwm_config.h"
178 #include "if_iwm_debug.h"
179 #include "if_iwm_notif_wait.h"
180 #include "if_iwm_util.h"
181 #include "if_iwm_binding.h"
182 #include "if_iwm_phy_db.h"
183 #include "if_iwm_mac_ctxt.h"
184 #include "if_iwm_phy_ctxt.h"
185 #include "if_iwm_time_event.h"
186 #include "if_iwm_power.h"
187 #include "if_iwm_scan.h"
188 #include "if_iwm_pcie_trans.h"
189 #include "if_iwm_led.h"
190 #include "if_iwm_fw.h"
192 const uint8_t iwm_nvm_channels[] = {
193 /* 2.4 GHz */
194 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
195 /* 5 GHz */
196 36, 40, 44, 48, 52, 56, 60, 64,
197 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
198 149, 153, 157, 161, 165
200 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
201 "IWM_NUM_CHANNELS is too small");
203 const uint8_t iwm_nvm_channels_8000[] = {
204 /* 2.4 GHz */
205 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
206 /* 5 GHz */
207 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
208 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
209 149, 153, 157, 161, 165, 169, 173, 177, 181
211 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
212 "IWM_NUM_CHANNELS_8000 is too small");
214 #define IWM_NUM_2GHZ_CHANNELS 14
215 #define IWM_N_HW_ADDR_MASK 0xF
218 * XXX For now, there's simply a fixed set of rate table entries
219 * that are populated.
221 const struct iwm_rate {
222 uint8_t rate;
223 uint8_t plcp;
224 } iwm_rates[] = {
225 { 2, IWM_RATE_1M_PLCP },
226 { 4, IWM_RATE_2M_PLCP },
227 { 11, IWM_RATE_5M_PLCP },
228 { 22, IWM_RATE_11M_PLCP },
229 { 12, IWM_RATE_6M_PLCP },
230 { 18, IWM_RATE_9M_PLCP },
231 { 24, IWM_RATE_12M_PLCP },
232 { 36, IWM_RATE_18M_PLCP },
233 { 48, IWM_RATE_24M_PLCP },
234 { 72, IWM_RATE_36M_PLCP },
235 { 96, IWM_RATE_48M_PLCP },
236 { 108, IWM_RATE_54M_PLCP },
238 #define IWM_RIDX_CCK 0
239 #define IWM_RIDX_OFDM 4
240 #define IWM_RIDX_MAX (nitems(iwm_rates)-1)
241 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
242 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
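/*
 * A minimal usage sketch (not part of the driver): find the table
 * index for an 802.11 rate given in 500 kbps units, matching the
 * iwm_rates[].rate encoding above, then classify it with the
 * IWM_RIDX_IS_CCK/IWM_RIDX_IS_OFDM macros.
 */
#if 0
static int
iwm_example_rate2ridx(int rate)
{
	int i;

	for (i = 0; i <= IWM_RIDX_MAX; i++) {
		if (iwm_rates[i].rate == rate)
			return i;	/* CCK for 1-11 Mbps, OFDM above */
	}
	return -1;		/* rate not in the fixed table */
}
#endif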
244 struct iwm_nvm_section {
245 uint16_t length;
246 uint8_t *data;
249 #define IWM_MVM_UCODE_ALIVE_TIMEOUT hz
250 #define IWM_MVM_UCODE_CALIB_TIMEOUT (2*hz)
252 struct iwm_mvm_alive_data {
253 int valid;
254 uint32_t scd_base_addr;
257 static int iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
258 static int iwm_firmware_store_section(struct iwm_softc *,
259 enum iwm_ucode_type,
260 const uint8_t *, size_t);
261 static int iwm_set_default_calib(struct iwm_softc *, const void *);
262 static void iwm_fw_info_free(struct iwm_fw_info *);
263 static int iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
264 #if !defined(__DragonFly__)
265 static void iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
266 #endif
267 static int iwm_alloc_fwmem(struct iwm_softc *);
268 static int iwm_alloc_sched(struct iwm_softc *);
269 static int iwm_alloc_kw(struct iwm_softc *);
270 static int iwm_alloc_ict(struct iwm_softc *);
271 static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
272 static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
273 static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
274 static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
275 int);
276 static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
277 static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
278 static void iwm_enable_interrupts(struct iwm_softc *);
279 static void iwm_restore_interrupts(struct iwm_softc *);
280 static void iwm_disable_interrupts(struct iwm_softc *);
281 static void iwm_ict_reset(struct iwm_softc *);
282 static int iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
283 static void iwm_stop_device(struct iwm_softc *);
284 static void iwm_mvm_nic_config(struct iwm_softc *);
285 static int iwm_nic_rx_init(struct iwm_softc *);
286 static int iwm_nic_tx_init(struct iwm_softc *);
287 static int iwm_nic_init(struct iwm_softc *);
288 static int iwm_enable_txq(struct iwm_softc *, int, int, int);
289 static int iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
290 static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
291 uint16_t, uint8_t *, uint16_t *);
292 static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
293 uint16_t *, uint32_t);
294 static uint32_t iwm_eeprom_channel_flags(uint16_t);
295 static void iwm_add_channel_band(struct iwm_softc *,
296 struct ieee80211_channel[], int, int *, int, size_t,
297 const uint8_t[]);
298 static void iwm_init_channel_map(struct ieee80211com *, int, int *,
299 struct ieee80211_channel[]);
300 static struct iwm_nvm_data *
301 iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
302 const uint16_t *, const uint16_t *,
303 const uint16_t *, const uint16_t *,
304 const uint16_t *);
305 static void iwm_free_nvm_data(struct iwm_nvm_data *);
306 static void iwm_set_hw_address_family_8000(struct iwm_softc *,
307 struct iwm_nvm_data *,
308 const uint16_t *,
309 const uint16_t *);
310 static int iwm_get_sku(const struct iwm_softc *, const uint16_t *,
311 const uint16_t *);
312 static int iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
313 static int iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
314 const uint16_t *);
315 static int iwm_get_n_hw_addrs(const struct iwm_softc *,
316 const uint16_t *);
317 static void iwm_set_radio_cfg(const struct iwm_softc *,
318 struct iwm_nvm_data *, uint32_t);
319 static struct iwm_nvm_data *
320 iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
321 static int iwm_nvm_init(struct iwm_softc *);
322 static int iwm_pcie_load_section(struct iwm_softc *, uint8_t,
323 const struct iwm_fw_desc *);
324 static int iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
325 bus_addr_t, uint32_t);
326 static int iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
327 const struct iwm_fw_sects *,
328 int, int *);
329 static int iwm_pcie_load_cpu_sections(struct iwm_softc *,
330 const struct iwm_fw_sects *,
331 int, int *);
332 static int iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
333 const struct iwm_fw_sects *);
334 static int iwm_pcie_load_given_ucode(struct iwm_softc *,
335 const struct iwm_fw_sects *);
336 static int iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
337 static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
338 static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
339 static int iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
340 enum iwm_ucode_type);
341 static int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
342 static int iwm_rx_addbuf(struct iwm_softc *, int, int);
343 static int iwm_mvm_get_signal_strength(struct iwm_softc *,
344 struct iwm_rx_phy_info *);
345 static void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
346 struct iwm_rx_packet *);
347 static int iwm_get_noise(struct iwm_softc *sc,
348 const struct iwm_mvm_statistics_rx_non_phy *);
349 static void iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *);
350 static int iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
351 struct iwm_rx_packet *,
352 struct iwm_node *);
353 static void iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
354 static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
355 #if 0
356 static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
357 uint16_t);
358 #endif
359 static const struct iwm_rate *
360 iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
361 struct ieee80211_frame *, struct iwm_tx_cmd *);
362 static int iwm_tx(struct iwm_softc *, struct mbuf *,
363 struct ieee80211_node *, int);
364 static int iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
365 const struct ieee80211_bpf_params *);
366 static int iwm_mvm_flush_tx_path(struct iwm_softc *sc,
367 uint32_t tfd_msk, uint32_t flags);
368 static int iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
369 struct iwm_mvm_add_sta_cmd *,
370 int *);
371 static int iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
372 int);
373 static int iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
374 static int iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
375 static int iwm_mvm_add_int_sta_common(struct iwm_softc *,
376 struct iwm_int_sta *,
377 const uint8_t *, uint16_t, uint16_t);
378 static int iwm_mvm_add_aux_sta(struct iwm_softc *);
379 static int iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
380 static int iwm_auth(struct ieee80211vap *, struct iwm_softc *);
381 static int iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
382 static int iwm_release(struct iwm_softc *, struct iwm_node *);
383 static struct ieee80211_node *
384 iwm_node_alloc(struct ieee80211vap *,
385 const uint8_t[IEEE80211_ADDR_LEN]);
386 static void iwm_setrates(struct iwm_softc *, struct iwm_node *);
387 static int iwm_media_change(struct ifnet *);
388 static int iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
389 static void iwm_endscan_cb(void *, int);
390 static void iwm_mvm_fill_sf_command(struct iwm_softc *,
391 struct iwm_sf_cfg_cmd *,
392 struct ieee80211_node *);
393 static int iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
394 static int iwm_send_bt_init_conf(struct iwm_softc *);
395 static int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
396 static void iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
397 static int iwm_init_hw(struct iwm_softc *);
398 static void iwm_init(struct iwm_softc *);
399 static void iwm_start(struct iwm_softc *);
400 static void iwm_stop(struct iwm_softc *);
401 static void iwm_watchdog(void *);
402 static void iwm_parent(struct ieee80211com *);
403 #ifdef IWM_DEBUG
404 static const char *
405 iwm_desc_lookup(uint32_t);
406 static void iwm_nic_error(struct iwm_softc *);
407 static void iwm_nic_umac_error(struct iwm_softc *);
408 #endif
409 static void iwm_notif_intr(struct iwm_softc *);
410 static void iwm_intr(void *);
411 static int iwm_attach(device_t);
412 static int iwm_is_valid_ether_addr(uint8_t *);
413 static void iwm_preinit(void *);
414 static int iwm_detach_local(struct iwm_softc *sc, int);
415 static void iwm_init_task(void *);
416 static void iwm_radiotap_attach(struct iwm_softc *);
417 static struct ieee80211vap *
418 iwm_vap_create(struct ieee80211com *,
419 const char [IFNAMSIZ], int,
420 enum ieee80211_opmode, int,
421 const uint8_t [IEEE80211_ADDR_LEN],
422 const uint8_t [IEEE80211_ADDR_LEN]);
423 static void iwm_vap_delete(struct ieee80211vap *);
424 static void iwm_scan_start(struct ieee80211com *);
425 static void iwm_scan_end(struct ieee80211com *);
426 static void iwm_update_mcast(struct ieee80211com *);
427 static void iwm_set_channel(struct ieee80211com *);
428 static void iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
429 static void iwm_scan_mindwell(struct ieee80211_scan_state *);
430 static int iwm_detach(device_t);
432 #if defined(__DragonFly__)
433 static int iwm_msi_enable = 1;
435 TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
437 #endif
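/*
 * Usage note (sketch): TUNABLE_INT() reads the value from the boot
 * loader environment, so MSI can be disabled before the driver
 * attaches by adding this to /boot/loader.conf:
 *
 *	hw.iwm.msi.enable="0"
 */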
440 * Firmware parser.
443 static int
444 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
446 const struct iwm_fw_cscheme_list *l = (const void *)data;
448 if (dlen < sizeof(*l) ||
449 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
450 return EINVAL;
452 /* we don't actually store anything for now, always use s/w crypto */
454 return 0;
457 static int
458 iwm_firmware_store_section(struct iwm_softc *sc,
459 enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
461 struct iwm_fw_sects *fws;
462 struct iwm_fw_desc *fwone;
464 if (type >= IWM_UCODE_TYPE_MAX)
465 return EINVAL;
466 if (dlen < sizeof(uint32_t))
467 return EINVAL;
469 fws = &sc->sc_fw.fw_sects[type];
470 if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
471 return EINVAL;
473 fwone = &fws->fw_sect[fws->fw_count];
475 /* first 32bit are device load offset */
476 memcpy(&fwone->offset, data, sizeof(uint32_t));
478 /* rest is data */
479 fwone->data = data + sizeof(uint32_t);
480 fwone->len = dlen - sizeof(uint32_t);
482 fws->fw_count++;
484 return 0;
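/*
 * Wire layout consumed above (sketch): per the comments in
 * iwm_firmware_store_section(), each IWM_UCODE_TLV_SEC_* payload is a
 * 32-bit little-endian device load offset followed by the raw image.
 */
#if 0
struct iwm_example_fw_section_wire {
	uint32_t offset;	/* device address the section loads at */
	uint8_t	 data[];	/* dlen - sizeof(uint32_t) bytes of image */
} __packed;
#endif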
487 #define IWM_DEFAULT_SCAN_CHANNELS 40
489 struct iwm_tlv_calib_data {
490 uint32_t ucode_type;
491 struct iwm_tlv_calib_ctrl calib;
492 } __packed;
494 static int
495 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
497 const struct iwm_tlv_calib_data *def_calib = data;
498 uint32_t ucode_type = le32toh(def_calib->ucode_type);
500 if (ucode_type >= IWM_UCODE_TYPE_MAX) {
501 device_printf(sc->sc_dev,
502 "Wrong ucode_type %u for default "
503 "calibration.\n", ucode_type);
504 return EINVAL;
507 sc->sc_default_calib[ucode_type].flow_trigger =
508 def_calib->calib.flow_trigger;
509 sc->sc_default_calib[ucode_type].event_trigger =
510 def_calib->calib.event_trigger;
512 return 0;
515 static int
516 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
517 struct iwm_ucode_capabilities *capa)
519 const struct iwm_ucode_api *ucode_api = (const void *)data;
520 uint32_t api_index = le32toh(ucode_api->api_index);
521 uint32_t api_flags = le32toh(ucode_api->api_flags);
522 int i;
524 if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
525 device_printf(sc->sc_dev,
526 "api flags index %d larger than supported by driver\n",
527 api_index);
528 /* don't return an error so we can load FW that has more bits */
529 return 0;
532 for (i = 0; i < 32; i++) {
533 if (api_flags & (1U << i))
534 setbit(capa->enabled_api, i + 32 * api_index);
537 return 0;
540 static int
541 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
542 struct iwm_ucode_capabilities *capa)
544 const struct iwm_ucode_capa *ucode_capa = (const void *)data;
545 uint32_t api_index = le32toh(ucode_capa->api_index);
546 uint32_t api_flags = le32toh(ucode_capa->api_capa);
547 int i;
549 if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
550 device_printf(sc->sc_dev,
551 "capa flags index %d larger than supported by driver\n",
552 api_index);
553 /* don't return an error so we can load FW that has more bits */
554 return 0;
557 for (i = 0; i < 32; i++) {
558 if (api_flags & (1U << i))
559 setbit(capa->enabled_capa, i + 32 * api_index);
562 return 0;
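/*
 * A minimal usage sketch (not part of the driver): after the two
 * parsers above fill the bitmaps via setbit(), a bit is tested with
 * the matching isset() bitstring macro; the capability name here is
 * assumed to come from if_iwmreg.h.
 */
#if 0
static int
iwm_example_fw_has_capa(const struct iwm_ucode_capabilities *capa)
{
	return isset(capa->enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
}
#endif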
565 static void
566 iwm_fw_info_free(struct iwm_fw_info *fw)
568 firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
569 fw->fw_fp = NULL;
570 /* don't touch fw->fw_status */
571 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
574 static int
575 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
577 struct iwm_fw_info *fw = &sc->sc_fw;
578 const struct iwm_tlv_ucode_header *uhdr;
579 struct iwm_ucode_tlv tlv;
580 struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
581 enum iwm_ucode_tlv_type tlv_type;
582 const struct firmware *fwp;
583 const uint8_t *data;
584 uint32_t usniffer_img;
585 uint32_t paging_mem_size;
586 int num_of_cpus;
587 int error = 0;
588 size_t len;
590 if (fw->fw_status == IWM_FW_STATUS_DONE &&
591 ucode_type != IWM_UCODE_INIT)
592 return 0;
594 while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
595 #if defined(__DragonFly__)
596 lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
597 #else
598 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
599 #endif
601 fw->fw_status = IWM_FW_STATUS_INPROGRESS;
603 if (fw->fw_fp != NULL)
604 iwm_fw_info_free(fw);
607 * Load firmware into driver memory.
608 * fw_fp will be set.
610 IWM_UNLOCK(sc);
611 fwp = firmware_get(sc->cfg->fw_name);
612 IWM_LOCK(sc);
613 if (fwp == NULL) {
614 device_printf(sc->sc_dev,
615 "could not read firmware %s\n", sc->cfg->fw_name);
616 error = ENOENT;
617 goto out;
619 fw->fw_fp = fwp;
621 /* (Re-)Initialize default values. */
622 capa->flags = 0;
623 capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
624 capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
625 memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
626 memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
627 memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
630 * Parse firmware contents
633 uhdr = (const void *)fw->fw_fp->data;
634 if (*(const uint32_t *)fw->fw_fp->data != 0
635 || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
636 device_printf(sc->sc_dev, "invalid firmware %s\n",
637 sc->cfg->fw_name);
638 error = EINVAL;
639 goto out;
642 ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
643 IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
644 IWM_UCODE_MINOR(le32toh(uhdr->ver)),
645 IWM_UCODE_API(le32toh(uhdr->ver)));
646 data = uhdr->data;
647 len = fw->fw_fp->datasize - sizeof(*uhdr);
649 while (len >= sizeof(tlv)) {
650 size_t tlv_len;
651 const void *tlv_data;
653 memcpy(&tlv, data, sizeof(tlv));
654 tlv_len = le32toh(tlv.length);
655 tlv_type = le32toh(tlv.type);
657 len -= sizeof(tlv);
658 data += sizeof(tlv);
659 tlv_data = data;
661 if (len < tlv_len) {
662 device_printf(sc->sc_dev,
663 "firmware too short: %zu bytes\n",
664 len);
665 error = EINVAL;
666 goto parse_out;
669 switch ((int)tlv_type) {
670 case IWM_UCODE_TLV_PROBE_MAX_LEN:
671 if (tlv_len < sizeof(uint32_t)) {
672 device_printf(sc->sc_dev,
673 "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
674 __func__,
675 (int) tlv_len);
676 error = EINVAL;
677 goto parse_out;
679 capa->max_probe_length =
680 le32toh(*(const uint32_t *)tlv_data);
681 /* limit it to something sensible */
682 if (capa->max_probe_length >
683 IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
684 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
685 "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
686 "ridiculous\n", __func__);
687 error = EINVAL;
688 goto parse_out;
690 break;
691 case IWM_UCODE_TLV_PAN:
692 if (tlv_len) {
693 device_printf(sc->sc_dev,
694 "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
695 __func__,
696 (int) tlv_len);
697 error = EINVAL;
698 goto parse_out;
700 capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
701 break;
702 case IWM_UCODE_TLV_FLAGS:
703 if (tlv_len < sizeof(uint32_t)) {
704 device_printf(sc->sc_dev,
705 "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
706 __func__,
707 (int) tlv_len);
708 error = EINVAL;
709 goto parse_out;
712 * Apparently there can be many flags, but the Linux driver
713 * parses only the first one, and so do we.
715 * XXX: why does this override IWM_UCODE_TLV_PAN?
716 * Intentional or a bug? Observations from
717 * current firmware file:
718 * 1) TLV_PAN is parsed first
719 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
720 * ==> this resets TLV_PAN to itself... hnnnk
722 capa->flags = le32toh(*(const uint32_t *)tlv_data);
723 break;
724 case IWM_UCODE_TLV_CSCHEME:
725 if ((error = iwm_store_cscheme(sc,
726 tlv_data, tlv_len)) != 0) {
727 device_printf(sc->sc_dev,
728 "%s: iwm_store_cscheme(): returned %d\n",
729 __func__,
730 error);
731 goto parse_out;
733 break;
734 case IWM_UCODE_TLV_NUM_OF_CPU:
735 if (tlv_len != sizeof(uint32_t)) {
736 device_printf(sc->sc_dev,
737 "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
738 __func__,
739 (int) tlv_len);
740 error = EINVAL;
741 goto parse_out;
743 num_of_cpus = le32toh(*(const uint32_t *)tlv_data);
744 if (num_of_cpus == 2) {
745 fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
746 TRUE;
747 fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
748 TRUE;
749 fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
750 TRUE;
751 } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
752 device_printf(sc->sc_dev,
753 "%s: Driver supports only 1 or 2 CPUs\n",
754 __func__);
755 error = EINVAL;
756 goto parse_out;
758 break;
759 case IWM_UCODE_TLV_SEC_RT:
760 if ((error = iwm_firmware_store_section(sc,
761 IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
762 device_printf(sc->sc_dev,
763 "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
764 __func__,
765 error);
766 goto parse_out;
768 break;
769 case IWM_UCODE_TLV_SEC_INIT:
770 if ((error = iwm_firmware_store_section(sc,
771 IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
772 device_printf(sc->sc_dev,
773 "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
774 __func__,
775 error);
776 goto parse_out;
778 break;
779 case IWM_UCODE_TLV_SEC_WOWLAN:
780 if ((error = iwm_firmware_store_section(sc,
781 IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
782 device_printf(sc->sc_dev,
783 "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
784 __func__,
785 error);
786 goto parse_out;
788 break;
789 case IWM_UCODE_TLV_DEF_CALIB:
790 if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
791 device_printf(sc->sc_dev,
792 "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
793 __func__,
794 (int) tlv_len,
795 (int) sizeof(struct iwm_tlv_calib_data));
796 error = EINVAL;
797 goto parse_out;
799 if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
800 device_printf(sc->sc_dev,
801 "%s: iwm_set_default_calib() failed: %d\n",
802 __func__,
803 error);
804 goto parse_out;
806 break;
807 case IWM_UCODE_TLV_PHY_SKU:
808 if (tlv_len != sizeof(uint32_t)) {
809 error = EINVAL;
810 device_printf(sc->sc_dev,
811 "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
812 __func__,
813 (int) tlv_len);
814 goto parse_out;
816 sc->sc_fw.phy_config =
817 le32toh(*(const uint32_t *)tlv_data);
818 sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
819 IWM_FW_PHY_CFG_TX_CHAIN) >>
820 IWM_FW_PHY_CFG_TX_CHAIN_POS;
821 sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
822 IWM_FW_PHY_CFG_RX_CHAIN) >>
823 IWM_FW_PHY_CFG_RX_CHAIN_POS;
824 break;
826 case IWM_UCODE_TLV_API_CHANGES_SET: {
827 if (tlv_len != sizeof(struct iwm_ucode_api)) {
828 error = EINVAL;
829 goto parse_out;
831 if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
832 error = EINVAL;
833 goto parse_out;
835 break;
838 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
839 if (tlv_len != sizeof(struct iwm_ucode_capa)) {
840 error = EINVAL;
841 goto parse_out;
843 if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
844 error = EINVAL;
845 goto parse_out;
847 break;
850 case 48: /* undocumented TLV */
851 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
852 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
853 /* ignore, not used by current driver */
854 break;
856 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
857 if ((error = iwm_firmware_store_section(sc,
858 IWM_UCODE_REGULAR_USNIFFER, tlv_data,
859 tlv_len)) != 0)
860 goto parse_out;
861 break;
863 case IWM_UCODE_TLV_PAGING:
864 if (tlv_len != sizeof(uint32_t)) {
865 error = EINVAL;
866 goto parse_out;
868 paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
870 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
871 "%s: Paging: paging enabled (size = %u bytes)\n",
872 __func__, paging_mem_size);
873 if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
874 device_printf(sc->sc_dev,
875 "%s: Paging: driver supports up to %u bytes for paging image\n",
876 __func__, IWM_MAX_PAGING_IMAGE_SIZE);
877 error = EINVAL;
878 goto out;
880 if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
881 device_printf(sc->sc_dev,
882 "%s: Paging: image isn't multiple %u\n",
883 __func__, IWM_FW_PAGING_SIZE);
884 error = EINVAL;
885 goto out;
888 sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
889 paging_mem_size;
890 usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
891 sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
892 paging_mem_size;
893 break;
895 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
896 if (tlv_len != sizeof(uint32_t)) {
897 error = EINVAL;
898 goto parse_out;
900 capa->n_scan_channels =
901 le32toh(*(const uint32_t *)tlv_data);
902 break;
904 case IWM_UCODE_TLV_FW_VERSION:
905 if (tlv_len != sizeof(uint32_t) * 3) {
906 error = EINVAL;
907 goto parse_out;
909 ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
910 "%d.%d.%d",
911 le32toh(((const uint32_t *)tlv_data)[0]),
912 le32toh(((const uint32_t *)tlv_data)[1]),
913 le32toh(((const uint32_t *)tlv_data)[2]));
914 break;
916 case IWM_UCODE_TLV_FW_MEM_SEG:
917 break;
919 default:
920 device_printf(sc->sc_dev,
921 "%s: unknown firmware section %d, abort\n",
922 __func__, tlv_type);
923 error = EINVAL;
924 goto parse_out;
927 len -= roundup(tlv_len, 4);
928 data += roundup(tlv_len, 4);
931 KASSERT(error == 0, ("unhandled error"));
933 parse_out:
934 if (error) {
935 device_printf(sc->sc_dev, "firmware parse error %d, "
936 "section type %d\n", error, tlv_type);
939 out:
940 if (error) {
941 fw->fw_status = IWM_FW_STATUS_NONE;
942 if (fw->fw_fp != NULL)
943 iwm_fw_info_free(fw);
944 } else
945 fw->fw_status = IWM_FW_STATUS_DONE;
946 wakeup(&sc->sc_fw);
948 return error;
952 * DMA resource routines
955 /* fwmem is used to load firmware onto the card */
956 static int
957 iwm_alloc_fwmem(struct iwm_softc *sc)
959 /* Must be aligned on a 16-byte boundary. */
960 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
961 IWM_FH_MEM_TB_MAX_LENGTH, 16);
964 /* tx scheduler rings. not used? */
965 static int
966 iwm_alloc_sched(struct iwm_softc *sc)
968 /* TX scheduler rings must be aligned on a 1KB boundary. */
969 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
970 nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
973 /* keep-warm page is used internally by the card. see iwl-fh.h for more info */
974 static int
975 iwm_alloc_kw(struct iwm_softc *sc)
977 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
980 /* interrupt cause table */
981 static int
982 iwm_alloc_ict(struct iwm_softc *sc)
984 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
985 IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
988 static int
989 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
991 bus_size_t size;
992 int i, error;
994 ring->cur = 0;
996 /* Allocate RX descriptors (256-byte aligned). */
997 size = IWM_RX_RING_COUNT * sizeof(uint32_t);
998 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
999 if (error != 0) {
1000 device_printf(sc->sc_dev,
1001 "could not allocate RX ring DMA memory\n");
1002 goto fail;
1004 ring->desc = ring->desc_dma.vaddr;
1006 /* Allocate RX status area (16-byte aligned). */
1007 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1008 sizeof(*ring->stat), 16);
1009 if (error != 0) {
1010 device_printf(sc->sc_dev,
1011 "could not allocate RX status DMA memory\n");
1012 goto fail;
1014 ring->stat = ring->stat_dma.vaddr;
1016 /* Create RX buffer DMA tag. */
1017 #if defined(__DragonFly__)
1018 error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
1020 BUS_SPACE_MAXADDR_32BIT,
1021 BUS_SPACE_MAXADDR,
1022 NULL, NULL,
1023 IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
1024 BUS_DMA_NOWAIT, &ring->data_dmat);
1025 #else
1026 error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1027 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1028 IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
1029 #endif
1030 if (error != 0) {
1031 device_printf(sc->sc_dev,
1032 "%s: could not create RX buf DMA tag, error %d\n",
1033 __func__, error);
1034 goto fail;
1037 /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
1038 error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
1039 if (error != 0) {
1040 device_printf(sc->sc_dev,
1041 "%s: could not create RX buf DMA map, error %d\n",
1042 __func__, error);
1043 goto fail;
1046 * Allocate and map RX buffers.
1048 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1049 struct iwm_rx_data *data = &ring->data[i];
1050 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1051 if (error != 0) {
1052 device_printf(sc->sc_dev,
1053 "%s: could not create RX buf DMA map, error %d\n",
1054 __func__, error);
1055 goto fail;
1057 data->m = NULL;
1059 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1060 goto fail;
1063 return 0;
1065 fail: iwm_free_rx_ring(sc, ring);
1066 return error;
1069 static void
1070 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1072 /* Reset the ring state */
1073 ring->cur = 0;
1076 * The hw rx ring index in shared memory must also be cleared,
1077 * otherwise the discrepancy can cause reprocessing chaos.
1079 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1082 static void
1083 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1085 int i;
1087 iwm_dma_contig_free(&ring->desc_dma);
1088 iwm_dma_contig_free(&ring->stat_dma);
1090 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1091 struct iwm_rx_data *data = &ring->data[i];
1093 if (data->m != NULL) {
1094 bus_dmamap_sync(ring->data_dmat, data->map,
1095 BUS_DMASYNC_POSTREAD);
1096 bus_dmamap_unload(ring->data_dmat, data->map);
1097 m_freem(data->m);
1098 data->m = NULL;
1100 if (data->map != NULL) {
1101 bus_dmamap_destroy(ring->data_dmat, data->map);
1102 data->map = NULL;
1105 if (ring->spare_map != NULL) {
1106 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1107 ring->spare_map = NULL;
1109 if (ring->data_dmat != NULL) {
1110 bus_dma_tag_destroy(ring->data_dmat);
1111 ring->data_dmat = NULL;
1115 static int
1116 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1118 bus_addr_t paddr;
1119 bus_size_t size;
1120 size_t maxsize;
1121 int nsegments;
1122 int i, error;
1124 ring->qid = qid;
1125 ring->queued = 0;
1126 ring->cur = 0;
1128 /* Allocate TX descriptors (256-byte aligned). */
1129 size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1130 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1131 if (error != 0) {
1132 device_printf(sc->sc_dev,
1133 "could not allocate TX ring DMA memory\n");
1134 goto fail;
1136 ring->desc = ring->desc_dma.vaddr;
1139 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1140 * to allocate command space for other rings.
1142 if (qid > IWM_MVM_CMD_QUEUE)
1143 return 0;
1145 size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1146 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1147 if (error != 0) {
1148 device_printf(sc->sc_dev,
1149 "could not allocate TX cmd DMA memory\n");
1150 goto fail;
1152 ring->cmd = ring->cmd_dma.vaddr;
1154 /* FW commands may require more mapped space than packets. */
1155 if (qid == IWM_MVM_CMD_QUEUE) {
1156 maxsize = IWM_RBUF_SIZE;
1157 nsegments = 1;
1158 } else {
1159 maxsize = MCLBYTES;
1160 nsegments = IWM_MAX_SCATTER - 2;
1163 #if defined(__DragonFly__)
1164 error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
1166 BUS_SPACE_MAXADDR_32BIT,
1167 BUS_SPACE_MAXADDR,
1168 NULL, NULL,
1169 maxsize, nsegments, maxsize,
1170 BUS_DMA_NOWAIT, &ring->data_dmat);
1171 #else
1172 error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1173 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1174 nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1175 #endif
1176 if (error != 0) {
1177 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1178 goto fail;
1181 paddr = ring->cmd_dma.paddr;
1182 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1183 struct iwm_tx_data *data = &ring->data[i];
1185 data->cmd_paddr = paddr;
1186 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1187 + offsetof(struct iwm_tx_cmd, scratch);
1188 paddr += sizeof(struct iwm_device_cmd);
1190 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1191 if (error != 0) {
1192 device_printf(sc->sc_dev,
1193 "could not create TX buf DMA map\n");
1194 goto fail;
1197 KASSERT(paddr == ring->cmd_dma.paddr + size,
1198 ("invalid physical address"));
1199 return 0;
1201 fail: iwm_free_tx_ring(sc, ring);
1202 return error;
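/*
 * Sketch of the per-slot address math above: slot i's command buffer
 * sits at cmd_dma.paddr + i * sizeof(struct iwm_device_cmd), and its
 * scratch area lives just past the command header inside it.
 */
#if 0
static bus_addr_t
iwm_example_scratch_paddr(struct iwm_tx_ring *ring, int i)
{
	return ring->cmd_dma.paddr + i * sizeof(struct iwm_device_cmd) +
	    sizeof(struct iwm_cmd_header) +
	    offsetof(struct iwm_tx_cmd, scratch);
}
#endif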
1205 static void
1206 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1208 int i;
1210 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1211 struct iwm_tx_data *data = &ring->data[i];
1213 if (data->m != NULL) {
1214 bus_dmamap_sync(ring->data_dmat, data->map,
1215 BUS_DMASYNC_POSTWRITE);
1216 bus_dmamap_unload(ring->data_dmat, data->map);
1217 m_freem(data->m);
1218 data->m = NULL;
1221 /* Clear TX descriptors. */
1222 memset(ring->desc, 0, ring->desc_dma.size);
1223 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1224 BUS_DMASYNC_PREWRITE);
1225 sc->qfullmsk &= ~(1 << ring->qid);
1226 ring->queued = 0;
1227 ring->cur = 0;
1229 if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1230 iwm_pcie_clear_cmd_in_flight(sc);
1233 static void
1234 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1236 int i;
1238 iwm_dma_contig_free(&ring->desc_dma);
1239 iwm_dma_contig_free(&ring->cmd_dma);
1241 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1242 struct iwm_tx_data *data = &ring->data[i];
1244 if (data->m != NULL) {
1245 bus_dmamap_sync(ring->data_dmat, data->map,
1246 BUS_DMASYNC_POSTWRITE);
1247 bus_dmamap_unload(ring->data_dmat, data->map);
1248 m_freem(data->m);
1249 data->m = NULL;
1251 if (data->map != NULL) {
1252 bus_dmamap_destroy(ring->data_dmat, data->map);
1253 data->map = NULL;
1256 if (ring->data_dmat != NULL) {
1257 bus_dma_tag_destroy(ring->data_dmat);
1258 ring->data_dmat = NULL;
1263 * High-level hardware frobbing routines
1266 static void
1267 iwm_enable_interrupts(struct iwm_softc *sc)
1269 sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1270 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1273 static void
1274 iwm_restore_interrupts(struct iwm_softc *sc)
1276 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1279 static void
1280 iwm_disable_interrupts(struct iwm_softc *sc)
1282 /* disable interrupts */
1283 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1285 /* acknowledge all interrupts */
1286 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1287 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1290 static void
1291 iwm_ict_reset(struct iwm_softc *sc)
1293 iwm_disable_interrupts(sc);
1295 /* Reset ICT table. */
1296 memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1297 sc->ict_cur = 0;
1299 /* Set physical address of ICT table (4KB aligned). */
1300 IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1301 IWM_CSR_DRAM_INT_TBL_ENABLE
1302 | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1303 | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1304 | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1306 /* Switch to ICT interrupt mode in driver. */
1307 sc->sc_flags |= IWM_FLAG_USE_ICT;
1309 /* Re-enable interrupts. */
1310 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1311 iwm_enable_interrupts(sc);
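/*
 * Sketch of the register math above: IWM_CSR_DRAM_INT_TBL_REG holds
 * the table's physical address shifted by IWM_ICT_PADDR_SHIFT, which
 * is why iwm_alloc_ict() requests 1<<IWM_ICT_PADDR_SHIFT alignment;
 * the shifted-out low bits must all be zero.
 */
#if 0
static uint32_t
iwm_example_ict_tbl_reg(bus_addr_t paddr)
{
	KASSERT((paddr & ((1 << IWM_ICT_PADDR_SHIFT) - 1)) == 0,
	    ("ICT table not aligned"));
	return IWM_CSR_DRAM_INT_TBL_ENABLE |
	    IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER |
	    IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK |
	    (paddr >> IWM_ICT_PADDR_SHIFT);
}
#endif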
1315 * Since this hard-resets things, it's time to actually
1316 * mark the first vap (if any) as having no mac context.
1317 * It's annoying, but since the driver is potentially being
1318 * stop/start'ed whilst active (thanks openbsd port!) we
1319 * have to correctly track this.
1321 static void
1322 iwm_stop_device(struct iwm_softc *sc)
1324 struct ieee80211com *ic = &sc->sc_ic;
1325 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1326 int chnl, qid;
1327 uint32_t mask = 0;
1329 /* tell the device to stop sending interrupts */
1330 iwm_disable_interrupts(sc);
1333 * FreeBSD-local: mark the first vap as not-uploaded,
1334 * so the next transition through auth/assoc
1335 * will correctly populate the MAC context.
1337 if (vap) {
1338 struct iwm_vap *iv = IWM_VAP(vap);
1339 iv->phy_ctxt = NULL;
1340 iv->is_uploaded = 0;
1343 /* device going down, Stop using ICT table */
1344 sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1346 /* stop tx and rx. tx and rx bits, as usual, are from if_iwn */
1348 if (iwm_nic_lock(sc)) {
1349 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1351 /* Stop each Tx DMA channel */
1352 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1353 IWM_WRITE(sc,
1354 IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1355 mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1358 /* Wait for DMA channels to be idle */
1359 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1360 5000)) {
1361 device_printf(sc->sc_dev,
1362 "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1363 IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1365 iwm_nic_unlock(sc);
1367 iwm_pcie_rx_stop(sc);
1369 /* Stop RX ring. */
1370 iwm_reset_rx_ring(sc, &sc->rxq);
1372 /* Reset all TX rings. */
1373 for (qid = 0; qid < nitems(sc->txq); qid++)
1374 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1376 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1377 /* Power-down device's busmaster DMA clocks */
1378 if (iwm_nic_lock(sc)) {
1379 iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1380 IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1381 iwm_nic_unlock(sc);
1383 DELAY(5);
1386 /* Make sure (redundant) we've released our request to stay awake */
1387 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1388 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1390 /* Stop the device, and put it in low power state */
1391 iwm_apm_stop(sc);
1393 /* stop and reset the on-board processor */
1394 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1395 DELAY(1000);
1398 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1399 * This is a bug in certain versions of the hardware.
1400 * Certain devices also keep sending the HW RF kill interrupt all
1401 * the time, unless it is ACKed, even when the interrupt
1402 * should be masked. Re-ACK all the interrupts here.
1404 iwm_disable_interrupts(sc);
1407 * Even if we stop the HW, we still want the RF kill
1408 * interrupt
1410 iwm_enable_rfkill_int(sc);
1411 iwm_check_rfkill(sc);
1414 static void
1415 iwm_mvm_nic_config(struct iwm_softc *sc)
1417 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1418 uint32_t reg_val = 0;
1419 uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1421 radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1422 IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1423 radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1424 IWM_FW_PHY_CFG_RADIO_STEP_POS;
1425 radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1426 IWM_FW_PHY_CFG_RADIO_DASH_POS;
1428 /* SKU control */
1429 reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1430 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1431 reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1432 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1434 /* radio configuration */
1435 reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1436 reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1437 reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1439 IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1441 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1442 "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1443 radio_cfg_step, radio_cfg_dash);
1446 * W/A : NIC is stuck in a reset state after Early PCIe power off
1447 * (PCIe power is lost before PERST# is asserted), causing ME FW
1448 * to lose ownership and be unable to obtain it back.
1450 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1451 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1452 IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1453 ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1457 static int
1458 iwm_nic_rx_init(struct iwm_softc *sc)
1461 * Initialize RX ring. This is from the iwn driver.
1463 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1465 /* Stop Rx DMA */
1466 iwm_pcie_rx_stop(sc);
1468 if (!iwm_nic_lock(sc))
1469 return EBUSY;
1471 /* reset and flush pointers */
1472 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1473 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1474 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1475 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1477 /* Set physical address of RX ring (256-byte aligned). */
1478 IWM_WRITE(sc,
1479 IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1481 /* Set physical address of RX status (16-byte aligned). */
1482 IWM_WRITE(sc,
1483 IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1485 #if defined(__DragonFly__)
1486 /* Force serialization (probably not needed but don't trust the HW) */
1487 IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
1488 #endif
1490 /* Enable RX. */
1491 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1492 IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
1493 IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
1494 IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
1495 IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
1496 (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1497 IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
1498 IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1500 IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1502 /* W/A for interrupt coalescing bug in 7260 and 3160 */
1503 if (sc->cfg->host_interrupt_operation_mode)
1504 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1507 * Thus sayeth el jefe (iwlwifi) via a comment:
1509 * This value should initially be 0 (before preparing any
1510 * RBs), should be 8 after preparing the first 8 RBs (for example)
1512 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1514 iwm_nic_unlock(sc);
1516 return 0;
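/*
 * A minimal sketch (an assumption, following the iwlwifi comment
 * above): the write pointer handed to the hardware stays a multiple
 * of 8 RBs, so later refills would round the ring index down before
 * updating the register.
 */
#if 0
static void
iwm_example_rx_wptr(struct iwm_softc *sc, int cur)
{
	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, cur & ~7);
		iwm_nic_unlock(sc);
	}
}
#endif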
1519 static int
1520 iwm_nic_tx_init(struct iwm_softc *sc)
1522 int qid;
1524 if (!iwm_nic_lock(sc))
1525 return EBUSY;
1527 /* Deactivate TX scheduler. */
1528 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1530 /* Set physical address of "keep warm" page (16-byte aligned). */
1531 IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1533 /* Initialize TX rings. */
1534 for (qid = 0; qid < nitems(sc->txq); qid++) {
1535 struct iwm_tx_ring *txq = &sc->txq[qid];
1537 /* Set physical address of TX ring (256-byte aligned). */
1538 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1539 txq->desc_dma.paddr >> 8);
1540 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1541 "%s: loading ring %d descriptors (%p) at %lx\n",
1542 __func__,
1543 qid, txq->desc,
1544 (unsigned long) (txq->desc_dma.paddr >> 8));
1547 iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1549 iwm_nic_unlock(sc);
1551 return 0;
1554 static int
1555 iwm_nic_init(struct iwm_softc *sc)
1557 int error;
1559 iwm_apm_init(sc);
1560 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1561 iwm_set_pwr(sc);
1563 iwm_mvm_nic_config(sc);
1565 if ((error = iwm_nic_rx_init(sc)) != 0)
1566 return error;
1569 * Ditto for TX, from iwn
1571 if ((error = iwm_nic_tx_init(sc)) != 0)
1572 return error;
1574 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1575 "%s: shadow registers enabled\n", __func__);
1576 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1578 return 0;
1581 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
1582 IWM_MVM_TX_FIFO_VO,
1583 IWM_MVM_TX_FIFO_VI,
1584 IWM_MVM_TX_FIFO_BE,
1585 IWM_MVM_TX_FIFO_BK,
1588 static int
1589 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1591 if (!iwm_nic_lock(sc)) {
1592 device_printf(sc->sc_dev,
1593 "%s: cannot enable txq %d\n",
1594 __func__,
1595 qid);
1596 return EBUSY;
1599 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1601 if (qid == IWM_MVM_CMD_QUEUE) {
1602 /* deactivate before configuration */
1603 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1604 (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1605 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1607 iwm_nic_unlock(sc);
1609 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1611 if (!iwm_nic_lock(sc)) {
1612 device_printf(sc->sc_dev,
1613 "%s: cannot enable txq %d\n", __func__, qid);
1614 return EBUSY;
1616 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1617 iwm_nic_unlock(sc);
1619 iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1620 /* Set scheduler window size and frame limit. */
1621 iwm_write_mem32(sc,
1622 sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1623 sizeof(uint32_t),
1624 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1625 IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1626 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1627 IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1629 if (!iwm_nic_lock(sc)) {
1630 device_printf(sc->sc_dev,
1631 "%s: cannot enable txq %d\n", __func__, qid);
1632 return EBUSY;
1634 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1635 (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1636 (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1637 (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1638 IWM_SCD_QUEUE_STTS_REG_MSK);
1639 } else {
1640 struct iwm_scd_txq_cfg_cmd cmd;
1641 int error;
1643 iwm_nic_unlock(sc);
1645 memset(&cmd, 0, sizeof(cmd));
1646 cmd.scd_queue = qid;
1647 cmd.enable = 1;
1648 cmd.sta_id = sta_id;
1649 cmd.tx_fifo = fifo;
1650 cmd.aggregate = 0;
1651 cmd.window = IWM_FRAME_LIMIT;
1653 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1654 sizeof(cmd), &cmd);
1655 if (error) {
1656 device_printf(sc->sc_dev,
1657 "cannot enable txq %d\n", qid);
1658 return error;
1661 if (!iwm_nic_lock(sc))
1662 return EBUSY;
1665 iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1666 iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1668 iwm_nic_unlock(sc);
1670 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1671 __func__, qid, fifo);
1673 return 0;
1676 static int
1677 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1679 int error, chnl;
1681 int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1682 IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1684 if (!iwm_nic_lock(sc))
1685 return EBUSY;
1687 iwm_ict_reset(sc);
1689 sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1690 if (scd_base_addr != 0 &&
1691 scd_base_addr != sc->scd_base_addr) {
1692 device_printf(sc->sc_dev,
1693 "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1694 __func__, scd_base_addr, sc->scd_base_addr);
1697 iwm_nic_unlock(sc);
1699 /* reset context data, TX status and translation data */
1700 error = iwm_write_mem(sc,
1701 sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1702 NULL, clear_dwords);
1703 if (error)
1704 return EBUSY;
1706 if (!iwm_nic_lock(sc))
1707 return EBUSY;
1709 /* Set physical address of TX scheduler rings (1KB aligned). */
1710 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1712 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1714 iwm_nic_unlock(sc);
1716 /* enable command channel */
1717 error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1718 if (error)
1719 return error;
1721 if (!iwm_nic_lock(sc))
1722 return EBUSY;
1724 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1726 /* Enable DMA channels. */
1727 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1728 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1729 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1730 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1733 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1734 IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1736 iwm_nic_unlock(sc);
1738 /* Enable L1-Active */
1739 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1740 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1741 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1744 return error;
1748 * NVM read access and content parsing. We do not support
1749 * external NVM or writing NVM.
1750 * iwlwifi/mvm/nvm.c
1753 /* Default NVM size to read */
1754 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
1756 #define IWM_NVM_WRITE_OPCODE 1
1757 #define IWM_NVM_READ_OPCODE 0
1759 /* load nvm chunk response */
1760 enum {
1761 IWM_READ_NVM_CHUNK_SUCCEED = 0,
1762 IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1765 static int
1766 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1767 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1769 struct iwm_nvm_access_cmd nvm_access_cmd = {
1770 .offset = htole16(offset),
1771 .length = htole16(length),
1772 .type = htole16(section),
1773 .op_code = IWM_NVM_READ_OPCODE,
1775 struct iwm_nvm_access_resp *nvm_resp;
1776 struct iwm_rx_packet *pkt;
1777 struct iwm_host_cmd cmd = {
1778 .id = IWM_NVM_ACCESS_CMD,
1779 .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1780 .data = { &nvm_access_cmd, },
1782 int ret, bytes_read, offset_read;
1783 uint8_t *resp_data;
1785 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1787 ret = iwm_send_cmd(sc, &cmd);
1788 if (ret) {
1789 device_printf(sc->sc_dev,
1790 "Could not send NVM_ACCESS command (error=%d)\n", ret);
1791 return ret;
1794 pkt = cmd.resp_pkt;
1796 /* Extract NVM response */
1797 nvm_resp = (void *)pkt->data;
1798 ret = le16toh(nvm_resp->status);
1799 bytes_read = le16toh(nvm_resp->length);
1800 offset_read = le16toh(nvm_resp->offset);
1801 resp_data = nvm_resp->data;
1802 if (ret) {
1803 if ((offset != 0) &&
1804 (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1806 * meaning of NOT_VALID_ADDRESS:
1807 * the driver tried to read a chunk from an address that is a
1808 * multiple of 2K and got an error because that address is empty.
1809 * meaning of (offset != 0): the driver has already
1810 * read valid data from another chunk, so this case
1811 * is not an error.
1813 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1814 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1815 offset);
1816 *len = 0;
1817 ret = 0;
1818 } else {
1819 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1820 "NVM access command failed with status %d\n", ret);
1821 ret = EIO;
1823 goto exit;
1826 if (offset_read != offset) {
1827 device_printf(sc->sc_dev,
1828 "NVM ACCESS response with invalid offset %d\n",
1829 offset_read);
1830 ret = EINVAL;
1831 goto exit;
1834 if (bytes_read > length) {
1835 device_printf(sc->sc_dev,
1836 "NVM ACCESS response with too much data "
1837 "(%d bytes requested, %d bytes received)\n",
1838 length, bytes_read);
1839 ret = EINVAL;
1840 goto exit;
1843 /* Copy the NVM chunk out of the response */
1844 memcpy(data + offset, resp_data, bytes_read);
1845 *len = bytes_read;
1847 exit:
1848 iwm_free_resp(sc, &cmd);
1849 return ret;
1853 * Reads an NVM section completely.
1854 * NICs prior to 7000 family don't have a real NVM, but just read
1855 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1856 * by uCode, we need to manually check in this case that we don't
1857 * overflow and try to read more than the EEPROM size.
1858 * For 7000 family NICs, we supply the maximal size we can read, and
1859 * the uCode fills the response with as much data as we can,
1860 * without overflowing, so no check is needed.
1862 static int
1863 iwm_nvm_read_section(struct iwm_softc *sc,
1864 uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1866 uint16_t seglen, length, offset = 0;
1867 int ret;
1869 /* Set nvm section read length */
1870 length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1872 seglen = length;
1874 /* Read the NVM until exhausted (reading less than requested) */
1875 while (seglen == length) {
1876 /* Check no memory assumptions fail and cause an overflow */
1877 if ((size_read + offset + length) >
1878 sc->cfg->eeprom_size) {
1879 device_printf(sc->sc_dev,
1880 "EEPROM size is too small for NVM\n");
1881 return ENOBUFS;
1884 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1885 if (ret) {
1886 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1887 "Cannot read NVM from section %d offset %d, length %d\n",
1888 section, offset, length);
1889 return ret;
1891 offset += seglen;
1894 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1895 "NVM section %d read completed\n", section);
1896 *len = offset;
1897 return 0;
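/*
 * Usage sketch (editor's note, not driver code): reading the SW
 * section into a caller-supplied buffer with the function above.
 * "buf" must be at least sc->cfg->eeprom_size bytes; on success
 * "len" holds the number of bytes actually read.
 *
 *	uint16_t len;
 *	int err = iwm_nvm_read_section(sc, IWM_NVM_SECTION_TYPE_SW,
 *	    buf, &len, 0);
 *	if (err == 0)
 *		kprintf("SW section: %d bytes\n", (int)len);
 */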
1900 /* NVM offsets (in words) definitions */
1901 enum iwm_nvm_offsets {
1902 /* NVM HW-Section offset (in words) definitions */
1903 IWM_HW_ADDR = 0x15,
1905 /* NVM SW-Section offset (in words) definitions */
1906 IWM_NVM_SW_SECTION = 0x1C0,
1907 IWM_NVM_VERSION = 0,
1908 IWM_RADIO_CFG = 1,
1909 IWM_SKU = 2,
1910 IWM_N_HW_ADDRS = 3,
1911 IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1913 /* NVM calibration section offset (in words) definitions */
1914 IWM_NVM_CALIB_SECTION = 0x2B8,
1915 IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
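/*
 * Editor's note: all offsets in these enums are in 16-bit words, not
 * bytes. For example, IWM_NVM_CHANNELS above evaluates to
 * 0x1E0 - 0x1C0 = 0x20, i.e. the channel flags begin 32 words (64
 * bytes) into the SW section; likewise IWM_XTAL_CALIB is
 * 0x316 - 0x2B8 = 0x5E words into the calibration section.
 */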
1918 enum iwm_8000_nvm_offsets {
1919 /* NVM HW-Section offset (in words) definitions */
1920 IWM_HW_ADDR0_WFPM_8000 = 0x12,
1921 IWM_HW_ADDR1_WFPM_8000 = 0x16,
1922 IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1923 IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1924 IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1926 /* NVM SW-Section offset (in words) definitions */
1927 IWM_NVM_SW_SECTION_8000 = 0x1C0,
1928 IWM_NVM_VERSION_8000 = 0,
1929 IWM_RADIO_CFG_8000 = 0,
1930 IWM_SKU_8000 = 2,
1931 IWM_N_HW_ADDRS_8000 = 3,
1933 /* NVM REGULATORY -Section offset (in words) definitions */
1934 IWM_NVM_CHANNELS_8000 = 0,
1935 IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1936 IWM_NVM_LAR_OFFSET_8000 = 0x507,
1937 IWM_NVM_LAR_ENABLED_8000 = 0x7,
1939 /* NVM calibration section offset (in words) definitions */
1940 IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1941 IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1944 /* SKU Capabilities (actual values from NVM definition) */
1945 enum nvm_sku_bits {
1946 IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),
1947 IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1),
1948 IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2),
1949 IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3),
1952 /* radio config bits (actual values from NVM definition) */
1953 #define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
1954 #define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
1955 #define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
1956 #define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
1957 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
1958 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1960 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x) (x & 0xF)
1961 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x) ((x >> 4) & 0xF)
1962 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x) ((x >> 8) & 0xF)
1963 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x) ((x >> 12) & 0xFFF)
1964 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x) ((x >> 24) & 0xF)
1965 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x) ((x >> 28) & 0xF)
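/*
 * Editor's illustrative sketch (not part of the driver; the
 * IWM_NVM_EXAMPLES guard is hypothetical): decoding a family-8000
 * radio_cfg word with the masks above. For radio_cfg = 0x32100421
 * this prints flavor=0x1 dash=0x2 step=0x4 type=0x100 tx=0x2 rx=0x3.
 */
#ifdef IWM_NVM_EXAMPLES
static void
iwm_example_decode_radio_cfg_8000(uint32_t radio_cfg)
{
	kprintf("flavor=%#x dash=%#x step=%#x type=%#x tx=%#x rx=%#x\n",
	    IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg),
	    IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg),
	    IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg),
	    IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg),
	    IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg),
	    IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg));
}
#endif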
1967 #define DEFAULT_MAX_TX_POWER 16
1970 * enum iwm_nvm_channel_flags - channel flags in NVM
1971 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1972 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1973 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1974 * @IWM_NVM_CHANNEL_RADAR: radar detection required
1975 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1976 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1977 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1978 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1979 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1980 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1982 enum iwm_nvm_channel_flags {
1983 IWM_NVM_CHANNEL_VALID = (1 << 0),
1984 IWM_NVM_CHANNEL_IBSS = (1 << 1),
1985 IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1986 IWM_NVM_CHANNEL_RADAR = (1 << 4),
1987 IWM_NVM_CHANNEL_DFS = (1 << 7),
1988 IWM_NVM_CHANNEL_WIDE = (1 << 8),
1989 IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1990 IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1991 IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1995 * Translate EEPROM flags to net80211.
1997 static uint32_t
1998 iwm_eeprom_channel_flags(uint16_t ch_flags)
2000 uint32_t nflags;
2002 nflags = 0;
2003 if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2004 nflags |= IEEE80211_CHAN_PASSIVE;
2005 if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2006 nflags |= IEEE80211_CHAN_NOADHOC;
2007 if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2008 nflags |= IEEE80211_CHAN_DFS;
2009 /* Just in case. */
2010 nflags |= IEEE80211_CHAN_NOADHOC;
2013 return (nflags);
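/*
 * Worked example (editor's note): NVM flags of
 * IWM_NVM_CHANNEL_VALID | IWM_NVM_CHANNEL_RADAR (with ACTIVE and
 * IBSS both clear) translate to
 * IEEE80211_CHAN_PASSIVE | IEEE80211_CHAN_NOADHOC | IEEE80211_CHAN_DFS,
 * i.e. a usable but passive-scan-only DFS channel.
 */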
2016 static void
2017 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2018 int maxchans, int *nchans, int ch_idx, size_t ch_num,
2019 const uint8_t bands[])
2021 const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2022 uint32_t nflags;
2023 uint16_t ch_flags;
2024 uint8_t ieee;
2025 int error;
2027 for (; ch_idx < ch_num; ch_idx++) {
2028 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2029 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2030 ieee = iwm_nvm_channels[ch_idx];
2031 else
2032 ieee = iwm_nvm_channels_8000[ch_idx];
2034 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2035 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2036 "Ch. %d Flags %x [%sGHz] - No traffic\n",
2037 ieee, ch_flags,
2038 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2039 "5.2" : "2.4");
2040 continue;
2043 nflags = iwm_eeprom_channel_flags(ch_flags);
2044 error = ieee80211_add_channel(chans, maxchans, nchans,
2045 ieee, 0, 0, nflags, bands);
2046 if (error != 0)
2047 break;
2049 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2050 "Ch. %d Flags %x [%sGHz] - Added\n",
2051 ieee, ch_flags,
2052 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2053 "5.2" : "2.4");
2057 static void
2058 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2059 struct ieee80211_channel chans[])
2061 struct iwm_softc *sc = ic->ic_softc;
2062 struct iwm_nvm_data *data = sc->nvm_data;
2063 uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
2064 size_t ch_num;
2066 memset(bands, 0, sizeof(bands));
2067 /* 1-13: 11b/g channels. */
2068 setbit(bands, IEEE80211_MODE_11B);
2069 setbit(bands, IEEE80211_MODE_11G);
2070 iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2071 IWM_NUM_2GHZ_CHANNELS - 1, bands);
2073 /* 14: 11b channel only. */
2074 clrbit(bands, IEEE80211_MODE_11G);
2075 iwm_add_channel_band(sc, chans, maxchans, nchans,
2076 IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2078 if (data->sku_cap_band_52GHz_enable) {
2079 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2080 ch_num = nitems(iwm_nvm_channels);
2081 else
2082 ch_num = nitems(iwm_nvm_channels_8000);
2083 memset(bands, 0, sizeof(bands));
2084 setbit(bands, IEEE80211_MODE_11A);
2085 iwm_add_channel_band(sc, chans, maxchans, nchans,
2086 IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2090 static void
2091 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2092 const uint16_t *mac_override, const uint16_t *nvm_hw)
2094 const uint8_t *hw_addr;
2096 if (mac_override) {
2097 static const uint8_t reserved_mac[] = {
2098 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2101 hw_addr = (const uint8_t *)(mac_override +
2102 IWM_MAC_ADDRESS_OVERRIDE_8000);
2105 * Store the MAC address from the MAO section.
2106 * No byte swapping is required in the MAO section.
2108 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2111 * Force the use of the OTP MAC address in case of reserved MAC
2112 * address in the NVM, or if address is given but invalid.
2114 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2115 !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2116 iwm_is_valid_ether_addr(data->hw_addr) &&
2117 !IEEE80211_IS_MULTICAST(data->hw_addr))
2118 return;
2120 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2121 "%s: mac address from nvm override section invalid\n",
2122 __func__);
2125 if (nvm_hw) {
2126 /* read the mac address from WFMP registers */
2127 uint32_t mac_addr0 =
2128 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2129 uint32_t mac_addr1 =
2130 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2132 hw_addr = (const uint8_t *)&mac_addr0;
2133 data->hw_addr[0] = hw_addr[3];
2134 data->hw_addr[1] = hw_addr[2];
2135 data->hw_addr[2] = hw_addr[1];
2136 data->hw_addr[3] = hw_addr[0];
2138 hw_addr = (const uint8_t *)&mac_addr1;
2139 data->hw_addr[4] = hw_addr[1];
2140 data->hw_addr[5] = hw_addr[0];
2142 return;
2145 device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2146 memset(data->hw_addr, 0, sizeof(data->hw_addr));
2149 static int
2150 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2151 const uint16_t *phy_sku)
2153 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2154 return le16_to_cpup(nvm_sw + IWM_SKU);
2156 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2159 static int
2160 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2162 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2163 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2164 else
2165 return le32_to_cpup((const uint32_t *)(nvm_sw +
2166 IWM_NVM_VERSION_8000));
2169 static int
2170 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2171 const uint16_t *phy_sku)
2173 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2174 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2176 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2179 static int
2180 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2182 int n_hw_addr;
2184 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2185 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2187 n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2189 return n_hw_addr & IWM_N_HW_ADDR_MASK;
2192 static void
2193 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2194 uint32_t radio_cfg)
2196 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2197 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2198 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2199 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2200 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2201 return;
2204 /* set the radio configuration for family 8000 */
2205 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2206 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2207 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2208 data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2209 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2210 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2213 static int
2214 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2215 const uint16_t *nvm_hw, const uint16_t *mac_override)
2217 #ifdef notyet /* for FAMILY 9000 */
2218 if (cfg->mac_addr_from_csr) {
2219 iwm_set_hw_address_from_csr(sc, data);
2220 } else
2221 #endif
2222 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2223 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2225 /* The byte order is little endian 16 bit, meaning 214365 */
2226 data->hw_addr[0] = hw_addr[1];
2227 data->hw_addr[1] = hw_addr[0];
2228 data->hw_addr[2] = hw_addr[3];
2229 data->hw_addr[3] = hw_addr[2];
2230 data->hw_addr[4] = hw_addr[5];
2231 data->hw_addr[5] = hw_addr[4];
2232 } else {
2233 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2236 if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2237 device_printf(sc->sc_dev, "no valid mac address was found\n");
2238 return EINVAL;
2241 return 0;
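/*
 * Worked example for the "214365" swizzle above (editor's note,
 * hypothetical address): if the six NVM bytes at IWM_HW_ADDR read
 * 00:11:22:33:44:55, the resulting hw_addr is 11:00:33:22:55:44 -
 * the bytes are swapped within each little-endian 16-bit word.
 */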
2244 static struct iwm_nvm_data *
2245 iwm_parse_nvm_data(struct iwm_softc *sc,
2246 const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2247 const uint16_t *nvm_calib, const uint16_t *mac_override,
2248 const uint16_t *phy_sku, const uint16_t *regulatory)
2250 struct iwm_nvm_data *data;
2251 uint32_t sku, radio_cfg;
2253 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2254 data = kmalloc(sizeof(*data) +
2255 IWM_NUM_CHANNELS * sizeof(uint16_t),
2256 M_DEVBUF, M_WAITOK | M_ZERO);
2257 } else {
2258 data = kmalloc(sizeof(*data) +
2259 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2260 M_DEVBUF, M_WAITOK | M_ZERO);
2262 if (!data)
2263 return NULL;
2265 data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2267 radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2268 iwm_set_radio_cfg(sc, data, radio_cfg);
2270 sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2271 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2272 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2273 data->sku_cap_11n_enable = 0;
2275 data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2277 /* If no valid mac address was found - bail out */
2278 if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2279 kfree(data, M_DEVBUF);
2280 return NULL;
2283 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2284 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2285 IWM_NUM_CHANNELS * sizeof(uint16_t));
2286 } else {
2287 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2288 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2291 return data;
2294 static void
2295 iwm_free_nvm_data(struct iwm_nvm_data *data)
2297 if (data != NULL)
2298 kfree(data, M_DEVBUF);
2301 static struct iwm_nvm_data *
2302 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2304 const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2306 /* Checking for required sections */
2307 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2308 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2309 !sections[sc->cfg->nvm_hw_section_num].data) {
2310 device_printf(sc->sc_dev,
2311 "Can't parse empty OTP/NVM sections\n");
2312 return NULL;
2314 } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2315 /* SW and REGULATORY sections are mandatory */
2316 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2317 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2318 device_printf(sc->sc_dev,
2319 "Can't parse empty OTP/NVM sections\n");
2320 return NULL;
2322 /* MAC_OVERRIDE or at least HW section must exist */
2323 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2324 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2325 device_printf(sc->sc_dev,
2326 "Can't parse mac_address, empty sections\n");
2327 return NULL;
2330 /* PHY_SKU section is mandatory in B0 */
2331 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2332 device_printf(sc->sc_dev,
2333 "Can't parse phy_sku in B0, empty sections\n");
2334 return NULL;
2336 } else {
2337 panic("unknown device family %d\n", sc->cfg->device_family);
2340 hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2341 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2342 calib = (const uint16_t *)
2343 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2344 regulatory = (const uint16_t *)
2345 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2346 mac_override = (const uint16_t *)
2347 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2348 phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2350 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2351 phy_sku, regulatory);
2354 static int
2355 iwm_nvm_init(struct iwm_softc *sc)
2357 struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2358 int i, ret, section;
2359 uint32_t size_read = 0;
2360 uint8_t *nvm_buffer, *temp;
2361 uint16_t len;
2363 memset(nvm_sections, 0, sizeof(nvm_sections));
2365 if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2366 return EINVAL;
2368 /* load NVM values from nic */
2369 /* Read From FW NVM */
2370 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2372 nvm_buffer = kmalloc(sc->cfg->eeprom_size, M_DEVBUF,
2373 M_INTWAIT | M_ZERO);
2374 if (!nvm_buffer)
2375 return ENOMEM;
2376 for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2377 /* we override the constness for initial read */
2378 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2379 &len, size_read);
2380 if (ret)
2381 continue;
2382 size_read += len;
2383 temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
2384 if (!temp) {
2385 ret = ENOMEM;
2386 break;
2388 memcpy(temp, nvm_buffer, len);
2390 nvm_sections[section].data = temp;
2391 nvm_sections[section].length = len;
2393 if (!size_read)
2394 device_printf(sc->sc_dev, "OTP is blank\n");
2395 kfree(nvm_buffer, M_DEVBUF);
2397 sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2398 if (!sc->nvm_data)
2399 return EINVAL;
2400 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2401 "nvm version = %x\n", sc->nvm_data->nvm_version);
2403 for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2404 if (nvm_sections[i].data != NULL)
2405 kfree(nvm_sections[i].data, M_DEVBUF);
2408 return 0;
2411 static int
2412 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2413 const struct iwm_fw_desc *section)
2415 struct iwm_dma_info *dma = &sc->fw_dma;
2416 uint8_t *v_addr;
2417 bus_addr_t p_addr;
2418 uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2419 int ret = 0;
2421 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2422 "%s: [%d] uCode section being loaded...\n",
2423 __func__, section_num);
2425 v_addr = dma->vaddr;
2426 p_addr = dma->paddr;
2428 for (offset = 0; offset < section->len; offset += chunk_sz) {
2429 uint32_t copy_size, dst_addr;
2430 int extended_addr = FALSE;
2432 copy_size = MIN(chunk_sz, section->len - offset);
2433 dst_addr = section->offset + offset;
2435 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2436 dst_addr <= IWM_FW_MEM_EXTENDED_END)
2437 extended_addr = TRUE;
2439 if (extended_addr)
2440 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2441 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2443 memcpy(v_addr, (const uint8_t *)section->data + offset,
2444 copy_size);
2445 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2446 ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2447 copy_size);
2449 if (extended_addr)
2450 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2451 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2453 if (ret) {
2454 device_printf(sc->sc_dev,
2455 "%s: Could not load the [%d] uCode section\n",
2456 __func__, section_num);
2457 break;
2461 return ret;
2465 * ucode
2467 static int
2468 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2469 bus_addr_t phy_addr, uint32_t byte_cnt)
2471 int ret;
2473 sc->sc_fw_chunk_done = 0;
2475 if (!iwm_nic_lock(sc))
2476 return EBUSY;
2478 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2479 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2481 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2482 dst_addr);
2484 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2485 phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2487 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2488 (iwm_get_dma_hi_addr(phy_addr)
2489 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2491 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2492 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2493 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2494 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2496 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2497 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2498 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2499 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2501 iwm_nic_unlock(sc);
2503 /* wait up to 5s for this segment to load */
2504 ret = 0;
2505 while (!sc->sc_fw_chunk_done) {
2506 #if defined(__DragonFly__)
2507 ret = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", 5 * hz);
2508 #else
2509 ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", 5 * hz);
2510 #endif
2511 if (ret)
2512 break;
2515 if (ret != 0) {
2516 device_printf(sc->sc_dev,
2517 "fw chunk addr 0x%x len %d failed to load\n",
2518 dst_addr, byte_cnt);
2519 return ETIMEDOUT;
2522 return 0;
2525 static int
2526 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2527 const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2529 int shift_param;
2530 int i, ret = 0, sec_num = 0x1;
2531 uint32_t val, last_read_idx = 0;
2533 if (cpu == 1) {
2534 shift_param = 0;
2535 *first_ucode_section = 0;
2536 } else {
2537 shift_param = 16;
2538 (*first_ucode_section)++;
2541 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2542 last_read_idx = i;
2545 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2546 * CPU1 to CPU2.
2547 * PAGING_SEPARATOR_SECTION delimiter - separate between
2548 * CPU2 non paged to CPU2 paging sec.
2550 if (!image->fw_sect[i].data ||
2551 image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2552 image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2553 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2554 "Break since Data not valid or Empty section, sec = %d\n",
2556 break;
2558 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2559 if (ret)
2560 return ret;
2562 /* Notify the ucode of the loaded section number and status */
2563 if (iwm_nic_lock(sc)) {
2564 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2565 val = val | (sec_num << shift_param);
2566 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2567 sec_num = (sec_num << 1) | 0x1;
2568 iwm_nic_unlock(sc);
2572 *first_ucode_section = last_read_idx;
2574 iwm_enable_interrupts(sc);
2576 if (iwm_nic_lock(sc)) {
2577 if (cpu == 1)
2578 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2579 else
2580 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2581 iwm_nic_unlock(sc);
2584 return 0;
2587 static int
2588 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2589 const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2591 int shift_param;
2592 int i, ret = 0;
2593 uint32_t last_read_idx = 0;
2595 if (cpu == 1) {
2596 shift_param = 0;
2597 *first_ucode_section = 0;
2598 } else {
2599 shift_param = 16;
2600 (*first_ucode_section)++;
2603 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2604 last_read_idx = i;
2607 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2608 * CPU1 to CPU2.
2609 * PAGING_SEPARATOR_SECTION delimiter - separate between
2610 * CPU2 non paged to CPU2 paging sec.
2612 if (!image->fw_sect[i].data ||
2613 image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2614 image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2615 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2616 "Break since Data not valid or Empty section, sec = %d\n",
2618 break;
2621 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2622 if (ret)
2623 return ret;
2626 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2627 iwm_set_bits_prph(sc,
2628 IWM_CSR_UCODE_LOAD_STATUS_ADDR,
2629 (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
2630 IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
2631 IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
2632 shift_param);
2634 *first_ucode_section = last_read_idx;
2636 return 0;
2640 static int
2641 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2642 const struct iwm_fw_sects *image)
2644 int ret = 0;
2645 int first_ucode_section;
2647 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2648 image->is_dual_cpus ? "Dual" : "Single");
2650 /* load to FW the binary non secured sections of CPU1 */
2651 ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2652 if (ret)
2653 return ret;
2655 if (image->is_dual_cpus) {
2656 /* set CPU2 header address */
2657 if (iwm_nic_lock(sc)) {
2658 iwm_write_prph(sc,
2659 IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2660 IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2661 iwm_nic_unlock(sc);
2664 /* load to FW the binary sections of CPU2 */
2665 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2666 &first_ucode_section);
2667 if (ret)
2668 return ret;
2671 iwm_enable_interrupts(sc);
2673 /* release CPU reset */
2674 IWM_WRITE(sc, IWM_CSR_RESET, 0);
2676 return 0;
2679 static int
2680 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2681 const struct iwm_fw_sects *image)
2683 int ret = 0;
2684 int first_ucode_section;
2686 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2687 image->is_dual_cpus ? "Dual" : "Single");
2689 /* configure the ucode to be ready to get the secured image */
2690 /* release CPU reset */
2691 if (iwm_nic_lock(sc)) {
2692 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2693 IWM_RELEASE_CPU_RESET_BIT);
2694 iwm_nic_unlock(sc);
2697 /* load to FW the binary Secured sections of CPU1 */
2698 ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2699 &first_ucode_section);
2700 if (ret)
2701 return ret;
2703 /* load to FW the binary sections of CPU2 */
2704 return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2705 &first_ucode_section);
2708 /* XXX Get rid of this definition */
2709 static inline void
2710 iwm_enable_fw_load_int(struct iwm_softc *sc)
2712 IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2713 sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2714 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2717 /* XXX Add proper rfkill support code */
2718 static int
2719 iwm_start_fw(struct iwm_softc *sc,
2720 const struct iwm_fw_sects *fw)
2722 int ret;
2724 /* This may fail if AMT took ownership of the device */
2725 if (iwm_prepare_card_hw(sc)) {
2726 device_printf(sc->sc_dev,
2727 "%s: Exit HW not ready\n", __func__);
2728 ret = EIO;
2729 goto out;
2732 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2734 iwm_disable_interrupts(sc);
2736 /* make sure rfkill handshake bits are cleared */
2737 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2738 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2739 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2741 /* clear (again), then enable host interrupts */
2742 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2744 ret = iwm_nic_init(sc);
2745 if (ret) {
2746 device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2747 goto out;
2751 * Now, we load the firmware and don't want to be interrupted, even
2752 * by the RF-Kill interrupt (hence we mask all interrupts besides the
2753 * FH_TX interrupt, which is needed to load the firmware). If the
2754 * RF-Kill switch is toggled, we will find out after having loaded
2755 * the firmware and return the proper value to the caller.
2757 iwm_enable_fw_load_int(sc);
2759 /* really make sure rfkill handshake bits are cleared */
2760 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2761 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2763 /* Load the given image to the HW */
2764 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2765 ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2766 else
2767 ret = iwm_pcie_load_given_ucode(sc, fw);
2769 /* XXX re-check RF-Kill state */
2771 out:
2772 return ret;
2775 static int
2776 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2778 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2779 .valid = htole32(valid_tx_ant),
2782 return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2783 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2786 static int
2787 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2789 struct iwm_phy_cfg_cmd phy_cfg_cmd;
2790 enum iwm_ucode_type ucode_type = sc->cur_ucode;
2792 /* Set parameters */
2793 phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2794 phy_cfg_cmd.calib_control.event_trigger =
2795 sc->sc_default_calib[ucode_type].event_trigger;
2796 phy_cfg_cmd.calib_control.flow_trigger =
2797 sc->sc_default_calib[ucode_type].flow_trigger;
2799 IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2800 "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2801 return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2802 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2805 static int
2806 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2808 struct iwm_mvm_alive_data *alive_data = data;
2809 struct iwm_mvm_alive_resp_ver1 *palive1;
2810 struct iwm_mvm_alive_resp_ver2 *palive2;
2811 struct iwm_mvm_alive_resp *palive;
2813 if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
2814 palive1 = (void *)pkt->data;
2816 sc->support_umac_log = FALSE;
2817 sc->error_event_table =
2818 le32toh(palive1->error_event_table_ptr);
2819 sc->log_event_table =
2820 le32toh(palive1->log_event_table_ptr);
2821 alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);
2823 alive_data->valid = le16toh(palive1->status) ==
2824 IWM_ALIVE_STATUS_OK;
2825 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2826 "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2827 le16toh(palive1->status), palive1->ver_type,
2828 palive1->ver_subtype, palive1->flags);
2829 } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
2830 palive2 = (void *)pkt->data;
2831 sc->error_event_table =
2832 le32toh(palive2->error_event_table_ptr);
2833 sc->log_event_table =
2834 le32toh(palive2->log_event_table_ptr);
2835 alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
2836 sc->umac_error_event_table =
2837 le32toh(palive2->error_info_addr);
2839 alive_data->valid = le16toh(palive2->status) ==
2840 IWM_ALIVE_STATUS_OK;
2841 if (sc->umac_error_event_table)
2842 sc->support_umac_log = TRUE;
2844 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2845 "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2846 le16toh(palive2->status), palive2->ver_type,
2847 palive2->ver_subtype, palive2->flags);
2849 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2850 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2851 palive2->umac_major, palive2->umac_minor);
2852 } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2853 palive = (void *)pkt->data;
2855 sc->error_event_table =
2856 le32toh(palive->error_event_table_ptr);
2857 sc->log_event_table =
2858 le32toh(palive->log_event_table_ptr);
2859 alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
2860 sc->umac_error_event_table =
2861 le32toh(palive->error_info_addr);
2863 alive_data->valid = le16toh(palive->status) ==
2864 IWM_ALIVE_STATUS_OK;
2865 if (sc->umac_error_event_table)
2866 sc->support_umac_log = TRUE;
2868 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2869 "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2870 le16toh(palive->status), palive->ver_type,
2871 palive->ver_subtype, palive->flags);
2873 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2874 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2875 le32toh(palive->umac_major),
2876 le32toh(palive->umac_minor));
2879 return TRUE;
2882 static int
2883 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2884 struct iwm_rx_packet *pkt, void *data)
2886 struct iwm_phy_db *phy_db = data;
2888 if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2889 if (pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2890 device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2891 __func__, pkt->hdr.code);
2893 return TRUE;
2896 if (iwm_phy_db_set_section(phy_db, pkt)) {
2897 device_printf(sc->sc_dev,
2898 "%s: iwm_phy_db_set_section failed\n", __func__);
2901 return FALSE;
2904 static int
2905 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2906 enum iwm_ucode_type ucode_type)
2908 struct iwm_notification_wait alive_wait;
2909 struct iwm_mvm_alive_data alive_data;
2910 const struct iwm_fw_sects *fw;
2911 enum iwm_ucode_type old_type = sc->cur_ucode;
2912 int error;
2913 static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
2915 if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2916 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2917 error);
2918 return error;
2920 fw = &sc->sc_fw.fw_sects[ucode_type];
2921 sc->cur_ucode = ucode_type;
2922 sc->ucode_loaded = FALSE;
2924 memset(&alive_data, 0, sizeof(alive_data));
2925 iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2926 alive_cmd, NELEM(alive_cmd),
2927 iwm_alive_fn, &alive_data);
2929 error = iwm_start_fw(sc, fw);
2930 if (error) {
2931 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2932 sc->cur_ucode = old_type;
2933 iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2934 return error;
2938 * Some things may run in the background now, but we
2939 * just wait for the ALIVE notification here.
2941 IWM_UNLOCK(sc);
2942 error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2943 IWM_MVM_UCODE_ALIVE_TIMEOUT);
2944 IWM_LOCK(sc);
2945 if (error) {
2946 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2947 uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2948 if (iwm_nic_lock(sc)) {
2949 a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2950 b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2951 iwm_nic_unlock(sc);
2953 device_printf(sc->sc_dev,
2954 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2955 a, b);
2957 sc->cur_ucode = old_type;
2958 return error;
2961 if (!alive_data.valid) {
2962 device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2963 __func__);
2964 sc->cur_ucode = old_type;
2965 return EIO;
2968 iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2971 * Configure and operate the firmware paging mechanism.
2972 * The driver configures the paging flow only once; the CPU2 paging
2973 * image is included in the IWM_UCODE_INIT image.
2975 if (fw->paging_mem_size) {
2976 error = iwm_save_fw_paging(sc, fw);
2977 if (error) {
2978 device_printf(sc->sc_dev,
2979 "%s: failed to save the FW paging image\n",
2980 __func__);
2981 return error;
2984 error = iwm_send_paging_cmd(sc, fw);
2985 if (error) {
2986 device_printf(sc->sc_dev,
2987 "%s: failed to send the paging cmd\n", __func__);
2988 iwm_free_fw_paging(sc);
2989 return error;
2993 if (!error)
2994 sc->ucode_loaded = TRUE;
2995 return error;
2999 * mvm misc bits
3002 static int
3003 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
3005 struct iwm_notification_wait calib_wait;
3006 static const uint16_t init_complete[] = {
3007 IWM_INIT_COMPLETE_NOTIF,
3008 IWM_CALIB_RES_NOTIF_PHY_DB
3010 int ret;
3012 /* do not operate with rfkill switch turned on */
3013 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3014 device_printf(sc->sc_dev,
3015 "radio is disabled by hardware switch\n");
3016 return EPERM;
3019 iwm_init_notification_wait(sc->sc_notif_wait,
3020 &calib_wait,
3021 init_complete,
3022 NELEM(init_complete),
3023 iwm_wait_phy_db_entry,
3024 sc->sc_phy_db);
3026 /* Will also start the device */
3027 ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
3028 if (ret) {
3029 device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
3030 ret);
3031 goto error;
3034 if (justnvm) {
3035 /* Read nvm */
3036 ret = iwm_nvm_init(sc);
3037 if (ret) {
3038 device_printf(sc->sc_dev, "failed to read nvm\n");
3039 goto error;
3041 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
3042 goto error;
3045 ret = iwm_send_bt_init_conf(sc);
3046 if (ret) {
3047 device_printf(sc->sc_dev,
3048 "failed to send bt coex configuration: %d\n", ret);
3049 goto error;
3052 /* Init Smart FIFO. */
3053 ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
3054 if (ret)
3055 goto error;
3057 /* Send TX valid antennas before triggering calibrations */
3058 ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
3059 if (ret) {
3060 device_printf(sc->sc_dev,
3061 "failed to send antennas before calibration: %d\n", ret);
3062 goto error;
3066 * Send phy configurations command to init uCode
3067 * to start the 16.0 uCode init image internal calibrations.
3069 ret = iwm_send_phy_cfg_cmd(sc);
3070 if (ret) {
3071 device_printf(sc->sc_dev,
3072 "%s: Failed to run INIT calibrations: %d\n",
3073 __func__, ret);
3074 goto error;
3078 * Nothing to do but wait for the init complete notification
3079 * from the firmware.
3081 IWM_UNLOCK(sc);
3082 ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3083 IWM_MVM_UCODE_CALIB_TIMEOUT);
3084 IWM_LOCK(sc);
3087 goto out;
3089 error:
3090 iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3091 out:
3092 return ret;
3096 * receive side
3099 /* (re)stock rx ring, called at init-time and at runtime */
3100 static int
3101 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3103 struct iwm_rx_ring *ring = &sc->rxq;
3104 struct iwm_rx_data *data = &ring->data[idx];
3105 struct mbuf *m;
3106 bus_dmamap_t dmamap;
3107 bus_dma_segment_t seg;
3108 int nsegs, error;
3110 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3111 if (m == NULL)
3112 return ENOBUFS;
3114 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3115 #if defined(__DragonFly__)
3116 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
3117 m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
3118 #else
3119 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3120 &seg, &nsegs, BUS_DMA_NOWAIT);
3121 #endif
3122 if (error != 0) {
3123 device_printf(sc->sc_dev,
3124 "%s: can't map mbuf, error %d\n", __func__, error);
3125 m_freem(m);
3126 return error;
3129 if (data->m != NULL)
3130 bus_dmamap_unload(ring->data_dmat, data->map);
3132 /* Swap ring->spare_map with data->map */
3133 dmamap = data->map;
3134 data->map = ring->spare_map;
3135 ring->spare_map = dmamap;
3137 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3138 data->m = m;
3140 /* Update RX descriptor. */
3141 KKASSERT((seg.ds_addr & 255) == 0);
3142 ring->desc[idx] = htole32(seg.ds_addr >> 8);
3143 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3144 BUS_DMASYNC_PREWRITE);
3146 return 0;
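/*
 * Editor's note on the descriptor update above: the hardware expects
 * a 256-byte-aligned DMA address stored right-shifted by 8 bits
 * (hence the KKASSERT). A buffer at physical address 0x12345600, for
 * example, is programmed as htole32(0x123456).
 */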
3150 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3151 * values are reported by the fw as positive values - need to negate
3152 * them to obtain their dBm. Account for missing antennas by replacing 0
3153 * values with -256 dBm: practically zero power and an infeasible 8-bit value.
3155 static int
3156 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3158 int energy_a, energy_b, energy_c, max_energy;
3159 uint32_t val;
3161 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3162 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3163 IWM_RX_INFO_ENERGY_ANT_A_POS;
3164 energy_a = energy_a ? -energy_a : -256;
3165 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3166 IWM_RX_INFO_ENERGY_ANT_B_POS;
3167 energy_b = energy_b ? -energy_b : -256;
3168 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3169 IWM_RX_INFO_ENERGY_ANT_C_POS;
3170 energy_c = energy_c ? -energy_c : -256;
3171 max_energy = MAX(energy_a, energy_b);
3172 max_energy = MAX(max_energy, energy_c);
3174 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3175 "energy In A %d B %d C %d , and max %d\n",
3176 energy_a, energy_b, energy_c, max_energy);
3178 return max_energy;
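/*
 * Worked example (editor's note, hypothetical readings): if the fw
 * reports energy_a = 35 and energy_b = 40 while antenna C is absent
 * (0), the per-antenna values become -35, -40 and -256 dBm, and the
 * function returns MAX(-35, -40, -256) = -35 dBm.
 */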
3181 static void
3182 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3184 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3186 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3188 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3192 * Retrieve the average noise (in dBm) among receivers.
3194 static int
3195 iwm_get_noise(struct iwm_softc *sc,
3196 const struct iwm_mvm_statistics_rx_non_phy *stats)
3198 int i, total, nbant, noise;
3200 total = nbant = noise = 0;
3201 for (i = 0; i < 3; i++) {
3202 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3203 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3204 __func__, i, noise);
3206 if (noise) {
3207 total += noise;
3208 nbant++;
3212 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3213 __func__, nbant, total);
3214 #if 0
3215 /* There should be at least one antenna but check anyway. */
3216 return (nbant == 0) ? -127 : (total / nbant) - 107;
3217 #else
3218 /* For now, just hard-code it to -96 to be safe */
3219 return (-96);
3220 #endif
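/*
 * Editor's note on the disabled average above, with hypothetical
 * beacon silence readings of 30, 26 and 0 (one silent antenna):
 * total = 56 and nbant = 2, so the computed noise floor would be
 * 56 / 2 - 107 = -79 dBm.
 */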
3224 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3226 * Handles the actual data of the Rx packet from the fw
3228 static void
3229 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m)
3231 struct ieee80211com *ic = &sc->sc_ic;
3232 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3233 struct ieee80211_frame *wh;
3234 struct ieee80211_node *ni;
3235 struct ieee80211_rx_stats rxs;
3236 struct iwm_rx_phy_info *phy_info;
3237 struct iwm_rx_mpdu_res_start *rx_res;
3238 struct iwm_rx_packet *pkt = mtod(m, struct iwm_rx_packet *);
3239 uint32_t len;
3240 uint32_t rx_pkt_status;
3241 int rssi;
3243 phy_info = &sc->sc_last_phy_info;
3244 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3245 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3246 len = le16toh(rx_res->byte_count);
3247 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3249 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3250 device_printf(sc->sc_dev,
3251 "dsp size out of range [0,20]: %d\n",
3252 phy_info->cfg_phy_cnt);
3253 return;
3256 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3257 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3258 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3259 "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3260 return; /* drop */
3263 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3264 /* Note: RSSI is absolute (i.e. a negative value) */
3265 if (rssi < IWM_MIN_DBM)
3266 rssi = IWM_MIN_DBM;
3267 else if (rssi > IWM_MAX_DBM)
3268 rssi = IWM_MAX_DBM;
3270 /* Map it to relative value */
3271 rssi = rssi - sc->sc_noise;
3273 /* replenish ring for the buffer we're going to feed to the sharks */
3274 if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3275 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3276 __func__);
3277 return;
3280 m->m_data = pkt->data + sizeof(*rx_res);
3281 m->m_pkthdr.len = m->m_len = len;
3283 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3284 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3286 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3288 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3289 "%s: phy_info: channel=%d, flags=0x%08x\n",
3290 __func__,
3291 le16toh(phy_info->channel),
3292 le16toh(phy_info->phy_flags));
3295 * Populate an RX state struct with the provided information.
3297 bzero(&rxs, sizeof(rxs));
3298 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3299 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3300 rxs.c_ieee = le16toh(phy_info->channel);
3301 if (le16toh(phy_info->phy_flags) & IWM_RX_RES_PHY_FLAGS_BAND_24) {
3302 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3303 } else {
3304 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3306 /* rssi is in 1/2db units */
3307 rxs.rssi = rssi * 2;
3308 rxs.nf = sc->sc_noise;
3310 if (ieee80211_radiotap_active_vap(vap)) {
3311 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3313 tap->wr_flags = 0;
3314 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3315 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3316 tap->wr_chan_freq = htole16(rxs.c_freq);
3317 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3318 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3319 tap->wr_dbm_antsignal = (int8_t)rssi;
3320 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3321 tap->wr_tsft = phy_info->system_timestamp;
3322 switch (phy_info->rate) {
3323 /* CCK rates. */
3324 case 10: tap->wr_rate = 2; break;
3325 case 20: tap->wr_rate = 4; break;
3326 case 55: tap->wr_rate = 11; break;
3327 case 110: tap->wr_rate = 22; break;
3328 /* OFDM rates. */
3329 case 0xd: tap->wr_rate = 12; break;
3330 case 0xf: tap->wr_rate = 18; break;
3331 case 0x5: tap->wr_rate = 24; break;
3332 case 0x7: tap->wr_rate = 36; break;
3333 case 0x9: tap->wr_rate = 48; break;
3334 case 0xb: tap->wr_rate = 72; break;
3335 case 0x1: tap->wr_rate = 96; break;
3336 case 0x3: tap->wr_rate = 108; break;
3337 /* Unknown rate: should not happen. */
3338 default: tap->wr_rate = 0;
3342 IWM_UNLOCK(sc);
3343 if (ni != NULL) {
3344 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3345 ieee80211_input_mimo(ni, m, &rxs);
3346 ieee80211_free_node(ni);
3347 } else {
3348 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3349 ieee80211_input_mimo_all(ic, m, &rxs);
3351 IWM_LOCK(sc);
3354 static int
3355 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3356 struct iwm_node *in)
3358 struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3359 struct ieee80211_node *ni = &in->in_ni;
3360 struct ieee80211vap *vap = ni->ni_vap;
3361 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3362 int failack = tx_resp->failure_frame;
3364 KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3366 /* Update rate control statistics. */
3367 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3368 __func__,
3369 (int) le16toh(tx_resp->status.status),
3370 (int) le16toh(tx_resp->status.sequence),
3371 tx_resp->frame_count,
3372 tx_resp->bt_kill_count,
3373 tx_resp->failure_rts,
3374 tx_resp->failure_frame,
3375 le32toh(tx_resp->initial_rate),
3376 (int) le16toh(tx_resp->wireless_media_time));
3378 if (status != IWM_TX_STATUS_SUCCESS &&
3379 status != IWM_TX_STATUS_DIRECT_DONE) {
3380 ieee80211_ratectl_tx_complete(vap, ni,
3381 IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3382 return (1);
3383 } else {
3384 ieee80211_ratectl_tx_complete(vap, ni,
3385 IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3386 return (0);
3390 static void
3391 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3393 struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3394 int idx = cmd_hdr->idx;
3395 int qid = cmd_hdr->qid;
3396 struct iwm_tx_ring *ring = &sc->txq[qid];
3397 struct iwm_tx_data *txd = &ring->data[idx];
3398 struct iwm_node *in = txd->in;
3399 struct mbuf *m = txd->m;
3400 int status;
3402 KASSERT(txd->done == 0, ("txd not done"));
3403 KASSERT(txd->in != NULL, ("txd without node"));
3404 KASSERT(txd->m != NULL, ("txd without mbuf"));
3406 sc->sc_tx_timer = 0;
3408 status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3410 /* Unmap and free mbuf. */
3411 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3412 bus_dmamap_unload(ring->data_dmat, txd->map);
3414 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3415 "free txd %p, in %p\n", txd, txd->in);
3416 txd->done = 1;
3417 txd->m = NULL;
3418 txd->in = NULL;
3420 ieee80211_tx_complete(&in->in_ni, m, status);
3422 if (--ring->queued < IWM_TX_RING_LOMARK) {
3423 sc->qfullmsk &= ~(1 << ring->qid);
3424 if (sc->qfullmsk == 0) {
3425 iwm_start(sc);
3431 * transmit side
3435 * Process a "command done" firmware notification. This is where we wake up
3436 * processes waiting for a synchronous command completion.
3437 * (Adapted from if_iwn.)
3439 static void
3440 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3442 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3443 struct iwm_tx_data *data;
3445 if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3446 return; /* Not a command ack. */
3449 data = &ring->data[pkt->hdr.idx];
3451 /* If the command was mapped in an mbuf, free it. */
3452 if (data->m != NULL) {
3453 bus_dmamap_sync(ring->data_dmat, data->map,
3454 BUS_DMASYNC_POSTWRITE);
3455 bus_dmamap_unload(ring->data_dmat, data->map);
3456 m_freem(data->m);
3457 data->m = NULL;
3459 wakeup(&ring->desc[pkt->hdr.idx]);
3461 if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3462 device_printf(sc->sc_dev,
3463 "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3464 __func__, pkt->hdr.idx, ring->queued, ring->cur);
3465 /* XXX call iwm_force_nmi() */
3468 KKASSERT(ring->queued > 0);
3469 ring->queued--;
3470 if (ring->queued == 0)
3471 iwm_pcie_clear_cmd_in_flight(sc);
3474 #if 0
3476 * necessary only for block ack mode
3478 void
3479 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3480 uint16_t len)
3482 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3483 uint16_t w_val;
3485 scd_bc_tbl = sc->sched_dma.vaddr;
3487 len += 8; /* magic numbers came naturally from paris */
3488 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3489 len = roundup(len, 4) / 4;
3491 w_val = htole16(sta_id << 12 | len);
3493 /* Update TX scheduler. */
3494 scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3495 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3496 BUS_DMASYNC_PREWRITE);
3498 /* I really wonder what this is ?!? */
3499 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3500 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3501 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3502 BUS_DMASYNC_PREWRITE);
3505 #endif
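/*
 * Worked example for the scheduler byte-count update above (editor's
 * note, hypothetical values): sta_id = 1 and len = 200 becomes
 * len = 208 after the "+ 8"; with IWM_UCODE_TLV_FLAGS_DW_BC_TABLE set
 * it is converted to roundup(208, 4) / 4 = 52 dwords, so
 * w_val = htole16(1 << 12 | 52) = htole16(0x1034).
 */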
3508 * Take an 802.11 (non-n) rate and find the relevant rate
3509 * table entry. Return the index into in_ridx[].
3511 * The caller then uses that index back into in_ridx
3512 * to figure out the rate index programmed /into/
3513 * the firmware for this given node.
3515 static int
3516 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3517 uint8_t rate)
3519 int i;
3520 uint8_t r;
3522 for (i = 0; i < nitems(in->in_ridx); i++) {
3523 r = iwm_rates[in->in_ridx[i]].rate;
3524 if (rate == r)
3525 return (i);
3527 /* XXX Return the first */
3528 /* XXX TODO: have it return the /lowest/ */
3529 return (0);
3533 * Fill in the rate related information for a transmit command.
3535 static const struct iwm_rate *
3536 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3537 struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
3539 struct ieee80211com *ic = &sc->sc_ic;
3540 struct ieee80211_node *ni = &in->in_ni;
3541 const struct iwm_rate *rinfo;
3542 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3543 int ridx, rate_flags;
3545 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3546 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3549 * XXX TODO: everything about the rate selection here is terrible!
3552 if (type == IEEE80211_FC0_TYPE_DATA) {
3553 int i;
3554 /* for data frames, use RS table */
3555 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3556 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3557 ridx = in->in_ridx[i];
3559 /* This is the index into the programmed table */
3560 tx->initial_rate_index = i;
3561 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3562 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3563 "%s: start with i=%d, txrate %d\n",
3564 __func__, i, iwm_rates[ridx].rate);
3565 } else {
3567 * For non-data, use the lowest supported rate for the given
3568 * operational mode.
3570 * Note: there may not be any rate control information available.
3571 * This driver currently assumes that if we're transmitting data
3572 * frames, we use the rate control table. Grr.
3574 * XXX TODO: use the configured rate for the traffic type!
3575 * XXX TODO: this should be per-vap, not curmode; as we later
3576 * on we'll want to handle off-channel stuff (eg TDLS).
3578 if (ic->ic_curmode == IEEE80211_MODE_11A) {
3580 * XXX this assumes the mode is either 11a or not 11a;
3581 * definitely won't work for 11n.
3583 ridx = IWM_RIDX_OFDM;
3584 } else {
3585 ridx = IWM_RIDX_CCK;
3589 rinfo = &iwm_rates[ridx];
3591 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3592 __func__, ridx,
3593 rinfo->rate,
3594 !! (IWM_RIDX_IS_CCK(ridx))
3597 /* XXX TODO: hard-coded TX antenna? */
3598 rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3599 if (IWM_RIDX_IS_CCK(ridx))
3600 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3601 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3603 return rinfo;
3606 #define TB0_SIZE 16
3607 static int
3608 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3610 struct ieee80211com *ic = &sc->sc_ic;
3611 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3612 struct iwm_node *in = IWM_NODE(ni);
3613 struct iwm_tx_ring *ring;
3614 struct iwm_tx_data *data;
3615 struct iwm_tfd *desc;
3616 struct iwm_device_cmd *cmd;
3617 struct iwm_tx_cmd *tx;
3618 struct ieee80211_frame *wh;
3619 struct ieee80211_key *k = NULL;
3620 #if !defined(__DragonFly__)
3621 struct mbuf *m1;
3622 #endif
3623 const struct iwm_rate *rinfo;
3624 uint32_t flags;
3625 u_int hdrlen;
3626 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3627 int nsegs;
3628 uint8_t tid, type;
3629 int i, totlen, error, pad;
3631 wh = mtod(m, struct ieee80211_frame *);
3632 hdrlen = ieee80211_anyhdrsize(wh);
3633 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3634 tid = 0;
3635 ring = &sc->txq[ac];
3636 desc = &ring->desc[ring->cur];
3637 memset(desc, 0, sizeof(*desc));
3638 data = &ring->data[ring->cur];
3640 /* Fill out iwm_tx_cmd to send to the firmware */
3641 cmd = &ring->cmd[ring->cur];
3642 cmd->hdr.code = IWM_TX_CMD;
3643 cmd->hdr.flags = 0;
3644 cmd->hdr.qid = ring->qid;
3645 cmd->hdr.idx = ring->cur;
3647 tx = (void *)cmd->data;
3648 memset(tx, 0, sizeof(*tx));
3650 rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
3652 /* Encrypt the frame if need be. */
3653 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3654 /* Retrieve key for TX && do software encryption. */
3655 k = ieee80211_crypto_encap(ni, m);
3656 if (k == NULL) {
3657 m_freem(m);
3658 return (ENOBUFS);
3660 /* 802.11 header may have moved. */
3661 wh = mtod(m, struct ieee80211_frame *);
3664 if (ieee80211_radiotap_active_vap(vap)) {
3665 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3667 tap->wt_flags = 0;
3668 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3669 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3670 tap->wt_rate = rinfo->rate;
3671 if (k != NULL)
3672 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3673 ieee80211_radiotap_tx(vap, m);
3677 totlen = m->m_pkthdr.len;
3679 flags = 0;
3680 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3681 flags |= IWM_TX_CMD_FLG_ACK;
3684 if (type == IEEE80211_FC0_TYPE_DATA
3685 && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3686 && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3687 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3690 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3691 type != IEEE80211_FC0_TYPE_DATA)
3692 tx->sta_id = sc->sc_aux_sta.sta_id;
3693 else
3694 tx->sta_id = IWM_STATION_ID;
3696 if (type == IEEE80211_FC0_TYPE_MGT) {
3697 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3699 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3700 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3701 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3702 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3703 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3704 } else {
3705 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3707 } else {
3708 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3711 if (hdrlen & 3) {
3712 /* First segment length must be a multiple of 4. */
3713 flags |= IWM_TX_CMD_FLG_MH_PAD;
3714 pad = 4 - (hdrlen & 3);
3715 } else
3716 pad = 0;
3718 tx->driver_txop = 0;
3719 tx->next_frame_len = 0;
3721 tx->len = htole16(totlen);
3722 tx->tid_tspec = tid;
3723 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3725 /* Set physical address of "scratch area". */
3726 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3727 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3729 /* Copy 802.11 header in TX command. */
3730 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3732 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3734 tx->sec_ctl = 0;
3735 tx->tx_flags |= htole32(flags);
3737 /* Trim 802.11 header. */
3738 m_adj(m, hdrlen);
3739 #if defined(__DragonFly__)
3740 error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
3741 segs, IWM_MAX_SCATTER - 2,
3742 &nsegs, BUS_DMA_NOWAIT);
3743 #else
3744 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3745 segs, &nsegs, BUS_DMA_NOWAIT);
3746 #endif
3747 if (error != 0) {
3748 #if defined(__DragonFly__)
3749 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3750 error);
3751 m_freem(m);
3752 return error;
3753 #else
3754 if (error != EFBIG) {
3755 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3756 error);
3757 m_freem(m);
3758 return error;
3760 /* Too many DMA segments, linearize mbuf. */
3761 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3762 if (m1 == NULL) {
3763 device_printf(sc->sc_dev,
3764 "%s: could not defrag mbuf\n", __func__);
3765 m_freem(m);
3766 return (ENOBUFS);
3768 m = m1;
3770 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3771 segs, &nsegs, BUS_DMA_NOWAIT);
3772 if (error != 0) {
3773 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3774 error);
3775 m_freem(m);
3776 return error;
3778 #endif
3780 data->m = m;
3781 data->in = in;
3782 data->done = 0;
3784 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3785 "sending txd %p, in %p\n", data, data->in);
3786 KASSERT(data->in != NULL, ("node is NULL"));
3788 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3789 "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3790 ring->qid, ring->cur, totlen, nsegs,
3791 le32toh(tx->tx_flags),
3792 le32toh(tx->rate_n_flags),
3793 tx->initial_rate_index
3796 /* Fill TX descriptor. */
3797 desc->num_tbs = 2 + nsegs;
3799 desc->tbs[0].lo = htole32(data->cmd_paddr);
3800 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3801 (TB0_SIZE << 4);
3802 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3803 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3804 ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3805 + hdrlen + pad - TB0_SIZE) << 4);
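/*
 * Layout note: TB0 covers the first TB0_SIZE bytes of the command,
 * TB1 the rest of the command header and TX command plus the (padded)
 * 802.11 header copied in earlier; the mbuf payload segments then
 * follow as TB2..TB(nsegs+1), matching desc->num_tbs = 2 + nsegs above.
 */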
3807 /* Other DMA segments are for data payload. */
3808 for (i = 0; i < nsegs; i++) {
3809 seg = &segs[i];
3810 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3811 desc->tbs[i+2].hi_n_len =
3812 htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3813 | ((seg->ds_len) << 4);
3816 bus_dmamap_sync(ring->data_dmat, data->map,
3817 BUS_DMASYNC_PREWRITE);
3818 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3819 BUS_DMASYNC_PREWRITE);
3820 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3821 BUS_DMASYNC_PREWRITE);
3823 #if 0
3824 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3825 #endif
3827 /* Kick TX ring. */
3828 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3829 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
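/*
 * Encoding note: IWM_HBUS_TARG_WRPTR takes the TX queue id in the
 * high byte and the new write index in the low byte, which is what
 * (ring->qid << 8 | ring->cur) above packs together.
 */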
3831 /* Mark TX ring as full if we reach a certain threshold. */
3832 if (++ring->queued > IWM_TX_RING_HIMARK) {
3833 sc->qfullmsk |= 1 << ring->qid;
3836 return 0;
3839 static int
3840 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3841 const struct ieee80211_bpf_params *params)
3843 struct ieee80211com *ic = ni->ni_ic;
3844 struct iwm_softc *sc = ic->ic_softc;
3845 int error = 0;
3847 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3848 "->%s begin\n", __func__);
3850 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3851 m_freem(m);
3852 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3853 "<-%s not RUNNING\n", __func__);
3854 return (ENETDOWN);
3857 IWM_LOCK(sc);
3858 /* XXX fix this */
3859 if (params == NULL) {
3860 error = iwm_tx(sc, m, ni, 0);
3861 } else {
3862 error = iwm_tx(sc, m, ni, 0);
3864 sc->sc_tx_timer = 5;
3865 IWM_UNLOCK(sc);
3867 return (error);
3871 * mvm/tx.c
3875 * Note that there are transports that buffer frames before they reach
3876 * the firmware. This means that after flush_tx_path is called, the
3877 * queue might not be empty. The race-free way to handle this is to:
3878 * 1) set the station as draining
3879 * 2) flush the Tx path
3880 * 3) wait for the transport queues to be empty
3882 static int
3883 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3885 int ret;
3886 struct iwm_tx_path_flush_cmd flush_cmd = {
3887 .queues_ctl = htole32(tfd_msk),
3888 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3891 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3892 sizeof(flush_cmd), &flush_cmd);
3893 if (ret)
3894 device_printf(sc->sc_dev,
3895 "Flushing tx queue failed: %d\n", ret);
3896 return ret;
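/*
 * A minimal sketch of the race-free sequence described above, using a
 * hypothetical iwm_mvm_drain_sta() helper (neither draining nor
 * iwm_trans_wait_tx_queue_empty() is implemented in this driver; the
 * latter only appears as a commented-out call in iwm_release()):
 *
 *	iwm_mvm_drain_sta(sc, in, TRUE);		   (1) mark draining
 *	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);  (2) flush
 *	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);	   (3) wait for empty
 *	iwm_mvm_drain_sta(sc, in, FALSE);
 */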
3899 static int
3900 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3901 struct iwm_mvm_add_sta_cmd *cmd, int *status)
3903 return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3904 cmd, status);
3907 /* send station add/update command to firmware */
3908 static int
3909 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3911 struct iwm_mvm_add_sta_cmd add_sta_cmd;
3912 int ret;
3913 uint32_t status;
3915 memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3917 add_sta_cmd.sta_id = IWM_STATION_ID;
3918 add_sta_cmd.mac_id_n_color
3919 = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3920 IWM_DEFAULT_COLOR));
3921 if (!update) {
3922 int ac;
3923 for (ac = 0; ac < WME_NUM_AC; ac++) {
3924 add_sta_cmd.tfd_queue_msk |=
3925 htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3927 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3929 add_sta_cmd.add_modify = update ? 1 : 0;
3930 add_sta_cmd.station_flags_msk
3931 |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3932 add_sta_cmd.tid_disable_tx = htole16(0xffff);
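/*
 * Assuming the iwlwifi field semantics carry over: 0xffff sets the
 * disable bit for all 16 TIDs, keeping per-TID aggregation queues
 * off, which matches this driver doing no A-MPDU TX.
 */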
3933 if (update)
3934 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3936 status = IWM_ADD_STA_SUCCESS;
3937 ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3938 if (ret)
3939 return ret;
3941 switch (status) {
3942 case IWM_ADD_STA_SUCCESS:
3943 break;
3944 default:
3945 ret = EIO;
3946 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3947 break;
3950 return ret;
3953 static int
3954 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3956 return iwm_mvm_sta_send_to_fw(sc, in, 0);
3959 static int
3960 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3962 return iwm_mvm_sta_send_to_fw(sc, in, 1);
3965 static int
3966 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3967 const uint8_t *addr, uint16_t mac_id, uint16_t color)
3969 struct iwm_mvm_add_sta_cmd cmd;
3970 int ret;
3971 uint32_t status;
3973 memset(&cmd, 0, sizeof(cmd));
3974 cmd.sta_id = sta->sta_id;
3975 cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3977 cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3978 cmd.tid_disable_tx = htole16(0xffff);
3980 if (addr)
3981 IEEE80211_ADDR_COPY(cmd.addr, addr);
3983 ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3984 if (ret)
3985 return ret;
3987 switch (status) {
3988 case IWM_ADD_STA_SUCCESS:
3989 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3990 "%s: Internal station added.\n", __func__);
3991 return 0;
3992 default:
3993 device_printf(sc->sc_dev,
3994 "%s: Add internal station failed, status=0x%x\n",
3995 __func__, status);
3996 ret = EIO;
3997 break;
3999 return ret;
4002 static int
4003 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
4005 int ret;
4007 sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
4008 sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
4010 ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
4011 if (ret)
4012 return ret;
4014 ret = iwm_mvm_add_int_sta_common(sc,
4015 &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
4017 if (ret)
4018 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
4019 return ret;
4022 static int
4023 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
4025 struct iwm_time_quota_cmd cmd;
4026 int i, idx, ret, num_active_macs, quota, quota_rem;
4027 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4028 int n_ifs[IWM_MAX_BINDINGS] = {0, };
4029 uint16_t id;
4031 memset(&cmd, 0, sizeof(cmd));
4033 /* currently, PHY ID == binding ID */
4034 if (ivp) {
4035 id = ivp->phy_ctxt->id;
4036 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4037 colors[id] = ivp->phy_ctxt->color;
4040 n_ifs[id] = 1;
4044 * The FW's scheduling session consists of
4045 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
4046 * equally between all the bindings that require quota.
4048 num_active_macs = 0;
4049 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4050 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4051 num_active_macs += n_ifs[i];
4054 quota = 0;
4055 quota_rem = 0;
4056 if (num_active_macs) {
4057 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
4058 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
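/*
 * Worked example (IWM_MVM_MAX_QUOTA is 128): with three active MACs,
 * quota == 42 fragments per binding and quota_rem == 2; the loop
 * below hands each binding its share, and the remainder is credited
 * to the first binding afterwards.
 */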
4061 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4062 if (colors[i] < 0)
4063 continue;
4065 cmd.quotas[idx].id_and_color =
4066 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4068 if (n_ifs[i] <= 0) {
4069 cmd.quotas[idx].quota = htole32(0);
4070 cmd.quotas[idx].max_duration = htole32(0);
4071 } else {
4072 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4073 cmd.quotas[idx].max_duration = htole32(0);
4075 idx++;
4078 /* Give the remainder of the session to the first binding */
4079 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4081 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4082 sizeof(cmd), &cmd);
4083 if (ret)
4084 device_printf(sc->sc_dev,
4085 "%s: Failed to send quota: %d\n", __func__, ret);
4086 return ret;
4090 * ieee80211 routines
4094 * Change to AUTH state in 80211 state machine. Roughly matches what
4095 * Linux does in bss_info_changed().
4097 static int
4098 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4100 struct ieee80211_node *ni;
4101 struct iwm_node *in;
4102 struct iwm_vap *iv = IWM_VAP(vap);
4103 uint32_t duration;
4104 int error;
4107 * XXX i have a feeling that the vap node is being
4108 * freed from underneath us. Grr.
4110 ni = ieee80211_ref_node(vap->iv_bss);
4111 in = IWM_NODE(ni);
4112 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4113 "%s: called; vap=%p, bss ni=%p\n",
4114 __func__,
4115 vap,
4116 ni);
4118 in->in_assoc = 0;
4120 error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4121 if (error != 0)
4122 return error;
4124 error = iwm_allow_mcast(vap, sc);
4125 if (error) {
4126 device_printf(sc->sc_dev,
4127 "%s: failed to set multicast\n", __func__);
4128 goto out;
4132 * This is where it deviates from what Linux does.
4134 * Linux iwlwifi doesn't reset the nic each time, nor does it
4135 * call ctxt_add() here. Instead, it adds it during vap creation,
4136 * and always does a mac_ctx_changed().
4138 * The OpenBSD port doesn't attempt to do that - it resets things
4139 * at odd states and does the add here.
4141 * So, until the state handling is fixed (i.e., we never reset
4142 * the NIC except for a firmware failure, which should drag
4143 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4144 * contexts that are required), let's do a dirty hack here.
4146 if (iv->is_uploaded) {
4147 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4148 device_printf(sc->sc_dev,
4149 "%s: failed to update MAC\n", __func__);
4150 goto out;
4152 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4153 in->in_ni.ni_chan, 1, 1)) != 0) {
4154 device_printf(sc->sc_dev,
4155 "%s: failed update phy ctxt\n", __func__);
4156 goto out;
4158 iv->phy_ctxt = &sc->sc_phyctxt[0];
4160 if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4161 device_printf(sc->sc_dev,
4162 "%s: binding update cmd\n", __func__);
4163 goto out;
4165 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4166 device_printf(sc->sc_dev,
4167 "%s: failed to update sta\n", __func__);
4168 goto out;
4170 } else {
4171 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4172 device_printf(sc->sc_dev,
4173 "%s: failed to add MAC\n", __func__);
4174 goto out;
4176 if ((error = iwm_mvm_power_update_mac(sc)) != 0) {
4177 device_printf(sc->sc_dev,
4178 "%s: failed to update power management\n",
4179 __func__);
4180 goto out;
4182 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4183 in->in_ni.ni_chan, 1, 1)) != 0) {
4184 device_printf(sc->sc_dev,
4185 "%s: failed add phy ctxt!\n", __func__);
4186 error = ETIMEDOUT;
4187 goto out;
4189 iv->phy_ctxt = &sc->sc_phyctxt[0];
4191 if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4192 device_printf(sc->sc_dev,
4193 "%s: binding add cmd\n", __func__);
4194 goto out;
4196 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4197 device_printf(sc->sc_dev,
4198 "%s: failed to add sta\n", __func__);
4199 goto out;
4204 * Prevent the FW from wandering off channel during association
4205 * by "protecting" the session with a time event.
4207 /* XXX duration is in units of TU, not MS */
4208 duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4209 iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
4210 DELAY(100);
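/*
 * On the XXX above: a TU is 1024 microseconds, so passing a value
 * expressed in milliseconds protects the session ~2.4% longer than
 * the constant's name suggests, which is harmless here.
 */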
4212 error = 0;
4213 out:
4214 ieee80211_free_node(ni);
4215 return (error);
4218 static int
4219 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4221 struct iwm_node *in = IWM_NODE(vap->iv_bss);
4222 int error;
4224 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4225 device_printf(sc->sc_dev,
4226 "%s: failed to update STA\n", __func__);
4227 return error;
4230 in->in_assoc = 1;
4231 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4232 device_printf(sc->sc_dev,
4233 "%s: failed to update MAC\n", __func__);
4234 return error;
4237 return 0;
4240 static int
4241 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4243 uint32_t tfd_msk;
4246 * Ok, so *technically* the proper set of calls for going
4247 * from RUN back to SCAN is:
4249 * iwm_mvm_power_mac_disable(sc, in);
4250 * iwm_mvm_mac_ctxt_changed(sc, in);
4251 * iwm_mvm_rm_sta(sc, in);
4252 * iwm_mvm_update_quotas(sc, NULL);
4253 * iwm_mvm_mac_ctxt_changed(sc, in);
4254 * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4255 * iwm_mvm_mac_ctxt_remove(sc, in);
4257 * However, that freezes the device no matter which permutations
4258 * and modifications are attempted. Obviously, this driver is missing
4259 * something since it works in the Linux driver, but figuring out what
4260 * is missing is a little more complicated. Now, since we're going
4261 * back to nothing anyway, we'll just do a complete device reset.
4262 * Up yours, device!
4265 * Just using 0xf for the queues mask is fine as long as we only
4266 * get here from RUN state.
4268 tfd_msk = 0xf;
4269 mbufq_drain(&sc->sc_snd);
4270 iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4272 * We seem to get away with just synchronously sending the
4273 * IWM_TXPATH_FLUSH command.
4275 // iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4276 iwm_stop_device(sc);
4277 iwm_init_hw(sc);
4278 if (in)
4279 in->in_assoc = 0;
4280 return 0;
4282 #if 0
4283 int error;
4285 iwm_mvm_power_mac_disable(sc, in);
4287 if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4288 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4289 return error;
4292 if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4293 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4294 return error;
4297 in->in_assoc = 0;
4298 iwm_mvm_update_quotas(sc, NULL);
4299 if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4300 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4301 return error;
4303 iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4305 iwm_mvm_mac_ctxt_remove(sc, in);
4307 return error;
4308 #endif
4311 static struct ieee80211_node *
4312 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4314 return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4315 M_INTWAIT | M_ZERO);
4318 uint8_t
4319 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4321 int i;
4322 uint8_t rval;
4324 for (i = 0; i < rs->rs_nrates; i++) {
4325 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4326 if (rval == iwm_rates[ridx].rate)
4327 return rs->rs_rates[i];
4330 return 0;
4333 static void
4334 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4336 struct ieee80211_node *ni = &in->in_ni;
4337 struct iwm_lq_cmd *lq = &in->in_lq;
4338 int nrates = ni->ni_rates.rs_nrates;
4339 int i, ridx, tab = 0;
4340 int txant = 0;
4342 if (nrates > nitems(lq->rs_table)) {
4343 device_printf(sc->sc_dev,
4344 "%s: node supports %d rates, driver handles "
4345 "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4346 return;
4348 if (nrates == 0) {
4349 device_printf(sc->sc_dev,
4350 "%s: node supports 0 rates, odd!\n", __func__);
4351 return;
4355 * XXX .. and most of iwm_node is not initialised explicitly;
4356 * it's all just 0x0 passed to the firmware.
4359 /* first figure out which rates we should support */
4360 /* XXX TODO: this isn't 11n aware /at all/ */
4361 memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4362 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4363 "%s: nrates=%d\n", __func__, nrates);
4366 * Loop over nrates and populate in_ridx from the highest
4367 * rate to the lowest rate. Remember, in_ridx[] has
4368 * IEEE80211_RATE_MAXSIZE entries!
4370 for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4371 int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4373 /* Map 802.11 rate to HW rate index. */
4374 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4375 if (iwm_rates[ridx].rate == rate)
4376 break;
4377 if (ridx > IWM_RIDX_MAX) {
4378 device_printf(sc->sc_dev,
4379 "%s: WARNING: device rate for %d not found!\n",
4380 __func__, rate);
4381 } else {
4382 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4383 "%s: rate: i: %d, rate=%d, ridx=%d\n",
4384 __func__,
4386 rate,
4387 ridx);
4388 in->in_ridx[i] = ridx;
4392 /* then construct a lq_cmd based on those */
4393 memset(lq, 0, sizeof(*lq));
4394 lq->sta_id = IWM_STATION_ID;
4396 /* For HT, always enable RTS/CTS to avoid excessive retries. */
4397 if (ni->ni_flags & IEEE80211_NODE_HT)
4398 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4401 * are these used? (we don't do SISO or MIMO)
4402 * need to set them to non-zero, though, or we get an error.
4404 lq->single_stream_ant_msk = 1;
4405 lq->dual_stream_ant_msk = 1;
4408 * Build the actual rate selection table.
4409 * The lowest bits are the rates. Additionally,
4410 * CCK needs bit 9 to be set. The rest of the bits
4411 * we add to the table select the tx antenna.
4412 * Note that we add the rates with the highest rate first
4413 * (opposite of ni_rates).
4416 * XXX TODO: this should be looping over the min of nrates
4417 * and LQ_MAX_RETRY_NUM. Sigh.
4419 for (i = 0; i < nrates; i++) {
4420 int nextant;
4422 if (txant == 0)
4423 txant = iwm_mvm_get_valid_tx_ant(sc);
4424 nextant = 1<<(ffs(txant)-1);
4425 txant &= ~nextant;
4428 * Map the rate id into a rate index into
4429 * our hardware table containing the
4430 * configuration to use for this rate.
4432 ridx = in->in_ridx[i];
4433 tab = iwm_rates[ridx].plcp;
4434 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4435 if (IWM_RIDX_IS_CCK(ridx))
4436 tab |= IWM_RATE_MCS_CCK_MSK;
4437 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4438 "station rate i=%d, rate=%d, hw=%x\n",
4439 i, iwm_rates[ridx].rate, tab);
4440 lq->rs_table[i] = htole32(tab);
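/*
 * Worked example, assuming antenna A and the 1 Mbps CCK entry with
 * PLCP value 10 in iwm_rates[]: with IWM_RATE_MCS_ANT_POS == 14 and
 * the CCK flag at bit 9, tab = 10 | (1 << 14) | (1 << 9) = 0x420a.
 */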
4442 /* then fill the rest with the lowest possible rate */
4443 for (i = nrates; i < nitems(lq->rs_table); i++) {
4444 KASSERT(tab != 0, ("invalid tab"));
4445 lq->rs_table[i] = htole32(tab);
4449 static int
4450 iwm_media_change(struct ifnet *ifp)
4452 struct ieee80211vap *vap = ifp->if_softc;
4453 struct ieee80211com *ic = vap->iv_ic;
4454 struct iwm_softc *sc = ic->ic_softc;
4455 int error;
4457 error = ieee80211_media_change(ifp);
4458 if (error != ENETRESET)
4459 return error;
4461 IWM_LOCK(sc);
4462 if (ic->ic_nrunning > 0) {
4463 iwm_stop(sc);
4464 iwm_init(sc);
4466 IWM_UNLOCK(sc);
4467 return error;
4471 static int
4472 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4474 struct iwm_vap *ivp = IWM_VAP(vap);
4475 struct ieee80211com *ic = vap->iv_ic;
4476 struct iwm_softc *sc = ic->ic_softc;
4477 struct iwm_node *in;
4478 int error;
4480 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4481 "switching state %s -> %s\n",
4482 ieee80211_state_name[vap->iv_state],
4483 ieee80211_state_name[nstate]);
4484 IEEE80211_UNLOCK(ic);
4485 IWM_LOCK(sc);
4487 if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4488 iwm_led_blink_stop(sc);
4490 /* disable beacon filtering if we're hopping out of RUN */
4491 if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4492 iwm_mvm_disable_beacon_filter(sc);
4494 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4495 in->in_assoc = 0;
4497 if (nstate == IEEE80211_S_INIT) {
4498 IWM_UNLOCK(sc);
4499 IEEE80211_LOCK(ic);
4500 error = ivp->iv_newstate(vap, nstate, arg);
4501 IEEE80211_UNLOCK(ic);
4502 IWM_LOCK(sc);
4503 iwm_release(sc, NULL);
4504 IWM_UNLOCK(sc);
4505 IEEE80211_LOCK(ic);
4506 return error;
4510 * It's impossible to go directly RUN->SCAN. If we call iwm_release()
4511 * above, the card will be completely reinitialized,
4512 * so the driver must do everything necessary to bring the card
4513 * from INIT to SCAN.
4515 * Additionally, upon receiving a deauth frame from the AP,
4516 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4517 * state. This will also fail with this driver, so bring the FSM
4518 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4520 * XXX TODO: fix this for FreeBSD!
4522 if (nstate == IEEE80211_S_SCAN ||
4523 nstate == IEEE80211_S_AUTH ||
4524 nstate == IEEE80211_S_ASSOC) {
4525 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4526 "Force transition to INIT; MGT=%d\n", arg);
4527 IWM_UNLOCK(sc);
4528 IEEE80211_LOCK(ic);
4529 /* Always pass arg as -1 since we can't Tx right now. */
4531 * XXX arg is just ignored anyway when transitioning
4532 * to IEEE80211_S_INIT.
4534 vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4535 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4536 "Going INIT->SCAN\n");
4537 nstate = IEEE80211_S_SCAN;
4538 IEEE80211_UNLOCK(ic);
4539 IWM_LOCK(sc);
4543 switch (nstate) {
4544 case IEEE80211_S_INIT:
4545 break;
4547 case IEEE80211_S_AUTH:
4548 if ((error = iwm_auth(vap, sc)) != 0) {
4549 device_printf(sc->sc_dev,
4550 "%s: could not move to auth state: %d\n",
4551 __func__, error);
4552 break;
4554 break;
4556 case IEEE80211_S_ASSOC:
4558 * EBS may be disabled due to previous failures reported by FW.
4559 * Reset EBS status here assuming environment has been changed.
4561 sc->last_ebs_successful = TRUE;
4562 if ((error = iwm_assoc(vap, sc)) != 0) {
4563 device_printf(sc->sc_dev,
4564 "%s: failed to associate: %d\n", __func__,
4565 error);
4566 break;
4568 break;
4570 case IEEE80211_S_RUN:
4572 struct iwm_host_cmd cmd = {
4573 .id = IWM_LQ_CMD,
4574 .len = { sizeof(in->in_lq), },
4575 .flags = IWM_CMD_SYNC,
4578 /* Update the association state, now we have it all */
4579 /* (e.g. associd comes in at this point) */
4580 error = iwm_assoc(vap, sc);
4581 if (error != 0) {
4582 device_printf(sc->sc_dev,
4583 "%s: failed to update association state: %d\n",
4584 __func__,
4585 error);
4586 break;
4589 in = IWM_NODE(vap->iv_bss);
4590 iwm_mvm_enable_beacon_filter(sc, in);
4591 iwm_mvm_power_update_mac(sc);
4592 iwm_mvm_update_quotas(sc, ivp);
4593 iwm_setrates(sc, in);
4595 cmd.data[0] = &in->in_lq;
4596 if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4597 device_printf(sc->sc_dev,
4598 "%s: IWM_LQ_CMD failed\n", __func__);
4601 iwm_mvm_led_enable(sc);
4602 break;
4605 default:
4606 break;
4608 IWM_UNLOCK(sc);
4609 IEEE80211_LOCK(ic);
4611 return (ivp->iv_newstate(vap, nstate, arg));
4614 void
4615 iwm_endscan_cb(void *arg, int pending)
4617 struct iwm_softc *sc = arg;
4618 struct ieee80211com *ic = &sc->sc_ic;
4620 IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4621 "%s: scan ended\n",
4622 __func__);
4624 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4628 * Aging and idle timeouts for the different possible scenarios
4629 * in default configuration.
4631 static const uint32_t
4632 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4634 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4635 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4638 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4639 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4642 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4643 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4646 htole32(IWM_SF_BA_AGING_TIMER_DEF),
4647 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4650 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4651 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4656 * Aging and idle timeouts for the different possible scenarios
4657 * in single BSS MAC configuration.
4659 static const uint32_t
4660 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4662 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4663 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4666 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4667 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4670 htole32(IWM_SF_MCAST_AGING_TIMER),
4671 htole32(IWM_SF_MCAST_IDLE_TIMER)
4674 htole32(IWM_SF_BA_AGING_TIMER),
4675 htole32(IWM_SF_BA_IDLE_TIMER)
4678 htole32(IWM_SF_TX_RE_AGING_TIMER),
4679 htole32(IWM_SF_TX_RE_IDLE_TIMER)
4683 static void
4684 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4685 struct ieee80211_node *ni)
4687 int i, j, watermark;
4689 sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4692 * If we are in association flow - check antenna configuration
4693 * capabilities of the AP station, and choose the watermark accordingly.
4695 if (ni) {
4696 if (ni->ni_flags & IEEE80211_NODE_HT) {
4697 #ifdef notyet
4698 if (ni->ni_rxmcs[2] != 0)
4699 watermark = IWM_SF_W_MARK_MIMO3;
4700 else if (ni->ni_rxmcs[1] != 0)
4701 watermark = IWM_SF_W_MARK_MIMO2;
4702 else
4703 #endif
4704 watermark = IWM_SF_W_MARK_SISO;
4705 } else {
4706 watermark = IWM_SF_W_MARK_LEGACY;
4708 /* default watermark value for unassociated mode. */
4709 } else {
4710 watermark = IWM_SF_W_MARK_MIMO2;
4712 sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4714 for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4715 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4716 sf_cmd->long_delay_timeouts[i][j] =
4717 htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4721 if (ni) {
4722 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4723 sizeof(iwm_sf_full_timeout));
4724 } else {
4725 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4726 sizeof(iwm_sf_full_timeout_def));
4730 static int
4731 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4733 struct ieee80211com *ic = &sc->sc_ic;
4734 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4735 struct iwm_sf_cfg_cmd sf_cmd = {
4736 .state = htole32(IWM_SF_FULL_ON),
4738 int ret = 0;
4740 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4741 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4743 switch (new_state) {
4744 case IWM_SF_UNINIT:
4745 case IWM_SF_INIT_OFF:
4746 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4747 break;
4748 case IWM_SF_FULL_ON:
4749 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4750 break;
4751 default:
4752 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4753 "Invalid state: %d. not sending Smart Fifo cmd\n",
4754 new_state);
4755 return EINVAL;
4758 ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4759 sizeof(sf_cmd), &sf_cmd);
4760 return ret;
4763 static int
4764 iwm_send_bt_init_conf(struct iwm_softc *sc)
4766 struct iwm_bt_coex_cmd bt_cmd;
4768 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4769 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4771 return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4772 &bt_cmd);
4775 static int
4776 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4778 struct iwm_mcc_update_cmd mcc_cmd;
4779 struct iwm_host_cmd hcmd = {
4780 .id = IWM_MCC_UPDATE_CMD,
4781 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4782 .data = { &mcc_cmd },
4784 int ret;
4785 #ifdef IWM_DEBUG
4786 struct iwm_rx_packet *pkt;
4787 struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4788 struct iwm_mcc_update_resp *mcc_resp;
4789 int n_channels;
4790 uint16_t mcc;
4791 #endif
4792 int resp_v2 = fw_has_capa(&sc->ucode_capa,
4793 IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4795 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4796 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4797 if (fw_has_api(&sc->ucode_capa, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4798 fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4799 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4800 else
4801 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4803 if (resp_v2)
4804 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4805 else
4806 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4808 IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4809 "send MCC update to FW with '%c%c' src = %d\n",
4810 alpha2[0], alpha2[1], mcc_cmd.source_id);
4812 ret = iwm_send_cmd(sc, &hcmd);
4813 if (ret)
4814 return ret;
4816 #ifdef IWM_DEBUG
4817 pkt = hcmd.resp_pkt;
4819 /* Extract MCC response */
4820 if (resp_v2) {
4821 mcc_resp = (void *)pkt->data;
4822 mcc = mcc_resp->mcc;
4823 n_channels = le32toh(mcc_resp->n_channels);
4824 } else {
4825 mcc_resp_v1 = (void *)pkt->data;
4826 mcc = mcc_resp_v1->mcc;
4827 n_channels = le32toh(mcc_resp_v1->n_channels);
4830 /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4831 if (mcc == 0)
4832 mcc = 0x3030; /* "00" - world */
4834 IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4835 "regulatory domain '%c%c' (%d channels available)\n",
4836 mcc >> 8, mcc & 0xff, n_channels);
4837 #endif
4838 iwm_free_resp(sc, &hcmd);
4840 return 0;
4843 static void
4844 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4846 struct iwm_host_cmd cmd = {
4847 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4848 .len = { sizeof(uint32_t), },
4849 .data = { &backoff, },
4852 if (iwm_send_cmd(sc, &cmd) != 0) {
4853 device_printf(sc->sc_dev,
4854 "failed to change thermal tx backoff\n");
4858 static int
4859 iwm_init_hw(struct iwm_softc *sc)
4861 struct ieee80211com *ic = &sc->sc_ic;
4862 int error, i, ac;
4864 if ((error = iwm_start_hw(sc)) != 0) {
4865 kprintf("iwm_start_hw: failed %d\n", error);
4866 return error;
4869 if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4870 kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
4871 return error;
4875 * should stop and start HW since that INIT
4876 * image just loaded
4878 iwm_stop_device(sc);
4879 sc->sc_ps_disabled = FALSE;
4880 if ((error = iwm_start_hw(sc)) != 0) {
4881 device_printf(sc->sc_dev, "could not initialize hardware\n");
4882 return error;
4885 /* restart, this time with the regular firmware */
4886 error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4887 if (error) {
4888 device_printf(sc->sc_dev, "could not load firmware\n");
4889 goto error;
4892 if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4893 device_printf(sc->sc_dev, "bt init conf failed\n");
4894 goto error;
4897 error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4898 if (error != 0) {
4899 device_printf(sc->sc_dev, "antenna config failed\n");
4900 goto error;
4903 /* Send phy db control command and then phy db calibration */
4904 if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4905 goto error;
4907 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4908 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4909 goto error;
4912 /* Add auxiliary station for scanning */
4913 if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4914 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4915 goto error;
4918 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4920 * The channel used here isn't relevant as it's
4921 * going to be overwritten in the other flows.
4922 * For now use the first channel we have.
4924 if ((error = iwm_mvm_phy_ctxt_add(sc,
4925 &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4926 goto error;
4929 /* Initialize tx backoffs to the minimum. */
4930 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4931 iwm_mvm_tt_tx_backoff(sc, 0);
4933 error = iwm_mvm_power_update_device(sc);
4934 if (error)
4935 goto error;
4937 if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4938 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4939 goto error;
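/*
 * "ZZ" is the wildcard country code: it asks the LAR-capable
 * firmware for whatever regulatory domain it currently considers
 * active, rather than requesting a specific one.
 */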
4942 if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4943 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4944 goto error;
4947 /* Enable Tx queues. */
4948 for (ac = 0; ac < WME_NUM_AC; ac++) {
4949 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4950 iwm_mvm_ac_to_tx_fifo[ac]);
4951 if (error)
4952 goto error;
4955 if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4956 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4957 goto error;
4960 return 0;
4962 error:
4963 iwm_stop_device(sc);
4964 return error;
4967 /* Allow multicast from our BSSID. */
4968 static int
4969 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4971 struct ieee80211_node *ni = vap->iv_bss;
4972 struct iwm_mcast_filter_cmd *cmd;
4973 size_t size;
4974 int error;
4976 size = roundup(sizeof(*cmd), 4);
4977 cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4978 if (cmd == NULL)
4979 return ENOMEM;
4980 cmd->filter_own = 1;
4981 cmd->port_id = 0;
4982 cmd->count = 0;
4983 cmd->pass_all = 1;
4984 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4986 error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4987 IWM_CMD_SYNC, size, cmd);
4988 kfree(cmd, M_DEVBUF);
4990 return (error);
4994 * ifnet interfaces
4997 static void
4998 iwm_init(struct iwm_softc *sc)
5000 int error;
5002 if (sc->sc_flags & IWM_FLAG_HW_INITED) {
5003 return;
5005 sc->sc_generation++;
5006 sc->sc_flags &= ~IWM_FLAG_STOPPED;
5008 if ((error = iwm_init_hw(sc)) != 0) {
5009 kprintf("iwm_init_hw failed %d\n", error);
5010 iwm_stop(sc);
5011 return;
5015 * Ok, firmware loaded and we are jogging
5017 sc->sc_flags |= IWM_FLAG_HW_INITED;
5018 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5021 static int
5022 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
5024 struct iwm_softc *sc;
5025 int error;
5027 sc = ic->ic_softc;
5029 IWM_LOCK(sc);
5030 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
5031 IWM_UNLOCK(sc);
5032 return (ENXIO);
5034 error = mbufq_enqueue(&sc->sc_snd, m);
5035 if (error) {
5036 IWM_UNLOCK(sc);
5037 return (error);
5039 iwm_start(sc);
5040 IWM_UNLOCK(sc);
5041 return (0);
5045 * Dequeue packets from sendq and call send.
5047 static void
5048 iwm_start(struct iwm_softc *sc)
5050 struct ieee80211_node *ni;
5051 struct mbuf *m;
5052 int ac = 0;
5054 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
5055 while (sc->qfullmsk == 0 &&
5056 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
5057 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5058 if (iwm_tx(sc, m, ni, ac) != 0) {
5059 if_inc_counter(ni->ni_vap->iv_ifp,
5060 IFCOUNTER_OERRORS, 1);
5061 ieee80211_free_node(ni);
5062 continue;
5064 sc->sc_tx_timer = 15;
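/*
 * iwm_watchdog() runs once per second (hz callout) and decrements
 * sc_tx_timer, so this arms a roughly 15-second TX timeout.
 */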
5066 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5069 static void
5070 iwm_stop(struct iwm_softc *sc)
5073 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
5074 sc->sc_flags |= IWM_FLAG_STOPPED;
5075 sc->sc_generation++;
5076 iwm_led_blink_stop(sc);
5077 sc->sc_tx_timer = 0;
5078 iwm_stop_device(sc);
5079 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5082 static void
5083 iwm_watchdog(void *arg)
5085 struct iwm_softc *sc = arg;
5087 if (sc->sc_tx_timer > 0) {
5088 if (--sc->sc_tx_timer == 0) {
5089 device_printf(sc->sc_dev, "device timeout\n");
5090 #ifdef IWM_DEBUG
5091 iwm_nic_error(sc);
5092 #endif
5093 iwm_stop(sc);
5094 #if defined(__DragonFly__)
5095 ++sc->sc_ic.ic_oerrors;
5096 #else
5097 counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5098 #endif
5099 return;
5102 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5105 static void
5106 iwm_parent(struct ieee80211com *ic)
5108 struct iwm_softc *sc = ic->ic_softc;
5109 int startall = 0;
5111 IWM_LOCK(sc);
5112 if (ic->ic_nrunning > 0) {
5113 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5114 iwm_init(sc);
5115 startall = 1;
5117 } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5118 iwm_stop(sc);
5119 IWM_UNLOCK(sc);
5120 if (startall)
5121 ieee80211_start_all(ic);
5125 * The interrupt side of things
5129 * error dumping routines are from iwlwifi/mvm/utils.c
5133 * Note: This structure is read from the device with IO accesses,
5134 * and the reading already does the endian conversion. As it is
5135 * read with uint32_t-sized accesses, any members with a different size
5136 * need to be ordered correctly though!
5138 struct iwm_error_event_table {
5139 uint32_t valid; /* (nonzero) valid, (0) log is empty */
5140 uint32_t error_id; /* type of error */
5141 uint32_t trm_hw_status0; /* TRM HW status */
5142 uint32_t trm_hw_status1; /* TRM HW status */
5143 uint32_t blink2; /* branch link */
5144 uint32_t ilink1; /* interrupt link */
5145 uint32_t ilink2; /* interrupt link */
5146 uint32_t data1; /* error-specific data */
5147 uint32_t data2; /* error-specific data */
5148 uint32_t data3; /* error-specific data */
5149 uint32_t bcon_time; /* beacon timer */
5150 uint32_t tsf_low; /* network timestamp function timer */
5151 uint32_t tsf_hi; /* network timestamp function timer */
5152 uint32_t gp1; /* GP1 timer register */
5153 uint32_t gp2; /* GP2 timer register */
5154 uint32_t fw_rev_type; /* firmware revision type */
5155 uint32_t major; /* uCode version major */
5156 uint32_t minor; /* uCode version minor */
5157 uint32_t hw_ver; /* HW Silicon version */
5158 uint32_t brd_ver; /* HW board version */
5159 uint32_t log_pc; /* log program counter */
5160 uint32_t frame_ptr; /* frame pointer */
5161 uint32_t stack_ptr; /* stack pointer */
5162 uint32_t hcmd; /* last host command header */
5163 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
5164 * rxtx_flag */
5165 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
5166 * host_flag */
5167 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
5168 * enc_flag */
5169 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
5170 * time_flag */
5171 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
5172 * wico interrupt */
5173 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
5174 uint32_t wait_event; /* wait event() caller address */
5175 uint32_t l2p_control; /* L2pControlField */
5176 uint32_t l2p_duration; /* L2pDurationField */
5177 uint32_t l2p_mhvalid; /* L2pMhValidBits */
5178 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
5179 uint32_t lmpm_pmg_sel; /* indicates which clocks are turned on
5180 * (LMPM_PMG_SEL) */
5181 uint32_t u_timestamp; /* date and time of the
5182 * compilation */
5183 uint32_t flow_handler; /* FH read/write pointers, RX credit */
5184 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5187 * UMAC error struct - relevant starting from family 8000 chip.
5188 * Note: This structure is read from the device with IO accesses,
5189 * and the reading already does the endian conversion. As it is
5190 * read with u32-sized accesses, any members with a different size
5191 * need to be ordered correctly though!
5193 struct iwm_umac_error_event_table {
5194 uint32_t valid; /* (nonzero) valid, (0) log is empty */
5195 uint32_t error_id; /* type of error */
5196 uint32_t blink1; /* branch link */
5197 uint32_t blink2; /* branch link */
5198 uint32_t ilink1; /* interrupt link */
5199 uint32_t ilink2; /* interrupt link */
5200 uint32_t data1; /* error-specific data */
5201 uint32_t data2; /* error-specific data */
5202 uint32_t data3; /* error-specific data */
5203 uint32_t umac_major;
5204 uint32_t umac_minor;
5205 uint32_t frame_pointer; /* core register 27 */
5206 uint32_t stack_pointer; /* core register 28 */
5207 uint32_t cmd_header; /* latest host cmd sent to UMAC */
5208 uint32_t nic_isr_pref; /* ISR status register */
5209 } __packed;
5211 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
5212 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
5214 #ifdef IWM_DEBUG
5215 struct {
5216 const char *name;
5217 uint8_t num;
5218 } advanced_lookup[] = {
5219 { "NMI_INTERRUPT_WDG", 0x34 },
5220 { "SYSASSERT", 0x35 },
5221 { "UCODE_VERSION_MISMATCH", 0x37 },
5222 { "BAD_COMMAND", 0x38 },
5223 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5224 { "FATAL_ERROR", 0x3D },
5225 { "NMI_TRM_HW_ERR", 0x46 },
5226 { "NMI_INTERRUPT_TRM", 0x4C },
5227 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5228 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5229 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5230 { "NMI_INTERRUPT_HOST", 0x66 },
5231 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
5232 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
5233 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5234 { "ADVANCED_SYSASSERT", 0 },
5237 static const char *
5238 iwm_desc_lookup(uint32_t num)
5240 int i;
5242 for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5243 if (advanced_lookup[i].num == num)
5244 return advanced_lookup[i].name;
5246 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5247 return advanced_lookup[i].name;
5250 static void
5251 iwm_nic_umac_error(struct iwm_softc *sc)
5253 struct iwm_umac_error_event_table table;
5254 uint32_t base;
5256 base = sc->umac_error_event_table;
5258 if (base < 0x800000) {
5259 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5260 base);
5261 return;
5264 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5265 device_printf(sc->sc_dev, "reading errlog failed\n");
5266 return;
5269 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5270 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5271 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5272 sc->sc_flags, table.valid);
5275 device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5276 iwm_desc_lookup(table.error_id));
5277 device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5278 device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5279 device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5280 table.ilink1);
5281 device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5282 table.ilink2);
5283 device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5284 device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5285 device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5286 device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5287 device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5288 device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5289 table.frame_pointer);
5290 device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5291 table.stack_pointer);
5292 device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5293 device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5294 table.nic_isr_pref);
5298 * Support for dumping the error log seemed like a good idea ...
5299 * but it's mostly hex junk and the only sensible thing is the
5300 * hw/ucode revision (which we know anyway). Since it's here,
5301 * I'll just leave it in, just in case e.g. the Intel guys want to
5302 * help us decipher some "ADVANCED_SYSASSERT" later.
5304 static void
5305 iwm_nic_error(struct iwm_softc *sc)
5307 struct iwm_error_event_table table;
5308 uint32_t base;
5310 device_printf(sc->sc_dev, "dumping device error log\n");
5311 base = sc->error_event_table;
5312 if (base < 0x800000) {
5313 device_printf(sc->sc_dev,
5314 "Invalid error log pointer 0x%08x\n", base);
5315 return;
5318 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5319 device_printf(sc->sc_dev, "reading errlog failed\n");
5320 return;
5323 if (!table.valid) {
5324 device_printf(sc->sc_dev, "errlog not found, skipping\n");
5325 return;
5328 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5329 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5330 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5331 sc->sc_flags, table.valid);
5334 device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5335 iwm_desc_lookup(table.error_id));
5336 device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5337 table.trm_hw_status0);
5338 device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5339 table.trm_hw_status1);
5340 device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5341 device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5342 device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5343 device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5344 device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5345 device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5346 device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5347 device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5348 device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5349 device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5350 device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5351 device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5352 table.fw_rev_type);
5353 device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5354 device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5355 device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5356 device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5357 device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5358 device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5359 device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5360 device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5361 device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5362 device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5363 device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5364 device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5365 device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5366 device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5367 device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5368 device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5369 device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5370 device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5371 device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5373 if (sc->umac_error_event_table)
5374 iwm_nic_umac_error(sc);
5376 #endif
5378 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
5381 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5382 * Basic structure from if_iwn
5384 static void
5385 iwm_notif_intr(struct iwm_softc *sc)
5387 struct ieee80211com *ic = &sc->sc_ic;
5388 uint16_t hw;
5390 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5391 BUS_DMASYNC_POSTREAD);
5393 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5396 * Process responses
5398 while (sc->rxq.cur != hw) {
5399 struct iwm_rx_ring *ring = &sc->rxq;
5400 struct iwm_rx_data *data = &ring->data[ring->cur];
5401 struct iwm_rx_packet *pkt;
5402 struct iwm_cmd_response *cresp;
5403 int qid, idx, code;
5405 bus_dmamap_sync(ring->data_dmat, data->map,
5406 BUS_DMASYNC_POSTREAD);
5407 pkt = mtod(data->m, struct iwm_rx_packet *);
5409 qid = pkt->hdr.qid & ~0x80;
5410 idx = pkt->hdr.idx;
5412 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5413 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5414 "rx packet qid=%d idx=%d type=%x %d %d\n",
5415 pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);
5418 * randomly get these from the firmware, no idea why.
5419 * they at least seem harmless, so just ignore them for now
5421 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5422 || pkt->len_n_flags == htole32(0x55550000))) {
5423 ADVANCE_RXQ(sc);
5424 continue;
5427 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5429 switch (code) {
5430 case IWM_REPLY_RX_PHY_CMD:
5431 iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5432 break;
5434 case IWM_REPLY_RX_MPDU_CMD:
5435 iwm_mvm_rx_rx_mpdu(sc, data->m);
5436 break;
5438 case IWM_TX_CMD:
5439 iwm_mvm_rx_tx_cmd(sc, pkt);
5440 break;
5442 case IWM_MISSED_BEACONS_NOTIFICATION: {
5443 struct iwm_missed_beacons_notif *resp;
5444 int missed;
5446 /* XXX look at mac_id to determine interface ID */
5447 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5449 resp = (void *)pkt->data;
5450 missed = le32toh(resp->consec_missed_beacons);
5452 IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5453 "%s: MISSED_BEACON: mac_id=%d, "
5454 "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5455 "num_rx=%d\n",
5456 __func__,
5457 le32toh(resp->mac_id),
5458 le32toh(resp->consec_missed_beacons_since_last_rx),
5459 le32toh(resp->consec_missed_beacons),
5460 le32toh(resp->num_expected_beacons),
5461 le32toh(resp->num_recvd_beacons));
5463 /* Be paranoid */
5464 if (vap == NULL)
5465 break;
5467 /* XXX no net80211 locking? */
5468 if (vap->iv_state == IEEE80211_S_RUN &&
5469 (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5470 if (missed > vap->iv_bmissthreshold) {
5471 /* XXX bad locking; turn into task */
5472 IWM_UNLOCK(sc);
5473 ieee80211_beacon_miss(ic);
5474 IWM_LOCK(sc);
5478 break; }
5480 case IWM_MFUART_LOAD_NOTIFICATION:
5481 break;
5483 case IWM_MVM_ALIVE:
5484 break;
5486 case IWM_CALIB_RES_NOTIF_PHY_DB:
5487 break;
5489 case IWM_STATISTICS_NOTIFICATION: {
5490 struct iwm_notif_statistics *stats;
5491 stats = (void *)pkt->data;
5492 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5493 sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5494 break;
5497 case IWM_NVM_ACCESS_CMD:
5498 case IWM_MCC_UPDATE_CMD:
5499 if (sc->sc_wantresp == ((qid << 16) | idx)) {
5500 memcpy(sc->sc_cmd_resp,
5501 pkt, sizeof(sc->sc_cmd_resp));
5503 break;
5505 case IWM_MCC_CHUB_UPDATE_CMD: {
5506 struct iwm_mcc_chub_notif *notif;
5507 notif = (void *)pkt->data;
5509 sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5510 sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5511 sc->sc_fw_mcc[2] = '\0';
5512 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5513 "fw source %d sent CC '%s'\n",
5514 notif->source_id, sc->sc_fw_mcc);
5515 break;
5518 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5519 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5520 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5521 struct iwm_dts_measurement_notif_v1 *notif;
5523 if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5524 device_printf(sc->sc_dev,
5525 "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5526 break;
5528 notif = (void *)pkt->data;
5529 IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5530 "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5531 notif->temp);
5532 break;
5535 case IWM_PHY_CONFIGURATION_CMD:
5536 case IWM_TX_ANT_CONFIGURATION_CMD:
5537 case IWM_ADD_STA:
5538 case IWM_MAC_CONTEXT_CMD:
5539 case IWM_REPLY_SF_CFG_CMD:
5540 case IWM_POWER_TABLE_CMD:
5541 case IWM_PHY_CONTEXT_CMD:
5542 case IWM_BINDING_CONTEXT_CMD:
5543 case IWM_TIME_EVENT_CMD:
5544 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5545 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5546 case IWM_SCAN_ABORT_UMAC:
5547 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5548 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5549 case IWM_REPLY_BEACON_FILTERING_CMD:
5550 case IWM_MAC_PM_POWER_TABLE:
5551 case IWM_TIME_QUOTA_CMD:
5552 case IWM_REMOVE_STA:
5553 case IWM_TXPATH_FLUSH:
5554 case IWM_LQ_CMD:
5555 case IWM_FW_PAGING_BLOCK_CMD:
5556 case IWM_BT_CONFIG:
5557 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5558 cresp = (void *)pkt->data;
5559 if (sc->sc_wantresp == ((qid << 16) | idx)) {
5560 memcpy(sc->sc_cmd_resp,
5561 pkt, sizeof(*pkt)+sizeof(*cresp));
5563 break;
5565 /* ignore */
5566 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5567 break;
5569 case IWM_INIT_COMPLETE_NOTIF:
5570 break;
5572 case IWM_SCAN_OFFLOAD_COMPLETE:
5573 iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5574 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5575 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5576 ieee80211_runtask(ic, &sc->sc_es_task);
5578 break;
5580 case IWM_SCAN_ITERATION_COMPLETE: {
5581 struct iwm_lmac_scan_complete_notif *notif;
5582 notif = (void *)pkt->data;
5583 break;
5586 case IWM_SCAN_COMPLETE_UMAC:
5587 iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5588 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5589 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5590 ieee80211_runtask(ic, &sc->sc_es_task);
5592 break;
5594 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5595 struct iwm_umac_scan_iter_complete_notif *notif;
5596 notif = (void *)pkt->data;
5598 IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5599 "complete, status=0x%x, %d channels scanned\n",
5600 notif->status, notif->scanned_channels);
5601 break;
5604 case IWM_REPLY_ERROR: {
5605 struct iwm_error_resp *resp;
5606 resp = (void *)pkt->data;
5608 device_printf(sc->sc_dev,
5609 "firmware error 0x%x, cmd 0x%x\n",
5610 le32toh(resp->error_type),
5611 resp->cmd_id);
5612 break;
5615 case IWM_TIME_EVENT_NOTIFICATION: {
5616 struct iwm_time_event_notif *notif;
5617 notif = (void *)pkt->data;
5619 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5620 "TE notif status = 0x%x action = 0x%x\n",
5621 notif->status, notif->action);
5622 break;
5625 case IWM_MCAST_FILTER_CMD:
5626 break;
5628 case IWM_SCD_QUEUE_CFG: {
5629 struct iwm_scd_txq_cfg_rsp *rsp;
5630 rsp = (void *)pkt->data;
5632 IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5633 "queue cfg token=0x%x sta_id=%d "
5634 "tid=%d scd_queue=%d\n",
5635 rsp->token, rsp->sta_id, rsp->tid,
5636 rsp->scd_queue);
5637 break;
5640 default:
5641 device_printf(sc->sc_dev,
5642 "frame %d/%d %x UNHANDLED (this should "
5643 "not happen)\n", qid, idx,
5644 pkt->len_n_flags);
5645 break;
5649 * Why test bit 0x80? The Linux driver:
5651 * There is one exception: uCode sets bit 15 when it
5652 * originates the response/notification, i.e. when the
5653 * response/notification is not a direct response to a
5654 * command sent by the driver. For example, uCode issues
5655 * IWM_REPLY_RX when it sends a received frame to the driver;
5656 * it is not a direct response to any driver command.
5658 * Ok, so since when is 7 == 15? Well, the Linux driver
5659 * uses a slightly different format for pkt->hdr, and "qid"
5660 * is actually the upper byte of a two-byte field.
5662 if (!(pkt->hdr.qid & (1 << 7))) {
5663 iwm_cmd_done(sc, pkt);
5666 ADVANCE_RXQ(sc);
5670 * Tell the firmware what we have processed.
5671 * Seems like the hardware gets upset unless we align
5672 * the write by 8??
5674 hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5675 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
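/*
 * For what it's worth, Linux iwlwifi does the same thing: it rounds
 * its RX queue write pointer down to a multiple of 8 before telling
 * the device, so the alignment above is a hardware expectation, not
 * superstition.
 */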
5678 static void
5679 iwm_intr(void *arg)
5681 struct iwm_softc *sc = arg;
5682 int handled = 0;
5683 int r1, r2, rv = 0;
5684 int isperiodic = 0;
5686 #if defined(__DragonFly__)
5687 if (sc->sc_mem == NULL) {
5688 kprintf("iwm_intr: detached\n");
5689 return;
5691 #endif
5692 IWM_LOCK(sc);
5693 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5695 if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5696 uint32_t *ict = sc->ict_dma.vaddr;
5697 int tmp;
5699 tmp = htole32(ict[sc->ict_cur]);
5700 if (!tmp)
5701 goto out_ena;
5704 * ok, there was something. keep plowing until we have all.
5706 r1 = r2 = 0;
5707 while (tmp) {
5708 r1 |= tmp;
5709 ict[sc->ict_cur] = 0;
5710 sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5711 tmp = htole32(ict[sc->ict_cur]);
5714 /* this is where the fun begins. don't ask */
5715 if (r1 == 0xffffffff)
5716 r1 = 0;
5718 /* i am not expected to understand this */
5719 if (r1 & 0xc0000)
5720 r1 |= 0x8000;
5721 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
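/*
 * Decoded from the Linux iwlwifi workaround comments: interrupt
 * coalescing can clear the Rx bit (bit 15 here, bit 31 after the
 * expansion above), but bits 18/19 stay set when that happens, so
 * the 0xc0000 test re-derives the real Rx state; the final line then
 * expands the ICT's compressed byte layout into the CSR_INT bit
 * format.
 */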
5722 } else {
5723 r1 = IWM_READ(sc, IWM_CSR_INT);
5724 /* "hardware gone" (where, fishing?) */
5725 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5726 goto out;
5727 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5729 if (r1 == 0 && r2 == 0) {
5730 goto out_ena;
5733 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5735 /* Safely ignore these bits for debug checks below */
5736 r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5738 if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5739 int i;
5740 struct ieee80211com *ic = &sc->sc_ic;
5741 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5743 #ifdef IWM_DEBUG
5744 iwm_nic_error(sc);
5745 #endif
5746 /* Dump driver status (TX and RX rings) while we're here. */
5747 device_printf(sc->sc_dev, "driver status:\n");
5748 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5749 struct iwm_tx_ring *ring = &sc->txq[i];
5750 device_printf(sc->sc_dev,
5751 " tx ring %2d: qid=%-2d cur=%-3d "
5752 "queued=%-3d\n",
5753 i, ring->qid, ring->cur, ring->queued);
5755 device_printf(sc->sc_dev,
5756 " rx ring: cur=%d\n", sc->rxq.cur);
5757 device_printf(sc->sc_dev,
5758 " 802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5760 /* Don't stop the device; just do a VAP restart */
5761 IWM_UNLOCK(sc);
5763 if (vap == NULL) {
5764 kprintf("%s: null vap\n", __func__);
5765 return;
5768 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5769 "restarting\n", __func__, vap->iv_state);
5771 ieee80211_restart_all(ic);
5772 return;
5775 if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5776 handled |= IWM_CSR_INT_BIT_HW_ERR;
5777 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5778 iwm_stop(sc);
5779 rv = 1;
5780 goto out;
5783 /* firmware chunk loaded */
5784 if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5785 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5786 handled |= IWM_CSR_INT_BIT_FH_TX;
5787 sc->sc_fw_chunk_done = 1;
5788 wakeup(&sc->sc_fw);
5791 if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5792 handled |= IWM_CSR_INT_BIT_RF_KILL;
5793 if (iwm_check_rfkill(sc)) {
5794 device_printf(sc->sc_dev,
5795 "%s: rfkill switch, disabling interface\n",
5796 __func__);
5797 iwm_stop(sc);
5802 * The Linux driver uses periodic interrupts to avoid races.
5803 * We cargo-cult like it's going out of fashion.
5805 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5806 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5807 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5808 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5809 IWM_WRITE_1(sc,
5810 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5811 isperiodic = 1;
5814 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5815 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5816 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5818 iwm_notif_intr(sc);
5820 /* enable periodic interrupt, see above */
5821 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5822 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5823 IWM_CSR_INT_PERIODIC_ENA);
5826 if (__predict_false(r1 & ~handled))
5827 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5828 "%s: unhandled interrupts: %x\n", __func__, r1);
5829 rv = 1;
5831 out_ena:
5832 iwm_restore_interrupts(sc);
5833 out:
5834 IWM_UNLOCK(sc);
5835 return;
5836 }
5838 /*
5839 * Autoconf glue-sniffing
5840 */
5841 #define PCI_VENDOR_INTEL 0x8086
5842 #define PCI_PRODUCT_INTEL_WL_3160_1 0x08b3
5843 #define PCI_PRODUCT_INTEL_WL_3160_2 0x08b4
5844 #define PCI_PRODUCT_INTEL_WL_3165_1 0x3165
5845 #define PCI_PRODUCT_INTEL_WL_3165_2 0x3166
5846 #define PCI_PRODUCT_INTEL_WL_7260_1 0x08b1
5847 #define PCI_PRODUCT_INTEL_WL_7260_2 0x08b2
5848 #define PCI_PRODUCT_INTEL_WL_7265_1 0x095a
5849 #define PCI_PRODUCT_INTEL_WL_7265_2 0x095b
5850 #define PCI_PRODUCT_INTEL_WL_8260_1 0x24f3
5851 #define PCI_PRODUCT_INTEL_WL_8260_2 0x24f4
5853 static const struct iwm_devices {
5854 uint16_t device;
5855 const struct iwm_cfg *cfg;
5856 } iwm_devices[] = {
5857 { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5858 { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5859 { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5860 { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5861 { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5862 { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5863 { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5864 { PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5865 { PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5866 { PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5867 };
5869 static int
5870 iwm_probe(device_t dev)
5871 {
5872 int i;
5874 for (i = 0; i < nitems(iwm_devices); i++) {
5875 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5876 pci_get_device(dev) == iwm_devices[i].device) {
5877 device_set_desc(dev, iwm_devices[i].cfg->name);
5878 return (BUS_PROBE_DEFAULT);
5879 }
5880 }
5882 return (ENXIO);
5883 }
5885 static int
5886 iwm_dev_check(device_t dev)
5887 {
5888 struct iwm_softc *sc;
5889 uint16_t devid;
5890 int i;
5892 sc = device_get_softc(dev);
5894 devid = pci_get_device(dev);
5895 for (i = 0; i < NELEM(iwm_devices); i++) {
5896 if (iwm_devices[i].device == devid) {
5897 sc->cfg = iwm_devices[i].cfg;
5898 return (0);
5899 }
5900 }
5901 device_printf(dev, "unknown adapter type\n");
5902 return ENXIO;
5903 }
5905 /* PCI registers */
5906 #define PCI_CFG_RETRY_TIMEOUT 0x041
5908 static int
5909 iwm_pci_attach(device_t dev)
5910 {
5911 struct iwm_softc *sc;
5912 int count, error, rid;
5913 uint16_t reg;
5914 #if defined(__DragonFly__)
5915 int irq_flags;
5916 #endif
5918 sc = device_get_softc(dev);
5920 /* We disable the RETRY_TIMEOUT register (0x41) to keep
5921 * PCI Tx retries from interfering with C3 CPU state */
5922 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5924 /* Enable bus-mastering and hardware bug workaround. */
5925 pci_enable_busmaster(dev);
5926 reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5927 /* If not using MSI, clear any latched INTx state (hardware bug workaround). */
5928 if (reg & PCIM_STATUS_INTxSTATE) {
5929 reg &= ~PCIM_STATUS_INTxSTATE;
5930 }
5931 pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5933 rid = PCIR_BAR(0);
5934 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5935 RF_ACTIVE);
5936 if (sc->sc_mem == NULL) {
5937 device_printf(sc->sc_dev, "can't map mem space\n");
5938 return (ENXIO);
5939 }
5940 sc->sc_st = rman_get_bustag(sc->sc_mem);
5941 sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5943 /* Install interrupt handler. */
5944 count = 1;
5945 rid = 0;
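/*
 * Prefer MSI when available: on FreeBSD a successful pci_alloc_msi()
 * moves the IRQ to rid 1, while rid 0 stays the shareable legacy INTx
 * line; DragonFly's pci_alloc_1intr() makes the equivalent choice
 * based on the iwm_msi_enable setting.
 */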
5946 #if defined(__DragonFly__)
5947 pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
5948 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
5949 #else
5950 if (pci_alloc_msi(dev, &count) == 0)
5951 rid = 1;
5952 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5953 (rid != 0 ? 0 : RF_SHAREABLE));
5954 #endif
5955 if (sc->sc_irq == NULL) {
5956 device_printf(dev, "can't map interrupt\n");
5957 return (ENXIO);
5958 }
5959 #if defined(__DragonFly__)
5960 error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
5961 iwm_intr, sc, &sc->sc_ih,
5962 &wlan_global_serializer);
5963 #else
5964 error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5965 NULL, iwm_intr, sc, &sc->sc_ih);
5966 #endif
5967 if (sc->sc_ih == NULL) {
5968 device_printf(dev, "can't establish interrupt\n");
5969 #if defined(__DragonFly__)
5970 pci_release_msi(dev);
5971 #endif
5972 return (ENXIO);
5973 }
5974 sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5976 return (0);
5977 }
5979 static void
5980 iwm_pci_detach(device_t dev)
5981 {
5982 struct iwm_softc *sc = device_get_softc(dev);
5984 if (sc->sc_irq != NULL) {
5985 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5986 bus_release_resource(dev, SYS_RES_IRQ,
5987 rman_get_rid(sc->sc_irq), sc->sc_irq);
5988 pci_release_msi(dev);
5989 #if defined(__DragonFly__)
5990 sc->sc_irq = NULL;
5991 #endif
5992 }
5993 if (sc->sc_mem != NULL) {
5994 bus_release_resource(dev, SYS_RES_MEMORY,
5995 rman_get_rid(sc->sc_mem), sc->sc_mem);
5996 #if defined(__DragonFly__)
5997 sc->sc_mem = NULL;
5998 #endif
5999 }
6000 }
6004 static int
6005 iwm_attach(device_t dev)
6006 {
6007 struct iwm_softc *sc = device_get_softc(dev);
6008 struct ieee80211com *ic = &sc->sc_ic;
6009 int error;
6010 int txq_i, i;
6012 sc->sc_dev = dev;
6013 sc->sc_attached = 1;
6014 IWM_LOCK_INIT(sc);
6015 mbufq_init(&sc->sc_snd, ifqmaxlen);
6016 #if defined(__DragonFly__)
6017 callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
6018 #else
6019 callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
6020 #endif
6021 callout_init(&sc->sc_led_blink_to);
6022 TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
6024 sc->sc_notif_wait = iwm_notification_wait_init(sc);
6025 if (sc->sc_notif_wait == NULL) {
6026 device_printf(dev, "failed to init notification wait struct\n");
6027 goto fail;
6028 }
6030 /* Init phy db */
6031 sc->sc_phy_db = iwm_phy_db_init(sc);
6032 if (!sc->sc_phy_db) {
6033 device_printf(dev, "Cannot init phy_db\n");
6034 goto fail;
6035 }
6037 /* Set EBS as successful as long as not stated otherwise by the FW. */
6038 sc->last_ebs_successful = TRUE;
6040 /* PCI attach */
6041 error = iwm_pci_attach(dev);
6042 if (error != 0)
6043 goto fail;
6045 sc->sc_wantresp = -1;
6047 /* Check device type */
6048 error = iwm_dev_check(dev);
6049 if (error != 0)
6050 goto fail;
6052 sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6053 /*
6054 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
6055 * changed, and now the revision step also includes bits 0-1 (no more
6056 * "dash" value). To keep hw_rev backwards compatible - we'll store it
6057 * in the old format.
6058 */
6059 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
6060 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
6061 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
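/*
 * I.e. the two low "dash" bits of the raw value become the step field
 * (bits 2-3) of the stored value; e.g. a raw hw_rev of 0x42 would be
 * stored as 0x48.
 */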
6063 if (iwm_prepare_card_hw(sc) != 0) {
6064 device_printf(dev, "could not initialize hardware\n");
6065 goto fail;
6066 }
6068 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
6069 int ret;
6070 uint32_t hw_step;
6072 /*
6073 * In order to recognize C step the driver should read the
6074 * chip version id located at the AUX bus MISC address.
6075 */
6076 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6077 IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6078 DELAY(2);
6080 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6081 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6082 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6083 25000);
6084 if (!ret) {
6085 device_printf(sc->sc_dev,
6086 "Failed to wake up the nic\n");
6087 goto fail;
6088 }
6090 if (iwm_nic_lock(sc)) {
6091 hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6092 hw_step |= IWM_ENABLE_WFPM;
6093 iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6094 hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6095 hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6096 if (hw_step == 0x3)
6097 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6098 (IWM_SILICON_C_STEP << 2);
6099 iwm_nic_unlock(sc);
6100 } else {
6101 device_printf(sc->sc_dev, "Failed to lock the nic\n");
6102 goto fail;
6103 }
6104 }
6106 /* special-case 7265D, it has the same PCI IDs. */
6107 if (sc->cfg == &iwm7265_cfg &&
6108 (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6109 sc->cfg = &iwm7265d_cfg;
6110 }
6112 /* Allocate DMA memory for firmware transfers. */
6113 if ((error = iwm_alloc_fwmem(sc)) != 0) {
6114 device_printf(dev, "could not allocate memory for firmware\n");
6115 goto fail;
6116 }
6118 /* Allocate "Keep Warm" page. */
6119 if ((error = iwm_alloc_kw(sc)) != 0) {
6120 device_printf(dev, "could not allocate keep warm page\n");
6121 goto fail;
6122 }
6124 /* We use ICT interrupts */
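/*
 * (The ICT is a DMA'd interrupt-cause table the device writes into,
 * letting the ISR read causes from host memory instead of doing
 * register reads; see the ICT branch near the top of iwm_intr().)
 */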
6125 if ((error = iwm_alloc_ict(sc)) != 0) {
6126 device_printf(dev, "could not allocate ICT table\n");
6127 goto fail;
6128 }
6130 /* Allocate TX scheduler "rings". */
6131 if ((error = iwm_alloc_sched(sc)) != 0) {
6132 device_printf(dev, "could not allocate TX scheduler rings\n");
6133 goto fail;
6134 }
6136 /* Allocate TX rings */
6137 for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6138 if ((error = iwm_alloc_tx_ring(sc,
6139 &sc->txq[txq_i], txq_i)) != 0) {
6140 device_printf(dev,
6141 "could not allocate TX ring %d\n",
6142 txq_i);
6143 goto fail;
6144 }
6145 }
6147 /* Allocate RX ring. */
6148 if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6149 device_printf(dev, "could not allocate RX ring\n");
6150 goto fail;
6151 }
6153 /* Clear pending interrupts. */
6154 IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6156 ic->ic_softc = sc;
6157 ic->ic_name = device_get_nameunit(sc->sc_dev);
6158 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
6159 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
6161 /* Set device capabilities. */
6162 ic->ic_caps =
6163 IEEE80211_C_STA |
6164 IEEE80211_C_WPA | /* WPA/RSN */
6165 IEEE80211_C_WME |
6166 IEEE80211_C_PMGT |
6167 IEEE80211_C_SHSLOT | /* short slot time supported */
6168 IEEE80211_C_SHPREAMBLE /* short preamble supported */
6169 // IEEE80211_C_BGSCAN /* capable of bg scanning */
6170 ;
6171 for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6172 sc->sc_phyctxt[i].id = i;
6173 sc->sc_phyctxt[i].color = 0;
6174 sc->sc_phyctxt[i].ref = 0;
6175 sc->sc_phyctxt[i].channel = NULL;
6176 }
6178 /* Default noise floor */
6179 sc->sc_noise = -96;
6181 /* Max RSSI */
6182 sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
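/*
 * -96 dBm is a conservative default noise floor until real statistics
 * arrive; sc_max_rssi spans the IWM_MIN_DBM..IWM_MAX_DBM window that
 * reported signal strengths are scaled against.
 */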
6184 sc->sc_preinit_hook.ich_func = iwm_preinit;
6185 sc->sc_preinit_hook.ich_arg = sc;
6186 sc->sc_preinit_hook.ich_desc = "iwm";
6187 if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6188 device_printf(dev, "config_intrhook_establish failed\n");
6189 goto fail;
6190 }
6192 #ifdef IWM_DEBUG
6193 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6194 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6195 CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6196 #endif
6198 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6199 "<-%s\n", __func__);
6201 return 0;
6203 /* Free allocated memory if something failed during attachment. */
6204 fail:
6205 iwm_detach_local(sc, 0);
6207 return ENXIO;
6208 }
6210 static int
6211 iwm_is_valid_ether_addr(uint8_t *addr)
6212 {
6213 char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
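/* A valid address must be unicast (low bit of first octet clear) and non-zero. */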
6215 if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6216 return (FALSE);
6218 return (TRUE);
6219 }
6221 static int
6222 iwm_update_edca(struct ieee80211com *ic)
6223 {
6224 struct iwm_softc *sc = ic->ic_softc;
6226 device_printf(sc->sc_dev, "%s: called\n", __func__);
6227 return (0);
6228 }
6230 static void
6231 iwm_preinit(void *arg)
6232 {
6233 struct iwm_softc *sc = arg;
6234 device_t dev = sc->sc_dev;
6235 struct ieee80211com *ic = &sc->sc_ic;
6236 int error;
6238 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6239 "->%s\n", __func__);
6241 IWM_LOCK(sc);
6242 if ((error = iwm_start_hw(sc)) != 0) {
6243 device_printf(dev, "could not initialize hardware\n");
6244 IWM_UNLOCK(sc);
6245 goto fail;
6246 }
6248 error = iwm_run_init_mvm_ucode(sc, 1);
6249 iwm_stop_device(sc);
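/*
 * The init firmware run above is only needed to read the NVM and
 * calibration data; the device stays powered down until the
 * interface is brought up.
 */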
6250 if (error) {
6251 IWM_UNLOCK(sc);
6252 goto fail;
6253 }
6254 device_printf(dev,
6255 "hw rev 0x%x, fw ver %s, address %s\n",
6256 sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6257 sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6259 /* not all hardware can do 5GHz band */
6260 if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6261 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6262 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6263 IWM_UNLOCK(sc);
6265 iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6266 ic->ic_channels);
6268 /*
6269 * At this point we've committed - if we fail to do setup,
6270 * we now also have to tear down the net80211 state.
6271 */
6272 ieee80211_ifattach(ic);
6273 ic->ic_vap_create = iwm_vap_create;
6274 ic->ic_vap_delete = iwm_vap_delete;
6275 ic->ic_raw_xmit = iwm_raw_xmit;
6276 ic->ic_node_alloc = iwm_node_alloc;
6277 ic->ic_scan_start = iwm_scan_start;
6278 ic->ic_scan_end = iwm_scan_end;
6279 ic->ic_update_mcast = iwm_update_mcast;
6280 ic->ic_getradiocaps = iwm_init_channel_map;
6281 ic->ic_set_channel = iwm_set_channel;
6282 ic->ic_scan_curchan = iwm_scan_curchan;
6283 ic->ic_scan_mindwell = iwm_scan_mindwell;
6284 ic->ic_wme.wme_update = iwm_update_edca;
6285 ic->ic_parent = iwm_parent;
6286 ic->ic_transmit = iwm_transmit;
6287 iwm_radiotap_attach(sc);
6288 if (bootverbose)
6289 ieee80211_announce(ic);
6291 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6292 "<-%s\n", __func__);
6293 config_intrhook_disestablish(&sc->sc_preinit_hook);
6295 return;
6296 fail:
6297 config_intrhook_disestablish(&sc->sc_preinit_hook);
6298 iwm_detach_local(sc, 0);
6299 }
6301 /*
6302 * Attach the interface to 802.11 radiotap.
6303 */
6304 static void
6305 iwm_radiotap_attach(struct iwm_softc *sc)
6306 {
6307 struct ieee80211com *ic = &sc->sc_ic;
6309 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6310 "->%s begin\n", __func__);
6311 ieee80211_radiotap_attach(ic,
6312 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6313 IWM_TX_RADIOTAP_PRESENT,
6314 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6315 IWM_RX_RADIOTAP_PRESENT);
6316 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6317 "->%s end\n", __func__);
6320 static struct ieee80211vap *
6321 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6322 enum ieee80211_opmode opmode, int flags,
6323 const uint8_t bssid[IEEE80211_ADDR_LEN],
6324 const uint8_t mac[IEEE80211_ADDR_LEN])
6325 {
6326 struct iwm_vap *ivp;
6327 struct ieee80211vap *vap;
6329 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
6330 return NULL;
6331 ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
6332 vap = &ivp->iv_vap;
6333 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6334 vap->iv_bmissthreshold = 10; /* override default */
6335 /* Override with driver methods. */
6336 ivp->iv_newstate = vap->iv_newstate;
6337 vap->iv_newstate = iwm_newstate;
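/*
 * The usual net80211 override pattern: keep the stock handler so that
 * iwm_newstate() can chain back to it once driver work is done.
 */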
6339 ivp->id = IWM_DEFAULT_MACID;
6340 ivp->color = IWM_DEFAULT_COLOR;
6342 ieee80211_ratectl_init(vap);
6343 /* Complete setup. */
6344 ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6345 mac);
6346 ic->ic_opmode = opmode;
6348 return vap;
6349 }
6351 static void
6352 iwm_vap_delete(struct ieee80211vap *vap)
6353 {
6354 struct iwm_vap *ivp = IWM_VAP(vap);
6356 ieee80211_ratectl_deinit(vap);
6357 ieee80211_vap_detach(vap);
6358 kfree(ivp, M_80211_VAP);
6359 }
6361 static void
6362 iwm_scan_start(struct ieee80211com *ic)
6363 {
6364 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6365 struct iwm_softc *sc = ic->ic_softc;
6366 int error;
6368 IWM_LOCK(sc);
6369 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6370 /* This should not be possible */
6371 device_printf(sc->sc_dev,
6372 "%s: Previous scan not completed yet\n", __func__);
6374 if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6375 error = iwm_mvm_umac_scan(sc);
6376 else
6377 error = iwm_mvm_lmac_scan(sc);
6378 if (error != 0) {
6379 device_printf(sc->sc_dev, "could not initiate scan\n");
6380 IWM_UNLOCK(sc);
6381 ieee80211_cancel_scan(vap);
6382 } else {
6383 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6384 iwm_led_blink_start(sc);
6385 IWM_UNLOCK(sc);
6386 }
6387 }
6389 static void
6390 iwm_scan_end(struct ieee80211com *ic)
6391 {
6392 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6393 struct iwm_softc *sc = ic->ic_softc;
6395 IWM_LOCK(sc);
6396 iwm_led_blink_stop(sc);
6397 if (vap->iv_state == IEEE80211_S_RUN)
6398 iwm_mvm_led_enable(sc);
6399 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6400 /*
6401 * Removing IWM_FLAG_SCAN_RUNNING now is fine because
6402 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6403 * taskqueue.
6404 */
6405 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6406 iwm_mvm_scan_stop_wait(sc);
6407 }
6408 IWM_UNLOCK(sc);
6410 /*
6411 * Make sure we don't race if sc_es_task is still enqueued here.
6412 * This is to make sure that it won't call ieee80211_scan_done
6413 * when we have already started the next scan.
6414 */
6415 taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6416 }
6418 static void
6419 iwm_update_mcast(struct ieee80211com *ic)
6420 {
6421 }
6423 static void
6424 iwm_set_channel(struct ieee80211com *ic)
6425 {
6426 }
6428 static void
6429 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6430 {
6431 }
6433 static void
6434 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6435 {
6436 return;
6437 }
6439 void
6440 iwm_init_task(void *arg1)
6441 {
6442 struct iwm_softc *sc = arg1;
6444 IWM_LOCK(sc);
6445 while (sc->sc_flags & IWM_FLAG_BUSY) {
6446 #if defined(__DragonFly__)
6447 lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6448 #else
6449 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6450 #endif
6451 }
6452 sc->sc_flags |= IWM_FLAG_BUSY;
6453 iwm_stop(sc);
6454 if (sc->sc_ic.ic_nrunning > 0)
6455 iwm_init(sc);
6456 sc->sc_flags &= ~IWM_FLAG_BUSY;
6457 wakeup(&sc->sc_flags);
6458 IWM_UNLOCK(sc);
6459 }
6461 static int
6462 iwm_resume(device_t dev)
6463 {
6464 struct iwm_softc *sc = device_get_softc(dev);
6465 int do_reinit = 0;
6467 /*
6468 * We disable the RETRY_TIMEOUT register (0x41) to keep
6469 * PCI Tx retries from interfering with C3 CPU state.
6470 */
6471 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6472 iwm_init_task(device_get_softc(dev));
6474 IWM_LOCK(sc);
6475 if (sc->sc_flags & IWM_FLAG_SCANNING) {
6476 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6477 do_reinit = 1;
6478 }
6479 IWM_UNLOCK(sc);
6481 if (do_reinit)
6482 ieee80211_resume_all(&sc->sc_ic);
6484 return 0;
6485 }
6487 static int
6488 iwm_suspend(device_t dev)
6489 {
6490 int do_stop = 0;
6491 struct iwm_softc *sc = device_get_softc(dev);
6493 do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6495 ieee80211_suspend_all(&sc->sc_ic);
6497 if (do_stop) {
6498 IWM_LOCK(sc);
6499 iwm_stop(sc);
6500 sc->sc_flags |= IWM_FLAG_SCANNING;
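/*
 * IWM_FLAG_SCANNING doubles here as a "was running before suspend"
 * marker; iwm_resume() checks and clears it to decide whether to
 * restart via ieee80211_resume_all().
 */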
6501 IWM_UNLOCK(sc);
6502 }
6504 return (0);
6505 }
6507 static int
6508 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6509 {
6510 struct iwm_fw_info *fw = &sc->sc_fw;
6511 device_t dev = sc->sc_dev;
6512 int i;
6514 if (!sc->sc_attached)
6515 return 0;
6516 sc->sc_attached = 0;
6517 if (do_net80211) {
6518 ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6519 }
6520 callout_drain(&sc->sc_led_blink_to);
6521 callout_drain(&sc->sc_watchdog_to);
6522 iwm_stop_device(sc);
6523 if (do_net80211) {
6524 ieee80211_ifdetach(&sc->sc_ic);
6525 }
6527 iwm_phy_db_free(sc->sc_phy_db);
6528 sc->sc_phy_db = NULL;
6530 iwm_free_nvm_data(sc->nvm_data);
6532 /* Free descriptor rings */
6533 iwm_free_rx_ring(sc, &sc->rxq);
6534 for (i = 0; i < nitems(sc->txq); i++)
6535 iwm_free_tx_ring(sc, &sc->txq[i]);
6537 /* Free firmware */
6538 if (fw->fw_fp != NULL)
6539 iwm_fw_info_free(fw);
6541 /* Free scheduler */
6542 iwm_dma_contig_free(&sc->sched_dma);
6543 iwm_dma_contig_free(&sc->ict_dma);
6544 iwm_dma_contig_free(&sc->kw_dma);
6545 iwm_dma_contig_free(&sc->fw_dma);
6547 iwm_free_fw_paging(sc);
6549 /* Finished with the hardware - detach things */
6550 iwm_pci_detach(dev);
6552 if (sc->sc_notif_wait != NULL) {
6553 iwm_notification_wait_free(sc->sc_notif_wait);
6554 sc->sc_notif_wait = NULL;
6555 }
6557 mbufq_drain(&sc->sc_snd);
6558 IWM_LOCK_DESTROY(sc);
6560 return (0);
6561 }
6563 static int
6564 iwm_detach(device_t dev)
6565 {
6566 struct iwm_softc *sc = device_get_softc(dev);
6568 return (iwm_detach_local(sc, 1));
6569 }
6571 static device_method_t iwm_pci_methods[] = {
6572 /* Device interface */
6573 DEVMETHOD(device_probe, iwm_probe),
6574 DEVMETHOD(device_attach, iwm_attach),
6575 DEVMETHOD(device_detach, iwm_detach),
6576 DEVMETHOD(device_suspend, iwm_suspend),
6577 DEVMETHOD(device_resume, iwm_resume),
6579 DEVMETHOD_END
6580 };
6582 static driver_t iwm_pci_driver = {
6583 "iwm",
6584 iwm_pci_methods,
6585 sizeof (struct iwm_softc)
6586 };
6588 static devclass_t iwm_devclass;
6590 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6591 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6592 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6593 MODULE_DEPEND(iwm, wlan, 1, 1, 1);