if_iwm - Use rates given in struct ieee80211_txparam for non-data transfers.
[dragonfly.git] / sys / dev / netif / iwm / if_iwm.c
1 /* $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $ */
3 /*
4 * Copyright (c) 2014 genua mbh <info@genua.de>
5 * Copyright (c) 2014 Fixup Software Ltd.
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 /*-
21 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22 * which were used as the reference documentation for this implementation.
24 * Driver version we are currently based off of is
25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
27 ***********************************************************************
29 * This file is provided under a dual BSD/GPLv2 license. When using or
30 * redistributing this file, you may do so under either license.
32 * GPL LICENSE SUMMARY
34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
43 * General Public License for more details.
45 * You should have received a copy of the GNU General Public License
46 * along with this program; if not, write to the Free Software
47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48 * USA
50 * The full GNU General Public License is included in this distribution
51 * in the file called COPYING.
53 * Contact Information:
54 * Intel Linux Wireless <ilw@linux.intel.com>
55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
58 * BSD LICENSE
60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61 * All rights reserved.
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
65 * are met:
67 * * Redistributions of source code must retain the above copyright
68 * notice, this list of conditions and the following disclaimer.
69 * * Redistributions in binary form must reproduce the above copyright
70 * notice, this list of conditions and the following disclaimer in
71 * the documentation and/or other materials provided with the
72 * distribution.
73 * * Neither the name Intel Corporation nor the names of its
74 * contributors may be used to endorse or promote products derived
75 * from this software without specific prior written permission.
77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
90 /*-
91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
93 * Permission to use, copy, modify, and distribute this software for any
94 * purpose with or without fee is hereby granted, provided that the above
95 * copyright notice and this permission notice appear in all copies.
97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
106 * DragonFly work
108 * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109 * changes to remove per-device network interface (DragonFly has not
110 * caught up to that yet on the WLAN side).
112 * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113 * malloc -> kmalloc (in particular, changing improper M_NOWAIT
114 * specifications to M_INTWAIT. We still don't
115 * understand why FreeBSD uses M_NOWAIT for
116 * critical must-not-fail kmalloc()s).
117 * free -> kfree
118 * printf -> kprintf
119 * (bug fix) memset in iwm_reset_rx_ring.
120 * (debug) added several kprintf()s on error
122 * header file paths (DFly allows localized path specifications).
123 * minor header file differences.
125 * Comprehensive list of adjustments for DragonFly #ifdef'd:
126 * (safety) added register read-back serialization in iwm_reset_rx_ring().
127 * packet counters
128 * msleep -> lksleep
129 * mtx -> lk (mtx functions -> lockmgr functions)
130 * callout differences
131 * taskqueue differences
132 * MSI differences
133 * bus_setup_intr() differences
134 * minor PCI config register naming differences
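 *
 * Illustrative sketch (not from the original sources) of the first
 * adjustment above: a FreeBSD allocation such as
 *
 *	data = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
 *
 * is written on DragonFly as
 *
 *	data = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
 *
 * so that a must-not-fail allocation may block briefly instead of
 * returning NULL.
 */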
136 #include <sys/cdefs.h>
137 __FBSDID("$FreeBSD$");
139 #include <sys/param.h>
140 #include <sys/bus.h>
141 #include <sys/endian.h>
142 #include <sys/firmware.h>
143 #include <sys/kernel.h>
144 #include <sys/malloc.h>
145 #include <sys/mbuf.h>
146 #include <sys/module.h>
147 #include <sys/rman.h>
148 #include <sys/sysctl.h>
149 #include <sys/linker.h>
151 #include <machine/endian.h>
153 #include <bus/pci/pcivar.h>
154 #include <bus/pci/pcireg.h>
156 #include <net/bpf.h>
158 #include <net/if.h>
159 #include <net/if_var.h>
160 #include <net/if_arp.h>
161 #include <net/if_dl.h>
162 #include <net/if_media.h>
163 #include <net/if_types.h>
165 #include <netinet/in.h>
166 #include <netinet/in_systm.h>
167 #include <netinet/if_ether.h>
168 #include <netinet/ip.h>
170 #include <netproto/802_11/ieee80211_var.h>
171 #include <netproto/802_11/ieee80211_regdomain.h>
172 #include <netproto/802_11/ieee80211_ratectl.h>
173 #include <netproto/802_11/ieee80211_radiotap.h>
175 #include "if_iwmreg.h"
176 #include "if_iwmvar.h"
177 #include "if_iwm_config.h"
178 #include "if_iwm_debug.h"
179 #include "if_iwm_notif_wait.h"
180 #include "if_iwm_util.h"
181 #include "if_iwm_binding.h"
182 #include "if_iwm_phy_db.h"
183 #include "if_iwm_mac_ctxt.h"
184 #include "if_iwm_phy_ctxt.h"
185 #include "if_iwm_time_event.h"
186 #include "if_iwm_power.h"
187 #include "if_iwm_scan.h"
188 #include "if_iwm_sf.h"
189 #include "if_iwm_sta.h"
190 #include "if_iwm_pcie_trans.h"
191 #include "if_iwm_led.h"
192 #include "if_iwm_fw.h"
194 const uint8_t iwm_nvm_channels[] = {
195 /* 2.4 GHz */
196 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
197 /* 5 GHz */
198 36, 40, 44, 48, 52, 56, 60, 64,
199 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
200 149, 153, 157, 161, 165
202 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
203 "IWM_NUM_CHANNELS is too small");
205 const uint8_t iwm_nvm_channels_8000[] = {
206 /* 2.4 GHz */
207 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
208 /* 5 GHz */
209 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
210 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
211 149, 153, 157, 161, 165, 169, 173, 177, 181
213 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
214 "IWM_NUM_CHANNELS_8000 is too small");
216 #define IWM_NUM_2GHZ_CHANNELS 14
217 #define IWM_N_HW_ADDR_MASK 0xF
220 * XXX For now, there's simply a fixed set of rate table entries
221 * that are populated.
223 const struct iwm_rate {
224 uint8_t rate;
225 uint8_t plcp;
226 } iwm_rates[] = {
227 { 2, IWM_RATE_1M_PLCP },
228 { 4, IWM_RATE_2M_PLCP },
229 { 11, IWM_RATE_5M_PLCP },
230 { 22, IWM_RATE_11M_PLCP },
231 { 12, IWM_RATE_6M_PLCP },
232 { 18, IWM_RATE_9M_PLCP },
233 { 24, IWM_RATE_12M_PLCP },
234 { 36, IWM_RATE_18M_PLCP },
235 { 48, IWM_RATE_24M_PLCP },
236 { 72, IWM_RATE_36M_PLCP },
237 { 96, IWM_RATE_48M_PLCP },
238 { 108, IWM_RATE_54M_PLCP },
240 #define IWM_RIDX_CCK 0
241 #define IWM_RIDX_OFDM 4
242 #define IWM_RIDX_MAX (nitems(iwm_rates)-1)
243 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
244 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
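/*
 * A minimal lookup sketch (illustration only, not part of the driver).
 * Rates above are in net80211's 500 kbps units, so e.g.
 * iwm_rates[IWM_RIDX_OFDM].rate == 12 means 6 Mbps. The
 * IWM_RATE_INVM_PLCP "invalid" sentinel is assumed to come from
 * if_iwmreg.h.
 */
#if 0
static uint8_t
iwm_plcp_for_rate(uint8_t rate)
{
	int i;

	for (i = 0; i <= IWM_RIDX_MAX; i++) {
		if (iwm_rates[i].rate == rate)
			return iwm_rates[i].plcp;
	}
	return IWM_RATE_INVM_PLCP;	/* no match */
}
#endif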
246 struct iwm_nvm_section {
247 uint16_t length;
248 uint8_t *data;
251 #define IWM_MVM_UCODE_ALIVE_TIMEOUT hz
252 #define IWM_MVM_UCODE_CALIB_TIMEOUT (2*hz)
254 struct iwm_mvm_alive_data {
255 int valid;
256 uint32_t scd_base_addr;
259 static int iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
260 static int iwm_firmware_store_section(struct iwm_softc *,
261 enum iwm_ucode_type,
262 const uint8_t *, size_t);
263 static int iwm_set_default_calib(struct iwm_softc *, const void *);
264 static void iwm_fw_info_free(struct iwm_fw_info *);
265 static int iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
266 #if !defined(__DragonFly__)
267 static void iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
268 #endif
269 static int iwm_alloc_fwmem(struct iwm_softc *);
270 static int iwm_alloc_sched(struct iwm_softc *);
271 static int iwm_alloc_kw(struct iwm_softc *);
272 static int iwm_alloc_ict(struct iwm_softc *);
273 static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
274 static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
275 static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
276 static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
277 int);
278 static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
279 static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
280 static void iwm_enable_interrupts(struct iwm_softc *);
281 static void iwm_restore_interrupts(struct iwm_softc *);
282 static void iwm_disable_interrupts(struct iwm_softc *);
283 static void iwm_ict_reset(struct iwm_softc *);
284 static int iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
285 static void iwm_stop_device(struct iwm_softc *);
286 static void iwm_mvm_nic_config(struct iwm_softc *);
287 static int iwm_nic_rx_init(struct iwm_softc *);
288 static int iwm_nic_tx_init(struct iwm_softc *);
289 static int iwm_nic_init(struct iwm_softc *);
290 static int iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
291 static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
292 uint16_t, uint8_t *, uint16_t *);
293 static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
294 uint16_t *, uint32_t);
295 static uint32_t iwm_eeprom_channel_flags(uint16_t);
296 static void iwm_add_channel_band(struct iwm_softc *,
297 struct ieee80211_channel[], int, int *, int, size_t,
298 const uint8_t[]);
299 static void iwm_init_channel_map(struct ieee80211com *, int, int *,
300 struct ieee80211_channel[]);
301 static struct iwm_nvm_data *
302 iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
303 const uint16_t *, const uint16_t *,
304 const uint16_t *, const uint16_t *,
305 const uint16_t *);
306 static void iwm_free_nvm_data(struct iwm_nvm_data *);
307 static void iwm_set_hw_address_family_8000(struct iwm_softc *,
308 struct iwm_nvm_data *,
309 const uint16_t *,
310 const uint16_t *);
311 static int iwm_get_sku(const struct iwm_softc *, const uint16_t *,
312 const uint16_t *);
313 static int iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
314 static int iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
315 const uint16_t *);
316 static int iwm_get_n_hw_addrs(const struct iwm_softc *,
317 const uint16_t *);
318 static void iwm_set_radio_cfg(const struct iwm_softc *,
319 struct iwm_nvm_data *, uint32_t);
320 static struct iwm_nvm_data *
321 iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
322 static int iwm_nvm_init(struct iwm_softc *);
323 static int iwm_pcie_load_section(struct iwm_softc *, uint8_t,
324 const struct iwm_fw_desc *);
325 static int iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
326 bus_addr_t, uint32_t);
327 static int iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
328 const struct iwm_fw_sects *,
329 int, int *);
330 static int iwm_pcie_load_cpu_sections(struct iwm_softc *,
331 const struct iwm_fw_sects *,
332 int, int *);
333 static int iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
334 const struct iwm_fw_sects *);
335 static int iwm_pcie_load_given_ucode(struct iwm_softc *,
336 const struct iwm_fw_sects *);
337 static int iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
338 static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
339 static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
340 static int iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
341 enum iwm_ucode_type);
342 static int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
343 static int iwm_rx_addbuf(struct iwm_softc *, int, int);
344 static int iwm_mvm_get_signal_strength(struct iwm_softc *,
345 struct iwm_rx_phy_info *);
346 static void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
347 struct iwm_rx_packet *);
348 static int iwm_get_noise(struct iwm_softc *,
349 const struct iwm_mvm_statistics_rx_non_phy *);
350 static void iwm_mvm_handle_rx_statistics(struct iwm_softc *,
351 struct iwm_rx_packet *);
352 static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
353 uint32_t, boolean_t);
354 static int iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
355 struct iwm_rx_packet *,
356 struct iwm_node *);
357 static void iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
358 static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
359 #if 0
360 static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
361 uint16_t);
362 #endif
363 static uint8_t iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
364 struct mbuf *, struct iwm_tx_cmd *);
365 static int iwm_tx(struct iwm_softc *, struct mbuf *,
366 struct ieee80211_node *, int);
367 static int iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
368 const struct ieee80211_bpf_params *);
369 static int iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
370 static int iwm_auth(struct ieee80211vap *, struct iwm_softc *);
371 static int iwm_release(struct iwm_softc *, struct iwm_node *);
372 static struct ieee80211_node *
373 iwm_node_alloc(struct ieee80211vap *,
374 const uint8_t[IEEE80211_ADDR_LEN]);
375 static uint8_t iwm_rate_from_ucode_rate(uint32_t);
376 static int iwm_rate2ridx(struct iwm_softc *, uint8_t);
377 static void iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
378 static int iwm_media_change(struct ifnet *);
379 static int iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
380 static void iwm_endscan_cb(void *, int);
381 static int iwm_send_bt_init_conf(struct iwm_softc *);
382 static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
383 static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
384 static int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
385 static void iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
386 static int iwm_init_hw(struct iwm_softc *);
387 static void iwm_init(struct iwm_softc *);
388 static void iwm_start(struct iwm_softc *);
389 static void iwm_stop(struct iwm_softc *);
390 static void iwm_watchdog(void *);
391 static void iwm_parent(struct ieee80211com *);
392 #ifdef IWM_DEBUG
393 static const char *
394 iwm_desc_lookup(uint32_t);
395 static void iwm_nic_error(struct iwm_softc *);
396 static void iwm_nic_umac_error(struct iwm_softc *);
397 #endif
398 static void iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
399 static void iwm_notif_intr(struct iwm_softc *);
400 static void iwm_intr(void *);
401 static int iwm_attach(device_t);
402 static int iwm_is_valid_ether_addr(uint8_t *);
403 static void iwm_preinit(void *);
404 static int iwm_detach_local(struct iwm_softc *sc, int);
405 static void iwm_init_task(void *);
406 static void iwm_radiotap_attach(struct iwm_softc *);
407 static struct ieee80211vap *
408 iwm_vap_create(struct ieee80211com *,
409 const char [IFNAMSIZ], int,
410 enum ieee80211_opmode, int,
411 const uint8_t [IEEE80211_ADDR_LEN],
412 const uint8_t [IEEE80211_ADDR_LEN]);
413 static void iwm_vap_delete(struct ieee80211vap *);
414 static void iwm_xmit_queue_drain(struct iwm_softc *);
415 static void iwm_scan_start(struct ieee80211com *);
416 static void iwm_scan_end(struct ieee80211com *);
417 static void iwm_update_mcast(struct ieee80211com *);
418 static void iwm_set_channel(struct ieee80211com *);
419 static void iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
420 static void iwm_scan_mindwell(struct ieee80211_scan_state *);
421 static int iwm_detach(device_t);
423 #if defined(__DragonFly__)
424 static int iwm_msi_enable = 1;
426 TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
427 #endif
429 static int iwm_lar_disable = 0;
430 TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
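/*
 * Illustrative usage (not from the original sources): these are
 * boot-time tunables, so they would typically be set from
 * /boot/loader.conf, e.g.
 *
 *	hw.iwm.msi.enable="0"
 *	hw.iwm.lar.disable="1"
 *
 * to disable MSI and LAR respectively.
 */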
433 * Firmware parser.
436 static int
437 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
439 const struct iwm_fw_cscheme_list *l = (const void *)data;
441 if (dlen < sizeof(*l) ||
442 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
443 return EINVAL;
445 /* we don't actually store anything for now, always use s/w crypto */
447 return 0;
450 static int
451 iwm_firmware_store_section(struct iwm_softc *sc,
452 enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
454 struct iwm_fw_sects *fws;
455 struct iwm_fw_desc *fwone;
457 if (type >= IWM_UCODE_TYPE_MAX)
458 return EINVAL;
459 if (dlen < sizeof(uint32_t))
460 return EINVAL;
462 fws = &sc->sc_fw.fw_sects[type];
463 if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
464 return EINVAL;
466 fwone = &fws->fw_sect[fws->fw_count];
468 /* first 32 bits are the device load offset */
469 memcpy(&fwone->offset, data, sizeof(uint32_t));
471 /* rest is data */
472 fwone->data = data + sizeof(uint32_t);
473 fwone->len = dlen - sizeof(uint32_t);
475 fws->fw_count++;
477 return 0;
480 #define IWM_DEFAULT_SCAN_CHANNELS 40
482 struct iwm_tlv_calib_data {
483 uint32_t ucode_type;
484 struct iwm_tlv_calib_ctrl calib;
485 } __packed;
487 static int
488 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
490 const struct iwm_tlv_calib_data *def_calib = data;
491 uint32_t ucode_type = le32toh(def_calib->ucode_type);
493 if (ucode_type >= IWM_UCODE_TYPE_MAX) {
494 device_printf(sc->sc_dev,
495 "Wrong ucode_type %u for default "
496 "calibration.\n", ucode_type);
497 return EINVAL;
500 sc->sc_default_calib[ucode_type].flow_trigger =
501 def_calib->calib.flow_trigger;
502 sc->sc_default_calib[ucode_type].event_trigger =
503 def_calib->calib.event_trigger;
505 return 0;
508 static int
509 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
510 struct iwm_ucode_capabilities *capa)
512 const struct iwm_ucode_api *ucode_api = (const void *)data;
513 uint32_t api_index = le32toh(ucode_api->api_index);
514 uint32_t api_flags = le32toh(ucode_api->api_flags);
515 int i;
517 if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
518 device_printf(sc->sc_dev,
519 "api flags index %d larger than supported by driver\n",
520 api_index);
521 /* don't return an error so we can load FW that has more bits */
522 return 0;
525 for (i = 0; i < 32; i++) {
526 if (api_flags & (1U << i))
527 setbit(capa->enabled_api, i + 32 * api_index);
530 return 0;
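/*
 * Worked example (illustrative, not from the original source): an
 * IWM_UCODE_TLV_API_CHANGES_SET TLV carrying api_index == 1 with bit 5
 * set in api_flags marks API flag 1 * 32 + 5 == 37 in
 * capa->enabled_api via the loop above.
 */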
533 static int
534 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
535 struct iwm_ucode_capabilities *capa)
537 const struct iwm_ucode_capa *ucode_capa = (const void *)data;
538 uint32_t api_index = le32toh(ucode_capa->api_index);
539 uint32_t api_flags = le32toh(ucode_capa->api_capa);
540 int i;
542 if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
543 device_printf(sc->sc_dev,
544 "capa flags index %d larger than supported by driver\n",
545 api_index);
546 /* don't return an error so we can load FW that has more bits */
547 return 0;
550 for (i = 0; i < 32; i++) {
551 if (api_flags & (1U << i))
552 setbit(capa->enabled_capa, i + 32 * api_index);
555 return 0;
558 static void
559 iwm_fw_info_free(struct iwm_fw_info *fw)
561 firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
562 fw->fw_fp = NULL;
563 /* don't touch fw->fw_status */
564 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
567 static int
568 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
570 struct iwm_fw_info *fw = &sc->sc_fw;
571 const struct iwm_tlv_ucode_header *uhdr;
572 const struct iwm_ucode_tlv *tlv;
573 struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
574 enum iwm_ucode_tlv_type tlv_type;
575 const struct firmware *fwp;
576 const uint8_t *data;
577 uint32_t tlv_len;
578 uint32_t usniffer_img;
579 const uint8_t *tlv_data;
580 uint32_t paging_mem_size;
581 int num_of_cpus;
582 int error = 0;
583 size_t len;
585 if (fw->fw_status == IWM_FW_STATUS_DONE &&
586 ucode_type != IWM_UCODE_INIT)
587 return 0;
589 while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
590 #if defined(__DragonFly__)
591 lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
592 #else
593 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
594 #endif
596 fw->fw_status = IWM_FW_STATUS_INPROGRESS;
598 if (fw->fw_fp != NULL)
599 iwm_fw_info_free(fw);
602 * Load firmware into driver memory.
603 * fw_fp will be set.
605 IWM_UNLOCK(sc);
606 fwp = firmware_get(sc->cfg->fw_name);
607 IWM_LOCK(sc);
608 if (fwp == NULL) {
609 device_printf(sc->sc_dev,
610 "could not read firmware %s (error %d)\n",
611 sc->cfg->fw_name, error);
612 goto out;
614 fw->fw_fp = fwp;
616 /* (Re-)Initialize default values. */
617 capa->flags = 0;
618 capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
619 capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
620 memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
621 memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
622 memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
625 * Parse firmware contents
628 uhdr = (const void *)fw->fw_fp->data;
629 if (*(const uint32_t *)fw->fw_fp->data != 0
630 || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
631 device_printf(sc->sc_dev, "invalid firmware %s\n",
632 sc->cfg->fw_name);
633 error = EINVAL;
634 goto out;
637 ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
638 IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
639 IWM_UCODE_MINOR(le32toh(uhdr->ver)),
640 IWM_UCODE_API(le32toh(uhdr->ver)));
641 data = uhdr->data;
642 len = fw->fw_fp->datasize - sizeof(*uhdr);
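	/*
	 * Layout sketch of each TLV walked below (derived from the
	 * parsing code; fields are little-endian and payloads are
	 * padded to a 4-byte boundary):
	 *
	 *	+------+--------+---------------------------+
	 *	| type | length | data[roundup2(length, 4)] |
	 *	+------+--------+---------------------------+
	 */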
644 while (len >= sizeof(*tlv)) {
645 len -= sizeof(*tlv);
646 tlv = (const void *)data;
648 tlv_len = le32toh(tlv->length);
649 tlv_type = le32toh(tlv->type);
650 tlv_data = tlv->data;
652 if (len < tlv_len) {
653 device_printf(sc->sc_dev,
654 "firmware too short: %zu bytes\n",
655 len);
656 error = EINVAL;
657 goto parse_out;
659 len -= roundup2(tlv_len, 4);
660 data += sizeof(*tlv) + roundup2(tlv_len, 4);
662 switch ((int)tlv_type) {
663 case IWM_UCODE_TLV_PROBE_MAX_LEN:
664 if (tlv_len != sizeof(uint32_t)) {
665 device_printf(sc->sc_dev,
666 "%s: PROBE_MAX_LEN (%d) != sizeof(uint32_t)\n",
667 __func__,
668 (int) tlv_len);
669 error = EINVAL;
670 goto parse_out;
672 capa->max_probe_length =
673 le32_to_cpup((const uint32_t *)tlv_data);
674 /* limit it to something sensible */
675 if (capa->max_probe_length >
676 IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
677 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
678 "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
679 "ridiculous\n", __func__);
680 error = EINVAL;
681 goto parse_out;
683 break;
684 case IWM_UCODE_TLV_PAN:
685 if (tlv_len) {
686 device_printf(sc->sc_dev,
687 "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
688 __func__,
689 (int) tlv_len);
690 error = EINVAL;
691 goto parse_out;
693 capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
694 break;
695 case IWM_UCODE_TLV_FLAGS:
696 if (tlv_len < sizeof(uint32_t)) {
697 device_printf(sc->sc_dev,
698 "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
699 __func__,
700 (int) tlv_len);
701 error = EINVAL;
702 goto parse_out;
704 if (tlv_len % sizeof(uint32_t)) {
705 device_printf(sc->sc_dev,
706 "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) %% sizeof(uint32_t)\n",
707 __func__,
708 (int) tlv_len);
709 error = EINVAL;
710 goto parse_out;
713 * Apparently there can be many flags, but Linux driver
714 * parses only the first one, and so do we.
716 * XXX: why does this override IWM_UCODE_TLV_PAN?
717 * Intentional or a bug? Observations from
718 * current firmware file:
719 * 1) TLV_PAN is parsed first
720 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
721 * ==> this resets TLV_PAN to itself... hnnnk
723 capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
724 break;
725 case IWM_UCODE_TLV_CSCHEME:
726 if ((error = iwm_store_cscheme(sc,
727 tlv_data, tlv_len)) != 0) {
728 device_printf(sc->sc_dev,
729 "%s: iwm_store_cscheme(): returned %d\n",
730 __func__,
731 error);
732 goto parse_out;
734 break;
735 case IWM_UCODE_TLV_NUM_OF_CPU:
736 if (tlv_len != sizeof(uint32_t)) {
737 device_printf(sc->sc_dev,
738 "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
739 __func__,
740 (int) tlv_len);
741 error = EINVAL;
742 goto parse_out;
744 num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
745 if (num_of_cpus == 2) {
746 fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
747 TRUE;
748 fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
749 TRUE;
750 fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
751 TRUE;
752 } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
753 device_printf(sc->sc_dev,
754 "%s: Driver supports only 1 or 2 CPUs\n",
755 __func__);
756 error = EINVAL;
757 goto parse_out;
759 break;
760 case IWM_UCODE_TLV_SEC_RT:
761 if ((error = iwm_firmware_store_section(sc,
762 IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
763 device_printf(sc->sc_dev,
764 "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
765 __func__,
766 error);
767 goto parse_out;
769 break;
770 case IWM_UCODE_TLV_SEC_INIT:
771 if ((error = iwm_firmware_store_section(sc,
772 IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
773 device_printf(sc->sc_dev,
774 "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
775 __func__,
776 error);
777 goto parse_out;
779 break;
780 case IWM_UCODE_TLV_SEC_WOWLAN:
781 if ((error = iwm_firmware_store_section(sc,
782 IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
783 device_printf(sc->sc_dev,
784 "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
785 __func__,
786 error);
787 goto parse_out;
789 break;
790 case IWM_UCODE_TLV_DEF_CALIB:
791 if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
792 device_printf(sc->sc_dev,
793 "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
794 __func__,
795 (int) tlv_len,
796 (int) sizeof(struct iwm_tlv_calib_data));
797 error = EINVAL;
798 goto parse_out;
800 if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
801 device_printf(sc->sc_dev,
802 "%s: iwm_set_default_calib() failed: %d\n",
803 __func__,
804 error);
805 goto parse_out;
807 break;
808 case IWM_UCODE_TLV_PHY_SKU:
809 if (tlv_len != sizeof(uint32_t)) {
810 error = EINVAL;
811 device_printf(sc->sc_dev,
812 "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
813 __func__,
814 (int) tlv_len);
815 goto parse_out;
817 sc->sc_fw.phy_config =
818 le32_to_cpup((const uint32_t *)tlv_data);
819 sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
820 IWM_FW_PHY_CFG_TX_CHAIN) >>
821 IWM_FW_PHY_CFG_TX_CHAIN_POS;
822 sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
823 IWM_FW_PHY_CFG_RX_CHAIN) >>
824 IWM_FW_PHY_CFG_RX_CHAIN_POS;
825 break;
827 case IWM_UCODE_TLV_API_CHANGES_SET: {
828 if (tlv_len != sizeof(struct iwm_ucode_api)) {
829 error = EINVAL;
830 goto parse_out;
832 if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
833 error = EINVAL;
834 goto parse_out;
836 break;
839 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
840 if (tlv_len != sizeof(struct iwm_ucode_capa)) {
841 error = EINVAL;
842 goto parse_out;
844 if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
845 error = EINVAL;
846 goto parse_out;
848 break;
851 case 48: /* undocumented TLV */
852 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
853 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
854 /* ignore, not used by current driver */
855 break;
857 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
858 if ((error = iwm_firmware_store_section(sc,
859 IWM_UCODE_REGULAR_USNIFFER, tlv_data,
860 tlv_len)) != 0)
861 goto parse_out;
862 break;
864 case IWM_UCODE_TLV_PAGING:
865 if (tlv_len != sizeof(uint32_t)) {
866 error = EINVAL;
867 goto parse_out;
869 paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
871 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
872 "%s: Paging: paging enabled (size = %u bytes)\n",
873 __func__, paging_mem_size);
874 if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
875 device_printf(sc->sc_dev,
876 "%s: Paging: driver supports up to %u bytes for paging image\n",
877 __func__, IWM_MAX_PAGING_IMAGE_SIZE);
878 error = EINVAL;
879 goto out;
881 if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
882 device_printf(sc->sc_dev,
883 "%s: Paging: image isn't multiple %u\n",
884 __func__, IWM_FW_PAGING_SIZE);
885 error = EINVAL;
886 goto out;
889 sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
890 paging_mem_size;
891 usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
892 sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
893 paging_mem_size;
894 break;
896 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
897 if (tlv_len != sizeof(uint32_t)) {
898 error = EINVAL;
899 goto parse_out;
901 capa->n_scan_channels =
902 le32_to_cpup((const uint32_t *)tlv_data);
903 break;
905 case IWM_UCODE_TLV_FW_VERSION:
906 if (tlv_len != sizeof(uint32_t) * 3) {
907 error = EINVAL;
908 goto parse_out;
910 ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
911 "%d.%d.%d",
912 le32toh(((const uint32_t *)tlv_data)[0]),
913 le32toh(((const uint32_t *)tlv_data)[1]),
914 le32toh(((const uint32_t *)tlv_data)[2]));
915 break;
917 case IWM_UCODE_TLV_FW_MEM_SEG:
918 break;
920 default:
921 device_printf(sc->sc_dev,
922 "%s: unknown firmware section %d, abort\n",
923 __func__, tlv_type);
924 error = EINVAL;
925 goto parse_out;
929 KASSERT(error == 0, ("unhandled error"));
931 parse_out:
932 if (error) {
933 device_printf(sc->sc_dev, "firmware parse error %d, "
934 "section type %d\n", error, tlv_type);
937 out:
938 if (error) {
939 fw->fw_status = IWM_FW_STATUS_NONE;
940 if (fw->fw_fp != NULL)
941 iwm_fw_info_free(fw);
942 } else
943 fw->fw_status = IWM_FW_STATUS_DONE;
944 wakeup(&sc->sc_fw);
946 return error;
950 * DMA resource routines
953 /* fwmem is used to load firmware onto the card */
954 static int
955 iwm_alloc_fwmem(struct iwm_softc *sc)
957 /* Must be aligned on a 16-byte boundary. */
958 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
959 IWM_FH_MEM_TB_MAX_LENGTH, 16);
962 /* tx scheduler rings. not used? */
963 static int
964 iwm_alloc_sched(struct iwm_softc *sc)
966 /* TX scheduler rings must be aligned on a 1KB boundary. */
967 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
968 nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
971 /* keep-warm page is used internally by the card. see iwl-fh.h for more info */
972 static int
973 iwm_alloc_kw(struct iwm_softc *sc)
975 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
978 /* interrupt cause table */
979 static int
980 iwm_alloc_ict(struct iwm_softc *sc)
982 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
983 IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
986 static int
987 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
989 bus_size_t size;
990 int i, error;
992 ring->cur = 0;
994 /* Allocate RX descriptors (256-byte aligned). */
995 size = IWM_RX_RING_COUNT * sizeof(uint32_t);
996 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
997 if (error != 0) {
998 device_printf(sc->sc_dev,
999 "could not allocate RX ring DMA memory\n");
1000 goto fail;
1002 ring->desc = ring->desc_dma.vaddr;
1004 /* Allocate RX status area (16-byte aligned). */
1005 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1006 sizeof(*ring->stat), 16);
1007 if (error != 0) {
1008 device_printf(sc->sc_dev,
1009 "could not allocate RX status DMA memory\n");
1010 goto fail;
1012 ring->stat = ring->stat_dma.vaddr;
1014 /* Create RX buffer DMA tag. */
1015 #if defined(__DragonFly__)
1016 error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
1018 BUS_SPACE_MAXADDR_32BIT,
1019 BUS_SPACE_MAXADDR,
1020 NULL, NULL,
1021 IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
1022 BUS_DMA_NOWAIT, &ring->data_dmat);
1023 #else
1024 error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1025 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1026 IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
1027 #endif
1028 if (error != 0) {
1029 device_printf(sc->sc_dev,
1030 "%s: could not create RX buf DMA tag, error %d\n",
1031 __func__, error);
1032 goto fail;
1035 /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
1036 error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
1037 if (error != 0) {
1038 device_printf(sc->sc_dev,
1039 "%s: could not create RX buf DMA map, error %d\n",
1040 __func__, error);
1041 goto fail;
1044 * Allocate and map RX buffers.
1046 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1047 struct iwm_rx_data *data = &ring->data[i];
1048 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1049 if (error != 0) {
1050 device_printf(sc->sc_dev,
1051 "%s: could not create RX buf DMA map, error %d\n",
1052 __func__, error);
1053 goto fail;
1055 data->m = NULL;
1057 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1058 goto fail;
1061 return 0;
1063 fail: iwm_free_rx_ring(sc, ring);
1064 return error;
1067 static void
1068 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1070 /* Reset the ring state */
1071 ring->cur = 0;
1074 * The hw rx ring index in shared memory must also be cleared,
1075 * otherwise the discrepancy can cause reprocessing chaos.
1077 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1080 static void
1081 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1083 int i;
1085 iwm_dma_contig_free(&ring->desc_dma);
1086 iwm_dma_contig_free(&ring->stat_dma);
1088 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1089 struct iwm_rx_data *data = &ring->data[i];
1091 if (data->m != NULL) {
1092 bus_dmamap_sync(ring->data_dmat, data->map,
1093 BUS_DMASYNC_POSTREAD);
1094 bus_dmamap_unload(ring->data_dmat, data->map);
1095 m_freem(data->m);
1096 data->m = NULL;
1098 if (data->map != NULL) {
1099 bus_dmamap_destroy(ring->data_dmat, data->map);
1100 data->map = NULL;
1103 if (ring->spare_map != NULL) {
1104 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1105 ring->spare_map = NULL;
1107 if (ring->data_dmat != NULL) {
1108 bus_dma_tag_destroy(ring->data_dmat);
1109 ring->data_dmat = NULL;
1113 static int
1114 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1116 bus_addr_t paddr;
1117 bus_size_t size;
1118 size_t maxsize;
1119 int nsegments;
1120 int i, error;
1122 ring->qid = qid;
1123 ring->queued = 0;
1124 ring->cur = 0;
1126 /* Allocate TX descriptors (256-byte aligned). */
1127 size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1128 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1129 if (error != 0) {
1130 device_printf(sc->sc_dev,
1131 "could not allocate TX ring DMA memory\n");
1132 goto fail;
1134 ring->desc = ring->desc_dma.vaddr;
1137 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1138 * to allocate commands space for other rings.
1140 if (qid > IWM_MVM_CMD_QUEUE)
1141 return 0;
1143 size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1144 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1145 if (error != 0) {
1146 device_printf(sc->sc_dev,
1147 "could not allocate TX cmd DMA memory\n");
1148 goto fail;
1150 ring->cmd = ring->cmd_dma.vaddr;
1152 /* FW commands may require more mapped space than packets. */
1153 if (qid == IWM_MVM_CMD_QUEUE) {
1154 maxsize = IWM_RBUF_SIZE;
1155 nsegments = 1;
1156 } else {
1157 maxsize = MCLBYTES;
1158 nsegments = IWM_MAX_SCATTER - 2;
1161 #if defined(__DragonFly__)
1162 error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
1164 BUS_SPACE_MAXADDR_32BIT,
1165 BUS_SPACE_MAXADDR,
1166 NULL, NULL,
1167 maxsize, nsegments, maxsize,
1168 BUS_DMA_NOWAIT, &ring->data_dmat);
1169 #else
1170 error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1171 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1172 nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1173 #endif
1174 if (error != 0) {
1175 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1176 goto fail;
1179 paddr = ring->cmd_dma.paddr;
1180 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1181 struct iwm_tx_data *data = &ring->data[i];
1183 data->cmd_paddr = paddr;
1184 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1185 + offsetof(struct iwm_tx_cmd, scratch);
1186 paddr += sizeof(struct iwm_device_cmd);
1188 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1189 if (error != 0) {
1190 device_printf(sc->sc_dev,
1191 "could not create TX buf DMA map\n");
1192 goto fail;
1195 KASSERT(paddr == ring->cmd_dma.paddr + size,
1196 ("invalid physical address"));
1197 return 0;
1199 fail: iwm_free_tx_ring(sc, ring);
1200 return error;
1203 static void
1204 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1206 int i;
1208 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1209 struct iwm_tx_data *data = &ring->data[i];
1211 if (data->m != NULL) {
1212 bus_dmamap_sync(ring->data_dmat, data->map,
1213 BUS_DMASYNC_POSTWRITE);
1214 bus_dmamap_unload(ring->data_dmat, data->map);
1215 m_freem(data->m);
1216 data->m = NULL;
1219 /* Clear TX descriptors. */
1220 memset(ring->desc, 0, ring->desc_dma.size);
1221 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1222 BUS_DMASYNC_PREWRITE);
1223 sc->qfullmsk &= ~(1 << ring->qid);
1224 ring->queued = 0;
1225 ring->cur = 0;
1227 if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1228 iwm_pcie_clear_cmd_in_flight(sc);
1231 static void
1232 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1234 int i;
1236 iwm_dma_contig_free(&ring->desc_dma);
1237 iwm_dma_contig_free(&ring->cmd_dma);
1239 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1240 struct iwm_tx_data *data = &ring->data[i];
1242 if (data->m != NULL) {
1243 bus_dmamap_sync(ring->data_dmat, data->map,
1244 BUS_DMASYNC_POSTWRITE);
1245 bus_dmamap_unload(ring->data_dmat, data->map);
1246 m_freem(data->m);
1247 data->m = NULL;
1249 if (data->map != NULL) {
1250 bus_dmamap_destroy(ring->data_dmat, data->map);
1251 data->map = NULL;
1254 if (ring->data_dmat != NULL) {
1255 bus_dma_tag_destroy(ring->data_dmat);
1256 ring->data_dmat = NULL;
1261 * High-level hardware frobbing routines
1264 static void
1265 iwm_enable_interrupts(struct iwm_softc *sc)
1267 sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1268 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1271 static void
1272 iwm_restore_interrupts(struct iwm_softc *sc)
1274 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1277 static void
1278 iwm_disable_interrupts(struct iwm_softc *sc)
1280 /* disable interrupts */
1281 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1283 /* acknowledge all interrupts */
1284 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1285 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1288 static void
1289 iwm_ict_reset(struct iwm_softc *sc)
1291 iwm_disable_interrupts(sc);
1293 /* Reset ICT table. */
1294 memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1295 sc->ict_cur = 0;
1297 /* Set physical address of ICT table (4KB aligned). */
1298 IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1299 IWM_CSR_DRAM_INT_TBL_ENABLE
1300 | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1301 | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1302 | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1304 /* Switch to ICT interrupt mode in driver. */
1305 sc->sc_flags |= IWM_FLAG_USE_ICT;
1307 /* Re-enable interrupts. */
1308 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1309 iwm_enable_interrupts(sc);
1313 * Since this hard-resets things, it's time to actually
1314 * mark the first vap (if any) as having no mac context.
1315 * It's annoying, but since the driver is potentially being
1316 * stop/start'ed whilst active (thanks openbsd port!) we
1317 * have to correctly track this.
1319 static void
1320 iwm_stop_device(struct iwm_softc *sc)
1322 struct ieee80211com *ic = &sc->sc_ic;
1323 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1324 int chnl, qid;
1325 uint32_t mask = 0;
1327 /* tell the device to stop sending interrupts */
1328 iwm_disable_interrupts(sc);
1331 * FreeBSD-local: mark the first vap as not-uploaded,
1332 * so the next transition through auth/assoc
1333 * will correctly populate the MAC context.
1335 if (vap) {
1336 struct iwm_vap *iv = IWM_VAP(vap);
1337 iv->phy_ctxt = NULL;
1338 iv->is_uploaded = 0;
1341 /* device going down, Stop using ICT table */
1342 sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1344 /* stop tx and rx. tx and rx bits, as usual, are from if_iwn */
1346 if (iwm_nic_lock(sc)) {
1347 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1349 /* Stop each Tx DMA channel */
1350 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1351 IWM_WRITE(sc,
1352 IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1353 mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1356 /* Wait for DMA channels to be idle */
1357 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1358 5000)) {
1359 device_printf(sc->sc_dev,
1360 "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1361 IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1363 iwm_nic_unlock(sc);
1365 iwm_pcie_rx_stop(sc);
1367 /* Stop RX ring. */
1368 iwm_reset_rx_ring(sc, &sc->rxq);
1370 /* Reset all TX rings. */
1371 for (qid = 0; qid < nitems(sc->txq); qid++)
1372 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1374 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1375 /* Power-down device's busmaster DMA clocks */
1376 if (iwm_nic_lock(sc)) {
1377 iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1378 IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1379 iwm_nic_unlock(sc);
1381 DELAY(5);
1384 /* Make sure (redundant) we've released our request to stay awake */
1385 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1386 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1388 /* Stop the device, and put it in low power state */
1389 iwm_apm_stop(sc);
1391 /* stop and reset the on-board processor */
1392 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1393 DELAY(1000);
1396 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1397 * This is a bug in certain versions of the hardware.
1398 * Certain devices also keep sending HW RF kill interrupt all
1399 * the time, unless the interrupt is ACKed even if the interrupt
1400 * should be masked. Re-ACK all the interrupts here.
1402 iwm_disable_interrupts(sc);
1405 * Even if we stop the HW, we still want the RF kill
1406 * interrupt
1408 iwm_enable_rfkill_int(sc);
1409 iwm_check_rfkill(sc);
1412 static void
1413 iwm_mvm_nic_config(struct iwm_softc *sc)
1415 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1416 uint32_t reg_val = 0;
1417 uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1419 radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1420 IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1421 radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1422 IWM_FW_PHY_CFG_RADIO_STEP_POS;
1423 radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1424 IWM_FW_PHY_CFG_RADIO_DASH_POS;
1426 /* SKU control */
1427 reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1428 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1429 reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1430 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1432 /* radio configuration */
1433 reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1434 reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1435 reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1437 IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1439 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1440 "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1441 radio_cfg_step, radio_cfg_dash);
1444 * W/A : NIC is stuck in a reset state after Early PCIe power off
1445 * (PCIe power is lost before PERST# is asserted), causing ME FW
1446 * to lose ownership and not being able to obtain it back.
1448 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1449 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1450 IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1451 ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1455 static int
1456 iwm_nic_rx_init(struct iwm_softc *sc)
1459 * Initialize RX ring. This is from the iwn driver.
1461 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1463 /* Stop Rx DMA */
1464 iwm_pcie_rx_stop(sc);
1466 if (!iwm_nic_lock(sc))
1467 return EBUSY;
1469 /* reset and flush pointers */
1470 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1471 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1472 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1473 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1475 /* Set physical address of RX ring (256-byte aligned). */
1476 IWM_WRITE(sc,
1477 IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1479 /* Set physical address of RX status (16-byte aligned). */
1480 IWM_WRITE(sc,
1481 IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1483 #if defined(__DragonFly__)
1484 /* Force serialization (probably not needed but don't trust the HW) */
1485 IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
1486 #endif
1488 /* Enable Rx DMA
1489 * XXX 5000 HW isn't supported by the iwm(4) driver.
1490 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
1491 * the credit mechanism in 5000 HW RX FIFO
1492 * Direct rx interrupts to hosts
1493 * Rx buffer size 4 or 8k or 12k
1494 * RB timeout 0x10
1495 * 256 RBDs
1497 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1498 IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
1499 IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
1500 IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
1501 IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
1502 (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1503 IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1505 IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1507 /* W/A for interrupt coalescing bug in 7260 and 3160 */
1508 if (sc->cfg->host_interrupt_operation_mode)
1509 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1512 * Thus sayeth el jefe (iwlwifi) via a comment:
1514 * This value should initially be 0 (before preparing any
1515 * RBs), should be 8 after preparing the first 8 RBs (for example)
1517 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1519 iwm_nic_unlock(sc);
1521 return 0;
1524 static int
1525 iwm_nic_tx_init(struct iwm_softc *sc)
1527 int qid;
1529 if (!iwm_nic_lock(sc))
1530 return EBUSY;
1532 /* Deactivate TX scheduler. */
1533 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1535 /* Set physical address of "keep warm" page (16-byte aligned). */
1536 IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1538 /* Initialize TX rings. */
1539 for (qid = 0; qid < nitems(sc->txq); qid++) {
1540 struct iwm_tx_ring *txq = &sc->txq[qid];
1542 /* Set physical address of TX ring (256-byte aligned). */
1543 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1544 txq->desc_dma.paddr >> 8);
1545 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1546 "%s: loading ring %d descriptors (%p) at %lx\n",
1547 __func__,
1548 qid, txq->desc,
1549 (unsigned long) (txq->desc_dma.paddr >> 8));
1552 iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1554 iwm_nic_unlock(sc);
1556 return 0;
1559 static int
1560 iwm_nic_init(struct iwm_softc *sc)
1562 int error;
1564 iwm_apm_init(sc);
1565 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1566 iwm_set_pwr(sc);
1568 iwm_mvm_nic_config(sc);
1570 if ((error = iwm_nic_rx_init(sc)) != 0)
1571 return error;
1574 * Ditto for TX, from iwn
1576 if ((error = iwm_nic_tx_init(sc)) != 0)
1577 return error;
1579 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1580 "%s: shadow registers enabled\n", __func__);
1581 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1583 return 0;
1587 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1589 if (!iwm_nic_lock(sc)) {
1590 device_printf(sc->sc_dev,
1591 "%s: cannot enable txq %d\n",
1592 __func__,
1593 qid);
1594 return EBUSY;
1597 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1599 if (qid == IWM_MVM_CMD_QUEUE) {
1600 /* unactivate before configuration */
1601 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1602 (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1603 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1605 iwm_nic_unlock(sc);
1607 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1609 if (!iwm_nic_lock(sc)) {
1610 device_printf(sc->sc_dev,
1611 "%s: cannot enable txq %d\n", __func__, qid);
1612 return EBUSY;
1614 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1615 iwm_nic_unlock(sc);
1617 iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1618 /* Set scheduler window size and frame limit. */
1619 iwm_write_mem32(sc,
1620 sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1621 sizeof(uint32_t),
1622 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1623 IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1624 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1625 IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1627 if (!iwm_nic_lock(sc)) {
1628 device_printf(sc->sc_dev,
1629 "%s: cannot enable txq %d\n", __func__, qid);
1630 return EBUSY;
1632 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1633 (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1634 (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1635 (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1636 IWM_SCD_QUEUE_STTS_REG_MSK);
1637 } else {
1638 struct iwm_scd_txq_cfg_cmd cmd;
1639 int error;
1641 iwm_nic_unlock(sc);
1643 memset(&cmd, 0, sizeof(cmd));
1644 cmd.scd_queue = qid;
1645 cmd.enable = 1;
1646 cmd.sta_id = sta_id;
1647 cmd.tx_fifo = fifo;
1648 cmd.aggregate = 0;
1649 cmd.window = IWM_FRAME_LIMIT;
1651 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1652 sizeof(cmd), &cmd);
1653 if (error) {
1654 device_printf(sc->sc_dev,
1655 "cannot enable txq %d\n", qid);
1656 return error;
1659 if (!iwm_nic_lock(sc))
1660 return EBUSY;
1663 iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1664 iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1666 iwm_nic_unlock(sc);
1668 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1669 __func__, qid, fifo);
1671 return 0;
1674 static int
1675 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1677 int error, chnl;
1679 int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1680 IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1682 if (!iwm_nic_lock(sc))
1683 return EBUSY;
1685 iwm_ict_reset(sc);
1687 sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1688 if (scd_base_addr != 0 &&
1689 scd_base_addr != sc->scd_base_addr) {
1690 device_printf(sc->sc_dev,
1691 "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1692 __func__, scd_base_addr, sc->scd_base_addr);
1695 iwm_nic_unlock(sc);
1697 /* reset context data, TX status and translation data */
1698 error = iwm_write_mem(sc,
1699 sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1700 NULL, clear_dwords);
1701 if (error)
1702 return EBUSY;
1704 if (!iwm_nic_lock(sc))
1705 return EBUSY;
1707 /* Set physical address of TX scheduler rings (1KB aligned). */
1708 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1710 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1712 iwm_nic_unlock(sc);
1714 /* enable command channel */
1715 error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1716 if (error)
1717 return error;
1719 if (!iwm_nic_lock(sc))
1720 return EBUSY;
1722 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1724 /* Enable DMA channels. */
1725 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1726 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1727 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1728 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1731 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1732 IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1734 iwm_nic_unlock(sc);
1736 /* Enable L1-Active */
1737 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1738 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1739 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1742 return error;
1746 * NVM read access and content parsing. We do not support
1747 * external NVM or writing NVM.
1748 * iwlwifi/mvm/nvm.c
1751 /* Default NVM size to read */
1752 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
1754 #define IWM_NVM_WRITE_OPCODE 1
1755 #define IWM_NVM_READ_OPCODE 0
1757 /* load nvm chunk response */
1758 enum {
1759 IWM_READ_NVM_CHUNK_SUCCEED = 0,
1760 IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
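/*
 * Shape of the exchange implemented below (a summary mirroring the
 * code): the driver sends an IWM_NVM_ACCESS_CMD containing
 * { offset, length, type (section), op_code = IWM_NVM_READ_OPCODE };
 * the firmware answers with { status, length actually read, offset
 * echoed back, data[] }, which is then copied into the caller's
 * buffer.
 */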
1763 static int
1764 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1765 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1767 struct iwm_nvm_access_cmd nvm_access_cmd = {
1768 .offset = htole16(offset),
1769 .length = htole16(length),
1770 .type = htole16(section),
1771 .op_code = IWM_NVM_READ_OPCODE,
1773 struct iwm_nvm_access_resp *nvm_resp;
1774 struct iwm_rx_packet *pkt;
1775 struct iwm_host_cmd cmd = {
1776 .id = IWM_NVM_ACCESS_CMD,
1777 .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1778 .data = { &nvm_access_cmd, },
1780 int ret, bytes_read, offset_read;
1781 uint8_t *resp_data;
1783 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1785 ret = iwm_send_cmd(sc, &cmd);
1786 if (ret) {
1787 device_printf(sc->sc_dev,
1788 "Could not send NVM_ACCESS command (error=%d)\n", ret);
1789 return ret;
1792 pkt = cmd.resp_pkt;
1794 /* Extract NVM response */
1795 nvm_resp = (void *)pkt->data;
1796 ret = le16toh(nvm_resp->status);
1797 bytes_read = le16toh(nvm_resp->length);
1798 offset_read = le16toh(nvm_resp->offset);
1799 resp_data = nvm_resp->data;
1800 if (ret) {
1801 if ((offset != 0) &&
1802 (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1804 * meaning of NOT_VALID_ADDRESS:
1805 * the driver tried to read a chunk from an address that is
1806 * a multiple of 2K and got an error because that address is empty.
1807 * meaning of (offset != 0): the driver has already
1808 * read valid data from another chunk, so this case
1809 * is not an error.
1811 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1812 "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
1813 offset);
1814 *len = 0;
1815 ret = 0;
1816 } else {
1817 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1818 "NVM access command failed with status %d\n", ret);
1819 ret = EIO;
1821 goto exit;
1824 if (offset_read != offset) {
1825 device_printf(sc->sc_dev,
1826 "NVM ACCESS response with invalid offset %d\n",
1827 offset_read);
1828 ret = EINVAL;
1829 goto exit;
1832 if (bytes_read > length) {
1833 device_printf(sc->sc_dev,
1834 "NVM ACCESS response with too much data "
1835 "(%d bytes requested, %d bytes received)\n",
1836 length, bytes_read);
1837 ret = EINVAL;
1838 goto exit;
1841 /* Copy the NVM data we read into the caller's buffer */
1842 memcpy(data + offset, resp_data, bytes_read);
1843 *len = bytes_read;
1845 exit:
1846 iwm_free_resp(sc, &cmd);
1847 return ret;
1851 * Reads an NVM section completely.
1852 * NICs prior to 7000 family don't have a real NVM, but just read
1853 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1854 * by uCode, we need to manually check in this case that we don't
1855 * overflow and try to read more than the EEPROM size.
1856 * For 7000 family NICs, we supply the maximal size we can read, and
1857 * the uCode fills the response with as much data as it can,
1858 * without overflowing, so no check is needed.
1860 static int
1861 iwm_nvm_read_section(struct iwm_softc *sc,
1862 uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1864 uint16_t seglen, length, offset = 0;
1865 int ret;
1867 /* Set nvm section read length */
1868 length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1870 seglen = length;
1872 /* Read the NVM until exhausted (reading less than requested) */
1873 while (seglen == length) {
1874 /* Check no memory assumptions fail and cause an overflow */
1875 if ((size_read + offset + length) >
1876 sc->cfg->eeprom_size) {
1877 device_printf(sc->sc_dev,
1878 "EEPROM size is too small for NVM\n");
1879 return ENOBUFS;
1882 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1883 if (ret) {
1884 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1885 "Cannot read NVM from section %d offset %d, length %d\n",
1886 section, offset, length);
1887 return ret;
1889 offset += seglen;
1892 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1893 "NVM section %d read completed\n", section);
1894 *len = offset;
1895 return 0;
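/*
 * Illustrative only: a 5000-byte section is consumed in 2 KiB steps,
 * so it comes back as chunks of 2048, 2048 and 904 bytes; the short
 * final chunk (seglen < length) ends the loop and *len reports 5000.
 * A chunk starting at an empty 2K-aligned address comes back with
 * *len = 0, which terminates the read the same way.
 */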
1898 /* NVM offsets (in words) definitions */
1899 enum iwm_nvm_offsets {
1900 /* NVM HW-Section offset (in words) definitions */
1901 IWM_HW_ADDR = 0x15,
1903 /* NVM SW-Section offset (in words) definitions */
1904 IWM_NVM_SW_SECTION = 0x1C0,
1905 IWM_NVM_VERSION = 0,
1906 IWM_RADIO_CFG = 1,
1907 IWM_SKU = 2,
1908 IWM_N_HW_ADDRS = 3,
1909 IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1911 /* NVM calibration section offset (in words) definitions */
1912 IWM_NVM_CALIB_SECTION = 0x2B8,
1913 IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1916 enum iwm_8000_nvm_offsets {
1917 /* NVM HW-Section offset (in words) definitions */
1918 IWM_HW_ADDR0_WFPM_8000 = 0x12,
1919 IWM_HW_ADDR1_WFPM_8000 = 0x16,
1920 IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1921 IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1922 IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1924 /* NVM SW-Section offset (in words) definitions */
1925 IWM_NVM_SW_SECTION_8000 = 0x1C0,
1926 IWM_NVM_VERSION_8000 = 0,
1927 IWM_RADIO_CFG_8000 = 0,
1928 IWM_SKU_8000 = 2,
1929 IWM_N_HW_ADDRS_8000 = 3,
1931 /* NVM REGULATORY -Section offset (in words) definitions */
1932 IWM_NVM_CHANNELS_8000 = 0,
1933 IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1934 IWM_NVM_LAR_OFFSET_8000 = 0x507,
1935 IWM_NVM_LAR_ENABLED_8000 = 0x7,
1937 /* NVM calibration section offset (in words) definitions */
1938 IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1939 IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1942 /* SKU Capabilities (actual values from NVM definition) */
1943 enum nvm_sku_bits {
1944 IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),
1945 IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1),
1946 IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2),
1947 IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3),
1950 /* radio config bits (actual values from NVM definition) */
1951 #define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
1952 #define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
1953 #define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
1954 #define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
1955 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
1956 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1958 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x) (x & 0xF)
1959 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x) ((x >> 4) & 0xF)
1960 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x) ((x >> 8) & 0xF)
1961 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x) ((x >> 12) & 0xFFF)
1962 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x) ((x >> 24) & 0xF)
1963 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x) ((x >> 28) & 0xF)
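/*
 * Worked example for the 8000-family macros above: radio_cfg =
 * 0x12345678 decodes as FLAVOR = 0x8, DASH = 0x7, STEP = 0x6,
 * TYPE = 0x345, TX_ANT = 0x2 and RX_ANT = 0x1.
 */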
1965 #define DEFAULT_MAX_TX_POWER 16
1967 /*
1968 * enum iwm_nvm_channel_flags - channel flags in NVM
1969 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1970 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1971 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1972 * @IWM_NVM_CHANNEL_RADAR: radar detection required
1973 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1974 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1975 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1976 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1977 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1978 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1979 */
1980 enum iwm_nvm_channel_flags {
1981 IWM_NVM_CHANNEL_VALID = (1 << 0),
1982 IWM_NVM_CHANNEL_IBSS = (1 << 1),
1983 IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1984 IWM_NVM_CHANNEL_RADAR = (1 << 4),
1985 IWM_NVM_CHANNEL_DFS = (1 << 7),
1986 IWM_NVM_CHANNEL_WIDE = (1 << 8),
1987 IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1988 IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1989 IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1992 /*
1993 * Translate EEPROM flags to net80211.
1994 */
1995 static uint32_t
1996 iwm_eeprom_channel_flags(uint16_t ch_flags)
1998 uint32_t nflags;
2000 nflags = 0;
2001 if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2002 nflags |= IEEE80211_CHAN_PASSIVE;
2003 if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2004 nflags |= IEEE80211_CHAN_NOADHOC;
2005 if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2006 nflags |= IEEE80211_CHAN_DFS;
2007 /* Just in case. */
2008 nflags |= IEEE80211_CHAN_NOADHOC;
2011 return (nflags);
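/*
 * For example: a channel flagged VALID|IBSS|ACTIVE maps to nflags == 0,
 * a channel flagged only VALID becomes PASSIVE|NOADHOC, and any channel
 * with RADAR set picks up DFS (plus NOADHOC, as the code notes, just in
 * case).
 */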
2014 static void
2015 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2016 int maxchans, int *nchans, int ch_idx, size_t ch_num,
2017 const uint8_t bands[])
2019 const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2020 uint32_t nflags;
2021 uint16_t ch_flags;
2022 uint8_t ieee;
2023 int error;
2025 for (; ch_idx < ch_num; ch_idx++) {
2026 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2027 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2028 ieee = iwm_nvm_channels[ch_idx];
2029 else
2030 ieee = iwm_nvm_channels_8000[ch_idx];
2032 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2033 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2034 "Ch. %d Flags %x [%sGHz] - No traffic\n",
2035 ieee, ch_flags,
2036 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2037 "5.2" : "2.4");
2038 continue;
2041 nflags = iwm_eeprom_channel_flags(ch_flags);
2042 error = ieee80211_add_channel(chans, maxchans, nchans,
2043 ieee, 0, 0, nflags, bands);
2044 if (error != 0)
2045 break;
2047 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2048 "Ch. %d Flags %x [%sGHz] - Added\n",
2049 ieee, ch_flags,
2050 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2051 "5.2" : "2.4");
2055 static void
2056 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2057 struct ieee80211_channel chans[])
2059 struct iwm_softc *sc = ic->ic_softc;
2060 struct iwm_nvm_data *data = sc->nvm_data;
2061 uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
2062 size_t ch_num;
2064 memset(bands, 0, sizeof(bands));
2065 /* 1-13: 11b/g channels. */
2066 setbit(bands, IEEE80211_MODE_11B);
2067 setbit(bands, IEEE80211_MODE_11G);
2068 iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2069 IWM_NUM_2GHZ_CHANNELS - 1, bands);
2071 /* 14: 11b channel only. */
2072 clrbit(bands, IEEE80211_MODE_11G);
2073 iwm_add_channel_band(sc, chans, maxchans, nchans,
2074 IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2076 if (data->sku_cap_band_52GHz_enable) {
2077 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2078 ch_num = nitems(iwm_nvm_channels);
2079 else
2080 ch_num = nitems(iwm_nvm_channels_8000);
2081 memset(bands, 0, sizeof(bands));
2082 setbit(bands, IEEE80211_MODE_11A);
2083 iwm_add_channel_band(sc, chans, maxchans, nchans,
2084 IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2088 static void
2089 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2090 const uint16_t *mac_override, const uint16_t *nvm_hw)
2092 const uint8_t *hw_addr;
2094 if (mac_override) {
2095 static const uint8_t reserved_mac[] = {
2096 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2099 hw_addr = (const uint8_t *)(mac_override +
2100 IWM_MAC_ADDRESS_OVERRIDE_8000);
2102 /*
2103 * Store the MAC address from the MAO section.
2104 * No byte swapping is required in the MAO section.
2105 */
2106 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2108 /*
2109 * Force the use of the OTP MAC address in case of a reserved MAC
2110 * address in the NVM, or if the address is given but invalid.
2111 */
2112 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2113 !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2114 iwm_is_valid_ether_addr(data->hw_addr) &&
2115 !IEEE80211_IS_MULTICAST(data->hw_addr))
2116 return;
2118 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2119 "%s: mac address from nvm override section invalid\n",
2120 __func__);
2123 if (nvm_hw) {
2124 /* read the mac address from WFMP registers */
2125 uint32_t mac_addr0 =
2126 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2127 uint32_t mac_addr1 =
2128 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2130 hw_addr = (const uint8_t *)&mac_addr0;
2131 data->hw_addr[0] = hw_addr[3];
2132 data->hw_addr[1] = hw_addr[2];
2133 data->hw_addr[2] = hw_addr[1];
2134 data->hw_addr[3] = hw_addr[0];
2136 hw_addr = (const uint8_t *)&mac_addr1;
2137 data->hw_addr[4] = hw_addr[1];
2138 data->hw_addr[5] = hw_addr[0];
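/*
 * A worked example (on a little-endian host): if IWM_WFMP_MAC_ADDR_0
 * reads 0xaabbccdd and IWM_WFMP_MAC_ADDR_1 reads 0x0000eeff, the byte
 * picking above yields the MAC address aa:bb:cc:dd:ee:ff.
 */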
2140 return;
2143 device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2144 memset(data->hw_addr, 0, sizeof(data->hw_addr));
2147 static int
2148 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2149 const uint16_t *phy_sku)
2151 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2152 return le16_to_cpup(nvm_sw + IWM_SKU);
2154 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2157 static int
2158 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2160 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2161 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2162 else
2163 return le32_to_cpup((const uint32_t *)(nvm_sw +
2164 IWM_NVM_VERSION_8000));
2167 static int
2168 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2169 const uint16_t *phy_sku)
2171 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2172 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2174 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2177 static int
2178 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2180 int n_hw_addr;
2182 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2183 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2185 n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2187 return n_hw_addr & IWM_N_HW_ADDR_MASK;
2190 static void
2191 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2192 uint32_t radio_cfg)
2194 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2195 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2196 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2197 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2198 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2199 return;
2202 /* set the radio configuration for family 8000 */
2203 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2204 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2205 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2206 data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2207 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2208 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2211 static int
2212 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2213 const uint16_t *nvm_hw, const uint16_t *mac_override)
2215 #ifdef notyet /* for FAMILY 9000 */
2216 if (cfg->mac_addr_from_csr) {
2217 iwm_set_hw_address_from_csr(sc, data);
2218 } else
2219 #endif
2220 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2221 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2223 /* The byte order is little endian 16 bit, meaning 214365 */
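/*
 * For example, NVM bytes aa bb cc dd ee ff at IWM_HW_ADDR yield the
 * MAC address bb:aa:dd:cc:ff:ee.
 */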
2224 data->hw_addr[0] = hw_addr[1];
2225 data->hw_addr[1] = hw_addr[0];
2226 data->hw_addr[2] = hw_addr[3];
2227 data->hw_addr[3] = hw_addr[2];
2228 data->hw_addr[4] = hw_addr[5];
2229 data->hw_addr[5] = hw_addr[4];
2230 } else {
2231 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2234 if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2235 device_printf(sc->sc_dev, "no valid mac address was found\n");
2236 return EINVAL;
2239 return 0;
2242 static struct iwm_nvm_data *
2243 iwm_parse_nvm_data(struct iwm_softc *sc,
2244 const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2245 const uint16_t *nvm_calib, const uint16_t *mac_override,
2246 const uint16_t *phy_sku, const uint16_t *regulatory)
2248 struct iwm_nvm_data *data;
2249 uint32_t sku, radio_cfg;
2250 uint16_t lar_config;
2252 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2253 data = kmalloc(sizeof(*data) +
2254 IWM_NUM_CHANNELS * sizeof(uint16_t),
2255 M_DEVBUF, M_WAITOK | M_ZERO);
2256 } else {
2257 data = kmalloc(sizeof(*data) +
2258 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2259 M_DEVBUF, M_WAITOK | M_ZERO);
2261 if (!data)
2262 return NULL;
2264 data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2266 radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2267 iwm_set_radio_cfg(sc, data, radio_cfg);
2269 sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2270 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2271 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2272 data->sku_cap_11n_enable = 0;
2274 data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2276 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2277 uint16_t lar_offset = data->nvm_version < 0xE39 ?
2278 IWM_NVM_LAR_OFFSET_8000_OLD :
2279 IWM_NVM_LAR_OFFSET_8000;
2281 lar_config = le16_to_cpup(regulatory + lar_offset);
2282 data->lar_enabled = !!(lar_config &
2283 IWM_NVM_LAR_ENABLED_8000);
2286 /* If no valid mac address was found - bail out */
2287 if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2288 kfree(data, M_DEVBUF);
2289 return NULL;
2292 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2293 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2294 IWM_NUM_CHANNELS * sizeof(uint16_t));
2295 } else {
2296 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2297 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2300 return data;
2303 static void
2304 iwm_free_nvm_data(struct iwm_nvm_data *data)
2306 if (data != NULL)
2307 kfree(data, M_DEVBUF);
2310 static struct iwm_nvm_data *
2311 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2313 const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2315 /* Checking for required sections */
2316 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2317 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2318 !sections[sc->cfg->nvm_hw_section_num].data) {
2319 device_printf(sc->sc_dev,
2320 "Can't parse empty OTP/NVM sections\n");
2321 return NULL;
2323 } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2324 /* SW and REGULATORY sections are mandatory */
2325 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2326 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2327 device_printf(sc->sc_dev,
2328 "Can't parse empty OTP/NVM sections\n");
2329 return NULL;
2331 /* MAC_OVERRIDE or at least HW section must exist */
2332 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2333 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2334 device_printf(sc->sc_dev,
2335 "Can't parse mac_address, empty sections\n");
2336 return NULL;
2339 /* PHY_SKU section is mandatory in B0 */
2340 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2341 device_printf(sc->sc_dev,
2342 "Can't parse phy_sku in B0, empty sections\n");
2343 return NULL;
2345 } else {
2346 panic("unknown device family %d\n", sc->cfg->device_family);
2349 hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2350 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2351 calib = (const uint16_t *)
2352 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2353 regulatory = (const uint16_t *)
2354 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2355 mac_override = (const uint16_t *)
2356 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2357 phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2359 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2360 phy_sku, regulatory);
2363 static int
2364 iwm_nvm_init(struct iwm_softc *sc)
2366 struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2367 int i, ret, section;
2368 uint32_t size_read = 0;
2369 uint8_t *nvm_buffer, *temp;
2370 uint16_t len;
2372 memset(nvm_sections, 0, sizeof(nvm_sections));
2374 if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2375 return EINVAL;
2377 /* load NVM values from nic */
2378 /* Read From FW NVM */
2379 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2381 nvm_buffer = kmalloc(sc->cfg->eeprom_size, M_DEVBUF,
2382 M_INTWAIT | M_ZERO);
2383 if (!nvm_buffer)
2384 return ENOMEM;
2385 for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2386 /* we override the constness for initial read */
2387 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2388 &len, size_read);
2389 if (ret)
2390 continue;
2391 size_read += len;
2392 temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
2393 if (!temp) {
2394 ret = ENOMEM;
2395 break;
2397 memcpy(temp, nvm_buffer, len);
2399 nvm_sections[section].data = temp;
2400 nvm_sections[section].length = len;
2402 if (!size_read)
2403 device_printf(sc->sc_dev, "OTP is blank\n");
2404 kfree(nvm_buffer, M_DEVBUF);
2406 sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2407 if (!sc->nvm_data)
2408 return EINVAL;
2409 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2410 "nvm version = %x\n", sc->nvm_data->nvm_version);
2412 for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2413 if (nvm_sections[i].data != NULL)
2414 kfree(nvm_sections[i].data, M_DEVBUF);
2417 return 0;
2420 static int
2421 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2422 const struct iwm_fw_desc *section)
2424 struct iwm_dma_info *dma = &sc->fw_dma;
2425 uint8_t *v_addr;
2426 bus_addr_t p_addr;
2427 uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2428 int ret = 0;
2430 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2431 "%s: [%d] uCode section being loaded...\n",
2432 __func__, section_num);
2434 v_addr = dma->vaddr;
2435 p_addr = dma->paddr;
2437 for (offset = 0; offset < section->len; offset += chunk_sz) {
2438 uint32_t copy_size, dst_addr;
2439 int extended_addr = FALSE;
2441 copy_size = MIN(chunk_sz, section->len - offset);
2442 dst_addr = section->offset + offset;
2444 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2445 dst_addr <= IWM_FW_MEM_EXTENDED_END)
2446 extended_addr = TRUE;
2448 if (extended_addr)
2449 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2450 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2452 memcpy(v_addr, (const uint8_t *)section->data + offset,
2453 copy_size);
2454 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2455 ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2456 copy_size);
2458 if (extended_addr)
2459 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2460 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2462 if (ret) {
2463 device_printf(sc->sc_dev,
2464 "%s: Could not load the [%d] uCode section\n",
2465 __func__, section_num);
2466 break;
2470 return ret;
2473 /*
2474 * ucode
2475 */
2476 static int
2477 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2478 bus_addr_t phy_addr, uint32_t byte_cnt)
2480 int ret;
2482 sc->sc_fw_chunk_done = 0;
2484 if (!iwm_nic_lock(sc))
2485 return EBUSY;
2487 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2488 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2490 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2491 dst_addr);
2493 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2494 phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2496 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2497 (iwm_get_dma_hi_addr(phy_addr)
2498 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2500 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2501 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2502 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2503 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2505 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2506 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2507 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2508 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2510 iwm_nic_unlock(sc);
2512 /* wait up to 5s for this segment to load */
2513 ret = 0;
2514 while (!sc->sc_fw_chunk_done) {
2515 #if defined(__DragonFly__)
2516 ret = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", 5 * hz);
2517 #else
2518 ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", 5 * hz);
2519 #endif
2520 if (ret)
2521 break;
2524 if (ret != 0) {
2525 device_printf(sc->sc_dev,
2526 "fw chunk addr 0x%x len %d failed to load\n",
2527 dst_addr, byte_cnt);
2528 return ETIMEDOUT;
2531 return 0;
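/*
 * To summarize the register recipe above: pause the service DMA
 * channel, program the SRAM destination and the DRAM source (low 32
 * bits, then the high bits packed alongside the byte count), mark the
 * single TFD buffer as valid, re-enable the channel, and sleep until
 * the FH_TX interrupt handler wakes sc_fw_chunk_done or the 5 second
 * timeout expires.
 */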
2534 static int
2535 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2536 const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2538 int shift_param;
2539 int i, ret = 0, sec_num = 0x1;
2540 uint32_t val, last_read_idx = 0;
2542 if (cpu == 1) {
2543 shift_param = 0;
2544 *first_ucode_section = 0;
2545 } else {
2546 shift_param = 16;
2547 (*first_ucode_section)++;
2550 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2551 last_read_idx = i;
2553 /*
2554 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
2555 * CPU1 sections from the CPU2 sections.
2556 * PAGING_SEPARATOR_SECTION delimiter - separates the
2557 * CPU2 non-paged sections from the CPU2 paging sections.
2558 */
2559 if (!image->fw_sect[i].data ||
2560 image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2561 image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2562 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2563 "Break since Data not valid or Empty section, sec = %d\n",
2564 i);
2565 break;
2567 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2568 if (ret)
2569 return ret;
2571 /* Notify the ucode of the loaded section number and status */
2572 if (iwm_nic_lock(sc)) {
2573 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2574 val = val | (sec_num << shift_param);
2575 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2576 sec_num = (sec_num << 1) | 0x1;
2577 iwm_nic_unlock(sc);
2581 *first_ucode_section = last_read_idx;
2583 iwm_enable_interrupts(sc);
2585 if (iwm_nic_lock(sc)) {
2586 if (cpu == 1)
2587 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2588 else
2589 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2590 iwm_nic_unlock(sc);
2593 return 0;
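/*
 * The sec_num handshake above accumulates a mask of ones in
 * IWM_FH_UCODE_LOAD_STATUS: successive sections write 0x1, 0x3,
 * 0x7, ... (shifted left by 16 for CPU2), and the final 0xFFFF or
 * 0xFFFFFFFF write signals that the whole CPU1 or dual-CPU image has
 * been loaded.
 */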
2596 static int
2597 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2598 const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2600 int shift_param;
2601 int i, ret = 0;
2602 uint32_t last_read_idx = 0;
2604 if (cpu == 1) {
2605 shift_param = 0;
2606 *first_ucode_section = 0;
2607 } else {
2608 shift_param = 16;
2609 (*first_ucode_section)++;
2612 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2613 last_read_idx = i;
2615 /*
2616 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
2617 * CPU1 sections from the CPU2 sections.
2618 * PAGING_SEPARATOR_SECTION delimiter - separates the
2619 * CPU2 non-paged sections from the CPU2 paging sections.
2620 */
2621 if (!image->fw_sect[i].data ||
2622 image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2623 image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2624 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2625 "Break since Data not valid or Empty section, sec = %d\n",
2626 i);
2627 break;
2630 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2631 if (ret)
2632 return ret;
2635 *first_ucode_section = last_read_idx;
2637 return 0;
2641 static int
2642 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2643 const struct iwm_fw_sects *image)
2645 int ret = 0;
2646 int first_ucode_section;
2648 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2649 image->is_dual_cpus ? "Dual" : "Single");
2651 /* load to FW the binary non secured sections of CPU1 */
2652 ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2653 if (ret)
2654 return ret;
2656 if (image->is_dual_cpus) {
2657 /* set CPU2 header address */
2658 if (iwm_nic_lock(sc)) {
2659 iwm_write_prph(sc,
2660 IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2661 IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2662 iwm_nic_unlock(sc);
2665 /* load to FW the binary sections of CPU2 */
2666 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2667 &first_ucode_section);
2668 if (ret)
2669 return ret;
2672 iwm_enable_interrupts(sc);
2674 /* release CPU reset */
2675 IWM_WRITE(sc, IWM_CSR_RESET, 0);
2677 return 0;
2680 static int
2681 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2682 const struct iwm_fw_sects *image)
2684 int ret = 0;
2685 int first_ucode_section;
2687 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2688 image->is_dual_cpus ? "Dual" : "Single");
2690 /* configure the ucode to be ready to get the secured image */
2691 /* release CPU reset */
2692 if (iwm_nic_lock(sc)) {
2693 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2694 IWM_RELEASE_CPU_RESET_BIT);
2695 iwm_nic_unlock(sc);
2698 /* load to FW the binary Secured sections of CPU1 */
2699 ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2700 &first_ucode_section);
2701 if (ret)
2702 return ret;
2704 /* load to FW the binary sections of CPU2 */
2705 return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2706 &first_ucode_section);
2709 /* XXX Get rid of this definition */
2710 static inline void
2711 iwm_enable_fw_load_int(struct iwm_softc *sc)
2713 IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2714 sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2715 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2718 /* XXX Add proper rfkill support code */
2719 static int
2720 iwm_start_fw(struct iwm_softc *sc,
2721 const struct iwm_fw_sects *fw)
2723 int ret;
2725 /* This may fail if AMT took ownership of the device */
2726 if (iwm_prepare_card_hw(sc)) {
2727 device_printf(sc->sc_dev,
2728 "%s: Exit HW not ready\n", __func__);
2729 ret = EIO;
2730 goto out;
2733 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2735 iwm_disable_interrupts(sc);
2737 /* make sure rfkill handshake bits are cleared */
2738 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2739 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2740 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2742 /* clear (again), then enable host interrupts */
2743 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2745 ret = iwm_nic_init(sc);
2746 if (ret) {
2747 device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2748 goto out;
2751 /*
2752 * Now, we load the firmware and don't want to be interrupted, even
2753 * by the RF-Kill interrupt (hence we mask all the interrupts besides the
2754 * FH_TX interrupt, which is needed to load the firmware). If the
2755 * RF-Kill switch is toggled, we will find out after having loaded
2756 * the firmware and return the proper value to the caller.
2757 */
2758 iwm_enable_fw_load_int(sc);
2760 /* really make sure rfkill handshake bits are cleared */
2761 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2762 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2764 /* Load the given image to the HW */
2765 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2766 ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2767 else
2768 ret = iwm_pcie_load_given_ucode(sc, fw);
2770 /* XXX re-check RF-Kill state */
2772 out:
2773 return ret;
2776 static int
2777 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2779 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2780 .valid = htole32(valid_tx_ant),
2783 return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2784 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2787 static int
2788 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2790 struct iwm_phy_cfg_cmd phy_cfg_cmd;
2791 enum iwm_ucode_type ucode_type = sc->cur_ucode;
2793 /* Set parameters */
2794 phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2795 phy_cfg_cmd.calib_control.event_trigger =
2796 sc->sc_default_calib[ucode_type].event_trigger;
2797 phy_cfg_cmd.calib_control.flow_trigger =
2798 sc->sc_default_calib[ucode_type].flow_trigger;
2800 IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2801 "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2802 return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2803 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2806 static int
2807 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2809 struct iwm_mvm_alive_data *alive_data = data;
2810 struct iwm_mvm_alive_resp_ver1 *palive1;
2811 struct iwm_mvm_alive_resp_ver2 *palive2;
2812 struct iwm_mvm_alive_resp *palive;
2814 if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
2815 palive1 = (void *)pkt->data;
2817 sc->support_umac_log = FALSE;
2818 sc->error_event_table =
2819 le32toh(palive1->error_event_table_ptr);
2820 sc->log_event_table =
2821 le32toh(palive1->log_event_table_ptr);
2822 alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);
2824 alive_data->valid = le16toh(palive1->status) ==
2825 IWM_ALIVE_STATUS_OK;
2826 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2827 "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2828 le16toh(palive1->status), palive1->ver_type,
2829 palive1->ver_subtype, palive1->flags);
2830 } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
2831 palive2 = (void *)pkt->data;
2832 sc->error_event_table =
2833 le32toh(palive2->error_event_table_ptr);
2834 sc->log_event_table =
2835 le32toh(palive2->log_event_table_ptr);
2836 alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
2837 sc->umac_error_event_table =
2838 le32toh(palive2->error_info_addr);
2840 alive_data->valid = le16toh(palive2->status) ==
2841 IWM_ALIVE_STATUS_OK;
2842 if (sc->umac_error_event_table)
2843 sc->support_umac_log = TRUE;
2845 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2846 "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2847 le16toh(palive2->status), palive2->ver_type,
2848 palive2->ver_subtype, palive2->flags);
2850 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2851 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2852 palive2->umac_major, palive2->umac_minor);
2853 } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2854 palive = (void *)pkt->data;
2856 sc->error_event_table =
2857 le32toh(palive->error_event_table_ptr);
2858 sc->log_event_table =
2859 le32toh(palive->log_event_table_ptr);
2860 alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
2861 sc->umac_error_event_table =
2862 le32toh(palive->error_info_addr);
2864 alive_data->valid = le16toh(palive->status) ==
2865 IWM_ALIVE_STATUS_OK;
2866 if (sc->umac_error_event_table)
2867 sc->support_umac_log = TRUE;
2869 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2870 "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2871 le16toh(palive->status), palive->ver_type,
2872 palive->ver_subtype, palive->flags);
2874 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2875 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2876 le32toh(palive->umac_major),
2877 le32toh(palive->umac_minor));
2880 return TRUE;
2883 static int
2884 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2885 struct iwm_rx_packet *pkt, void *data)
2887 struct iwm_phy_db *phy_db = data;
2889 if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2890 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2891 device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2892 __func__, pkt->hdr.code);
2894 return TRUE;
2897 if (iwm_phy_db_set_section(phy_db, pkt)) {
2898 device_printf(sc->sc_dev,
2899 "%s: iwm_phy_db_set_section failed\n", __func__);
2902 return FALSE;
2905 static int
2906 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2907 enum iwm_ucode_type ucode_type)
2909 struct iwm_notification_wait alive_wait;
2910 struct iwm_mvm_alive_data alive_data;
2911 const struct iwm_fw_sects *fw;
2912 enum iwm_ucode_type old_type = sc->cur_ucode;
2913 int error;
2914 static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
2916 if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2917 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2918 error);
2919 return error;
2921 fw = &sc->sc_fw.fw_sects[ucode_type];
2922 sc->cur_ucode = ucode_type;
2923 sc->ucode_loaded = FALSE;
2925 memset(&alive_data, 0, sizeof(alive_data));
2926 iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2927 alive_cmd, NELEM(alive_cmd),
2928 iwm_alive_fn, &alive_data);
2930 error = iwm_start_fw(sc, fw);
2931 if (error) {
2932 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2933 sc->cur_ucode = old_type;
2934 iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2935 return error;
2938 /*
2939 * Some things may run in the background now, but we
2940 * just wait for the ALIVE notification here.
2941 */
2942 IWM_UNLOCK(sc);
2943 error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2944 IWM_MVM_UCODE_ALIVE_TIMEOUT);
2945 IWM_LOCK(sc);
2946 if (error) {
2947 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2948 uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2949 if (iwm_nic_lock(sc)) {
2950 a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2951 b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2952 iwm_nic_unlock(sc);
2954 device_printf(sc->sc_dev,
2955 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2956 a, b);
2958 sc->cur_ucode = old_type;
2959 return error;
2962 if (!alive_data.valid) {
2963 device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2964 __func__);
2965 sc->cur_ucode = old_type;
2966 return EIO;
2969 iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2971 /*
2972 * Configure and operate the firmware paging mechanism.
2973 * The driver configures the paging flow only once; the CPU2 paging image
2974 * is included in the IWM_UCODE_INIT image.
2975 */
2976 if (fw->paging_mem_size) {
2977 error = iwm_save_fw_paging(sc, fw);
2978 if (error) {
2979 device_printf(sc->sc_dev,
2980 "%s: failed to save the FW paging image\n",
2981 __func__);
2982 return error;
2985 error = iwm_send_paging_cmd(sc, fw);
2986 if (error) {
2987 device_printf(sc->sc_dev,
2988 "%s: failed to send the paging cmd\n", __func__);
2989 iwm_free_fw_paging(sc);
2990 return error;
2994 if (!error)
2995 sc->ucode_loaded = TRUE;
2996 return error;
2999 /*
3000 * mvm misc bits
3001 */
3003 static int
3004 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
3006 struct iwm_notification_wait calib_wait;
3007 static const uint16_t init_complete[] = {
3008 IWM_INIT_COMPLETE_NOTIF,
3009 IWM_CALIB_RES_NOTIF_PHY_DB
3011 int ret;
3013 /* do not operate with rfkill switch turned on */
3014 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3015 device_printf(sc->sc_dev,
3016 "radio is disabled by hardware switch\n");
3017 return EPERM;
3020 iwm_init_notification_wait(sc->sc_notif_wait,
3021 &calib_wait,
3022 init_complete,
3023 NELEM(init_complete),
3024 iwm_wait_phy_db_entry,
3025 sc->sc_phy_db);
3027 /* Will also start the device */
3028 ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
3029 if (ret) {
3030 device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
3031 ret);
3032 goto error;
3035 if (justnvm) {
3036 /* Read nvm */
3037 ret = iwm_nvm_init(sc);
3038 if (ret) {
3039 device_printf(sc->sc_dev, "failed to read nvm\n");
3040 goto error;
3042 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
3043 goto error;
3046 ret = iwm_send_bt_init_conf(sc);
3047 if (ret) {
3048 device_printf(sc->sc_dev,
3049 "failed to send bt coex configuration: %d\n", ret);
3050 goto error;
3053 /* Send TX valid antennas before triggering calibrations */
3054 ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
3055 if (ret) {
3056 device_printf(sc->sc_dev,
3057 "failed to send antennas before calibration: %d\n", ret);
3058 goto error;
3061 /*
3062 * Send the phy configuration command to the init uCode
3063 * to start the 16.0 uCode init image internal calibrations.
3064 */
3065 ret = iwm_send_phy_cfg_cmd(sc);
3066 if (ret) {
3067 device_printf(sc->sc_dev,
3068 "%s: Failed to run INIT calibrations: %d\n",
3069 __func__, ret);
3070 goto error;
3073 /*
3074 * Nothing to do but wait for the init complete notification
3075 * from the firmware.
3076 */
3077 IWM_UNLOCK(sc);
3078 ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3079 IWM_MVM_UCODE_CALIB_TIMEOUT);
3080 IWM_LOCK(sc);
3083 goto out;
3085 error:
3086 iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3087 out:
3088 return ret;
3091 /*
3092 * receive side
3093 */
3095 /* (re)stock rx ring, called at init-time and at runtime */
3096 static int
3097 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3099 struct iwm_rx_ring *ring = &sc->rxq;
3100 struct iwm_rx_data *data = &ring->data[idx];
3101 struct mbuf *m;
3102 bus_dmamap_t dmamap;
3103 bus_dma_segment_t seg;
3104 int nsegs, error;
3106 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3107 if (m == NULL)
3108 return ENOBUFS;
3110 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3111 #if defined(__DragonFly__)
3112 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
3113 m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
3114 #else
3115 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3116 &seg, &nsegs, BUS_DMA_NOWAIT);
3117 #endif
3118 if (error != 0) {
3119 device_printf(sc->sc_dev,
3120 "%s: can't map mbuf, error %d\n", __func__, error);
3121 m_freem(m);
3122 return error;
3125 if (data->m != NULL)
3126 bus_dmamap_unload(ring->data_dmat, data->map);
3128 /* Swap ring->spare_map with data->map */
3129 dmamap = data->map;
3130 data->map = ring->spare_map;
3131 ring->spare_map = dmamap;
3133 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3134 data->m = m;
3136 /* Update RX descriptor. */
3137 KKASSERT((seg.ds_addr & 255) == 0);
3138 ring->desc[idx] = htole32(seg.ds_addr >> 8);
3139 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3140 BUS_DMASYNC_PREWRITE);
3142 return 0;
3145 /*
3146 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3147 * values are reported by the fw as positive values - need to negate
3148 * to obtain their dBm. Account for missing antennas by replacing 0
3149 * values by -256 dBm: practically 0 power and a non-feasible 8 bit value.
3150 */
3151 static int
3152 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3154 int energy_a, energy_b, energy_c, max_energy;
3155 uint32_t val;
3157 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3158 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3159 IWM_RX_INFO_ENERGY_ANT_A_POS;
3160 energy_a = energy_a ? -energy_a : -256;
3161 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3162 IWM_RX_INFO_ENERGY_ANT_B_POS;
3163 energy_b = energy_b ? -energy_b : -256;
3164 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3165 IWM_RX_INFO_ENERGY_ANT_C_POS;
3166 energy_c = energy_c ? -energy_c : -256;
3167 max_energy = MAX(energy_a, energy_b);
3168 max_energy = MAX(max_energy, energy_c);
3170 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3171 "energy In A %d B %d C %d , and max %d\n",
3172 energy_a, energy_b, energy_c, max_energy);
3174 return max_energy;
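/*
 * Worked example, assuming the A/B/C energy fields occupy successive
 * bytes of val as the _MSK/_POS names suggest: val = 0x002a1e00 gives
 * energy_a = 0 -> -256, energy_b = 0x1e -> -30 and energy_c = 0x2a ->
 * -42, so the reported signal strength is -30 dBm.
 */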
3177 static void
3178 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3180 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3182 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3184 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3187 /*
3188 * Retrieve the average noise (in dBm) among receivers.
3189 */
3190 static int
3191 iwm_get_noise(struct iwm_softc *sc,
3192 const struct iwm_mvm_statistics_rx_non_phy *stats)
3194 int i, total, nbant, noise;
3196 total = nbant = noise = 0;
3197 for (i = 0; i < 3; i++) {
3198 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3199 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3200 __func__, i, noise);
3202 if (noise) {
3203 total += noise;
3204 nbant++;
3208 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3209 __func__, nbant, total);
3210 #if 0
3211 /* There should be at least one antenna but check anyway. */
3212 return (nbant == 0) ? -127 : (total / nbant) - 107;
3213 #else
3214 /* For now, just hard-code it to -96 to be safe */
3215 return (-96);
3216 #endif
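/*
 * With the disabled computation above, beacon silence RSSI readings of,
 * say, 40, 42 and 0 would give nbant = 2, total = 82 and a noise floor
 * of (82 / 2) - 107 = -66 dBm; for now the hard-coded -96 is returned
 * instead.
 */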
3219 static void
3220 iwm_mvm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3222 struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3224 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3225 sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3228 /*
3229 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3230 *
3231 * Handles the actual data of the Rx packet from the fw
3232 */
3233 static boolean_t
3234 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3235 boolean_t stolen)
3237 struct ieee80211com *ic = &sc->sc_ic;
3238 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3239 struct ieee80211_frame *wh;
3240 struct ieee80211_node *ni;
3241 struct ieee80211_rx_stats rxs;
3242 struct iwm_rx_phy_info *phy_info;
3243 struct iwm_rx_mpdu_res_start *rx_res;
3244 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3245 uint32_t len;
3246 uint32_t rx_pkt_status;
3247 int rssi;
3249 phy_info = &sc->sc_last_phy_info;
3250 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3251 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3252 len = le16toh(rx_res->byte_count);
3253 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3255 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3256 device_printf(sc->sc_dev,
3257 "dsp size out of range [0,20]: %d\n",
3258 phy_info->cfg_phy_cnt);
3259 return FALSE;
3262 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3263 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3264 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3265 "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3266 return FALSE; /* drop */
3269 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3270 /* Note: RSSI is absolute (ie a -ve value) */
3271 if (rssi < IWM_MIN_DBM)
3272 rssi = IWM_MIN_DBM;
3273 else if (rssi > IWM_MAX_DBM)
3274 rssi = IWM_MAX_DBM;
3276 /* Map it to relative value */
3277 rssi = rssi - sc->sc_noise;
3279 /* replenish ring for the buffer we're going to feed to the sharks */
3280 if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3281 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3282 __func__);
3283 return FALSE;
3286 m->m_data = pkt->data + sizeof(*rx_res);
3287 m->m_pkthdr.len = m->m_len = len;
3289 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3290 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3292 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3294 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3295 "%s: phy_info: channel=%d, flags=0x%08x\n",
3296 __func__,
3297 le16toh(phy_info->channel),
3298 le16toh(phy_info->phy_flags));
3300 /*
3301 * Populate an RX state struct with the provided information.
3302 */
3303 bzero(&rxs, sizeof(rxs));
3304 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3305 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3306 rxs.c_ieee = le16toh(phy_info->channel);
3307 if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3308 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3309 } else {
3310 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3312 /* rssi is in 1/2db units */
3313 rxs.rssi = rssi * 2;
3314 rxs.nf = sc->sc_noise;
3316 if (ieee80211_radiotap_active_vap(vap)) {
3317 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3319 tap->wr_flags = 0;
3320 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3321 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3322 tap->wr_chan_freq = htole16(rxs.c_freq);
3323 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3324 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3325 tap->wr_dbm_antsignal = (int8_t)rssi;
3326 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3327 tap->wr_tsft = phy_info->system_timestamp;
3328 switch (phy_info->rate) {
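/*
 * The firmware reports CCK rates in 100 kb/s units and OFDM rates
 * as 802.11a PLCP SIGNAL codes, while radiotap's wr_rate is in
 * 500 kb/s units; e.g. 110 (11 Mb/s CCK) maps to 22, and 0xd
 * (6 Mb/s OFDM) maps to 12.
 */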
3329 /* CCK rates. */
3330 case 10: tap->wr_rate = 2; break;
3331 case 20: tap->wr_rate = 4; break;
3332 case 55: tap->wr_rate = 11; break;
3333 case 110: tap->wr_rate = 22; break;
3334 /* OFDM rates. */
3335 case 0xd: tap->wr_rate = 12; break;
3336 case 0xf: tap->wr_rate = 18; break;
3337 case 0x5: tap->wr_rate = 24; break;
3338 case 0x7: tap->wr_rate = 36; break;
3339 case 0x9: tap->wr_rate = 48; break;
3340 case 0xb: tap->wr_rate = 72; break;
3341 case 0x1: tap->wr_rate = 96; break;
3342 case 0x3: tap->wr_rate = 108; break;
3343 /* Unknown rate: should not happen. */
3344 default: tap->wr_rate = 0;
3348 IWM_UNLOCK(sc);
3349 if (ni != NULL) {
3350 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3351 ieee80211_input_mimo(ni, m, &rxs);
3352 ieee80211_free_node(ni);
3353 } else {
3354 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3355 ieee80211_input_mimo_all(ic, m, &rxs);
3357 IWM_LOCK(sc);
3359 return TRUE;
3362 static int
3363 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3364 struct iwm_node *in)
3366 struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3367 struct ieee80211_node *ni = &in->in_ni;
3368 struct ieee80211vap *vap = ni->ni_vap;
3369 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3370 int failack = tx_resp->failure_frame;
3371 int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3372 boolean_t rate_matched;
3373 uint8_t tx_resp_rate;
3374 int ret;
3376 KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3378 /* Update rate control statistics. */
3379 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3380 __func__,
3381 (int) le16toh(tx_resp->status.status),
3382 (int) le16toh(tx_resp->status.sequence),
3383 tx_resp->frame_count,
3384 tx_resp->bt_kill_count,
3385 tx_resp->failure_rts,
3386 tx_resp->failure_frame,
3387 le32toh(tx_resp->initial_rate),
3388 (int) le16toh(tx_resp->wireless_media_time));
3390 tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3392 /* For rate control, ignore frames sent at different initial rate */
3393 rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3395 if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3396 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3397 "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3398 "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3401 if (status != IWM_TX_STATUS_SUCCESS &&
3402 status != IWM_TX_STATUS_DIRECT_DONE) {
3403 if (rate_matched) {
3404 ieee80211_ratectl_tx_complete(vap, ni,
3405 IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3407 ret = 1;
3408 } else {
3409 if (rate_matched) {
3410 ieee80211_ratectl_tx_complete(vap, ni,
3411 IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3413 ret = 0;
3416 if (rate_matched) {
3417 int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3418 new_rate = vap->iv_bss->ni_txrate;
3419 if (new_rate != 0 && new_rate != cur_rate) {
3420 struct iwm_node *in = IWM_NODE(vap->iv_bss);
3421 iwm_setrates(sc, in, rix);
3422 iwm_mvm_send_lq_cmd(sc, &in->in_lq, FALSE);
3426 return ret;
3429 static void
3430 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3432 struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3433 int idx = cmd_hdr->idx;
3434 int qid = cmd_hdr->qid;
3435 struct iwm_tx_ring *ring = &sc->txq[qid];
3436 struct iwm_tx_data *txd = &ring->data[idx];
3437 struct iwm_node *in = txd->in;
3438 struct mbuf *m = txd->m;
3439 int status;
3441 KASSERT(txd->done == 0, ("txd not done"));
3442 KASSERT(txd->in != NULL, ("txd without node"));
3443 KASSERT(txd->m != NULL, ("txd without mbuf"));
3445 sc->sc_tx_timer = 0;
3447 status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3449 /* Unmap and free mbuf. */
3450 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3451 bus_dmamap_unload(ring->data_dmat, txd->map);
3453 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3454 "free txd %p, in %p\n", txd, txd->in);
3455 txd->done = 1;
3456 txd->m = NULL;
3457 txd->in = NULL;
3459 ieee80211_tx_complete(&in->in_ni, m, status);
3461 if (--ring->queued < IWM_TX_RING_LOMARK) {
3462 sc->qfullmsk &= ~(1 << ring->qid);
3463 if (sc->qfullmsk == 0) {
3464 iwm_start(sc);
3469 /*
3470 * transmit side
3471 */
3473 /*
3474 * Process a "command done" firmware notification. This is where we wake up
3475 * processes waiting for a synchronous command completion.
3476 * from if_iwn
3477 */
3478 static void
3479 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3481 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3482 struct iwm_tx_data *data;
3484 if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3485 return; /* Not a command ack. */
3488 data = &ring->data[pkt->hdr.idx];
3490 /* If the command was mapped in an mbuf, free it. */
3491 if (data->m != NULL) {
3492 bus_dmamap_sync(ring->data_dmat, data->map,
3493 BUS_DMASYNC_POSTWRITE);
3494 bus_dmamap_unload(ring->data_dmat, data->map);
3495 m_freem(data->m);
3496 data->m = NULL;
3498 wakeup(&ring->desc[pkt->hdr.idx]);
3500 if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3501 device_printf(sc->sc_dev,
3502 "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3503 __func__, pkt->hdr.idx, ring->queued, ring->cur);
3504 /* XXX call iwm_force_nmi() */
3507 KKASSERT(ring->queued > 0);
3508 ring->queued--;
3509 if (ring->queued == 0)
3510 iwm_pcie_clear_cmd_in_flight(sc);
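/*
 * The consistency check above relies on the invariant that, with
 * ring->queued commands still outstanding, (idx + queued) modulo the
 * ring size equals the producer index; e.g. with cur = 5 and
 * queued = 2 the ack is expected for idx = 3.
 */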
3513 #if 0
3514 /*
3515 * necessary only for block ack mode
3516 */
3517 void
3518 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3519 uint16_t len)
3521 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3522 uint16_t w_val;
3524 scd_bc_tbl = sc->sched_dma.vaddr;
3526 len += 8; /* magic numbers came naturally from paris */
3527 len = roundup(len, 4) / 4;
3529 w_val = htole16(sta_id << 12 | len);
3531 /* Update TX scheduler. */
3532 scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3533 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3534 BUS_DMASYNC_PREWRITE);
3536 /* I really wonder what this is ?!? */
3537 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3538 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3539 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3540 BUS_DMASYNC_PREWRITE);
3543 #endif
3545 /*
3546 * Fill in the rate related information for a transmit command.
3547 */
3548 static uint8_t
3549 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3550 struct mbuf *m, struct iwm_tx_cmd *tx)
3552 struct ieee80211com *ic = &sc->sc_ic;
3553 struct ieee80211_node *ni = &in->in_ni;
3554 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
3555 const struct ieee80211_txparam *tp = ni->ni_txparms;
3556 const struct iwm_rate *rinfo;
3557 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3558 int ridx, rate_flags;
3560 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3561 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3563 if (type == IEEE80211_FC0_TYPE_MGT) {
3564 ridx = iwm_rate2ridx(sc, tp->mgmtrate);
3565 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3566 "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3567 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3568 ridx = iwm_rate2ridx(sc, tp->mcastrate);
3569 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3570 "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3571 } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3572 ridx = iwm_rate2ridx(sc, tp->ucastrate);
3573 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3574 "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3575 } else if (m->m_flags & M_EAPOL) {
3576 ridx = iwm_rate2ridx(sc, tp->mgmtrate);
3577 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3578 "%s: EAPOL (%d)\n", __func__, tp->mgmtrate);
3579 } else if (type == IEEE80211_FC0_TYPE_DATA) {
3580 /* This is the index into the programmed table */
3581 tx->initial_rate_index = 0;
3582 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3583 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA (%d)\n",
3584 __func__, ni->ni_txrate);
3585 return ni->ni_txrate;
3586 } else {
3587 ridx = iwm_rate2ridx(sc, tp->mgmtrate);
3588 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3589 "%s: DEFAULT (%d)\n", __func__, tp->mgmtrate);
3592 /*
3593 * Sanity check ridx, and provide fallback. If the rate lookup
3594 * ever fails, iwm_rate2ridx() will already print an error message.
3595 */
3596 if (ridx < 0 || ridx > IWM_RIDX_MAX) {
3597 if (ic->ic_curmode == IEEE80211_MODE_11A) {
3598 /*
3599 * XXX this assumes the mode is either 11a or not 11a;
3600 * definitely won't work for 11n.
3601 */
3602 ridx = IWM_RIDX_OFDM;
3603 } else {
3604 ridx = IWM_RIDX_CCK;
3608 rinfo = &iwm_rates[ridx];
3610 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3611 "%s: frame type=%d, ridx=%d, rate=%d, CCK=%d\n",
3612 __func__, type, ridx, rinfo->rate, !! (IWM_RIDX_IS_CCK(ridx)));
3614 /* XXX TODO: hard-coded TX antenna? */
3615 rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3616 if (IWM_RIDX_IS_CCK(ridx))
3617 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3618 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3620 return rinfo->rate;
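/*
 * For example, for the 1 Mb/s CCK entry the value programmed into
 * rate_n_flags above is (1 << IWM_RATE_MCS_ANT_POS) |
 * IWM_RATE_MCS_CCK_MSK | rinfo->plcp, i.e. the hard-coded first TX
 * antenna, CCK modulation and the rate's PLCP code.
 */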
3623 #define TB0_SIZE 16
3624 static int
3625 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3627 struct ieee80211com *ic = &sc->sc_ic;
3628 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3629 struct iwm_node *in = IWM_NODE(ni);
3630 struct iwm_tx_ring *ring;
3631 struct iwm_tx_data *data;
3632 struct iwm_tfd *desc;
3633 struct iwm_device_cmd *cmd;
3634 struct iwm_tx_cmd *tx;
3635 struct ieee80211_frame *wh;
3636 struct ieee80211_key *k = NULL;
3637 #if !defined(__DragonFly__)
3638 struct mbuf *m1;
3639 #endif
3640 uint32_t flags;
3641 u_int hdrlen;
3642 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3643 int nsegs;
3644 uint8_t rate, tid, type;
3645 int i, totlen, error, pad;
3647 wh = mtod(m, struct ieee80211_frame *);
3648 hdrlen = ieee80211_anyhdrsize(wh);
3649 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3650 tid = 0;
3651 ring = &sc->txq[ac];
3652 desc = &ring->desc[ring->cur];
3653 memset(desc, 0, sizeof(*desc));
3654 data = &ring->data[ring->cur];
3656 /* Fill out iwm_tx_cmd to send to the firmware */
3657 cmd = &ring->cmd[ring->cur];
3658 cmd->hdr.code = IWM_TX_CMD;
3659 cmd->hdr.flags = 0;
3660 cmd->hdr.qid = ring->qid;
3661 cmd->hdr.idx = ring->cur;
3663 tx = (void *)cmd->data;
3664 memset(tx, 0, sizeof(*tx));
3666 rate = iwm_tx_fill_cmd(sc, in, m, tx);
3668 /* Encrypt the frame if need be. */
3669 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3670 /* Retrieve key for TX && do software encryption. */
3671 k = ieee80211_crypto_encap(ni, m);
3672 if (k == NULL) {
3673 m_freem(m);
3674 return (ENOBUFS);
3676 /* 802.11 header may have moved. */
3677 wh = mtod(m, struct ieee80211_frame *);
3680 if (ieee80211_radiotap_active_vap(vap)) {
3681 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3683 tap->wt_flags = 0;
3684 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3685 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3686 tap->wt_rate = rate;
3687 if (k != NULL)
3688 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3689 ieee80211_radiotap_tx(vap, m);
3693 totlen = m->m_pkthdr.len;
3695 flags = 0;
3696 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3697 flags |= IWM_TX_CMD_FLG_ACK;
3700 if (type == IEEE80211_FC0_TYPE_DATA
3701 && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3702 && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3703 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3706 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3707 type != IEEE80211_FC0_TYPE_DATA)
3708 tx->sta_id = sc->sc_aux_sta.sta_id;
3709 else
3710 tx->sta_id = IWM_STATION_ID;
3712 if (type == IEEE80211_FC0_TYPE_MGT) {
3713 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3715 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3716 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3717 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3718 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3719 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3720 } else {
3721 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3723 } else {
3724 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3727 if (hdrlen & 3) {
3728 /* First segment length must be a multiple of 4. */
3729 flags |= IWM_TX_CMD_FLG_MH_PAD;
3730 pad = 4 - (hdrlen & 3);
3731 } else
3732 pad = 0;
3734 tx->driver_txop = 0;
3735 tx->next_frame_len = 0;
3737 tx->len = htole16(totlen);
3738 tx->tid_tspec = tid;
3739 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3741 /* Set physical address of "scratch area". */
3742 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3743 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3745 /* Copy 802.11 header in TX command. */
3746 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3748 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3750 tx->sec_ctl = 0;
3751 tx->tx_flags |= htole32(flags);
3753 /* Trim 802.11 header. */
3754 m_adj(m, hdrlen);
3755 #if defined(__DragonFly__)
3756 error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
3757 segs, IWM_MAX_SCATTER - 2,
3758 &nsegs, BUS_DMA_NOWAIT);
3759 #else
3760 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3761 segs, &nsegs, BUS_DMA_NOWAIT);
3762 #endif
3763 if (error != 0) {
3764 #if defined(__DragonFly__)
3765 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3766 error);
3767 m_freem(m);
3768 return error;
3769 #else
3770 if (error != EFBIG) {
3771 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3772 error);
3773 m_freem(m);
3774 return error;
3776 /* Too many DMA segments, linearize mbuf. */
3777 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3778 if (m1 == NULL) {
3779 device_printf(sc->sc_dev,
3780 "%s: could not defrag mbuf\n", __func__);
3781 m_freem(m);
3782 return (ENOBUFS);
3784 m = m1;
3786 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3787 segs, &nsegs, BUS_DMA_NOWAIT);
3788 if (error != 0) {
3789 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3790 error);
3791 m_freem(m);
3792 return error;
3794 #endif
3796 data->m = m;
3797 data->in = in;
3798 data->done = 0;
3800 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3801 "sending txd %p, in %p\n", data, data->in);
3802 KASSERT(data->in != NULL, ("node is NULL"));
3804 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3805 "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3806 ring->qid, ring->cur, totlen, nsegs,
3807 le32toh(tx->tx_flags),
3808 le32toh(tx->rate_n_flags),
3809 tx->initial_rate_index
3812 /* Fill TX descriptor. */
3813 desc->num_tbs = 2 + nsegs;
3815 desc->tbs[0].lo = htole32(data->cmd_paddr);
3816 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3817 (TB0_SIZE << 4);
3818 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3819 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3820 ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3821 + hdrlen + pad - TB0_SIZE) << 4);
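/*
 * Both TBs above point into the TX command: TB0 maps its first
 * TB0_SIZE bytes, TB1 the remainder - the rest of the command header,
 * the iwm_tx_cmd body and the (padded) 802.11 header copied in above.
 * The mbuf payload segments follow in tbs[2] onwards.
 */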
3823 /* Other DMA segments are for data payload. */
3824 for (i = 0; i < nsegs; i++) {
3825 seg = &segs[i];
3826 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3827 desc->tbs[i+2].hi_n_len =
3828 htole16(iwm_get_dma_hi_addr(seg->ds_addr)) |
3829 (seg->ds_len << 4);
3832 bus_dmamap_sync(ring->data_dmat, data->map,
3833 BUS_DMASYNC_PREWRITE);
3834 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3835 BUS_DMASYNC_PREWRITE);
3836 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3837 BUS_DMASYNC_PREWRITE);
3839 #if 0
3840 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3841 #endif
3843 /* Kick TX ring. */
3844 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3845 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
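/*
 * (The HBUS_TARG_WRPTR write above encodes the queue id in the upper
 * byte and the new ring index in the lower byte.)
 */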
3847 /* Mark TX ring as full if we reach a certain threshold. */
3848 if (++ring->queued > IWM_TX_RING_HIMARK) {
3849 sc->qfullmsk |= 1 << ring->qid;
3852 return 0;
3855 static int
3856 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3857 const struct ieee80211_bpf_params *params)
3859 struct ieee80211com *ic = ni->ni_ic;
3860 struct iwm_softc *sc = ic->ic_softc;
3861 int error = 0;
3863 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3864 "->%s begin\n", __func__);
3866 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3867 m_freem(m);
3868 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3869 "<-%s not RUNNING\n", __func__);
3870 return (ENETDOWN);
3873 IWM_LOCK(sc);
3874 /* XXX fix this */
3875 if (params == NULL) {
3876 error = iwm_tx(sc, m, ni, 0);
3877 } else {
3878 error = iwm_tx(sc, m, ni, 0);
3880 sc->sc_tx_timer = 5;
3881 IWM_UNLOCK(sc);
3883 return (error);
3887 * mvm/tx.c
3891 * Note that there are transports that buffer frames before they reach
3892 * the firmware. This means that after flush_tx_path is called, the
3893 * queue might not be empty. The race-free way to handle this is to:
3894 * 1) set the station as draining
3895 * 2) flush the Tx path
3896 * 3) wait for the transport queues to be empty
3899 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3901 int ret;
3902 struct iwm_tx_path_flush_cmd flush_cmd = {
3903 .queues_ctl = htole32(tfd_msk),
3904 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3907 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3908 sizeof(flush_cmd), &flush_cmd);
3909 if (ret)
3910 device_printf(sc->sc_dev,
3911 "Flushing tx queue failed: %d\n", ret);
3912 return ret;
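/*
 * A minimal sketch (not compiled) of the race-free sequence described
 * above.  iwm_mvm_drain_sta() is hypothetical - no such helper exists
 * in this driver - and iwm_trans_wait_tx_queue_empty() is only ever
 * referenced in a comment further below.
 */
#if 0
static int
iwm_mvm_drain_tx_path(struct iwm_softc *sc, struct iwm_node *in,
    uint32_t tfd_msk)
{
	int ret;

	/* 1) set the station as draining (hypothetical helper) */
	if ((ret = iwm_mvm_drain_sta(sc, in, TRUE)) != 0)
		return ret;
	/* 2) flush the Tx path */
	if ((ret = iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC)) != 0)
		return ret;
	/* 3) wait for the transport queues to be empty */
	return iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
}
#endif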
3915 static int
3916 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3918 struct iwm_time_quota_cmd cmd;
3919 int i, idx, ret, num_active_macs, quota, quota_rem;
3920 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3921 int n_ifs[IWM_MAX_BINDINGS] = {0, };
3922 uint16_t id;
3924 memset(&cmd, 0, sizeof(cmd));
3926 /* currently, PHY ID == binding ID */
3927 if (ivp) {
3928 id = ivp->phy_ctxt->id;
3929 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3930 colors[id] = ivp->phy_ctxt->color;
3932 if (1)
3933 n_ifs[id] = 1;
3937 * The FW's scheduling session consists of
3938 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3939 * equally between all the bindings that require quota
3941 num_active_macs = 0;
3942 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3943 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3944 num_active_macs += n_ifs[i];
3947 quota = 0;
3948 quota_rem = 0;
3949 if (num_active_macs) {
3950 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3951 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
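/*
 * Worked example, assuming the usual IWM_MVM_MAX_QUOTA of 128: with
 * three active bindings each one gets 128 / 3 == 42 fragments, and
 * the quota_rem of 2 is added to the first binding further below.
 */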
3954 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3955 if (colors[i] < 0)
3956 continue;
3958 cmd.quotas[idx].id_and_color =
3959 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3961 if (n_ifs[i] <= 0) {
3962 cmd.quotas[idx].quota = htole32(0);
3963 cmd.quotas[idx].max_duration = htole32(0);
3964 } else {
3965 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3966 cmd.quotas[idx].max_duration = htole32(0);
3968 idx++;
3971 /* Give the remainder of the session to the first binding */
3972 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3974 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3975 sizeof(cmd), &cmd);
3976 if (ret)
3977 device_printf(sc->sc_dev,
3978 "%s: Failed to send quota: %d\n", __func__, ret);
3979 return ret;
3983 * ieee80211 routines
3987 * Change to AUTH state in 80211 state machine. Roughly matches what
3988 * Linux does in bss_info_changed().
3990 static int
3991 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3993 struct ieee80211_node *ni;
3994 struct iwm_node *in;
3995 struct iwm_vap *iv = IWM_VAP(vap);
3996 uint32_t duration;
3997 int error;
4000 * XXX i have a feeling that the vap node is being
4001 * freed from underneath us. Grr.
4003 ni = ieee80211_ref_node(vap->iv_bss);
4004 in = IWM_NODE(ni);
4005 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4006 "%s: called; vap=%p, bss ni=%p\n",
4007 __func__,
4008 vap,
4009 ni);
4011 in->in_assoc = 0;
4014 * Firmware bug - it'll crash if the beacon interval is less
4015 * than 16. We can't avoid connecting at all, so refuse the
4016 * station state change, this will cause net80211 to abandon
4017 * attempts to connect to this AP, and eventually wpa_s will
4018 * blacklist the AP...
4020 if (ni->ni_intval < 16) {
4021 device_printf(sc->sc_dev,
4022 "AP %s beacon interval is %d, refusing due to firmware bug!\n",
4023 ether_sprintf(ni->ni_bssid), ni->ni_intval);
4024 error = EINVAL;
4025 goto out;
4028 error = iwm_allow_mcast(vap, sc);
4029 if (error) {
4030 device_printf(sc->sc_dev,
4031 "%s: failed to set multicast\n", __func__);
4032 goto out;
4036 * This is where it deviates from what Linux does.
4038 * Linux iwlwifi doesn't reset the nic each time, nor does it
4039 * call ctxt_add() here. Instead, it adds it during vap creation,
4040 * and always does a mac_ctx_changed().
4042 * The OpenBSD port doesn't attempt to do that - it resets things
4043 * at odd states and does the add here.
4045 * So, until the state handling is fixed (ie, we never reset
4046 * the NIC except for a firmware failure, which should drag
4047 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4048 * contexts that are required), let's do a dirty hack here.
4050 if (iv->is_uploaded) {
4051 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4052 device_printf(sc->sc_dev,
4053 "%s: failed to update MAC\n", __func__);
4054 goto out;
4056 } else {
4057 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4058 device_printf(sc->sc_dev,
4059 "%s: failed to add MAC\n", __func__);
4060 goto out;
4064 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4065 in->in_ni.ni_chan, 1, 1)) != 0) {
4066 device_printf(sc->sc_dev,
4067 "%s: failed update phy ctxt\n", __func__);
4068 goto out;
4070 iv->phy_ctxt = &sc->sc_phyctxt[0];
4072 if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4073 device_printf(sc->sc_dev,
4074 "%s: binding update cmd\n", __func__);
4075 goto out;
4078 * Authentication becomes unreliable when powersaving is left enabled
4079 * here. Powersaving will be activated again when association has
4080 * finished or is aborted.
4082 iv->ps_disabled = TRUE;
4083 error = iwm_mvm_power_update_mac(sc);
4084 iv->ps_disabled = FALSE;
4085 if (error != 0) {
4086 device_printf(sc->sc_dev,
4087 "%s: failed to update power management\n",
4088 __func__);
4089 goto out;
4091 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4092 device_printf(sc->sc_dev,
4093 "%s: failed to add sta\n", __func__);
4094 goto out;
4098 * Prevent the FW from wandering off channel during association
4099 * by "protecting" the session with a time event.
4101 /* XXX duration is in units of TU, not MS */
4102 duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4103 iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
4104 DELAY(100);
4106 error = 0;
4107 out:
4108 ieee80211_free_node(ni);
4109 return (error);
4112 static int
4113 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4115 uint32_t tfd_msk;
4118 * Ok, so *technically* the proper set of calls for going
4119 * from RUN back to SCAN is:
4121 * iwm_mvm_power_mac_disable(sc, in);
4122 * iwm_mvm_mac_ctxt_changed(sc, vap);
4123 * iwm_mvm_rm_sta(sc, in);
4124 * iwm_mvm_update_quotas(sc, NULL);
4125 * iwm_mvm_mac_ctxt_changed(sc, in);
4126 * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4127 * iwm_mvm_mac_ctxt_remove(sc, in);
4129 * However, that freezes the device no matter which permutations
4130 * and modifications are attempted. Obviously, this driver is missing
4131 * something since it works in the Linux driver, but figuring out what
4132 * is missing is a little more complicated. Now, since we're going
4133 * back to nothing anyway, we'll just do a complete device reset.
4134 * Up yours, device!
4137 * Just using 0xf for the queues mask is fine as long as we only
4138 * get here from RUN state.
4140 tfd_msk = 0xf;
4141 iwm_xmit_queue_drain(sc);
4142 iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4144 * We seem to get away with just synchronously sending the
4145 * IWM_TXPATH_FLUSH command.
4147 // iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4148 iwm_stop_device(sc);
4149 iwm_init_hw(sc);
4150 if (in)
4151 in->in_assoc = 0;
4152 return 0;
4154 #if 0
4155 int error;
4157 iwm_mvm_power_mac_disable(sc, in);
4159 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4160 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4161 return error;
4164 if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4165 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4166 return error;
4169 in->in_assoc = 0;
4170 iwm_mvm_update_quotas(sc, NULL);
4171 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4172 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4173 return error;
4175 iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4177 iwm_mvm_mac_ctxt_remove(sc, in);
4179 return error;
4180 #endif
4183 static struct ieee80211_node *
4184 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4186 return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4187 M_INTWAIT | M_ZERO);
4190 static uint8_t
4191 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4193 uint8_t plcp = rate_n_flags & 0xff;
4194 int i;
4196 for (i = 0; i <= IWM_RIDX_MAX; i++) {
4197 if (iwm_rates[i].plcp == plcp)
4198 return iwm_rates[i].rate;
4200 return 0;
4203 uint8_t
4204 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4206 int i;
4207 uint8_t rval;
4209 for (i = 0; i < rs->rs_nrates; i++) {
4210 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4211 if (rval == iwm_rates[ridx].rate)
4212 return rs->rs_rates[i];
4215 return 0;
4218 static int
4219 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4221 int i;
4223 for (i = 0; i <= IWM_RIDX_MAX; i++) {
4224 if (iwm_rates[i].rate == rate)
4225 return i;
4228 device_printf(sc->sc_dev,
4229 "%s: WARNING: device rate for %u not found!\n",
4230 __func__, rate);
4232 return -1;
4235 static void
4236 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4238 struct ieee80211_node *ni = &in->in_ni;
4239 struct iwm_lq_cmd *lq = &in->in_lq;
4240 struct ieee80211_rateset *rs = &ni->ni_rates;
4241 int nrates = rs->rs_nrates;
4242 int i, ridx, tab = 0;
4243 int txant = 0;
4245 KKASSERT(rix >= 0 && rix < nrates);
4247 if (nrates > nitems(lq->rs_table)) {
4248 device_printf(sc->sc_dev,
4249 "%s: node supports %d rates, driver handles "
4250 "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4251 return;
4253 if (nrates == 0) {
4254 device_printf(sc->sc_dev,
4255 "%s: node supports 0 rates, odd!\n", __func__);
4256 return;
4258 nrates = imin(rix + 1, nrates);
4260 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4261 "%s: nrates=%d\n", __func__, nrates);
4263 /* then construct a lq_cmd based on those */
4264 memset(lq, 0, sizeof(*lq));
4265 lq->sta_id = IWM_STATION_ID;
4267 /* For HT, always enable RTS/CTS to avoid excessive retries. */
4268 if (ni->ni_flags & IEEE80211_NODE_HT)
4269 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4272 * Are these used? (We don't do SISO or MIMO.)
4273 * They need to be set to non-zero, though, or we get an error.
4275 lq->single_stream_ant_msk = 1;
4276 lq->dual_stream_ant_msk = 1;
4279 * Build the actual rate selection table.
4280 * The lowest bits are the rates. Additionally,
4281 * CCK needs bit 9 to be set. The rest of the bits
4282 * we add to the table select the TX antenna.
4283 * Note that we add the rates highest rate first
4284 * (the opposite of ni_rates).
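/*
 * Worked example, assuming the usual iwlwifi encoding (antenna
 * position 14, CCK mask bit 9, PLCP value 10 for 1 Mbit/s): a
 * 1 Mbit/s CCK entry sent on antenna A would be
 * 10 | (1 << 14) | (1 << 9) == 0x420a.
 */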
4286 for (i = 0; i < nrates; i++) {
4287 int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4288 int nextant;
4290 /* Map 802.11 rate to HW rate index. */
4291 ridx = iwm_rate2ridx(sc, rate);
4292 if (ridx == -1)
4293 continue;
4295 if (txant == 0)
4296 txant = iwm_mvm_get_valid_tx_ant(sc);
4297 nextant = 1<<(ffs(txant)-1);
4298 txant &= ~nextant;
4300 tab = iwm_rates[ridx].plcp;
4301 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4302 if (IWM_RIDX_IS_CCK(ridx))
4303 tab |= IWM_RATE_MCS_CCK_MSK;
4304 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4305 "station rate i=%d, rate=%d, hw=%x\n",
4306 i, iwm_rates[ridx].rate, tab);
4307 lq->rs_table[i] = htole32(tab);
4309 /* then fill the rest with the lowest possible rate */
4310 for (i = nrates; i < nitems(lq->rs_table); i++) {
4311 KASSERT(tab != 0, ("invalid tab"));
4312 lq->rs_table[i] = htole32(tab);
4316 static int
4317 iwm_media_change(struct ifnet *ifp)
4319 struct ieee80211vap *vap = ifp->if_softc;
4320 struct ieee80211com *ic = vap->iv_ic;
4321 struct iwm_softc *sc = ic->ic_softc;
4322 int error;
4324 error = ieee80211_media_change(ifp);
4325 if (error != ENETRESET)
4326 return error;
4328 IWM_LOCK(sc);
4329 if (ic->ic_nrunning > 0) {
4330 iwm_stop(sc);
4331 iwm_init(sc);
4333 IWM_UNLOCK(sc);
4334 return error;
4338 static int
4339 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4341 struct iwm_vap *ivp = IWM_VAP(vap);
4342 struct ieee80211com *ic = vap->iv_ic;
4343 struct iwm_softc *sc = ic->ic_softc;
4344 struct iwm_node *in;
4345 int error;
4347 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4348 "switching state %s -> %s\n",
4349 ieee80211_state_name[vap->iv_state],
4350 ieee80211_state_name[nstate]);
4351 IEEE80211_UNLOCK(ic);
4352 IWM_LOCK(sc);
4354 if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4355 iwm_led_blink_stop(sc);
4357 /* disable beacon filtering if we're hopping out of RUN */
4358 if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4359 iwm_mvm_disable_beacon_filter(sc);
4361 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4362 in->in_assoc = 0;
4364 if (nstate == IEEE80211_S_INIT) {
4365 IWM_UNLOCK(sc);
4366 IEEE80211_LOCK(ic);
4367 error = ivp->iv_newstate(vap, nstate, arg);
4368 IEEE80211_UNLOCK(ic);
4369 IWM_LOCK(sc);
4370 iwm_release(sc, NULL);
4371 IWM_UNLOCK(sc);
4372 IEEE80211_LOCK(ic);
4373 return error;
4377 * It's impossible to directly go RUN->SCAN. If we iwm_release()
4378 * above then the card will be completely reinitialized,
4379 * so the driver must do everything necessary to bring the card
4380 * from INIT to SCAN.
4382 * Additionally, upon receiving deauth frame from AP,
4383 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4384 * state. This will also fail with this driver, so bring the FSM
4385 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4387 * XXX TODO: fix this for FreeBSD!
4389 if (nstate == IEEE80211_S_SCAN ||
4390 nstate == IEEE80211_S_AUTH ||
4391 nstate == IEEE80211_S_ASSOC) {
4392 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4393 "Force transition to INIT; MGT=%d\n", arg);
4394 IWM_UNLOCK(sc);
4395 IEEE80211_LOCK(ic);
4396 /* Always pass arg as -1 since we can't Tx right now. */
4398 * XXX arg is just ignored anyway when transitioning
4399 * to IEEE80211_S_INIT.
4401 vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4402 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4403 "Going INIT->SCAN\n");
4404 nstate = IEEE80211_S_SCAN;
4405 IEEE80211_UNLOCK(ic);
4406 IWM_LOCK(sc);
4410 switch (nstate) {
4411 case IEEE80211_S_INIT:
4412 case IEEE80211_S_SCAN:
4413 if (vap->iv_state == IEEE80211_S_AUTH ||
4414 vap->iv_state == IEEE80211_S_ASSOC) {
4415 int myerr;
4416 IWM_UNLOCK(sc);
4417 IEEE80211_LOCK(ic);
4418 myerr = ivp->iv_newstate(vap, nstate, arg);
4419 IEEE80211_UNLOCK(ic);
4420 IWM_LOCK(sc);
4421 error = iwm_mvm_rm_sta(sc, vap, FALSE);
4422 if (error) {
4423 device_printf(sc->sc_dev,
4424 "%s: Failed to remove station: %d\n",
4425 __func__, error);
4427 error = iwm_mvm_mac_ctxt_changed(sc, vap);
4428 if (error) {
4429 device_printf(sc->sc_dev,
4430 "%s: Failed to change mac context: %d\n",
4431 __func__, error);
4433 error = iwm_mvm_binding_remove_vif(sc, ivp);
4434 if (error) {
4435 device_printf(sc->sc_dev,
4436 "%s: Failed to remove channel ctx: %d\n",
4437 __func__, error);
4439 ivp->phy_ctxt = NULL;
4440 error = iwm_mvm_power_update_mac(sc);
4441 if (error != 0) {
4442 device_printf(sc->sc_dev,
4443 "%s: failed to update power management\n",
4444 __func__);
4446 IWM_UNLOCK(sc);
4447 IEEE80211_LOCK(ic);
4448 return myerr;
4450 break;
4452 case IEEE80211_S_AUTH:
4453 if ((error = iwm_auth(vap, sc)) != 0) {
4454 device_printf(sc->sc_dev,
4455 "%s: could not move to auth state: %d\n",
4456 __func__, error);
4458 break;
4460 case IEEE80211_S_ASSOC:
4462 * EBS may be disabled due to previous failures reported by FW.
4463 * Reset EBS status here assuming environment has been changed.
4465 sc->last_ebs_successful = TRUE;
4466 break;
4468 case IEEE80211_S_RUN:
4469 in = IWM_NODE(vap->iv_bss);
4470 /* Update the association state, now that we have it all */
4471 /* (e.g. the associd comes in at this point) */
4472 error = iwm_mvm_update_sta(sc, in);
4473 if (error != 0) {
4474 device_printf(sc->sc_dev,
4475 "%s: failed to update STA\n", __func__);
4476 IWM_UNLOCK(sc);
4477 IEEE80211_LOCK(ic);
4478 return error;
4480 in->in_assoc = 1;
4481 error = iwm_mvm_mac_ctxt_changed(sc, vap);
4482 if (error != 0) {
4483 device_printf(sc->sc_dev,
4484 "%s: failed to update MAC: %d\n", __func__, error);
4487 iwm_mvm_sf_update(sc, vap, FALSE);
4488 iwm_mvm_enable_beacon_filter(sc, ivp);
4489 iwm_mvm_power_update_mac(sc);
4490 iwm_mvm_update_quotas(sc, ivp);
4491 int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4492 iwm_setrates(sc, in, rix);
4494 if ((error = iwm_mvm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4495 device_printf(sc->sc_dev,
4496 "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4499 iwm_mvm_led_enable(sc);
4500 break;
4502 default:
4503 break;
4505 IWM_UNLOCK(sc);
4506 IEEE80211_LOCK(ic);
4508 return (ivp->iv_newstate(vap, nstate, arg));
4511 void
4512 iwm_endscan_cb(void *arg, int pending)
4514 struct iwm_softc *sc = arg;
4515 struct ieee80211com *ic = &sc->sc_ic;
4517 IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4518 "%s: scan ended\n",
4519 __func__);
4521 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4524 static int
4525 iwm_send_bt_init_conf(struct iwm_softc *sc)
4527 struct iwm_bt_coex_cmd bt_cmd;
4529 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4530 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4532 return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4533 &bt_cmd);
4536 static boolean_t
4537 iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4539 boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4540 boolean_t tlv_lar = fw_has_capa(&sc->ucode_capa,
4541 IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4543 if (iwm_lar_disable)
4544 return FALSE;
4547 * Enable LAR only if it is supported by the FW (TLV) and
4548 * enabled in the NVM.
4550 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4551 return nvm_lar && tlv_lar;
4552 else
4553 return tlv_lar;
4556 static boolean_t
4557 iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4559 return fw_has_api(&sc->ucode_capa,
4560 IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4561 fw_has_capa(&sc->ucode_capa,
4562 IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4565 static int
4566 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4568 struct iwm_mcc_update_cmd mcc_cmd;
4569 struct iwm_host_cmd hcmd = {
4570 .id = IWM_MCC_UPDATE_CMD,
4571 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4572 .data = { &mcc_cmd },
4574 int ret;
4575 #ifdef IWM_DEBUG
4576 struct iwm_rx_packet *pkt;
4577 struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4578 struct iwm_mcc_update_resp *mcc_resp;
4579 int n_channels;
4580 uint16_t mcc;
4581 #endif
4582 int resp_v2 = fw_has_capa(&sc->ucode_capa,
4583 IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4585 if (!iwm_mvm_is_lar_supported(sc)) {
4586 IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4587 __func__);
4588 return 0;
4591 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4592 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
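/*
 * e.g. the "ZZ" alpha2 passed in from iwm_init_hw() encodes to
 * ('Z' << 8) | 'Z' == 0x5a5a.
 */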
4593 if (iwm_mvm_is_wifi_mcc_supported(sc))
4594 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4595 else
4596 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4598 if (resp_v2)
4599 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4600 else
4601 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4603 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4604 "send MCC update to FW with '%c%c' src = %d\n",
4605 alpha2[0], alpha2[1], mcc_cmd.source_id);
4607 ret = iwm_send_cmd(sc, &hcmd);
4608 if (ret)
4609 return ret;
4611 #ifdef IWM_DEBUG
4612 pkt = hcmd.resp_pkt;
4614 /* Extract MCC response */
4615 if (resp_v2) {
4616 mcc_resp = (void *)pkt->data;
4617 mcc = mcc_resp->mcc;
4618 n_channels = le32toh(mcc_resp->n_channels);
4619 } else {
4620 mcc_resp_v1 = (void *)pkt->data;
4621 mcc = mcc_resp_v1->mcc;
4622 n_channels = le32toh(mcc_resp_v1->n_channels);
4625 /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4626 if (mcc == 0)
4627 mcc = 0x3030; /* "00" - world */
4629 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4630 "regulatory domain '%c%c' (%d channels available)\n",
4631 mcc >> 8, mcc & 0xff, n_channels);
4632 #endif
4633 iwm_free_resp(sc, &hcmd);
4635 return 0;
4638 static void
4639 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4641 struct iwm_host_cmd cmd = {
4642 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4643 .len = { sizeof(uint32_t), },
4644 .data = { &backoff, },
4647 if (iwm_send_cmd(sc, &cmd) != 0) {
4648 device_printf(sc->sc_dev,
4649 "failed to change thermal tx backoff\n");
4653 static int
4654 iwm_init_hw(struct iwm_softc *sc)
4656 struct ieee80211com *ic = &sc->sc_ic;
4657 int error, i, ac;
4659 sc->sf_state = IWM_SF_UNINIT;
4661 if ((error = iwm_start_hw(sc)) != 0) {
4662 kprintf("iwm_start_hw: failed %d\n", error);
4663 return error;
4666 if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4667 kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
4668 return error;
4672 * We should stop and restart the HW, since the INIT
4673 * image has just been loaded.
4675 iwm_stop_device(sc);
4676 sc->sc_ps_disabled = FALSE;
4677 if ((error = iwm_start_hw(sc)) != 0) {
4678 device_printf(sc->sc_dev, "could not initialize hardware\n");
4679 return error;
4682 /* Restart, this time with the regular firmware */
4683 error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4684 if (error) {
4685 device_printf(sc->sc_dev, "could not load firmware\n");
4686 goto error;
4689 error = iwm_mvm_sf_update(sc, NULL, FALSE);
4690 if (error)
4691 device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4693 if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4694 device_printf(sc->sc_dev, "bt init conf failed\n");
4695 goto error;
4698 error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4699 if (error != 0) {
4700 device_printf(sc->sc_dev, "antenna config failed\n");
4701 goto error;
4704 /* Send phy db control command and then phy db calibration */
4705 if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4706 goto error;
4708 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4709 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4710 goto error;
4713 /* Add auxiliary station for scanning */
4714 if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4715 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4716 goto error;
4719 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4721 * The channel used here isn't relevant as it's
4722 * going to be overwritten in the other flows.
4723 * For now use the first channel we have.
4725 if ((error = iwm_mvm_phy_ctxt_add(sc,
4726 &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4727 goto error;
4730 /* Initialize tx backoffs to the minimum. */
4731 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4732 iwm_mvm_tt_tx_backoff(sc, 0);
4734 error = iwm_mvm_power_update_device(sc);
4735 if (error)
4736 goto error;
4738 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4739 goto error;
4741 if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4742 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4743 goto error;
4746 /* Enable Tx queues. */
4747 for (ac = 0; ac < WME_NUM_AC; ac++) {
4748 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4749 iwm_mvm_ac_to_tx_fifo[ac]);
4750 if (error)
4751 goto error;
4754 if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4755 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4756 goto error;
4759 return 0;
4761 error:
4762 iwm_stop_device(sc);
4763 return error;
4766 /* Allow multicast from our BSSID. */
4767 static int
4768 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4770 struct ieee80211_node *ni = vap->iv_bss;
4771 struct iwm_mcast_filter_cmd *cmd;
4772 size_t size;
4773 int error;
4775 size = roundup(sizeof(*cmd), 4);
4776 cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4777 if (cmd == NULL)
4778 return ENOMEM;
4779 cmd->filter_own = 1;
4780 cmd->port_id = 0;
4781 cmd->count = 0;
4782 cmd->pass_all = 1;
4783 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4785 error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4786 IWM_CMD_SYNC, size, cmd);
4787 kfree(cmd, M_DEVBUF);
4789 return (error);
4793 * ifnet interfaces
4796 static void
4797 iwm_init(struct iwm_softc *sc)
4799 int error;
4801 if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4802 return;
4804 sc->sc_generation++;
4805 sc->sc_flags &= ~IWM_FLAG_STOPPED;
4807 if ((error = iwm_init_hw(sc)) != 0) {
4808 kprintf("iwm_init_hw failed %d\n", error);
4809 iwm_stop(sc);
4810 return;
4814 * Ok, firmware loaded and we are jogging
4816 sc->sc_flags |= IWM_FLAG_HW_INITED;
4817 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4820 static int
4821 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4823 struct iwm_softc *sc;
4824 int error;
4826 sc = ic->ic_softc;
4828 IWM_LOCK(sc);
4829 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4830 IWM_UNLOCK(sc);
4831 return (ENXIO);
4833 error = mbufq_enqueue(&sc->sc_snd, m);
4834 if (error) {
4835 IWM_UNLOCK(sc);
4836 return (error);
4838 iwm_start(sc);
4839 IWM_UNLOCK(sc);
4840 return (0);
4844 * Dequeue packets from sendq and call send.
4846 static void
4847 iwm_start(struct iwm_softc *sc)
4849 struct ieee80211_node *ni;
4850 struct mbuf *m;
4851 int ac = 0;
4853 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4854 while (sc->qfullmsk == 0 &&
4855 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4856 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4857 if (iwm_tx(sc, m, ni, ac) != 0) {
4858 if_inc_counter(ni->ni_vap->iv_ifp,
4859 IFCOUNTER_OERRORS, 1);
4860 ieee80211_free_node(ni);
4861 continue;
4863 sc->sc_tx_timer = 15;
4865 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4868 static void
4869 iwm_stop(struct iwm_softc *sc)
4872 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4873 sc->sc_flags |= IWM_FLAG_STOPPED;
4874 sc->sc_generation++;
4875 iwm_led_blink_stop(sc);
4876 sc->sc_tx_timer = 0;
4877 iwm_stop_device(sc);
4878 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4881 static void
4882 iwm_watchdog(void *arg)
4884 struct iwm_softc *sc = arg;
4886 if (sc->sc_tx_timer > 0) {
4887 if (--sc->sc_tx_timer == 0) {
4888 device_printf(sc->sc_dev, "device timeout\n");
4889 #ifdef IWM_DEBUG
4890 iwm_nic_error(sc);
4891 #endif
4892 iwm_stop(sc);
4893 #if defined(__DragonFly__)
4894 ++sc->sc_ic.ic_oerrors;
4895 #else
4896 counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4897 #endif
4898 return;
4901 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4904 static void
4905 iwm_parent(struct ieee80211com *ic)
4907 struct iwm_softc *sc = ic->ic_softc;
4908 int startall = 0;
4910 IWM_LOCK(sc);
4911 if (ic->ic_nrunning > 0) {
4912 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4913 iwm_init(sc);
4914 startall = 1;
4916 } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4917 iwm_stop(sc);
4918 IWM_UNLOCK(sc);
4919 if (startall)
4920 ieee80211_start_all(ic);
4924 * The interrupt side of things
4928 * error dumping routines are from iwlwifi/mvm/utils.c
4932 * Note: This structure is read from the device with IO accesses,
4933 * and the reading already does the endian conversion. As it is
4934 * read with uint32_t-sized accesses, any members with a different size
4935 * need to be ordered correctly though!
4937 struct iwm_error_event_table {
4938 uint32_t valid; /* (nonzero) valid, (0) log is empty */
4939 uint32_t error_id; /* type of error */
4940 uint32_t trm_hw_status0; /* TRM HW status */
4941 uint32_t trm_hw_status1; /* TRM HW status */
4942 uint32_t blink2; /* branch link */
4943 uint32_t ilink1; /* interrupt link */
4944 uint32_t ilink2; /* interrupt link */
4945 uint32_t data1; /* error-specific data */
4946 uint32_t data2; /* error-specific data */
4947 uint32_t data3; /* error-specific data */
4948 uint32_t bcon_time; /* beacon timer */
4949 uint32_t tsf_low; /* network timestamp function timer */
4950 uint32_t tsf_hi; /* network timestamp function timer */
4951 uint32_t gp1; /* GP1 timer register */
4952 uint32_t gp2; /* GP2 timer register */
4953 uint32_t fw_rev_type; /* firmware revision type */
4954 uint32_t major; /* uCode version major */
4955 uint32_t minor; /* uCode version minor */
4956 uint32_t hw_ver; /* HW Silicon version */
4957 uint32_t brd_ver; /* HW board version */
4958 uint32_t log_pc; /* log program counter */
4959 uint32_t frame_ptr; /* frame pointer */
4960 uint32_t stack_ptr; /* stack pointer */
4961 uint32_t hcmd; /* last host command header */
4962 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
4963 * rxtx_flag */
4964 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
4965 * host_flag */
4966 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
4967 * enc_flag */
4968 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
4969 * time_flag */
4970 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
4971 * wico interrupt */
4972 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
4973 uint32_t wait_event; /* wait event() caller address */
4974 uint32_t l2p_control; /* L2pControlField */
4975 uint32_t l2p_duration; /* L2pDurationField */
4976 uint32_t l2p_mhvalid; /* L2pMhValidBits */
4977 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
4978 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
4979 * (LMPM_PMG_SEL) */
4980 uint32_t u_timestamp; /* date and time of the
4981 * compilation */
4982 uint32_t flow_handler; /* FH read/write pointers, RX credit */
4983 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4986 * UMAC error struct - relevant starting from family 8000 chip.
4987 * Note: This structure is read from the device with IO accesses,
4988 * and the reading already does the endian conversion. As it is
4989 * read with u32-sized accesses, any members with a different size
4990 * need to be ordered correctly though!
4992 struct iwm_umac_error_event_table {
4993 uint32_t valid; /* (nonzero) valid, (0) log is empty */
4994 uint32_t error_id; /* type of error */
4995 uint32_t blink1; /* branch link */
4996 uint32_t blink2; /* branch link */
4997 uint32_t ilink1; /* interrupt link */
4998 uint32_t ilink2; /* interrupt link */
4999 uint32_t data1; /* error-specific data */
5000 uint32_t data2; /* error-specific data */
5001 uint32_t data3; /* error-specific data */
5002 uint32_t umac_major;
5003 uint32_t umac_minor;
5004 uint32_t frame_pointer; /* core register 27 */
5005 uint32_t stack_pointer; /* core register 28 */
5006 uint32_t cmd_header; /* latest host cmd sent to UMAC */
5007 uint32_t nic_isr_pref; /* ISR status register */
5008 } __packed;
5010 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
5011 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
5013 #ifdef IWM_DEBUG
5014 struct {
5015 const char *name;
5016 uint8_t num;
5017 } advanced_lookup[] = {
5018 { "NMI_INTERRUPT_WDG", 0x34 },
5019 { "SYSASSERT", 0x35 },
5020 { "UCODE_VERSION_MISMATCH", 0x37 },
5021 { "BAD_COMMAND", 0x38 },
5022 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5023 { "FATAL_ERROR", 0x3D },
5024 { "NMI_TRM_HW_ERR", 0x46 },
5025 { "NMI_INTERRUPT_TRM", 0x4C },
5026 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5027 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5028 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5029 { "NMI_INTERRUPT_HOST", 0x66 },
5030 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
5031 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
5032 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5033 { "ADVANCED_SYSASSERT", 0 },
5036 static const char *
5037 iwm_desc_lookup(uint32_t num)
5039 int i;
5041 for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5042 if (advanced_lookup[i].num == num)
5043 return advanced_lookup[i].name;
5045 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5046 return advanced_lookup[i].name;
5049 static void
5050 iwm_nic_umac_error(struct iwm_softc *sc)
5052 struct iwm_umac_error_event_table table;
5053 uint32_t base;
5055 base = sc->umac_error_event_table;
5057 if (base < 0x800000) {
5058 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5059 base);
5060 return;
5063 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5064 device_printf(sc->sc_dev, "reading errlog failed\n");
5065 return;
5068 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5069 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5070 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5071 sc->sc_flags, table.valid);
5074 device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5075 iwm_desc_lookup(table.error_id));
5076 device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5077 device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5078 device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5079 table.ilink1);
5080 device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5081 table.ilink2);
5082 device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5083 device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5084 device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5085 device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5086 device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5087 device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5088 table.frame_pointer);
5089 device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5090 table.stack_pointer);
5091 device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5092 device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5093 table.nic_isr_pref);
5097 * Support for dumping the error log seemed like a good idea ...
5098 * but it's mostly hex junk and the only sensible thing is the
5099 * hw/ucode revision (which we know anyway). Since it's here,
5100 * I'll just leave it in, just in case e.g. the Intel guys want to
5101 * help us decipher some "ADVANCED_SYSASSERT" later.
5103 static void
5104 iwm_nic_error(struct iwm_softc *sc)
5106 struct iwm_error_event_table table;
5107 uint32_t base;
5109 device_printf(sc->sc_dev, "dumping device error log\n");
5110 base = sc->error_event_table;
5111 if (base < 0x800000) {
5112 device_printf(sc->sc_dev,
5113 "Invalid error log pointer 0x%08x\n", base);
5114 return;
5117 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5118 device_printf(sc->sc_dev, "reading errlog failed\n");
5119 return;
5122 if (!table.valid) {
5123 device_printf(sc->sc_dev, "errlog not found, skipping\n");
5124 return;
5127 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5128 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5129 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5130 sc->sc_flags, table.valid);
5133 device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5134 iwm_desc_lookup(table.error_id));
5135 device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5136 table.trm_hw_status0);
5137 device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5138 table.trm_hw_status1);
5139 device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5140 device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5141 device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5142 device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5143 device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5144 device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5145 device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5146 device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5147 device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5148 device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5149 device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5150 device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5151 table.fw_rev_type);
5152 device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5153 device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5154 device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5155 device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5156 device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5157 device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5158 device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5159 device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5160 device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5161 device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5162 device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5163 device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5164 device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5165 device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5166 device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5167 device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5168 device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5169 device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5170 device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5172 if (sc->umac_error_event_table)
5173 iwm_nic_umac_error(sc);
5175 #endif
5177 static void
5178 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5180 struct ieee80211com *ic = &sc->sc_ic;
5181 struct iwm_cmd_response *cresp;
5182 struct mbuf *m1;
5183 uint32_t offset = 0;
5184 uint32_t maxoff = IWM_RBUF_SIZE;
5185 uint32_t nextoff;
5186 boolean_t stolen = FALSE;
5188 #define HAVEROOM(a) \
5189 ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
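/*
 * i.e. HAVEROOM(offset) is true while at least a status word plus a
 * command header still fit in the receive buffer, so it is safe to
 * read another iwm_rx_packet header at that offset.
 */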
5191 while (HAVEROOM(offset)) {
5192 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5193 offset);
5194 int qid, idx, code, len;
5196 qid = pkt->hdr.qid;
5197 idx = pkt->hdr.idx;
5199 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5202 * We randomly get these from the firmware; no idea why. They
5203 * at least seem harmless, so just ignore them for now.
5205 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5206 pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5207 break;
5210 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5211 "rx packet qid=%d idx=%d type=%x\n",
5212 qid & ~0x80, pkt->hdr.idx, code);
5214 len = iwm_rx_packet_len(pkt);
5215 len += sizeof(uint32_t); /* account for status word */
5216 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5218 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5220 switch (code) {
5221 case IWM_REPLY_RX_PHY_CMD:
5222 iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5223 break;
5225 case IWM_REPLY_RX_MPDU_CMD: {
5227 * If this is the last frame in the RX buffer, we
5228 * can directly feed the mbuf to the sharks here.
5230 struct iwm_rx_packet *nextpkt = mtodoff(m,
5231 struct iwm_rx_packet *, nextoff);
5232 if (!HAVEROOM(nextoff) ||
5233 (nextpkt->hdr.code == 0 &&
5234 (nextpkt->hdr.qid & ~0x80) == 0 &&
5235 nextpkt->hdr.idx == 0) ||
5236 (nextpkt->len_n_flags ==
5237 htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5238 if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5239 stolen = FALSE;
5240 /* Make sure we abort the loop */
5241 nextoff = maxoff;
5243 break;
5247 * Use m_copym instead of m_split, because that
5248 * makes it easier to keep a valid rx buffer in
5249 * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5251 * We need to start m_copym() at offset 0, to get the
5252 * M_PKTHDR flag preserved.
5254 m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5255 if (m1) {
5256 if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5257 stolen = TRUE;
5258 else
5259 m_freem(m1);
5261 break;
5264 case IWM_TX_CMD:
5265 iwm_mvm_rx_tx_cmd(sc, pkt);
5266 break;
5268 case IWM_MISSED_BEACONS_NOTIFICATION: {
5269 struct iwm_missed_beacons_notif *resp;
5270 int missed;
5272 /* XXX look at mac_id to determine interface ID */
5273 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5275 resp = (void *)pkt->data;
5276 missed = le32toh(resp->consec_missed_beacons);
5278 IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5279 "%s: MISSED_BEACON: mac_id=%d, "
5280 "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5281 "num_rx=%d\n",
5282 __func__,
5283 le32toh(resp->mac_id),
5284 le32toh(resp->consec_missed_beacons_since_last_rx),
5285 le32toh(resp->consec_missed_beacons),
5286 le32toh(resp->num_expected_beacons),
5287 le32toh(resp->num_recvd_beacons));
5289 /* Be paranoid */
5290 if (vap == NULL)
5291 break;
5293 /* XXX no net80211 locking? */
5294 if (vap->iv_state == IEEE80211_S_RUN &&
5295 (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5296 if (missed > vap->iv_bmissthreshold) {
5297 /* XXX bad locking; turn into task */
5298 IWM_UNLOCK(sc);
5299 ieee80211_beacon_miss(ic);
5300 IWM_LOCK(sc);
5304 break; }
5306 case IWM_MFUART_LOAD_NOTIFICATION:
5307 break;
5309 case IWM_MVM_ALIVE:
5310 break;
5312 case IWM_CALIB_RES_NOTIF_PHY_DB:
5313 break;
5315 case IWM_STATISTICS_NOTIFICATION:
5316 iwm_mvm_handle_rx_statistics(sc, pkt);
5317 break;
5319 case IWM_NVM_ACCESS_CMD:
5320 case IWM_MCC_UPDATE_CMD:
5321 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5322 memcpy(sc->sc_cmd_resp,
5323 pkt, sizeof(sc->sc_cmd_resp));
5325 break;
5327 case IWM_MCC_CHUB_UPDATE_CMD: {
5328 struct iwm_mcc_chub_notif *notif;
5329 notif = (void *)pkt->data;
5331 sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5332 sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5333 sc->sc_fw_mcc[2] = '\0';
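/*
 * e.g. an mcc of 0x5553 decodes to the country code "US"
 * (illustrative value).
 */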
5334 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5335 "fw source %d sent CC '%s'\n",
5336 notif->source_id, sc->sc_fw_mcc);
5337 break;
5340 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5341 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5342 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5343 struct iwm_dts_measurement_notif_v1 *notif;
5345 if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5346 device_printf(sc->sc_dev,
5347 "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5348 break;
5350 notif = (void *)pkt->data;
5351 IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5352 "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5353 notif->temp);
5354 break;
5357 case IWM_PHY_CONFIGURATION_CMD:
5358 case IWM_TX_ANT_CONFIGURATION_CMD:
5359 case IWM_ADD_STA:
5360 case IWM_MAC_CONTEXT_CMD:
5361 case IWM_REPLY_SF_CFG_CMD:
5362 case IWM_POWER_TABLE_CMD:
5363 case IWM_PHY_CONTEXT_CMD:
5364 case IWM_BINDING_CONTEXT_CMD:
5365 case IWM_TIME_EVENT_CMD:
5366 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5367 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5368 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5369 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5370 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5371 case IWM_REPLY_BEACON_FILTERING_CMD:
5372 case IWM_MAC_PM_POWER_TABLE:
5373 case IWM_TIME_QUOTA_CMD:
5374 case IWM_REMOVE_STA:
5375 case IWM_TXPATH_FLUSH:
5376 case IWM_LQ_CMD:
5377 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5378 IWM_FW_PAGING_BLOCK_CMD):
5379 case IWM_BT_CONFIG:
5380 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5381 cresp = (void *)pkt->data;
5382 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5383 memcpy(sc->sc_cmd_resp,
5384 pkt, sizeof(*pkt)+sizeof(*cresp));
5386 break;
5388 /* ignore */
5389 case IWM_PHY_DB_CMD:
5390 break;
5392 case IWM_INIT_COMPLETE_NOTIF:
5393 break;
5395 case IWM_SCAN_OFFLOAD_COMPLETE:
5396 iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5397 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5398 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5399 ieee80211_runtask(ic, &sc->sc_es_task);
5401 break;
5403 case IWM_SCAN_ITERATION_COMPLETE: {
5404 struct iwm_lmac_scan_complete_notif *notif;
5405 notif = (void *)pkt->data;
5406 break;
5409 case IWM_SCAN_COMPLETE_UMAC:
5410 iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5411 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5412 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5413 ieee80211_runtask(ic, &sc->sc_es_task);
5415 break;
5417 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5418 struct iwm_umac_scan_iter_complete_notif *notif;
5419 notif = (void *)pkt->data;
5421 IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5422 "complete, status=0x%x, %d channels scanned\n",
5423 notif->status, notif->scanned_channels);
5424 break;
5427 case IWM_REPLY_ERROR: {
5428 struct iwm_error_resp *resp;
5429 resp = (void *)pkt->data;
5431 device_printf(sc->sc_dev,
5432 "firmware error 0x%x, cmd 0x%x\n",
5433 le32toh(resp->error_type),
5434 resp->cmd_id);
5435 break;
5438 case IWM_TIME_EVENT_NOTIFICATION: {
5439 struct iwm_time_event_notif *notif;
5440 notif = (void *)pkt->data;
5442 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5443 "TE notif status = 0x%x action = 0x%x\n",
5444 notif->status, notif->action);
5445 break;
5449 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5450 * messages. Just ignore them for now.
5452 case IWM_DEBUG_LOG_MSG:
5453 break;
5455 case IWM_MCAST_FILTER_CMD:
5456 break;
5458 case IWM_SCD_QUEUE_CFG: {
5459 struct iwm_scd_txq_cfg_rsp *rsp;
5460 rsp = (void *)pkt->data;
5462 IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5463 "queue cfg token=0x%x sta_id=%d "
5464 "tid=%d scd_queue=%d\n",
5465 rsp->token, rsp->sta_id, rsp->tid,
5466 rsp->scd_queue);
5467 break;
5470 default:
5471 device_printf(sc->sc_dev,
5472 "frame %d/%d %x UNHANDLED (this should "
5473 "not happen)\n", qid & ~0x80, idx,
5474 pkt->len_n_flags);
5475 break;
5479 * Why test bit 0x80? The Linux driver:
5481 * There is one exception: uCode sets bit 15 when it
5482 * originates the response/notification, i.e. when the
5483 * response/notification is not a direct response to a
5484 * command sent by the driver. For example, uCode issues
5485 * IWM_REPLY_RX when it sends a received frame to the driver;
5486 * it is not a direct response to any driver command.
5488 * Ok, so since when is 7 == 15? Well, the Linux driver
5489 * uses a slightly different format for pkt->hdr, and "qid"
5490 * is actually the upper byte of a two-byte field.
5492 if (!(qid & (1 << 7)))
5493 iwm_cmd_done(sc, pkt);
5495 offset = nextoff;
5497 if (stolen)
5498 m_freem(m);
5499 #undef HAVEROOM
5503 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5504 * Basic structure from if_iwn
5506 static void
5507 iwm_notif_intr(struct iwm_softc *sc)
5509 uint16_t hw;
5511 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5512 BUS_DMASYNC_POSTREAD);
5514 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5517 * Process responses
5519 while (sc->rxq.cur != hw) {
5520 struct iwm_rx_ring *ring = &sc->rxq;
5521 struct iwm_rx_data *data = &ring->data[ring->cur];
5523 bus_dmamap_sync(ring->data_dmat, data->map,
5524 BUS_DMASYNC_POSTREAD);
5526 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5527 "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5528 iwm_handle_rxb(sc, data->m);
5530 ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5534 * Tell the firmware that it can reuse the ring entries that
5535 * we have just processed.
5536 * Seems like the hardware gets upset unless we align
5537 * the write by 8??
5539 hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5540 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
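/*
 * Illustration: if closed_rb_num was 53, this hands back
 * rounddown2(52, 8) == 48; entries 48-52 are only returned to the
 * firmware on a later pass.
 */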
5543 static void
5544 iwm_intr(void *arg)
5546 struct iwm_softc *sc = arg;
5547 int handled = 0;
5548 int r1, r2, rv = 0;
5549 int isperiodic = 0;
5551 #if defined(__DragonFly__)
5552 if (sc->sc_mem == NULL) {
5553 kprintf("iwm_intr: detached\n");
5554 return;
5556 #endif
5557 IWM_LOCK(sc);
5558 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5560 if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5561 uint32_t *ict = sc->ict_dma.vaddr;
5562 int tmp;
5564 tmp = htole32(ict[sc->ict_cur]);
5565 if (!tmp)
5566 goto out_ena;
5569 * ok, there was something. keep plowing until we have all.
5571 r1 = r2 = 0;
5572 while (tmp) {
5573 r1 |= tmp;
5574 ict[sc->ict_cur] = 0;
5575 sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5576 tmp = htole32(ict[sc->ict_cur]);
5579 /* this is where the fun begins. don't ask */
5580 if (r1 == 0xffffffff)
5581 r1 = 0;
5583 /* i am not expected to understand this */
5584 if (r1 & 0xc0000)
5585 r1 |= 0x8000;
5586 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
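/*
 * Per the iwlwifi sources, the 0xc0000/0x8000 test above works around
 * a HW bug: interrupt coalescing can clear the Rx bit while bits
 * 18/19 survive, so those are used to reconstruct it.  The last line
 * then spreads the two compressed ICT bytes back out into the layout
 * of the real INT register.
 */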
5587 } else {
5588 r1 = IWM_READ(sc, IWM_CSR_INT);
5589 /* "hardware gone" (where, fishing?) */
5590 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5591 goto out;
5592 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5594 if (r1 == 0 && r2 == 0) {
5595 goto out_ena;
5598 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5600 /* Safely ignore these bits for debug checks below */
5601 r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5603 if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5604 int i;
5605 struct ieee80211com *ic = &sc->sc_ic;
5606 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5608 #ifdef IWM_DEBUG
5609 iwm_nic_error(sc);
5610 #endif
5611 /* Dump driver status (TX and RX rings) while we're here. */
5612 device_printf(sc->sc_dev, "driver status:\n");
5613 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5614 struct iwm_tx_ring *ring = &sc->txq[i];
5615 device_printf(sc->sc_dev,
5616 " tx ring %2d: qid=%-2d cur=%-3d "
5617 "queued=%-3d\n",
5618 i, ring->qid, ring->cur, ring->queued);
5620 device_printf(sc->sc_dev,
5621 " rx ring: cur=%d\n", sc->rxq.cur);
5622 device_printf(sc->sc_dev,
5623 " 802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5625 /* Don't stop the device; just do a VAP restart */
5626 IWM_UNLOCK(sc);
5628 if (vap == NULL) {
5629 kprintf("%s: null vap\n", __func__);
5630 return;
5633 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5634 "restarting\n", __func__, vap->iv_state);
5636 ieee80211_restart_all(ic);
5637 return;
5640 if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5641 handled |= IWM_CSR_INT_BIT_HW_ERR;
5642 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5643 iwm_stop(sc);
5644 rv = 1;
5645 goto out;
5648 /* firmware chunk loaded */
5649 if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5650 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5651 handled |= IWM_CSR_INT_BIT_FH_TX;
5652 sc->sc_fw_chunk_done = 1;
5653 wakeup(&sc->sc_fw);
5656 if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5657 handled |= IWM_CSR_INT_BIT_RF_KILL;
5658 if (iwm_check_rfkill(sc)) {
5659 device_printf(sc->sc_dev,
5660 "%s: rfkill switch, disabling interface\n",
5661 __func__);
5662 iwm_stop(sc);
5667 * The Linux driver uses periodic interrupts to avoid races.
5668 * We cargo-cult like it's going out of fashion.
5670 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5671 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5672 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5673 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5674 IWM_WRITE_1(sc,
5675 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5676 isperiodic = 1;
5679 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5680 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5681 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5683 iwm_notif_intr(sc);
5685 /* enable periodic interrupt, see above */
5686 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5687 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5688 IWM_CSR_INT_PERIODIC_ENA);
5691 if (__predict_false(r1 & ~handled))
5692 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5693 "%s: unhandled interrupts: %x\n", __func__, r1);
5694 rv = 1;
5696 out_ena:
5697 iwm_restore_interrupts(sc);
5698 out:
5699 IWM_UNLOCK(sc);
5700 return;
5704 * Autoconf glue-sniffing
5706 #define PCI_VENDOR_INTEL 0x8086
5707 #define PCI_PRODUCT_INTEL_WL_3160_1 0x08b3
5708 #define PCI_PRODUCT_INTEL_WL_3160_2 0x08b4
5709 #define PCI_PRODUCT_INTEL_WL_3165_1 0x3165
5710 #define PCI_PRODUCT_INTEL_WL_3165_2 0x3166
5711 #define PCI_PRODUCT_INTEL_WL_7260_1 0x08b1
5712 #define PCI_PRODUCT_INTEL_WL_7260_2 0x08b2
5713 #define PCI_PRODUCT_INTEL_WL_7265_1 0x095a
5714 #define PCI_PRODUCT_INTEL_WL_7265_2 0x095b
5715 #define PCI_PRODUCT_INTEL_WL_8260_1 0x24f3
5716 #define PCI_PRODUCT_INTEL_WL_8260_2 0x24f4
5718 static const struct iwm_devices {
5719 uint16_t device;
5720 const struct iwm_cfg *cfg;
5721 } iwm_devices[] = {
5722 { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5723 { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5724 { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5725 { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5726 { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5727 { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5728 { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5729 { PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5730 { PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5731 { PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5734 static int
5735 iwm_probe(device_t dev)
5737 int i;
5739 for (i = 0; i < nitems(iwm_devices); i++) {
5740 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5741 pci_get_device(dev) == iwm_devices[i].device) {
5742 device_set_desc(dev, iwm_devices[i].cfg->name);
5743 return (BUS_PROBE_DEFAULT);
5747 return (ENXIO);
5750 static int
5751 iwm_dev_check(device_t dev)
5753 struct iwm_softc *sc;
5754 uint16_t devid;
5755 int i;
5757 sc = device_get_softc(dev);
5759 devid = pci_get_device(dev);
5760 for (i = 0; i < NELEM(iwm_devices); i++) {
5761 if (iwm_devices[i].device == devid) {
5762 sc->cfg = iwm_devices[i].cfg;
5763 return (0);
5766 device_printf(dev, "unknown adapter type\n");
5767 return ENXIO;
5770 /* PCI registers */
5771 #define PCI_CFG_RETRY_TIMEOUT 0x041
5773 static int
5774 iwm_pci_attach(device_t dev)
5776 struct iwm_softc *sc;
5777 int count, error, rid;
5778 uint16_t reg;
5779 #if defined(__DragonFly__)
5780 int irq_flags;
5781 #endif
5783 sc = device_get_softc(dev);
5785 /* We disable the RETRY_TIMEOUT register (0x41) to keep
5786 * PCI Tx retries from interfering with C3 CPU state */
5787 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5789 /* Enable bus-mastering and hardware bug workaround. */
5790 pci_enable_busmaster(dev);
5791 reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5792 /* if !MSI */
5793 if (reg & PCIM_STATUS_INTxSTATE) {
5794 reg &= ~PCIM_STATUS_INTxSTATE;
5796 pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5798 rid = PCIR_BAR(0);
5799 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5800 RF_ACTIVE);
5801 if (sc->sc_mem == NULL) {
5802 device_printf(sc->sc_dev, "can't map mem space\n");
5803 return (ENXIO);
5805 sc->sc_st = rman_get_bustag(sc->sc_mem);
5806 sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5808 /* Install interrupt handler. */
5809 count = 1;
5810 rid = 0;
5811 #if defined(__DragonFly__)
5812 pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
5813 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
5814 #else
5815 if (pci_alloc_msi(dev, &count) == 0)
5816 rid = 1;
5817 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5818 (rid != 0 ? 0 : RF_SHAREABLE));
5819 #endif
5820 if (sc->sc_irq == NULL) {
5821 device_printf(dev, "can't map interrupt\n");
5822 return (ENXIO);
5824 #if defined(__DragonFly__)
5825 error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
5826 iwm_intr, sc, &sc->sc_ih,
5827 &wlan_global_serializer);
5828 #else
5829 error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5830 NULL, iwm_intr, sc, &sc->sc_ih);
5831 #endif
	if (sc->sc_ih == NULL) {
		device_printf(dev, "can't establish interrupt\n");
#if defined(__DragonFly__)
		pci_release_msi(dev);
#endif
		return (ENXIO);
	}
	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);

	return (0);
}
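/*
 * Editor's note on the interrupt setup above (non-DragonFly path): on
 * FreeBSD-style newbus, SYS_RES_IRQ rid 0 is the legacy INTx line and
 * MSI vectors start at rid 1, which is why a successful pci_alloc_msi()
 * switches rid to 1 and why RF_SHAREABLE is only requested for the
 * (potentially shared) INTx case.  A minimal sketch of the same logic:
 *
 *	count = 1;
 *	rid = 0;
 *	if (pci_alloc_msi(dev, &count) == 0)
 *		rid = 1;	// first MSI message
 *	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 *	    RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE));
 */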
5844 static void
5845 iwm_pci_detach(device_t dev)
5847 struct iwm_softc *sc = device_get_softc(dev);
5849 if (sc->sc_irq != NULL) {
5850 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5851 bus_release_resource(dev, SYS_RES_IRQ,
5852 rman_get_rid(sc->sc_irq), sc->sc_irq);
5853 pci_release_msi(dev);
5854 #if defined(__DragonFly__)
5855 sc->sc_irq = NULL;
5856 #endif
5858 if (sc->sc_mem != NULL) {
5859 bus_release_resource(dev, SYS_RES_MEMORY,
5860 rman_get_rid(sc->sc_mem), sc->sc_mem);
5861 #if defined(__DragonFly__)
5862 sc->sc_mem = NULL;
#endif
	}
}
5869 static int
5870 iwm_attach(device_t dev)
5872 struct iwm_softc *sc = device_get_softc(dev);
5873 struct ieee80211com *ic = &sc->sc_ic;
5874 int error;
5875 int txq_i, i;
5877 sc->sc_dev = dev;
5878 sc->sc_attached = 1;
5879 IWM_LOCK_INIT(sc);
5880 mbufq_init(&sc->sc_snd, ifqmaxlen);
5881 #if defined(__DragonFly__)
5882 callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
5883 #else
5884 callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5885 #endif
5886 callout_init(&sc->sc_led_blink_to);
5887 TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5889 sc->sc_notif_wait = iwm_notification_wait_init(sc);
5890 if (sc->sc_notif_wait == NULL) {
5891 device_printf(dev, "failed to init notification wait struct\n");
5892 goto fail;
5895 sc->sf_state = IWM_SF_UNINIT;
5897 /* Init phy db */
5898 sc->sc_phy_db = iwm_phy_db_init(sc);
5899 if (!sc->sc_phy_db) {
5900 device_printf(dev, "Cannot init phy_db\n");
5901 goto fail;
	/* Treat EBS as successful unless the firmware states otherwise. */
5905 sc->last_ebs_successful = TRUE;
5907 /* PCI attach */
5908 error = iwm_pci_attach(dev);
5909 if (error != 0)
5910 goto fail;
5912 sc->sc_wantresp = -1;
5914 /* Check device type */
5915 error = iwm_dev_check(dev);
5916 if (error != 0)
5917 goto fail;
5919 sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed, and the revision step now also includes bits 0-1 (there
	 * is no more "dash" value).  To keep hw_rev backwards compatible,
	 * we store it in the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
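	/*
	 * Editor's note, a worked example of the conversion above (assuming
	 * IWM_CSR_HW_REV_STEP() extracts bits 2-3 of its argument): with a
	 * raw 8000-family hw_rev of 0x0201 the step sits in bits 0-1.
	 * Shifting left by 2 moves the step into bits 2-3 where the macro
	 * picks it up, and the final << 2 plants it back at bits 2-3 of
	 * the result: (0x0201 & 0xfff0) | (1 << 2) == 0x0204, so the step
	 * ends up in the old "dash" position.
	 */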
5930 if (iwm_prepare_card_hw(sc) != 0) {
5931 device_printf(dev, "could not initialize hardware\n");
5932 goto fail;
5935 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
5936 int ret;
5937 uint32_t hw_step;
		/*
		 * In order to recognize a C-step part, the driver reads the
		 * chip version ID located at the AUX bus MISC address.
		 */
5943 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5944 IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5945 DELAY(2);
5947 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5948 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5949 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5950 25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the NIC\n");
			goto fail;
		}
5957 if (iwm_nic_lock(sc)) {
5958 hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5959 hw_step |= IWM_ENABLE_WFPM;
5960 iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5961 hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5962 hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5963 if (hw_step == 0x3)
5964 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5965 (IWM_SILICON_C_STEP << 2);
5966 iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the NIC\n");
			goto fail;
		}
	}
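	/*
	 * Editor's note: the block above distinguishes the C-step of
	 * 8000-family silicon from earlier steps; the parts share PCI IDs,
	 * so the step has to be read from the IWM_AUX_MISC_REG private
	 * register instead.  That register is only reachable over the AUX
	 * bus while the NIC is awake, which is why the read is bracketed
	 * by the INIT_DONE/MAC_CLOCK_READY handshake and the
	 * iwm_nic_lock()/iwm_nic_unlock() pair.
	 */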
	/* Special case: the 7265D reuses the 7265 PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}
5979 /* Allocate DMA memory for firmware transfers. */
5980 if ((error = iwm_alloc_fwmem(sc)) != 0) {
5981 device_printf(dev, "could not allocate memory for firmware\n");
5982 goto fail;
5985 /* Allocate "Keep Warm" page. */
5986 if ((error = iwm_alloc_kw(sc)) != 0) {
5987 device_printf(dev, "could not allocate keep warm page\n");
5988 goto fail;
5991 /* We use ICT interrupts */
5992 if ((error = iwm_alloc_ict(sc)) != 0) {
5993 device_printf(dev, "could not allocate ICT table\n");
5994 goto fail;
5997 /* Allocate TX scheduler "rings". */
5998 if ((error = iwm_alloc_sched(sc)) != 0) {
5999 device_printf(dev, "could not allocate TX scheduler rings\n");
6000 goto fail;
6003 /* Allocate TX rings */
6004 for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6005 if ((error = iwm_alloc_tx_ring(sc,
6006 &sc->txq[txq_i], txq_i)) != 0) {
6007 device_printf(dev,
6008 "could not allocate TX ring %d\n",
6009 txq_i);
6010 goto fail;
6014 /* Allocate RX ring. */
6015 if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6016 device_printf(dev, "could not allocate RX ring\n");
6017 goto fail;
6020 /* Clear pending interrupts. */
6021 IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6023 ic->ic_softc = sc;
6024 ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not the only one, but not used */
6026 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
6028 /* Set device capabilities. */
6029 ic->ic_caps =
6030 IEEE80211_C_STA |
6031 IEEE80211_C_WPA | /* WPA/RSN */
6032 IEEE80211_C_WME |
6033 IEEE80211_C_PMGT |
6034 IEEE80211_C_SHSLOT | /* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
6038 for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6039 sc->sc_phyctxt[i].id = i;
6040 sc->sc_phyctxt[i].color = 0;
6041 sc->sc_phyctxt[i].ref = 0;
6042 sc->sc_phyctxt[i].channel = NULL;
	/* Default noise floor (dBm). */
	sc->sc_noise = -96;
6048 /* Max RSSI */
6049 sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6051 sc->sc_preinit_hook.ich_func = iwm_preinit;
6052 sc->sc_preinit_hook.ich_arg = sc;
6053 sc->sc_preinit_hook.ich_desc = "iwm";
6054 if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6055 device_printf(dev, "config_intrhook_establish failed\n");
6056 goto fail;
6059 #ifdef IWM_DEBUG
6060 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6061 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6062 CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6063 #endif
6065 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6066 "<-%s\n", __func__);
6068 return 0;
6070 /* Free allocated memory if something failed during attachment. */
6071 fail:
6072 iwm_detach_local(sc, 0);
	return (ENXIO);
}
6077 static int
6078 iwm_is_valid_ether_addr(uint8_t *addr)
6080 char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6082 if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6083 return (FALSE);
	return (TRUE);
}
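/*
 * Editor's note: addr[0] & 1 tests the I/G (individual/group) bit of an
 * IEEE 802 MAC address; a set bit marks a multicast or broadcast
 * address, which can never be a valid station address.  For example,
 * 01:00:5e:00:00:01 (IPv4 multicast) and ff:ff:ff:ff:ff:ff (broadcast)
 * are rejected here, as is the all-zero address.
 */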
6088 static int
6089 iwm_wme_update(struct ieee80211com *ic)
6091 #define IWM_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
6092 struct iwm_softc *sc = ic->ic_softc;
6093 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6094 struct iwm_vap *ivp = IWM_VAP(vap);
6095 struct iwm_node *in;
6096 struct wmeParams tmp[WME_NUM_AC];
6097 int aci, error;
6099 if (vap == NULL)
6100 return (0);
6102 IEEE80211_LOCK(ic);
6103 for (aci = 0; aci < WME_NUM_AC; aci++)
6104 tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6105 IEEE80211_UNLOCK(ic);
6107 IWM_LOCK(sc);
6108 for (aci = 0; aci < WME_NUM_AC; aci++) {
6109 const struct wmeParams *ac = &tmp[aci];
6110 ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6111 ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6112 ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6113 ivp->queue_params[aci].edca_txop =
6114 IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6116 ivp->have_wme = TRUE;
6117 if (ivp->is_uploaded && vap->iv_bss != NULL) {
6118 in = IWM_NODE(vap->iv_bss);
6119 if (in->in_assoc) {
6120 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6121 device_printf(sc->sc_dev,
6122 "%s: failed to update MAC\n", __func__);
6126 IWM_UNLOCK(sc);
6128 return (0);
#undef IWM_EXP2
}
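/*
 * Editor's note, a worked example of the EDCA conversion above: net80211
 * stores contention windows as exponents, so an AC with wmep_logcwmin = 4
 * and wmep_logcwmax = 10 yields cw_min = 2^4 - 1 = 15 and
 * cw_max = 2^10 - 1 = 1023 slots via IWM_EXP2().  The TXOP limit is kept
 * in units of 32 microseconds, and IEEE80211_TXOP_TO_US() scales it to
 * microseconds for the firmware, e.g. a limit of 94 becomes 3008 us.
 */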
6132 static void
6133 iwm_preinit(void *arg)
6135 struct iwm_softc *sc = arg;
6136 device_t dev = sc->sc_dev;
6137 struct ieee80211com *ic = &sc->sc_ic;
6138 int error;
6140 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6141 "->%s\n", __func__);
6143 IWM_LOCK(sc);
6144 if ((error = iwm_start_hw(sc)) != 0) {
6145 device_printf(dev, "could not initialize hardware\n");
6146 IWM_UNLOCK(sc);
6147 goto fail;
6150 error = iwm_run_init_mvm_ucode(sc, 1);
6151 iwm_stop_device(sc);
6152 if (error) {
6153 IWM_UNLOCK(sc);
6154 goto fail;
6156 device_printf(dev,
6157 "hw rev 0x%x, fw ver %s, address %s\n",
6158 sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6159 sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
	/* Not all hardware supports the 5GHz band. */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6165 IWM_UNLOCK(sc);
6167 iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6168 ic->ic_channels);
	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
6174 ieee80211_ifattach(ic);
6175 ic->ic_vap_create = iwm_vap_create;
6176 ic->ic_vap_delete = iwm_vap_delete;
6177 ic->ic_raw_xmit = iwm_raw_xmit;
6178 ic->ic_node_alloc = iwm_node_alloc;
6179 ic->ic_scan_start = iwm_scan_start;
6180 ic->ic_scan_end = iwm_scan_end;
6181 ic->ic_update_mcast = iwm_update_mcast;
6182 ic->ic_getradiocaps = iwm_init_channel_map;
6183 ic->ic_set_channel = iwm_set_channel;
6184 ic->ic_scan_curchan = iwm_scan_curchan;
6185 ic->ic_scan_mindwell = iwm_scan_mindwell;
6186 ic->ic_wme.wme_update = iwm_wme_update;
6187 ic->ic_parent = iwm_parent;
6188 ic->ic_transmit = iwm_transmit;
6189 iwm_radiotap_attach(sc);
6190 if (bootverbose)
6191 ieee80211_announce(ic);
6193 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6194 "<-%s\n", __func__);
6195 config_intrhook_disestablish(&sc->sc_preinit_hook);
6197 return;
6198 fail:
6199 config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
/*
 * Attach the interface to 802.11 radiotap.
 */
6206 static void
6207 iwm_radiotap_attach(struct iwm_softc *sc)
6209 struct ieee80211com *ic = &sc->sc_ic;
6211 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6212 "->%s begin\n", __func__);
6213 ieee80211_radiotap_attach(ic,
6214 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6215 IWM_TX_RADIOTAP_PRESENT,
6216 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6217 IWM_RX_RADIOTAP_PRESENT);
6218 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s end\n", __func__);
}
6222 static struct ieee80211vap *
6223 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6224 enum ieee80211_opmode opmode, int flags,
6225 const uint8_t bssid[IEEE80211_ADDR_LEN],
6226 const uint8_t mac[IEEE80211_ADDR_LEN])
6228 struct iwm_vap *ivp;
6229 struct ieee80211vap *vap;
6231 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
6232 return NULL;
6233 ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
6234 vap = &ivp->iv_vap;
6235 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6236 vap->iv_bmissthreshold = 10; /* override default */
6237 /* Override with driver methods. */
6238 ivp->iv_newstate = vap->iv_newstate;
6239 vap->iv_newstate = iwm_newstate;
6241 ivp->id = IWM_DEFAULT_MACID;
6242 ivp->color = IWM_DEFAULT_COLOR;
6244 ivp->have_wme = FALSE;
6245 ivp->ps_disabled = FALSE;
6247 ieee80211_ratectl_init(vap);
6248 /* Complete setup. */
6249 ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6250 mac);
6251 ic->ic_opmode = opmode;
	return vap;
}
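/*
 * Editor's note: the iv_newstate juggling above is the standard net80211
 * method-override pattern: the driver saves the stack's state-machine
 * handler and installs its own, and iwm_newstate() is then expected to
 * chain to the saved pointer once its own work is done, along the lines
 * of (sketch, assuming the usual signature):
 *
 *	return ivp->iv_newstate(vap, nstate, arg);
 */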
6256 static void
6257 iwm_vap_delete(struct ieee80211vap *vap)
6259 struct iwm_vap *ivp = IWM_VAP(vap);
6261 ieee80211_ratectl_deinit(vap);
6262 ieee80211_vap_detach(vap);
6263 kfree(ivp, M_80211_VAP);
6266 static void
6267 iwm_xmit_queue_drain(struct iwm_softc *sc)
6269 struct mbuf *m;
6270 struct ieee80211_node *ni;
6272 while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6273 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6274 ieee80211_free_node(ni);
		m_freem(m);
	}
}
6279 static void
6280 iwm_scan_start(struct ieee80211com *ic)
6282 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6283 struct iwm_softc *sc = ic->ic_softc;
6284 int error;
6286 IWM_LOCK(sc);
6287 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6288 /* This should not be possible */
6289 device_printf(sc->sc_dev,
6290 "%s: Previous scan not completed yet\n", __func__);
6292 if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6293 error = iwm_mvm_umac_scan(sc);
6294 else
6295 error = iwm_mvm_lmac_scan(sc);
6296 if (error != 0) {
6297 device_printf(sc->sc_dev, "could not initiate scan\n");
6298 IWM_UNLOCK(sc);
6299 ieee80211_cancel_scan(vap);
6300 } else {
6301 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6302 iwm_led_blink_start(sc);
		IWM_UNLOCK(sc);
	}
}
6307 static void
6308 iwm_scan_end(struct ieee80211com *ic)
6310 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6311 struct iwm_softc *sc = ic->ic_softc;
6313 IWM_LOCK(sc);
6314 iwm_led_blink_stop(sc);
6315 if (vap->iv_state == IEEE80211_S_RUN)
6316 iwm_mvm_led_enable(sc);
6317 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
6323 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6324 iwm_mvm_scan_stop_wait(sc);
6326 IWM_UNLOCK(sc);
	/*
	 * Make sure we don't race if sc_es_task is still enqueued here.
	 * This ensures that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
6333 taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}

static void
iwm_set_channel(struct ieee80211com *ic)
{
}

static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}

static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6357 void
6358 iwm_init_task(void *arg1)
6360 struct iwm_softc *sc = arg1;
6362 IWM_LOCK(sc);
6363 while (sc->sc_flags & IWM_FLAG_BUSY) {
6364 #if defined(__DragonFly__)
6365 lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6366 #else
6367 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6368 #endif
6370 sc->sc_flags |= IWM_FLAG_BUSY;
6371 iwm_stop(sc);
6372 if (sc->sc_ic.ic_nrunning > 0)
6373 iwm_init(sc);
6374 sc->sc_flags &= ~IWM_FLAG_BUSY;
6375 wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
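/*
 * Editor's note: IWM_FLAG_BUSY plus the lksleep()/msleep() loop above
 * forms a simple in-softc semaphore: the sleep atomically drops the
 * driver lock while waiting, and the wakeup(&sc->sc_flags) at the end
 * releases the next waiter, so only one stop/init sequence can be in
 * flight at a time.
 */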
6379 static int
6380 iwm_resume(device_t dev)
6382 struct iwm_softc *sc = device_get_softc(dev);
6383 int do_reinit = 0;
	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
6389 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6390 iwm_init_task(device_get_softc(dev));
6392 IWM_LOCK(sc);
6393 if (sc->sc_flags & IWM_FLAG_SCANNING) {
6394 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6395 do_reinit = 1;
6397 IWM_UNLOCK(sc);
6399 if (do_reinit)
6400 ieee80211_resume_all(&sc->sc_ic);
	return 0;
}
6405 static int
6406 iwm_suspend(device_t dev)
6408 int do_stop = 0;
6409 struct iwm_softc *sc = device_get_softc(dev);
	do_stop = (sc->sc_ic.ic_nrunning > 0);
6413 ieee80211_suspend_all(&sc->sc_ic);
6415 if (do_stop) {
6416 IWM_LOCK(sc);
6417 iwm_stop(sc);
6418 sc->sc_flags |= IWM_FLAG_SCANNING;
6419 IWM_UNLOCK(sc);
	return (0);
}
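/*
 * Editor's note: setting IWM_FLAG_SCANNING on suspend looks odd but is
 * deliberate, as far as the code shows: iwm_resume() checks and clears
 * exactly this flag to decide whether ieee80211_resume_all() must bring
 * the interface back up, so the flag is being reused as a "reinit
 * needed on resume" marker rather than as a real scan indicator.
 */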
6425 static int
6426 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6428 struct iwm_fw_info *fw = &sc->sc_fw;
6429 device_t dev = sc->sc_dev;
6430 int i;
6432 if (!sc->sc_attached)
6433 return 0;
6434 sc->sc_attached = 0;
6435 if (do_net80211) {
6436 ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6438 callout_drain(&sc->sc_led_blink_to);
6439 callout_drain(&sc->sc_watchdog_to);
6440 iwm_stop_device(sc);
6441 if (do_net80211) {
6442 IWM_LOCK(sc);
6443 iwm_xmit_queue_drain(sc);
6444 IWM_UNLOCK(sc);
6445 ieee80211_ifdetach(&sc->sc_ic);
6448 iwm_phy_db_free(sc->sc_phy_db);
6449 sc->sc_phy_db = NULL;
6451 iwm_free_nvm_data(sc->nvm_data);
6453 /* Free descriptor rings */
6454 iwm_free_rx_ring(sc, &sc->rxq);
6455 for (i = 0; i < nitems(sc->txq); i++)
6456 iwm_free_tx_ring(sc, &sc->txq[i]);
6458 /* Free firmware */
6459 if (fw->fw_fp != NULL)
6460 iwm_fw_info_free(fw);
6462 /* Free scheduler */
6463 iwm_dma_contig_free(&sc->sched_dma);
6464 iwm_dma_contig_free(&sc->ict_dma);
6465 iwm_dma_contig_free(&sc->kw_dma);
6466 iwm_dma_contig_free(&sc->fw_dma);
6468 iwm_free_fw_paging(sc);
6470 /* Finished with the hardware - detach things */
6471 iwm_pci_detach(dev);
6473 if (sc->sc_notif_wait != NULL) {
6474 iwm_notification_wait_free(sc->sc_notif_wait);
6475 sc->sc_notif_wait = NULL;
6478 IWM_LOCK_DESTROY(sc);
	return (0);
}
6483 static int
6484 iwm_detach(device_t dev)
6486 struct iwm_softc *sc = device_get_softc(dev);
	return (iwm_detach_local(sc, 1));
}
6491 static device_method_t iwm_pci_methods[] = {
6492 /* Device interface */
6493 DEVMETHOD(device_probe, iwm_probe),
6494 DEVMETHOD(device_attach, iwm_attach),
6495 DEVMETHOD(device_detach, iwm_detach),
6496 DEVMETHOD(device_suspend, iwm_suspend),
6497 DEVMETHOD(device_resume, iwm_resume),
	DEVMETHOD_END
};
6502 static driver_t iwm_pci_driver = {
6503 "iwm",
6504 iwm_pci_methods,
	sizeof(struct iwm_softc)
};
6508 static devclass_t iwm_devclass;
6510 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6511 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6512 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6513 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
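/*
 * Editor's note: DRIVER_MODULE() registers the driver with the PCI bus
 * under the name "iwm", and the MODULE_DEPEND() lines make the kernel
 * loader pull in the firmware(9) loading support, the PCI bus code, and
 * the net80211 wlan stack before this module initializes.
 */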