if_iwm - Update the firmware rs table instead of indexing the table in tx cmds.
1 /* $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $ */
3 /*
4 * Copyright (c) 2014 genua mbh <info@genua.de>
5 * Copyright (c) 2014 Fixup Software Ltd.
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 /*-
21 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22 * which were used as the reference documentation for this implementation.
24 * Driver version we are currently based off of is
25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
27 ***********************************************************************
29 * This file is provided under a dual BSD/GPLv2 license. When using or
30 * redistributing this file, you may do so under either license.
32 * GPL LICENSE SUMMARY
34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
43 * General Public License for more details.
45 * You should have received a copy of the GNU General Public License
46 * along with this program; if not, write to the Free Software
47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48 * USA
50 * The full GNU General Public License is included in this distribution
51 * in the file called COPYING.
53 * Contact Information:
54 * Intel Linux Wireless <ilw@linux.intel.com>
55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
58 * BSD LICENSE
60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61 * All rights reserved.
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
65 * are met:
67 * * Redistributions of source code must retain the above copyright
68 * notice, this list of conditions and the following disclaimer.
69 * * Redistributions in binary form must reproduce the above copyright
70 * notice, this list of conditions and the following disclaimer in
71 * the documentation and/or other materials provided with the
72 * distribution.
73 * * Neither the name Intel Corporation nor the names of its
74 * contributors may be used to endorse or promote products derived
75 * from this software without specific prior written permission.
77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
90 /*-
91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
93 * Permission to use, copy, modify, and distribute this software for any
94 * purpose with or without fee is hereby granted, provided that the above
95 * copyright notice and this permission notice appear in all copies.
97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
106 * DragonFly work
108 * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109 * changes to remove per-device network interface (DragonFly has not
110 * caught up to that yet on the WLAN side).
112 * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113 * malloc -> kmalloc (in particular, changing improper M_NOWAIT
114 * specifications to M_INTWAIT. We still don't
115 * understand why FreeBSD uses M_NOWAIT for
116 * critical must-not-fail kmalloc()s).
117 * free -> kfree
118 * printf -> kprintf
119 * (bug fix) memset in iwm_reset_rx_ring.
120 * (debug) added several kprintf()s on error
122 * header file paths (DFly allows localized path specifications).
123 * minor header file differences.
125 * Comprehensive list of adjustments for DragonFly #ifdef'd:
126 * (safety) added register read-back serialization in iwm_reset_rx_ring().
127 * packet counters
128 * msleep -> lksleep
129 * mtx -> lk (mtx functions -> lockmgr functions)
130 * callout differences
131 * taskqueue differences
132 * MSI differences
133 * bus_setup_intr() differences
134 * minor PCI config register naming differences
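/*
 * Editorial sketch (not part of the original source): the msleep ->
 * lksleep item in the list above is handled with the usual #ifdef
 * pattern, as the firmware-load path later in this file shows:
 *
 * #if defined(__DragonFly__)
 *	lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
 * #else
 *	msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
 * #endif
 */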
136 #include <sys/cdefs.h>
137 __FBSDID("$FreeBSD$");
139 #include <sys/param.h>
140 #include <sys/bus.h>
141 #include <sys/endian.h>
142 #include <sys/firmware.h>
143 #include <sys/kernel.h>
144 #include <sys/malloc.h>
145 #include <sys/mbuf.h>
146 #include <sys/module.h>
147 #include <sys/rman.h>
148 #include <sys/sysctl.h>
149 #include <sys/linker.h>
151 #include <machine/endian.h>
153 #include <bus/pci/pcivar.h>
154 #include <bus/pci/pcireg.h>
156 #include <net/bpf.h>
158 #include <net/if.h>
159 #include <net/if_var.h>
160 #include <net/if_arp.h>
161 #include <net/if_dl.h>
162 #include <net/if_media.h>
163 #include <net/if_types.h>
165 #include <netinet/in.h>
166 #include <netinet/in_systm.h>
167 #include <netinet/if_ether.h>
168 #include <netinet/ip.h>
170 #include <netproto/802_11/ieee80211_var.h>
171 #include <netproto/802_11/ieee80211_regdomain.h>
172 #include <netproto/802_11/ieee80211_ratectl.h>
173 #include <netproto/802_11/ieee80211_radiotap.h>
175 #include "if_iwmreg.h"
176 #include "if_iwmvar.h"
177 #include "if_iwm_config.h"
178 #include "if_iwm_debug.h"
179 #include "if_iwm_notif_wait.h"
180 #include "if_iwm_util.h"
181 #include "if_iwm_binding.h"
182 #include "if_iwm_phy_db.h"
183 #include "if_iwm_mac_ctxt.h"
184 #include "if_iwm_phy_ctxt.h"
185 #include "if_iwm_time_event.h"
186 #include "if_iwm_power.h"
187 #include "if_iwm_scan.h"
188 #include "if_iwm_sf.h"
189 #include "if_iwm_sta.h"
190 #include "if_iwm_pcie_trans.h"
191 #include "if_iwm_led.h"
192 #include "if_iwm_fw.h"
194 const uint8_t iwm_nvm_channels[] = {
195 /* 2.4 GHz */
196 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
197 /* 5 GHz */
198 36, 40, 44, 48, 52, 56, 60, 64,
199 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
200 149, 153, 157, 161, 165
202 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
203 "IWM_NUM_CHANNELS is too small");
205 const uint8_t iwm_nvm_channels_8000[] = {
206 /* 2.4 GHz */
207 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
208 /* 5 GHz */
209 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
210 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
211 149, 153, 157, 161, 165, 169, 173, 177, 181
213 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
214 "IWM_NUM_CHANNELS_8000 is too small");
216 #define IWM_NUM_2GHZ_CHANNELS 14
217 #define IWM_N_HW_ADDR_MASK 0xF
220 * XXX For now, there's simply a fixed set of rate table entries
221 * that are populated.
223 const struct iwm_rate {
224 uint8_t rate;
225 uint8_t plcp;
226 } iwm_rates[] = {
227 { 2, IWM_RATE_1M_PLCP },
228 { 4, IWM_RATE_2M_PLCP },
229 { 11, IWM_RATE_5M_PLCP },
230 { 22, IWM_RATE_11M_PLCP },
231 { 12, IWM_RATE_6M_PLCP },
232 { 18, IWM_RATE_9M_PLCP },
233 { 24, IWM_RATE_12M_PLCP },
234 { 36, IWM_RATE_18M_PLCP },
235 { 48, IWM_RATE_24M_PLCP },
236 { 72, IWM_RATE_36M_PLCP },
237 { 96, IWM_RATE_48M_PLCP },
238 { 108, IWM_RATE_54M_PLCP },
240 #define IWM_RIDX_CCK 0
241 #define IWM_RIDX_OFDM 4
242 #define IWM_RIDX_MAX (nitems(iwm_rates)-1)
243 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
244 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
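/*
 * Editorial sketch, not in the original driver: the 'rate' member above
 * is in 500 kbps units (2 = 1 Mbit/s ... 108 = 54 Mbit/s), so mapping a
 * net80211 rate code to a table index is a linear scan; the real driver
 * does this in iwm_rate2ridx().  A hypothetical helper:
 */
#if 0
static int
example_rate2ridx(uint8_t rate)
{
	int i;

	for (i = 0; i <= IWM_RIDX_MAX; i++) {
		/* e.g. rate 22 (11 Mbit/s) matches index 3, a CCK rate */
		if (iwm_rates[i].rate == rate)
			return i;
	}
	return -1;	/* not a supported legacy rate */
}
#endif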
246 struct iwm_nvm_section {
247 uint16_t length;
248 uint8_t *data;
251 #define IWM_MVM_UCODE_ALIVE_TIMEOUT hz
252 #define IWM_MVM_UCODE_CALIB_TIMEOUT (2*hz)
254 struct iwm_mvm_alive_data {
255 int valid;
256 uint32_t scd_base_addr;
259 static int iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
260 static int iwm_firmware_store_section(struct iwm_softc *,
261 enum iwm_ucode_type,
262 const uint8_t *, size_t);
263 static int iwm_set_default_calib(struct iwm_softc *, const void *);
264 static void iwm_fw_info_free(struct iwm_fw_info *);
265 static int iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
266 #if !defined(__DragonFly__)
267 static void iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
268 #endif
269 static int iwm_alloc_fwmem(struct iwm_softc *);
270 static int iwm_alloc_sched(struct iwm_softc *);
271 static int iwm_alloc_kw(struct iwm_softc *);
272 static int iwm_alloc_ict(struct iwm_softc *);
273 static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
274 static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
275 static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
276 static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
277 int);
278 static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
279 static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
280 static void iwm_enable_interrupts(struct iwm_softc *);
281 static void iwm_restore_interrupts(struct iwm_softc *);
282 static void iwm_disable_interrupts(struct iwm_softc *);
283 static void iwm_ict_reset(struct iwm_softc *);
284 static int iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
285 static void iwm_stop_device(struct iwm_softc *);
286 static void iwm_mvm_nic_config(struct iwm_softc *);
287 static int iwm_nic_rx_init(struct iwm_softc *);
288 static int iwm_nic_tx_init(struct iwm_softc *);
289 static int iwm_nic_init(struct iwm_softc *);
290 static int iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
291 static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
292 uint16_t, uint8_t *, uint16_t *);
293 static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
294 uint16_t *, uint32_t);
295 static uint32_t iwm_eeprom_channel_flags(uint16_t);
296 static void iwm_add_channel_band(struct iwm_softc *,
297 struct ieee80211_channel[], int, int *, int, size_t,
298 const uint8_t[]);
299 static void iwm_init_channel_map(struct ieee80211com *, int, int *,
300 struct ieee80211_channel[]);
301 static struct iwm_nvm_data *
302 iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
303 const uint16_t *, const uint16_t *,
304 const uint16_t *, const uint16_t *,
305 const uint16_t *);
306 static void iwm_free_nvm_data(struct iwm_nvm_data *);
307 static void iwm_set_hw_address_family_8000(struct iwm_softc *,
308 struct iwm_nvm_data *,
309 const uint16_t *,
310 const uint16_t *);
311 static int iwm_get_sku(const struct iwm_softc *, const uint16_t *,
312 const uint16_t *);
313 static int iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
314 static int iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
315 const uint16_t *);
316 static int iwm_get_n_hw_addrs(const struct iwm_softc *,
317 const uint16_t *);
318 static void iwm_set_radio_cfg(const struct iwm_softc *,
319 struct iwm_nvm_data *, uint32_t);
320 static struct iwm_nvm_data *
321 iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
322 static int iwm_nvm_init(struct iwm_softc *);
323 static int iwm_pcie_load_section(struct iwm_softc *, uint8_t,
324 const struct iwm_fw_desc *);
325 static int iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
326 bus_addr_t, uint32_t);
327 static int iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
328 const struct iwm_fw_sects *,
329 int, int *);
330 static int iwm_pcie_load_cpu_sections(struct iwm_softc *,
331 const struct iwm_fw_sects *,
332 int, int *);
333 static int iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
334 const struct iwm_fw_sects *);
335 static int iwm_pcie_load_given_ucode(struct iwm_softc *,
336 const struct iwm_fw_sects *);
337 static int iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
338 static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
339 static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
340 static int iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
341 enum iwm_ucode_type);
342 static int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
343 static int iwm_rx_addbuf(struct iwm_softc *, int, int);
344 static int iwm_mvm_get_signal_strength(struct iwm_softc *,
345 struct iwm_rx_phy_info *);
346 static void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
347 struct iwm_rx_packet *);
348 static int iwm_get_noise(struct iwm_softc *,
349 const struct iwm_mvm_statistics_rx_non_phy *);
350 static void iwm_mvm_handle_rx_statistics(struct iwm_softc *,
351 struct iwm_rx_packet *);
352 static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
353 uint32_t, boolean_t);
354 static int iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
355 struct iwm_rx_packet *,
356 struct iwm_node *);
357 static void iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
358 static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
359 #if 0
360 static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
361 uint16_t);
362 #endif
363 static const struct iwm_rate *
364 iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
365 struct ieee80211_frame *, struct iwm_tx_cmd *);
366 static int iwm_tx(struct iwm_softc *, struct mbuf *,
367 struct ieee80211_node *, int);
368 static int iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
369 const struct ieee80211_bpf_params *);
370 static int iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
371 static int iwm_auth(struct ieee80211vap *, struct iwm_softc *);
372 static int iwm_release(struct iwm_softc *, struct iwm_node *);
373 static struct ieee80211_node *
374 iwm_node_alloc(struct ieee80211vap *,
375 const uint8_t[IEEE80211_ADDR_LEN]);
376 static uint8_t iwm_rate_from_ucode_rate(uint32_t);
377 static int iwm_rate2ridx(struct iwm_softc *, uint8_t);
378 static void iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
379 static int iwm_media_change(struct ifnet *);
380 static int iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
381 static void iwm_endscan_cb(void *, int);
382 static int iwm_send_bt_init_conf(struct iwm_softc *);
383 static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
384 static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
385 static int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
386 static void iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
387 static int iwm_init_hw(struct iwm_softc *);
388 static void iwm_init(struct iwm_softc *);
389 static void iwm_start(struct iwm_softc *);
390 static void iwm_stop(struct iwm_softc *);
391 static void iwm_watchdog(void *);
392 static void iwm_parent(struct ieee80211com *);
393 #ifdef IWM_DEBUG
394 static const char *
395 iwm_desc_lookup(uint32_t);
396 static void iwm_nic_error(struct iwm_softc *);
397 static void iwm_nic_umac_error(struct iwm_softc *);
398 #endif
399 static void iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
400 static void iwm_notif_intr(struct iwm_softc *);
401 static void iwm_intr(void *);
402 static int iwm_attach(device_t);
403 static int iwm_is_valid_ether_addr(uint8_t *);
404 static void iwm_preinit(void *);
405 static int iwm_detach_local(struct iwm_softc *sc, int);
406 static void iwm_init_task(void *);
407 static void iwm_radiotap_attach(struct iwm_softc *);
408 static struct ieee80211vap *
409 iwm_vap_create(struct ieee80211com *,
410 const char [IFNAMSIZ], int,
411 enum ieee80211_opmode, int,
412 const uint8_t [IEEE80211_ADDR_LEN],
413 const uint8_t [IEEE80211_ADDR_LEN]);
414 static void iwm_vap_delete(struct ieee80211vap *);
415 static void iwm_xmit_queue_drain(struct iwm_softc *);
416 static void iwm_scan_start(struct ieee80211com *);
417 static void iwm_scan_end(struct ieee80211com *);
418 static void iwm_update_mcast(struct ieee80211com *);
419 static void iwm_set_channel(struct ieee80211com *);
420 static void iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
421 static void iwm_scan_mindwell(struct ieee80211_scan_state *);
422 static int iwm_detach(device_t);
424 #if defined(__DragonFly__)
425 static int iwm_msi_enable = 1;
427 TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
428 #endif
430 static int iwm_lar_disable = 0;
431 TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
434 * Firmware parser.
437 static int
438 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
440 const struct iwm_fw_cscheme_list *l = (const void *)data;
442 if (dlen < sizeof(*l) ||
443 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
444 return EINVAL;
446 /* we don't actually store anything for now, always use s/w crypto */
448 return 0;
451 static int
452 iwm_firmware_store_section(struct iwm_softc *sc,
453 enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
455 struct iwm_fw_sects *fws;
456 struct iwm_fw_desc *fwone;
458 if (type >= IWM_UCODE_TYPE_MAX)
459 return EINVAL;
460 if (dlen < sizeof(uint32_t))
461 return EINVAL;
463 fws = &sc->sc_fw.fw_sects[type];
464 if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
465 return EINVAL;
467 fwone = &fws->fw_sect[fws->fw_count];
469 /* first 32bit are device load offset */
470 memcpy(&fwone->offset, data, sizeof(uint32_t));
472 /* rest is data */
473 fwone->data = data + sizeof(uint32_t);
474 fwone->len = dlen - sizeof(uint32_t);
476 fws->fw_count++;
478 return 0;
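/*
 * Editorial sketch of the section layout consumed above: each firmware
 * section TLV begins with a 32-bit device load offset followed by the
 * image bytes, so a dlen-byte section yields dlen - 4 bytes of payload:
 *
 *	bytes 0..3	load offset (copied into fwone->offset)
 *	bytes 4..dlen-1	section data (fwone->data, fwone->len = dlen - 4)
 */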
481 #define IWM_DEFAULT_SCAN_CHANNELS 40
483 struct iwm_tlv_calib_data {
484 uint32_t ucode_type;
485 struct iwm_tlv_calib_ctrl calib;
486 } __packed;
488 static int
489 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
491 const struct iwm_tlv_calib_data *def_calib = data;
492 uint32_t ucode_type = le32toh(def_calib->ucode_type);
494 if (ucode_type >= IWM_UCODE_TYPE_MAX) {
495 device_printf(sc->sc_dev,
496 "Wrong ucode_type %u for default "
497 "calibration.\n", ucode_type);
498 return EINVAL;
501 sc->sc_default_calib[ucode_type].flow_trigger =
502 def_calib->calib.flow_trigger;
503 sc->sc_default_calib[ucode_type].event_trigger =
504 def_calib->calib.event_trigger;
506 return 0;
509 static int
510 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
511 struct iwm_ucode_capabilities *capa)
513 const struct iwm_ucode_api *ucode_api = (const void *)data;
514 uint32_t api_index = le32toh(ucode_api->api_index);
515 uint32_t api_flags = le32toh(ucode_api->api_flags);
516 int i;
518 if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
519 device_printf(sc->sc_dev,
520 "api flags index %d larger than supported by driver\n",
521 api_index);
522 /* don't return an error so we can load FW that has more bits */
523 return 0;
526 for (i = 0; i < 32; i++) {
527 if (api_flags & (1U << i))
528 setbit(capa->enabled_api, i + 32 * api_index);
531 return 0;
534 static int
535 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
536 struct iwm_ucode_capabilities *capa)
538 const struct iwm_ucode_capa *ucode_capa = (const void *)data;
539 uint32_t api_index = le32toh(ucode_capa->api_index);
540 uint32_t api_flags = le32toh(ucode_capa->api_capa);
541 int i;
543 if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
544 device_printf(sc->sc_dev,
545 "capa flags index %d larger than supported by driver\n",
546 api_index);
547 /* don't return an error so we can load FW that has more bits */
548 return 0;
551 for (i = 0; i < 32; i++) {
552 if (api_flags & (1U << i))
553 setbit(capa->enabled_capa, i + 32 * api_index);
556 return 0;
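/*
 * Editorial worked example for the two functions above: flags arrive in
 * 32-bit chunks, so chunk api_index with bit i set names absolute flag
 * 32 * api_index + i.  For instance api_index 1, bit 8 -> flag 40,
 * which setbit() records in the enabled_api/enabled_capa bitmaps.
 */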
559 static void
560 iwm_fw_info_free(struct iwm_fw_info *fw)
562 firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
563 fw->fw_fp = NULL;
564 /* don't touch fw->fw_status */
565 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
568 static int
569 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
571 struct iwm_fw_info *fw = &sc->sc_fw;
572 const struct iwm_tlv_ucode_header *uhdr;
573 const struct iwm_ucode_tlv *tlv;
574 struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
575 enum iwm_ucode_tlv_type tlv_type;
576 const struct firmware *fwp;
577 const uint8_t *data;
578 uint32_t tlv_len;
579 uint32_t usniffer_img;
580 const uint8_t *tlv_data;
581 uint32_t paging_mem_size;
582 int num_of_cpus;
583 int error = 0;
584 size_t len;
586 if (fw->fw_status == IWM_FW_STATUS_DONE &&
587 ucode_type != IWM_UCODE_INIT)
588 return 0;
590 while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
591 #if defined(__DragonFly__)
592 lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
593 #else
594 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
595 #endif
597 fw->fw_status = IWM_FW_STATUS_INPROGRESS;
599 if (fw->fw_fp != NULL)
600 iwm_fw_info_free(fw);
603 * Load firmware into driver memory.
604 * fw_fp will be set.
606 IWM_UNLOCK(sc);
607 fwp = firmware_get(sc->cfg->fw_name);
608 IWM_LOCK(sc);
609 if (fwp == NULL) {
610 device_printf(sc->sc_dev,
611 "could not read firmware %s (error %d)\n",
612 sc->cfg->fw_name, error);
613 goto out;
615 fw->fw_fp = fwp;
617 /* (Re-)Initialize default values. */
618 capa->flags = 0;
619 capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
620 capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
621 memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
622 memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
623 memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
626 * Parse firmware contents
629 uhdr = (const void *)fw->fw_fp->data;
630 if (*(const uint32_t *)fw->fw_fp->data != 0
631 || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
632 device_printf(sc->sc_dev, "invalid firmware %s\n",
633 sc->cfg->fw_name);
634 error = EINVAL;
635 goto out;
638 ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
639 IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
640 IWM_UCODE_MINOR(le32toh(uhdr->ver)),
641 IWM_UCODE_API(le32toh(uhdr->ver)));
642 data = uhdr->data;
643 len = fw->fw_fp->datasize - sizeof(*uhdr);
645 while (len >= sizeof(*tlv)) {
646 len -= sizeof(*tlv);
647 tlv = (const void *)data;
649 tlv_len = le32toh(tlv->length);
650 tlv_type = le32toh(tlv->type);
651 tlv_data = tlv->data;
653 if (len < tlv_len) {
654 device_printf(sc->sc_dev,
655 "firmware too short: %zu bytes\n",
656 len);
657 error = EINVAL;
658 goto parse_out;
660 len -= roundup2(tlv_len, 4);
661 data += sizeof(tlv) + roundup2(tlv_len, 4);
663 switch ((int)tlv_type) {
664 case IWM_UCODE_TLV_PROBE_MAX_LEN:
665 if (tlv_len != sizeof(uint32_t)) {
666 device_printf(sc->sc_dev,
667 "%s: PROBE_MAX_LEN (%d) != sizeof(uint32_t)\n",
668 __func__,
669 (int) tlv_len);
670 error = EINVAL;
671 goto parse_out;
673 capa->max_probe_length =
674 le32_to_cpup((const uint32_t *)tlv_data);
675 /* limit it to something sensible */
676 if (capa->max_probe_length >
677 IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
678 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
679 "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
680 "ridiculous\n", __func__);
681 error = EINVAL;
682 goto parse_out;
684 break;
685 case IWM_UCODE_TLV_PAN:
686 if (tlv_len) {
687 device_printf(sc->sc_dev,
688 "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
689 __func__,
690 (int) tlv_len);
691 error = EINVAL;
692 goto parse_out;
694 capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
695 break;
696 case IWM_UCODE_TLV_FLAGS:
697 if (tlv_len < sizeof(uint32_t)) {
698 device_printf(sc->sc_dev,
699 "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
700 __func__,
701 (int) tlv_len);
702 error = EINVAL;
703 goto parse_out;
705 if (tlv_len % sizeof(uint32_t)) {
706 device_printf(sc->sc_dev,
707 "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) %% sizeof(uint32_t)\n",
708 __func__,
709 (int) tlv_len);
710 error = EINVAL;
711 goto parse_out;
714 * Apparently there can be many flags, but the Linux driver
715 * parses only the first one, and so do we.
717 * XXX: why does this override IWM_UCODE_TLV_PAN?
718 * Intentional or a bug? Observations from
719 * current firmware file:
720 * 1) TLV_PAN is parsed first
721 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
722 * ==> this resets TLV_PAN to itself... hnnnk
724 capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
725 break;
726 case IWM_UCODE_TLV_CSCHEME:
727 if ((error = iwm_store_cscheme(sc,
728 tlv_data, tlv_len)) != 0) {
729 device_printf(sc->sc_dev,
730 "%s: iwm_store_cscheme(): returned %d\n",
731 __func__,
732 error);
733 goto parse_out;
735 break;
736 case IWM_UCODE_TLV_NUM_OF_CPU:
737 if (tlv_len != sizeof(uint32_t)) {
738 device_printf(sc->sc_dev,
739 "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
740 __func__,
741 (int) tlv_len);
742 error = EINVAL;
743 goto parse_out;
745 num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
746 if (num_of_cpus == 2) {
747 fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
748 TRUE;
749 fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
750 TRUE;
751 fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
752 TRUE;
753 } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
754 device_printf(sc->sc_dev,
755 "%s: Driver supports only 1 or 2 CPUs\n",
756 __func__);
757 error = EINVAL;
758 goto parse_out;
760 break;
761 case IWM_UCODE_TLV_SEC_RT:
762 if ((error = iwm_firmware_store_section(sc,
763 IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
764 device_printf(sc->sc_dev,
765 "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
766 __func__,
767 error);
768 goto parse_out;
770 break;
771 case IWM_UCODE_TLV_SEC_INIT:
772 if ((error = iwm_firmware_store_section(sc,
773 IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
774 device_printf(sc->sc_dev,
775 "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
776 __func__,
777 error);
778 goto parse_out;
780 break;
781 case IWM_UCODE_TLV_SEC_WOWLAN:
782 if ((error = iwm_firmware_store_section(sc,
783 IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
784 device_printf(sc->sc_dev,
785 "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
786 __func__,
787 error);
788 goto parse_out;
790 break;
791 case IWM_UCODE_TLV_DEF_CALIB:
792 if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
793 device_printf(sc->sc_dev,
794 "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
795 __func__,
796 (int) tlv_len,
797 (int) sizeof(struct iwm_tlv_calib_data));
798 error = EINVAL;
799 goto parse_out;
801 if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
802 device_printf(sc->sc_dev,
803 "%s: iwm_set_default_calib() failed: %d\n",
804 __func__,
805 error);
806 goto parse_out;
808 break;
809 case IWM_UCODE_TLV_PHY_SKU:
810 if (tlv_len != sizeof(uint32_t)) {
811 error = EINVAL;
812 device_printf(sc->sc_dev,
813 "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
814 __func__,
815 (int) tlv_len);
816 goto parse_out;
818 sc->sc_fw.phy_config =
819 le32_to_cpup((const uint32_t *)tlv_data);
820 sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
821 IWM_FW_PHY_CFG_TX_CHAIN) >>
822 IWM_FW_PHY_CFG_TX_CHAIN_POS;
823 sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
824 IWM_FW_PHY_CFG_RX_CHAIN) >>
825 IWM_FW_PHY_CFG_RX_CHAIN_POS;
826 break;
828 case IWM_UCODE_TLV_API_CHANGES_SET: {
829 if (tlv_len != sizeof(struct iwm_ucode_api)) {
830 error = EINVAL;
831 goto parse_out;
833 if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
834 error = EINVAL;
835 goto parse_out;
837 break;
840 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
841 if (tlv_len != sizeof(struct iwm_ucode_capa)) {
842 error = EINVAL;
843 goto parse_out;
845 if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
846 error = EINVAL;
847 goto parse_out;
849 break;
852 case 48: /* undocumented TLV */
853 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
854 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
855 /* ignore, not used by current driver */
856 break;
858 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
859 if ((error = iwm_firmware_store_section(sc,
860 IWM_UCODE_REGULAR_USNIFFER, tlv_data,
861 tlv_len)) != 0)
862 goto parse_out;
863 break;
865 case IWM_UCODE_TLV_PAGING:
866 if (tlv_len != sizeof(uint32_t)) {
867 error = EINVAL;
868 goto parse_out;
870 paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
872 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
873 "%s: Paging: paging enabled (size = %u bytes)\n",
874 __func__, paging_mem_size);
875 if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
876 device_printf(sc->sc_dev,
877 "%s: Paging: driver supports up to %u bytes for paging image\n",
878 __func__, IWM_MAX_PAGING_IMAGE_SIZE);
879 error = EINVAL;
880 goto out;
882 if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
883 device_printf(sc->sc_dev,
884 "%s: Paging: image size isn't a multiple of %u\n",
885 __func__, IWM_FW_PAGING_SIZE);
886 error = EINVAL;
887 goto out;
890 sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
891 paging_mem_size;
892 usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
893 sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
894 paging_mem_size;
895 break;
897 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
898 if (tlv_len != sizeof(uint32_t)) {
899 error = EINVAL;
900 goto parse_out;
902 capa->n_scan_channels =
903 le32_to_cpup((const uint32_t *)tlv_data);
904 break;
906 case IWM_UCODE_TLV_FW_VERSION:
907 if (tlv_len != sizeof(uint32_t) * 3) {
908 error = EINVAL;
909 goto parse_out;
911 ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
912 "%d.%d.%d",
913 le32toh(((const uint32_t *)tlv_data)[0]),
914 le32toh(((const uint32_t *)tlv_data)[1]),
915 le32toh(((const uint32_t *)tlv_data)[2]));
916 break;
918 case IWM_UCODE_TLV_FW_MEM_SEG:
919 break;
921 default:
922 device_printf(sc->sc_dev,
923 "%s: unknown firmware section %d, abort\n",
924 __func__, tlv_type);
925 error = EINVAL;
926 goto parse_out;
930 KASSERT(error == 0, ("unhandled error"));
932 parse_out:
933 if (error) {
934 device_printf(sc->sc_dev, "firmware parse error %d, "
935 "section type %d\n", error, tlv_type);
938 out:
939 if (error) {
940 fw->fw_status = IWM_FW_STATUS_NONE;
941 if (fw->fw_fp != NULL)
942 iwm_fw_info_free(fw);
943 } else
944 fw->fw_status = IWM_FW_STATUS_DONE;
945 wakeup(&sc->sc_fw);
947 return error;
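/*
 * Editorial sketch of the ucode file format parsed above, as implied by
 * the checks at the top of the function: a header whose first 32-bit
 * word must be zero, the IWM_TLV_UCODE_MAGIC value, and the packed
 * version word decoded by IWM_UCODE_MAJOR/MINOR/API, then a stream of
 * TLVs, each padded to a 4-byte boundary (field order as assumed here
 * from struct iwm_ucode_tlv in if_iwmreg.h):
 *
 *	+------+--------+--------------+----------+
 *	| type | length | data[length] | pad to 4 |
 *	+------+--------+--------------+----------+
 *
 * The loop advances by sizeof(tlv) + roundup2(tlv_len, 4) per entry and
 * stops once fewer than sizeof(*tlv) bytes remain.
 */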
951 * DMA resource routines
954 /* fwmem is used to load firmware onto the card */
955 static int
956 iwm_alloc_fwmem(struct iwm_softc *sc)
958 /* Must be aligned on a 16-byte boundary. */
959 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
960 IWM_FH_MEM_TB_MAX_LENGTH, 16);
963 /* tx scheduler rings. not used? */
964 static int
965 iwm_alloc_sched(struct iwm_softc *sc)
967 /* TX scheduler rings must be aligned on a 1KB boundary. */
968 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
969 nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
972 /* keep-warm page is used internally by the card. see iwl-fh.h for more info */
973 static int
974 iwm_alloc_kw(struct iwm_softc *sc)
976 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
979 /* interrupt cause table */
980 static int
981 iwm_alloc_ict(struct iwm_softc *sc)
983 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
984 IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
987 static int
988 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
990 bus_size_t size;
991 int i, error;
993 ring->cur = 0;
995 /* Allocate RX descriptors (256-byte aligned). */
996 size = IWM_RX_RING_COUNT * sizeof(uint32_t);
997 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
998 if (error != 0) {
999 device_printf(sc->sc_dev,
1000 "could not allocate RX ring DMA memory\n");
1001 goto fail;
1003 ring->desc = ring->desc_dma.vaddr;
1005 /* Allocate RX status area (16-byte aligned). */
1006 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1007 sizeof(*ring->stat), 16);
1008 if (error != 0) {
1009 device_printf(sc->sc_dev,
1010 "could not allocate RX status DMA memory\n");
1011 goto fail;
1013 ring->stat = ring->stat_dma.vaddr;
1015 /* Create RX buffer DMA tag. */
1016 #if defined(__DragonFly__)
1017 error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
1019 BUS_SPACE_MAXADDR_32BIT,
1020 BUS_SPACE_MAXADDR,
1021 NULL, NULL,
1022 IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
1023 BUS_DMA_NOWAIT, &ring->data_dmat);
1024 #else
1025 error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1026 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1027 IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
1028 #endif
1029 if (error != 0) {
1030 device_printf(sc->sc_dev,
1031 "%s: could not create RX buf DMA tag, error %d\n",
1032 __func__, error);
1033 goto fail;
1036 /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
1037 error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
1038 if (error != 0) {
1039 device_printf(sc->sc_dev,
1040 "%s: could not create RX buf DMA map, error %d\n",
1041 __func__, error);
1042 goto fail;
1045 * Allocate and map RX buffers.
1047 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1048 struct iwm_rx_data *data = &ring->data[i];
1049 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1050 if (error != 0) {
1051 device_printf(sc->sc_dev,
1052 "%s: could not create RX buf DMA map, error %d\n",
1053 __func__, error);
1054 goto fail;
1056 data->m = NULL;
1058 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1059 goto fail;
1062 return 0;
1064 fail: iwm_free_rx_ring(sc, ring);
1065 return error;
1068 static void
1069 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1071 /* Reset the ring state */
1072 ring->cur = 0;
1075 * The hw rx ring index in shared memory must also be cleared,
1076 * otherwise the discrepancy can cause reprocessing chaos.
1078 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1081 static void
1082 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1084 int i;
1086 iwm_dma_contig_free(&ring->desc_dma);
1087 iwm_dma_contig_free(&ring->stat_dma);
1089 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1090 struct iwm_rx_data *data = &ring->data[i];
1092 if (data->m != NULL) {
1093 bus_dmamap_sync(ring->data_dmat, data->map,
1094 BUS_DMASYNC_POSTREAD);
1095 bus_dmamap_unload(ring->data_dmat, data->map);
1096 m_freem(data->m);
1097 data->m = NULL;
1099 if (data->map != NULL) {
1100 bus_dmamap_destroy(ring->data_dmat, data->map);
1101 data->map = NULL;
1104 if (ring->spare_map != NULL) {
1105 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1106 ring->spare_map = NULL;
1108 if (ring->data_dmat != NULL) {
1109 bus_dma_tag_destroy(ring->data_dmat);
1110 ring->data_dmat = NULL;
1114 static int
1115 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1117 bus_addr_t paddr;
1118 bus_size_t size;
1119 size_t maxsize;
1120 int nsegments;
1121 int i, error;
1123 ring->qid = qid;
1124 ring->queued = 0;
1125 ring->cur = 0;
1127 /* Allocate TX descriptors (256-byte aligned). */
1128 size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1129 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1130 if (error != 0) {
1131 device_printf(sc->sc_dev,
1132 "could not allocate TX ring DMA memory\n");
1133 goto fail;
1135 ring->desc = ring->desc_dma.vaddr;
1138 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1139 * to allocate command space for other rings.
1141 if (qid > IWM_MVM_CMD_QUEUE)
1142 return 0;
1144 size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1145 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1146 if (error != 0) {
1147 device_printf(sc->sc_dev,
1148 "could not allocate TX cmd DMA memory\n");
1149 goto fail;
1151 ring->cmd = ring->cmd_dma.vaddr;
1153 /* FW commands may require more mapped space than packets. */
1154 if (qid == IWM_MVM_CMD_QUEUE) {
1155 maxsize = IWM_RBUF_SIZE;
1156 nsegments = 1;
1157 } else {
1158 maxsize = MCLBYTES;
1159 nsegments = IWM_MAX_SCATTER - 2;
1162 #if defined(__DragonFly__)
1163 error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
1165 BUS_SPACE_MAXADDR_32BIT,
1166 BUS_SPACE_MAXADDR,
1167 NULL, NULL,
1168 maxsize, nsegments, maxsize,
1169 BUS_DMA_NOWAIT, &ring->data_dmat);
1170 #else
1171 error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1172 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1173 nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1174 #endif
1175 if (error != 0) {
1176 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1177 goto fail;
1180 paddr = ring->cmd_dma.paddr;
1181 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1182 struct iwm_tx_data *data = &ring->data[i];
1184 data->cmd_paddr = paddr;
1185 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1186 + offsetof(struct iwm_tx_cmd, scratch);
1187 paddr += sizeof(struct iwm_device_cmd);
1189 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1190 if (error != 0) {
1191 device_printf(sc->sc_dev,
1192 "could not create TX buf DMA map\n");
1193 goto fail;
1196 KASSERT(paddr == ring->cmd_dma.paddr + size,
1197 ("invalid physical address"));
1198 return 0;
1200 fail: iwm_free_tx_ring(sc, ring);
1201 return error;
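/*
 * Editorial sketch of the per-slot address arithmetic above: the
 * command buffers live back-to-back in cmd_dma, so for ring slot i
 *
 *	cmd_paddr(i)     = cmd_dma.paddr + i * sizeof(struct iwm_device_cmd)
 *	scratch_paddr(i) = cmd_paddr(i) + sizeof(struct iwm_cmd_header)
 *	                       + offsetof(struct iwm_tx_cmd, scratch)
 *
 * which is what the paddr walk computes and the closing KASSERT
 * re-checks against cmd_dma.paddr + size.
 */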
1204 static void
1205 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1207 int i;
1209 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1210 struct iwm_tx_data *data = &ring->data[i];
1212 if (data->m != NULL) {
1213 bus_dmamap_sync(ring->data_dmat, data->map,
1214 BUS_DMASYNC_POSTWRITE);
1215 bus_dmamap_unload(ring->data_dmat, data->map);
1216 m_freem(data->m);
1217 data->m = NULL;
1220 /* Clear TX descriptors. */
1221 memset(ring->desc, 0, ring->desc_dma.size);
1222 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1223 BUS_DMASYNC_PREWRITE);
1224 sc->qfullmsk &= ~(1 << ring->qid);
1225 ring->queued = 0;
1226 ring->cur = 0;
1228 if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1229 iwm_pcie_clear_cmd_in_flight(sc);
1232 static void
1233 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1235 int i;
1237 iwm_dma_contig_free(&ring->desc_dma);
1238 iwm_dma_contig_free(&ring->cmd_dma);
1240 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1241 struct iwm_tx_data *data = &ring->data[i];
1243 if (data->m != NULL) {
1244 bus_dmamap_sync(ring->data_dmat, data->map,
1245 BUS_DMASYNC_POSTWRITE);
1246 bus_dmamap_unload(ring->data_dmat, data->map);
1247 m_freem(data->m);
1248 data->m = NULL;
1250 if (data->map != NULL) {
1251 bus_dmamap_destroy(ring->data_dmat, data->map);
1252 data->map = NULL;
1255 if (ring->data_dmat != NULL) {
1256 bus_dma_tag_destroy(ring->data_dmat);
1257 ring->data_dmat = NULL;
1262 * High-level hardware frobbing routines
1265 static void
1266 iwm_enable_interrupts(struct iwm_softc *sc)
1268 sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1269 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1272 static void
1273 iwm_restore_interrupts(struct iwm_softc *sc)
1275 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1278 static void
1279 iwm_disable_interrupts(struct iwm_softc *sc)
1281 /* disable interrupts */
1282 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1284 /* acknowledge all interrupts */
1285 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1286 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1289 static void
1290 iwm_ict_reset(struct iwm_softc *sc)
1292 iwm_disable_interrupts(sc);
1294 /* Reset ICT table. */
1295 memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1296 sc->ict_cur = 0;
1298 /* Set physical address of ICT table (4KB aligned). */
1299 IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1300 IWM_CSR_DRAM_INT_TBL_ENABLE
1301 | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1302 | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1303 | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1305 /* Switch to ICT interrupt mode in driver. */
1306 sc->sc_flags |= IWM_FLAG_USE_ICT;
1308 /* Re-enable interrupts. */
1309 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1310 iwm_enable_interrupts(sc);
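/*
 * Editorial note on the IWM_CSR_DRAM_INT_TBL_REG write above: the ICT
 * table is allocated with 1 << IWM_ICT_PADDR_SHIFT alignment (see
 * iwm_alloc_ict()), so the shifted physical address has no low bits set
 * and can share the register with the enable and wrap-check flags.
 */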
1314 * Since this .. hard-resets things, it's time to actually
1315 * mark the first vap (if any) as having no mac context.
1316 * It's annoying, but since the driver is potentially being
1317 * stop/start'ed whilst active (thanks openbsd port!) we
1318 * have to correctly track this.
1320 static void
1321 iwm_stop_device(struct iwm_softc *sc)
1323 struct ieee80211com *ic = &sc->sc_ic;
1324 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1325 int chnl, qid;
1326 uint32_t mask = 0;
1328 /* tell the device to stop sending interrupts */
1329 iwm_disable_interrupts(sc);
1332 * FreeBSD-local: mark the first vap as not-uploaded,
1333 * so the next transition through auth/assoc
1334 * will correctly populate the MAC context.
1336 if (vap) {
1337 struct iwm_vap *iv = IWM_VAP(vap);
1338 iv->phy_ctxt = NULL;
1339 iv->is_uploaded = 0;
1342 /* device going down, Stop using ICT table */
1343 sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1345 /* stop tx and rx. tx and rx bits, as usual, are from if_iwn */
1347 if (iwm_nic_lock(sc)) {
1348 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1350 /* Stop each Tx DMA channel */
1351 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1352 IWM_WRITE(sc,
1353 IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1354 mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1357 /* Wait for DMA channels to be idle */
1358 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1359 5000)) {
1360 device_printf(sc->sc_dev,
1361 "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1362 IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1364 iwm_nic_unlock(sc);
1366 iwm_pcie_rx_stop(sc);
1368 /* Stop RX ring. */
1369 iwm_reset_rx_ring(sc, &sc->rxq);
1371 /* Reset all TX rings. */
1372 for (qid = 0; qid < nitems(sc->txq); qid++)
1373 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1375 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1376 /* Power-down device's busmaster DMA clocks */
1377 if (iwm_nic_lock(sc)) {
1378 iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1379 IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1380 iwm_nic_unlock(sc);
1382 DELAY(5);
1385 /* Make sure (redundant) we've released our request to stay awake */
1386 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1387 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1389 /* Stop the device, and put it in low power state */
1390 iwm_apm_stop(sc);
1392 /* stop and reset the on-board processor */
1393 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1394 DELAY(1000);
1397 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1398 * This is a bug in certain versions of the hardware.
1399 * Certain devices also keep sending HW RF kill interrupt all
1400 * the time, unless the interrupt is ACKed even if the interrupt
1401 * should be masked. Re-ACK all the interrupts here.
1403 iwm_disable_interrupts(sc);
1406 * Even if we stop the HW, we still want the RF kill
1407 * interrupt
1409 iwm_enable_rfkill_int(sc);
1410 iwm_check_rfkill(sc);
1413 static void
1414 iwm_mvm_nic_config(struct iwm_softc *sc)
1416 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1417 uint32_t reg_val = 0;
1418 uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1420 radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1421 IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1422 radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1423 IWM_FW_PHY_CFG_RADIO_STEP_POS;
1424 radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1425 IWM_FW_PHY_CFG_RADIO_DASH_POS;
1427 /* SKU control */
1428 reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1429 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1430 reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1431 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1433 /* radio configuration */
1434 reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1435 reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1436 reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1438 IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1440 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1441 "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1442 radio_cfg_step, radio_cfg_dash);
1445 * W/A : NIC is stuck in a reset state after Early PCIe power off
1446 * (PCIe power is lost before PERST# is asserted), causing ME FW
1447 * to lose ownership and be unable to obtain it back.
1449 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1450 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1451 IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1452 ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1456 static int
1457 iwm_nic_rx_init(struct iwm_softc *sc)
1460 * Initialize RX ring. This is from the iwn driver.
1462 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1464 /* Stop Rx DMA */
1465 iwm_pcie_rx_stop(sc);
1467 if (!iwm_nic_lock(sc))
1468 return EBUSY;
1470 /* reset and flush pointers */
1471 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1472 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1473 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1474 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1476 /* Set physical address of RX ring (256-byte aligned). */
1477 IWM_WRITE(sc,
1478 IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1480 /* Set physical address of RX status (16-byte aligned). */
1481 IWM_WRITE(sc,
1482 IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1484 #if defined(__DragonFly__)
1485 /* Force serialization (probably not needed but don't trust the HW) */
1486 IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
1487 #endif
1489 /* Enable Rx DMA
1490 * XXX 5000 HW isn't supported by the iwm(4) driver.
1491 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
1492 * the credit mechanism in 5000 HW RX FIFO
1493 * Direct rx interrupts to hosts
1494 * Rx buffer size 4 or 8k or 12k
1495 * RB timeout 0x10
1496 * 256 RBDs
1498 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1499 IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
1500 IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
1501 IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
1502 IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
1503 (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1504 IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1506 IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1508 /* W/A for interrupt coalescing bug in 7260 and 3160 */
1509 if (sc->cfg->host_interrupt_operation_mode)
1510 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1513 * Thus sayeth el jefe (iwlwifi) via a comment:
1515 * This value should initially be 0 (before preparing any
1516 * RBs), should be 8 after preparing the first 8 RBs (for example)
1518 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1520 iwm_nic_unlock(sc);
1522 return 0;
1525 static int
1526 iwm_nic_tx_init(struct iwm_softc *sc)
1528 int qid;
1530 if (!iwm_nic_lock(sc))
1531 return EBUSY;
1533 /* Deactivate TX scheduler. */
1534 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1536 /* Set physical address of "keep warm" page (16-byte aligned). */
1537 IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1539 /* Initialize TX rings. */
1540 for (qid = 0; qid < nitems(sc->txq); qid++) {
1541 struct iwm_tx_ring *txq = &sc->txq[qid];
1543 /* Set physical address of TX ring (256-byte aligned). */
1544 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1545 txq->desc_dma.paddr >> 8);
1546 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1547 "%s: loading ring %d descriptors (%p) at %lx\n",
1548 __func__,
1549 qid, txq->desc,
1550 (unsigned long) (txq->desc_dma.paddr >> 8));
1553 iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1555 iwm_nic_unlock(sc);
1557 return 0;
1560 static int
1561 iwm_nic_init(struct iwm_softc *sc)
1563 int error;
1565 iwm_apm_init(sc);
1566 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1567 iwm_set_pwr(sc);
1569 iwm_mvm_nic_config(sc);
1571 if ((error = iwm_nic_rx_init(sc)) != 0)
1572 return error;
1575 * Ditto for TX, from iwn
1577 if ((error = iwm_nic_tx_init(sc)) != 0)
1578 return error;
1580 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1581 "%s: shadow registers enabled\n", __func__);
1582 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1584 return 0;
1588 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1590 if (!iwm_nic_lock(sc)) {
1591 device_printf(sc->sc_dev,
1592 "%s: cannot enable txq %d\n",
1593 __func__,
1594 qid);
1595 return EBUSY;
1598 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1600 if (qid == IWM_MVM_CMD_QUEUE) {
1601 /* deactivate before configuration */
1602 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1603 (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1604 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1606 iwm_nic_unlock(sc);
1608 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1610 if (!iwm_nic_lock(sc)) {
1611 device_printf(sc->sc_dev,
1612 "%s: cannot enable txq %d\n", __func__, qid);
1613 return EBUSY;
1615 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1616 iwm_nic_unlock(sc);
1618 iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1619 /* Set scheduler window size and frame limit. */
1620 iwm_write_mem32(sc,
1621 sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1622 sizeof(uint32_t),
1623 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1624 IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1625 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1626 IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1628 if (!iwm_nic_lock(sc)) {
1629 device_printf(sc->sc_dev,
1630 "%s: cannot enable txq %d\n", __func__, qid);
1631 return EBUSY;
1633 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1634 (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1635 (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1636 (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1637 IWM_SCD_QUEUE_STTS_REG_MSK);
1638 } else {
1639 struct iwm_scd_txq_cfg_cmd cmd;
1640 int error;
1642 iwm_nic_unlock(sc);
1644 memset(&cmd, 0, sizeof(cmd));
1645 cmd.scd_queue = qid;
1646 cmd.enable = 1;
1647 cmd.sta_id = sta_id;
1648 cmd.tx_fifo = fifo;
1649 cmd.aggregate = 0;
1650 cmd.window = IWM_FRAME_LIMIT;
1652 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1653 sizeof(cmd), &cmd);
1654 if (error) {
1655 device_printf(sc->sc_dev,
1656 "cannot enable txq %d\n", qid);
1657 return error;
1660 if (!iwm_nic_lock(sc))
1661 return EBUSY;
1664 iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1665 iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1667 iwm_nic_unlock(sc);
1669 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1670 __func__, qid, fifo);
1672 return 0;
1675 static int
1676 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1678 int error, chnl;
1680 int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1681 IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1683 if (!iwm_nic_lock(sc))
1684 return EBUSY;
1686 iwm_ict_reset(sc);
1688 sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1689 if (scd_base_addr != 0 &&
1690 scd_base_addr != sc->scd_base_addr) {
1691 device_printf(sc->sc_dev,
1692 "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1693 __func__, sc->scd_base_addr, scd_base_addr);
1696 iwm_nic_unlock(sc);
1698 /* reset context data, TX status and translation data */
1699 error = iwm_write_mem(sc,
1700 sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1701 NULL, clear_dwords);
1702 if (error)
1703 return EBUSY;
1705 if (!iwm_nic_lock(sc))
1706 return EBUSY;
1708 /* Set physical address of TX scheduler rings (1KB aligned). */
1709 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1711 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1713 iwm_nic_unlock(sc);
1715 /* enable command channel */
1716 error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1717 if (error)
1718 return error;
1720 if (!iwm_nic_lock(sc))
1721 return EBUSY;
1723 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1725 /* Enable DMA channels. */
1726 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1727 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1728 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1729 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1732 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1733 IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1735 iwm_nic_unlock(sc);
1737 /* Enable L1-Active */
1738 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1739 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1740 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1743 return error;
1747 * NVM read access and content parsing. We do not support
1748 * external NVM or writing NVM.
1749 * iwlwifi/mvm/nvm.c
1752 /* Default NVM size to read */
1753 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
1755 #define IWM_NVM_WRITE_OPCODE 1
1756 #define IWM_NVM_READ_OPCODE 0
1758 /* load nvm chunk response */
1759 enum {
1760 IWM_READ_NVM_CHUNK_SUCCEED = 0,
1761 IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1764 static int
1765 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1766 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1768 struct iwm_nvm_access_cmd nvm_access_cmd = {
1769 .offset = htole16(offset),
1770 .length = htole16(length),
1771 .type = htole16(section),
1772 .op_code = IWM_NVM_READ_OPCODE,
1774 struct iwm_nvm_access_resp *nvm_resp;
1775 struct iwm_rx_packet *pkt;
1776 struct iwm_host_cmd cmd = {
1777 .id = IWM_NVM_ACCESS_CMD,
1778 .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1779 .data = { &nvm_access_cmd, },
1781 int ret, bytes_read, offset_read;
1782 uint8_t *resp_data;
1784 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1786 ret = iwm_send_cmd(sc, &cmd);
1787 if (ret) {
1788 device_printf(sc->sc_dev,
1789 "Could not send NVM_ACCESS command (error=%d)\n", ret);
1790 return ret;
1793 pkt = cmd.resp_pkt;
1795 /* Extract NVM response */
1796 nvm_resp = (void *)pkt->data;
1797 ret = le16toh(nvm_resp->status);
1798 bytes_read = le16toh(nvm_resp->length);
1799 offset_read = le16toh(nvm_resp->offset);
1800 resp_data = nvm_resp->data;
1801 if (ret) {
1802 if ((offset != 0) &&
1803 (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1805 * meaning of NOT_VALID_ADDRESS: the driver tried to read a
1806 * chunk from an address that is a multiple of 2K and got an
1807 * error because that address is empty.
1808 * meaning of (offset != 0): the driver already read valid
1809 * data from another chunk, so this case is not an error.
1812 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1813 "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
1814 offset);
1815 *len = 0;
1816 ret = 0;
1817 } else {
1818 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1819 "NVM access command failed with status %d\n", ret);
1820 ret = EIO;
1822 goto exit;
1825 if (offset_read != offset) {
1826 device_printf(sc->sc_dev,
1827 "NVM ACCESS response with invalid offset %d\n",
1828 offset_read);
1829 ret = EINVAL;
1830 goto exit;
1833 if (bytes_read > length) {
1834 device_printf(sc->sc_dev,
1835 "NVM ACCESS response with too much data "
1836 "(%d bytes requested, %d bytes received)\n",
1837 length, bytes_read);
1838 ret = EINVAL;
1839 goto exit;
1842 /* Write data to NVM */
1843 memcpy(data + offset, resp_data, bytes_read);
1844 *len = bytes_read;
1846 exit:
1847 iwm_free_resp(sc, &cmd);
1848 return ret;
1852 * Reads an NVM section completely.
1853 * NICs prior to the 7000 family don't have a real NVM, but just read
1854 * section 0, which is the EEPROM. Because EEPROM reads are not bounded
1855 * by the uCode, we must check manually in this case that we don't
1856 * overflow by trying to read more than the EEPROM size.
1857 * For 7000 family NICs, we supply the maximal size we can read, and
1858 * the uCode fills the response with as much data as it can
1859 * without overflowing, so no check is needed.
1861 static int
1862 iwm_nvm_read_section(struct iwm_softc *sc,
1863 uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1865 uint16_t seglen, length, offset = 0;
1866 int ret;
1868 /* Set nvm section read length */
1869 length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1871 seglen = length;
1873 /* Read the NVM until exhausted (reading less than requested) */
1874 while (seglen == length) {
/* Check that our size assumptions hold and we don't overflow the buffer */
1876 if ((size_read + offset + length) >
1877 sc->cfg->eeprom_size) {
1878 device_printf(sc->sc_dev,
1879 "EEPROM size is too small for NVM\n");
1880 return ENOBUFS;
1883 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1884 if (ret) {
1885 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1886 "Cannot read NVM from section %d offset %d, length %d\n",
1887 section, offset, length);
1888 return ret;
1890 offset += seglen;
1893 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1894 "NVM section %d read completed\n", section);
1895 *len = offset;
1896 return 0;
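/*
 * Example of the loop above: a 5000-byte section is fetched in
 * IWM_NVM_DEFAULT_CHUNK_SIZE (2048) byte chunks; the reads at offsets
 * 0 and 2048 return 2048 bytes each, the read at offset 4096 returns
 * 904 bytes, and that short read terminates the loop with *len = 5000.
 *
 * Illustrative usage sketch (not compiled); the buffer sizing follows
 * iwm_nvm_init() below, and the section choice is just an example.
 */
#if 0
uint8_t *buf = kmalloc(sc->cfg->eeprom_size, M_DEVBUF, M_INTWAIT | M_ZERO);
uint16_t len = 0;

if (buf != NULL) {
	if (iwm_nvm_read_section(sc, IWM_NVM_SECTION_TYPE_SW, buf,
	    &len, 0) == 0)
		device_printf(sc->sc_dev, "SW section: %d bytes\n", len);
	kfree(buf, M_DEVBUF);
}
#endif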
1899 /* NVM offsets (in words) definitions */
1900 enum iwm_nvm_offsets {
1901 /* NVM HW-Section offset (in words) definitions */
1902 IWM_HW_ADDR = 0x15,
1904 /* NVM SW-Section offset (in words) definitions */
1905 IWM_NVM_SW_SECTION = 0x1C0,
1906 IWM_NVM_VERSION = 0,
1907 IWM_RADIO_CFG = 1,
1908 IWM_SKU = 2,
1909 IWM_N_HW_ADDRS = 3,
1910 IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1912 /* NVM calibration section offset (in words) definitions */
1913 IWM_NVM_CALIB_SECTION = 0x2B8,
1914 IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1917 enum iwm_8000_nvm_offsets {
1918 /* NVM HW-Section offset (in words) definitions */
1919 IWM_HW_ADDR0_WFPM_8000 = 0x12,
1920 IWM_HW_ADDR1_WFPM_8000 = 0x16,
1921 IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1922 IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1923 IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1925 /* NVM SW-Section offset (in words) definitions */
1926 IWM_NVM_SW_SECTION_8000 = 0x1C0,
1927 IWM_NVM_VERSION_8000 = 0,
1928 IWM_RADIO_CFG_8000 = 0,
1929 IWM_SKU_8000 = 2,
1930 IWM_N_HW_ADDRS_8000 = 3,
1932 /* NVM REGULATORY -Section offset (in words) definitions */
1933 IWM_NVM_CHANNELS_8000 = 0,
1934 IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1935 IWM_NVM_LAR_OFFSET_8000 = 0x507,
1936 IWM_NVM_LAR_ENABLED_8000 = 0x7,
1938 /* NVM calibration section offset (in words) definitions */
1939 IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1940 IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1943 /* SKU Capabilities (actual values from NVM definition) */
1944 enum nvm_sku_bits {
1945 IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),
1946 IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1),
1947 IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2),
1948 IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3),
1951 /* radio config bits (actual values from NVM definition) */
1952 #define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
1953 #define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
1954 #define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
1955 #define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
1956 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
1957 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1959 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x) (x & 0xF)
1960 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x) ((x >> 4) & 0xF)
1961 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x) ((x >> 8) & 0xF)
1962 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x) ((x >> 12) & 0xFFF)
1963 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x) ((x >> 24) & 0xF)
1964 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x) ((x >> 28) & 0xF)
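/*
 * Worked example for the 7000-family masks above: radio_cfg = 0x0ABC
 * unpacks as DASH = 0, STEP = 3, TYPE = 3, PNUM = 2, TX_ANT = 0xA and
 * RX_ANT = 0x0.
 */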
1966 #define DEFAULT_MAX_TX_POWER 16
1969 * enum iwm_nvm_channel_flags - channel flags in NVM
1970 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1971 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1972 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1973 * @IWM_NVM_CHANNEL_RADAR: radar detection required
1974 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1975 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1976 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1977 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1978 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1979 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1981 enum iwm_nvm_channel_flags {
1982 IWM_NVM_CHANNEL_VALID = (1 << 0),
1983 IWM_NVM_CHANNEL_IBSS = (1 << 1),
1984 IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1985 IWM_NVM_CHANNEL_RADAR = (1 << 4),
1986 IWM_NVM_CHANNEL_DFS = (1 << 7),
1987 IWM_NVM_CHANNEL_WIDE = (1 << 8),
1988 IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1989 IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1990 IWM_NVM_CHANNEL_160MHZ = (1 << 11),
/*
 * Translate EEPROM flags to net80211.
 */
1996 static uint32_t
1997 iwm_eeprom_channel_flags(uint16_t ch_flags)
1999 uint32_t nflags;
2001 nflags = 0;
2002 if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2003 nflags |= IEEE80211_CHAN_PASSIVE;
2004 if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2005 nflags |= IEEE80211_CHAN_NOADHOC;
2006 if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2007 nflags |= IEEE80211_CHAN_DFS;
2008 /* Just in case. */
2009 nflags |= IEEE80211_CHAN_NOADHOC;
2012 return (nflags);
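/*
 * Example: NVM flags VALID|IBSS (ACTIVE clear) translate to
 * IEEE80211_CHAN_PASSIVE; adding RADAR also sets IEEE80211_CHAN_DFS
 * and, just in case, IEEE80211_CHAN_NOADHOC.
 */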
2015 static void
2016 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2017 int maxchans, int *nchans, int ch_idx, size_t ch_num,
2018 const uint8_t bands[])
2020 const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2021 uint32_t nflags;
2022 uint16_t ch_flags;
2023 uint8_t ieee;
2024 int error;
2026 for (; ch_idx < ch_num; ch_idx++) {
2027 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2028 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2029 ieee = iwm_nvm_channels[ch_idx];
2030 else
2031 ieee = iwm_nvm_channels_8000[ch_idx];
2033 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2034 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2035 "Ch. %d Flags %x [%sGHz] - No traffic\n",
2036 ieee, ch_flags,
2037 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2038 "5.2" : "2.4");
2039 continue;
2042 nflags = iwm_eeprom_channel_flags(ch_flags);
2043 error = ieee80211_add_channel(chans, maxchans, nchans,
2044 ieee, 0, 0, nflags, bands);
2045 if (error != 0)
2046 break;
2048 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2049 "Ch. %d Flags %x [%sGHz] - Added\n",
2050 ieee, ch_flags,
2051 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2052 "5.2" : "2.4");
2056 static void
2057 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2058 struct ieee80211_channel chans[])
2060 struct iwm_softc *sc = ic->ic_softc;
2061 struct iwm_nvm_data *data = sc->nvm_data;
2062 uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
2063 size_t ch_num;
2065 memset(bands, 0, sizeof(bands));
2066 /* 1-13: 11b/g channels. */
2067 setbit(bands, IEEE80211_MODE_11B);
2068 setbit(bands, IEEE80211_MODE_11G);
2069 iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2070 IWM_NUM_2GHZ_CHANNELS - 1, bands);
2072 /* 14: 11b channel only. */
2073 clrbit(bands, IEEE80211_MODE_11G);
2074 iwm_add_channel_band(sc, chans, maxchans, nchans,
2075 IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2077 if (data->sku_cap_band_52GHz_enable) {
2078 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2079 ch_num = nitems(iwm_nvm_channels);
2080 else
2081 ch_num = nitems(iwm_nvm_channels_8000);
2082 memset(bands, 0, sizeof(bands));
2083 setbit(bands, IEEE80211_MODE_11A);
2084 iwm_add_channel_band(sc, chans, maxchans, nchans,
2085 IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2089 static void
2090 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2091 const uint16_t *mac_override, const uint16_t *nvm_hw)
2093 const uint8_t *hw_addr;
2095 if (mac_override) {
2096 static const uint8_t reserved_mac[] = {
2097 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2100 hw_addr = (const uint8_t *)(mac_override +
2101 IWM_MAC_ADDRESS_OVERRIDE_8000);
/*
 * Store the MAC address from the MAO (MAC address override)
 * section. No byte swapping is required in the MAO section.
 */
2107 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
/*
 * Force the use of the OTP MAC address in case of a reserved MAC
 * address in the NVM, or if the address is given but invalid.
 */
2113 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2114 !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2115 iwm_is_valid_ether_addr(data->hw_addr) &&
2116 !IEEE80211_IS_MULTICAST(data->hw_addr))
2117 return;
2119 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2120 "%s: mac address from nvm override section invalid\n",
2121 __func__);
2124 if (nvm_hw) {
2125 /* read the mac address from WFMP registers */
2126 uint32_t mac_addr0 =
2127 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2128 uint32_t mac_addr1 =
2129 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2131 hw_addr = (const uint8_t *)&mac_addr0;
2132 data->hw_addr[0] = hw_addr[3];
2133 data->hw_addr[1] = hw_addr[2];
2134 data->hw_addr[2] = hw_addr[1];
2135 data->hw_addr[3] = hw_addr[0];
2137 hw_addr = (const uint8_t *)&mac_addr1;
2138 data->hw_addr[4] = hw_addr[1];
2139 data->hw_addr[5] = hw_addr[0];
2141 return;
2144 device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2145 memset(data->hw_addr, 0, sizeof(data->hw_addr));
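/*
 * Illustration of the WFMP path above (little-endian host assumed):
 * if IWM_WFMP_MAC_ADDR_0 reads 0xAABBCCDD and IWM_WFMP_MAC_ADDR_1
 * reads 0x0000EEFF, the resulting address is AA:BB:CC:DD:EE:FF --
 * the registers hold the address most-significant byte first, so the
 * bytes are pulled out of the 32-bit words in reverse order.
 */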
2148 static int
2149 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2150 const uint16_t *phy_sku)
2152 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2153 return le16_to_cpup(nvm_sw + IWM_SKU);
2155 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2158 static int
2159 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2161 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2162 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2163 else
2164 return le32_to_cpup((const uint32_t *)(nvm_sw +
2165 IWM_NVM_VERSION_8000));
2168 static int
2169 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2170 const uint16_t *phy_sku)
2172 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2173 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2175 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2178 static int
2179 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2181 int n_hw_addr;
2183 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2184 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2186 n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2188 return n_hw_addr & IWM_N_HW_ADDR_MASK;
2191 static void
2192 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2193 uint32_t radio_cfg)
2195 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2196 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2197 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2198 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2199 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2200 return;
2203 /* set the radio configuration for family 8000 */
2204 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2205 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2206 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2207 data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2208 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2209 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2212 static int
2213 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2214 const uint16_t *nvm_hw, const uint16_t *mac_override)
2216 #ifdef notyet /* for FAMILY 9000 */
2217 if (cfg->mac_addr_from_csr) {
2218 iwm_set_hw_address_from_csr(sc, data);
2219 } else
2220 #endif
2221 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2222 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
/*
 * The address is stored as little-endian 16-bit words, so the
 * bytes appear in 2-1-4-3-6-5 order and must be swapped pairwise.
 */
2225 data->hw_addr[0] = hw_addr[1];
2226 data->hw_addr[1] = hw_addr[0];
2227 data->hw_addr[2] = hw_addr[3];
2228 data->hw_addr[3] = hw_addr[2];
2229 data->hw_addr[4] = hw_addr[5];
2230 data->hw_addr[5] = hw_addr[4];
2231 } else {
2232 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2235 if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2236 device_printf(sc->sc_dev, "no valid mac address was found\n");
2237 return EINVAL;
2240 return 0;
2243 static struct iwm_nvm_data *
2244 iwm_parse_nvm_data(struct iwm_softc *sc,
2245 const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2246 const uint16_t *nvm_calib, const uint16_t *mac_override,
2247 const uint16_t *phy_sku, const uint16_t *regulatory)
2249 struct iwm_nvm_data *data;
2250 uint32_t sku, radio_cfg;
2251 uint16_t lar_config;
2253 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2254 data = kmalloc(sizeof(*data) +
2255 IWM_NUM_CHANNELS * sizeof(uint16_t),
2256 M_DEVBUF, M_WAITOK | M_ZERO);
2257 } else {
2258 data = kmalloc(sizeof(*data) +
2259 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2260 M_DEVBUF, M_WAITOK | M_ZERO);
2262 if (!data)
2263 return NULL;
2265 data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2267 radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2268 iwm_set_radio_cfg(sc, data, radio_cfg);
2270 sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2271 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2272 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2273 data->sku_cap_11n_enable = 0;
2275 data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2277 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2278 uint16_t lar_offset = data->nvm_version < 0xE39 ?
2279 IWM_NVM_LAR_OFFSET_8000_OLD :
2280 IWM_NVM_LAR_OFFSET_8000;
2282 lar_config = le16_to_cpup(regulatory + lar_offset);
2283 data->lar_enabled = !!(lar_config &
2284 IWM_NVM_LAR_ENABLED_8000);
2287 /* If no valid mac address was found - bail out */
2288 if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2289 kfree(data, M_DEVBUF);
2290 return NULL;
2293 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2294 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2295 IWM_NUM_CHANNELS * sizeof(uint16_t));
2296 } else {
2297 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2298 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2301 return data;
2304 static void
2305 iwm_free_nvm_data(struct iwm_nvm_data *data)
2307 if (data != NULL)
2308 kfree(data, M_DEVBUF);
2311 static struct iwm_nvm_data *
2312 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2314 const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2316 /* Checking for required sections */
2317 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2318 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2319 !sections[sc->cfg->nvm_hw_section_num].data) {
2320 device_printf(sc->sc_dev,
2321 "Can't parse empty OTP/NVM sections\n");
2322 return NULL;
2324 } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2325 /* SW and REGULATORY sections are mandatory */
2326 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2327 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2328 device_printf(sc->sc_dev,
2329 "Can't parse empty OTP/NVM sections\n");
2330 return NULL;
2332 /* MAC_OVERRIDE or at least HW section must exist */
2333 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2334 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2335 device_printf(sc->sc_dev,
2336 "Can't parse mac_address, empty sections\n");
2337 return NULL;
2340 /* PHY_SKU section is mandatory in B0 */
2341 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2342 device_printf(sc->sc_dev,
2343 "Can't parse phy_sku in B0, empty sections\n");
2344 return NULL;
2346 } else {
2347 panic("unknown device family %d\n", sc->cfg->device_family);
2350 hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2351 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2352 calib = (const uint16_t *)
2353 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2354 regulatory = (const uint16_t *)
2355 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2356 mac_override = (const uint16_t *)
2357 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2358 phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2360 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2361 phy_sku, regulatory);
2364 static int
2365 iwm_nvm_init(struct iwm_softc *sc)
2367 struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2368 int i, ret, section;
2369 uint32_t size_read = 0;
2370 uint8_t *nvm_buffer, *temp;
2371 uint16_t len;
2373 memset(nvm_sections, 0, sizeof(nvm_sections));
2375 if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2376 return EINVAL;
/* Load NVM values from the NIC's firmware. */
2380 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2382 nvm_buffer = kmalloc(sc->cfg->eeprom_size, M_DEVBUF,
2383 M_INTWAIT | M_ZERO);
2384 if (!nvm_buffer)
2385 return ENOMEM;
2386 for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
/* Read this section into the common scratch buffer. */
2388 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2389 &len, size_read);
2390 if (ret)
2391 continue;
2392 size_read += len;
2393 temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
2394 if (!temp) {
2395 ret = ENOMEM;
2396 break;
2398 memcpy(temp, nvm_buffer, len);
2400 nvm_sections[section].data = temp;
2401 nvm_sections[section].length = len;
2403 if (!size_read)
2404 device_printf(sc->sc_dev, "OTP is blank\n");
2405 kfree(nvm_buffer, M_DEVBUF);
2407 sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2408 if (!sc->nvm_data)
2409 return EINVAL;
2410 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2411 "nvm version = %x\n", sc->nvm_data->nvm_version);
2413 for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2414 if (nvm_sections[i].data != NULL)
2415 kfree(nvm_sections[i].data, M_DEVBUF);
2418 return 0;
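/*
 * Firmware loading.  Each uCode section is copied through the fw_dma
 * bounce buffer in chunks of at most IWM_FH_MEM_TB_MAX_LENGTH bytes
 * and DMA'd by the FH service channel to its destination SRAM
 * address; destinations in the extended range are only reachable
 * while IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE is set.
 */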
2421 static int
2422 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2423 const struct iwm_fw_desc *section)
2425 struct iwm_dma_info *dma = &sc->fw_dma;
2426 uint8_t *v_addr;
2427 bus_addr_t p_addr;
2428 uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2429 int ret = 0;
2431 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2432 "%s: [%d] uCode section being loaded...\n",
2433 __func__, section_num);
2435 v_addr = dma->vaddr;
2436 p_addr = dma->paddr;
2438 for (offset = 0; offset < section->len; offset += chunk_sz) {
2439 uint32_t copy_size, dst_addr;
2440 int extended_addr = FALSE;
2442 copy_size = MIN(chunk_sz, section->len - offset);
2443 dst_addr = section->offset + offset;
2445 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2446 dst_addr <= IWM_FW_MEM_EXTENDED_END)
2447 extended_addr = TRUE;
2449 if (extended_addr)
2450 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2451 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2453 memcpy(v_addr, (const uint8_t *)section->data + offset,
2454 copy_size);
2455 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2456 ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2457 copy_size);
2459 if (extended_addr)
2460 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2461 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2463 if (ret) {
2464 device_printf(sc->sc_dev,
2465 "%s: Could not load the [%d] uCode section\n",
2466 __func__, section_num);
2467 break;
2471 return ret;
/*
 * ucode
 */
2477 static int
2478 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2479 bus_addr_t phy_addr, uint32_t byte_cnt)
2481 int ret;
2483 sc->sc_fw_chunk_done = 0;
2485 if (!iwm_nic_lock(sc))
2486 return EBUSY;
2488 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2489 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2491 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2492 dst_addr);
2494 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2495 phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2497 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2498 (iwm_get_dma_hi_addr(phy_addr)
2499 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2501 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2502 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2503 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2504 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2506 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2507 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2508 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2509 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2511 iwm_nic_unlock(sc);
2513 /* wait up to 5s for this segment to load */
2514 ret = 0;
2515 while (!sc->sc_fw_chunk_done) {
2516 #if defined(__DragonFly__)
2517 ret = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", 5 * hz);
2518 #else
2519 ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", 5 * hz);
2520 #endif
2521 if (ret)
2522 break;
2525 if (ret != 0) {
2526 device_printf(sc->sc_dev,
2527 "fw chunk addr 0x%x len %d failed to load\n",
2528 dst_addr, byte_cnt);
2529 return ETIMEDOUT;
2532 return 0;
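/*
 * Note: sc_fw_chunk_done is set and &sc->sc_fw is woken from the
 * interrupt path, elsewhere in this file, once the FH_TX interrupt
 * reports the service-channel transfer complete.
 */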
2535 static int
2536 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2537 const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2539 int shift_param;
2540 int i, ret = 0, sec_num = 0x1;
2541 uint32_t val, last_read_idx = 0;
2543 if (cpu == 1) {
2544 shift_param = 0;
2545 *first_ucode_section = 0;
2546 } else {
2547 shift_param = 16;
2548 (*first_ucode_section)++;
2551 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2552 last_read_idx = i;
/*
 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1
 * sections from the CPU2 sections.
 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
 * CPU2 sections from the CPU2 paging sections.
 */
2560 if (!image->fw_sect[i].data ||
2561 image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2562 image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2563 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2564 "Break since Data not valid or Empty section, sec = %d\n",
2566 break;
2568 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2569 if (ret)
2570 return ret;
2572 /* Notify the ucode of the loaded section number and status */
2573 if (iwm_nic_lock(sc)) {
2574 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2575 val = val | (sec_num << shift_param);
2576 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2577 sec_num = (sec_num << 1) | 0x1;
2578 iwm_nic_unlock(sc);
2582 *first_ucode_section = last_read_idx;
2584 iwm_enable_interrupts(sc);
2586 if (iwm_nic_lock(sc)) {
2587 if (cpu == 1)
2588 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2589 else
2590 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2591 iwm_nic_unlock(sc);
2594 return 0;
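/*
 * sec_num bookkeeping above: it starts at 1 and after each section is
 * shifted left and OR'ed with 1, so the values written are 1, 3, 7,
 * ... -- i.e. IWM_FH_UCODE_LOAD_STATUS accumulates a bitmask of the
 * sections loaded so far (shifted up by 16 for CPU2).  Writing 0xFFFF
 * or 0xFFFFFFFF then marks CPU1, or both CPUs, fully loaded.
 */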
2597 static int
2598 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2599 const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2601 int shift_param;
2602 int i, ret = 0;
2603 uint32_t last_read_idx = 0;
2605 if (cpu == 1) {
2606 shift_param = 0;
2607 *first_ucode_section = 0;
2608 } else {
2609 shift_param = 16;
2610 (*first_ucode_section)++;
2613 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2614 last_read_idx = i;
/*
 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1
 * sections from the CPU2 sections.
 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
 * CPU2 sections from the CPU2 paging sections.
 */
2622 if (!image->fw_sect[i].data ||
2623 image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2624 image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2625 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2626 "Break since Data not valid or Empty section, sec = %d\n",
2628 break;
2631 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2632 if (ret)
2633 return ret;
2636 *first_ucode_section = last_read_idx;
2638 return 0;
2642 static int
2643 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2644 const struct iwm_fw_sects *image)
2646 int ret = 0;
2647 int first_ucode_section;
2649 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2650 image->is_dual_cpus ? "Dual" : "Single");
2652 /* load to FW the binary non secured sections of CPU1 */
2653 ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2654 if (ret)
2655 return ret;
2657 if (image->is_dual_cpus) {
2658 /* set CPU2 header address */
2659 if (iwm_nic_lock(sc)) {
2660 iwm_write_prph(sc,
2661 IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2662 IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2663 iwm_nic_unlock(sc);
2666 /* load to FW the binary sections of CPU2 */
2667 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2668 &first_ucode_section);
2669 if (ret)
2670 return ret;
2673 iwm_enable_interrupts(sc);
2675 /* release CPU reset */
2676 IWM_WRITE(sc, IWM_CSR_RESET, 0);
return 0;
}

static int
iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2683 const struct iwm_fw_sects *image)
2685 int ret = 0;
2686 int first_ucode_section;
2688 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2689 image->is_dual_cpus ? "Dual" : "Single");
2691 /* configure the ucode to be ready to get the secured image */
2692 /* release CPU reset */
2693 if (iwm_nic_lock(sc)) {
2694 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2695 IWM_RELEASE_CPU_RESET_BIT);
2696 iwm_nic_unlock(sc);
2699 /* load to FW the binary Secured sections of CPU1 */
2700 ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2701 &first_ucode_section);
2702 if (ret)
2703 return ret;
2705 /* load to FW the binary sections of CPU2 */
2706 return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2707 &first_ucode_section);
2710 /* XXX Get rid of this definition */
2711 static inline void
2712 iwm_enable_fw_load_int(struct iwm_softc *sc)
2714 IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2715 sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2716 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2719 /* XXX Add proper rfkill support code */
2720 static int
2721 iwm_start_fw(struct iwm_softc *sc,
2722 const struct iwm_fw_sects *fw)
2724 int ret;
2726 /* This may fail if AMT took ownership of the device */
2727 if (iwm_prepare_card_hw(sc)) {
2728 device_printf(sc->sc_dev,
2729 "%s: Exit HW not ready\n", __func__);
2730 ret = EIO;
2731 goto out;
2734 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2736 iwm_disable_interrupts(sc);
2738 /* make sure rfkill handshake bits are cleared */
2739 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2740 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2741 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2743 /* clear (again), then enable host interrupts */
2744 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2746 ret = iwm_nic_init(sc);
2747 if (ret) {
2748 device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2749 goto out;
/*
 * Now, we load the firmware and don't want to be interrupted, even
 * by the RF-kill interrupt (hence we mask all interrupts besides the
 * FH_TX interrupt, which is needed to load the firmware). If the
 * RF-kill switch is toggled, we will find out after having loaded
 * the firmware and return the proper value to the caller.
 */
2759 iwm_enable_fw_load_int(sc);
2761 /* really make sure rfkill handshake bits are cleared */
2762 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2763 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2765 /* Load the given image to the HW */
2766 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2767 ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2768 else
2769 ret = iwm_pcie_load_given_ucode(sc, fw);
2771 /* XXX re-check RF-Kill state */
2773 out:
2774 return ret;
2777 static int
2778 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2780 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2781 .valid = htole32(valid_tx_ant),
2784 return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2785 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2788 static int
2789 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2791 struct iwm_phy_cfg_cmd phy_cfg_cmd;
2792 enum iwm_ucode_type ucode_type = sc->cur_ucode;
2794 /* Set parameters */
2795 phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2796 phy_cfg_cmd.calib_control.event_trigger =
2797 sc->sc_default_calib[ucode_type].event_trigger;
2798 phy_cfg_cmd.calib_control.flow_trigger =
2799 sc->sc_default_calib[ucode_type].flow_trigger;
2801 IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2802 "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2803 return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2804 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2807 static int
2808 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2810 struct iwm_mvm_alive_data *alive_data = data;
2811 struct iwm_mvm_alive_resp_ver1 *palive1;
2812 struct iwm_mvm_alive_resp_ver2 *palive2;
2813 struct iwm_mvm_alive_resp *palive;
2815 if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
2816 palive1 = (void *)pkt->data;
2818 sc->support_umac_log = FALSE;
2819 sc->error_event_table =
2820 le32toh(palive1->error_event_table_ptr);
2821 sc->log_event_table =
2822 le32toh(palive1->log_event_table_ptr);
2823 alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);
2825 alive_data->valid = le16toh(palive1->status) ==
2826 IWM_ALIVE_STATUS_OK;
2827 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2828 "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2829 le16toh(palive1->status), palive1->ver_type,
2830 palive1->ver_subtype, palive1->flags);
2831 } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
2832 palive2 = (void *)pkt->data;
2833 sc->error_event_table =
2834 le32toh(palive2->error_event_table_ptr);
2835 sc->log_event_table =
2836 le32toh(palive2->log_event_table_ptr);
2837 alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
2838 sc->umac_error_event_table =
2839 le32toh(palive2->error_info_addr);
2841 alive_data->valid = le16toh(palive2->status) ==
2842 IWM_ALIVE_STATUS_OK;
2843 if (sc->umac_error_event_table)
2844 sc->support_umac_log = TRUE;
2846 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2847 "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2848 le16toh(palive2->status), palive2->ver_type,
2849 palive2->ver_subtype, palive2->flags);
2851 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2852 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2853 palive2->umac_major, palive2->umac_minor);
2854 } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2855 palive = (void *)pkt->data;
2857 sc->error_event_table =
2858 le32toh(palive->error_event_table_ptr);
2859 sc->log_event_table =
2860 le32toh(palive->log_event_table_ptr);
2861 alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
2862 sc->umac_error_event_table =
2863 le32toh(palive->error_info_addr);
2865 alive_data->valid = le16toh(palive->status) ==
2866 IWM_ALIVE_STATUS_OK;
2867 if (sc->umac_error_event_table)
2868 sc->support_umac_log = TRUE;
2870 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2871 "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2872 le16toh(palive->status), palive->ver_type,
2873 palive->ver_subtype, palive->flags);
2875 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2876 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2877 le32toh(palive->umac_major),
2878 le32toh(palive->umac_minor));
2881 return TRUE;
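/*
 * The ALIVE notification arrives in one of three layouts (v1, v2 and
 * the current v3); since they differ in size, the payload length
 * alone is sufficient to select the right parser above.
 */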
2884 static int
2885 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2886 struct iwm_rx_packet *pkt, void *data)
2888 struct iwm_phy_db *phy_db = data;
2890 if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
if (pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2892 device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2893 __func__, pkt->hdr.code);
2895 return TRUE;
2898 if (iwm_phy_db_set_section(phy_db, pkt)) {
2899 device_printf(sc->sc_dev,
2900 "%s: iwm_phy_db_set_section failed\n", __func__);
2903 return FALSE;
2906 static int
2907 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2908 enum iwm_ucode_type ucode_type)
2910 struct iwm_notification_wait alive_wait;
2911 struct iwm_mvm_alive_data alive_data;
2912 const struct iwm_fw_sects *fw;
2913 enum iwm_ucode_type old_type = sc->cur_ucode;
2914 int error;
2915 static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
2917 if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2918 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2919 error);
2920 return error;
2922 fw = &sc->sc_fw.fw_sects[ucode_type];
2923 sc->cur_ucode = ucode_type;
2924 sc->ucode_loaded = FALSE;
2926 memset(&alive_data, 0, sizeof(alive_data));
2927 iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2928 alive_cmd, NELEM(alive_cmd),
2929 iwm_alive_fn, &alive_data);
2931 error = iwm_start_fw(sc, fw);
2932 if (error) {
2933 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2934 sc->cur_ucode = old_type;
2935 iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2936 return error;
2940 * Some things may run in the background now, but we
2941 * just wait for the ALIVE notification here.
2943 IWM_UNLOCK(sc);
2944 error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2945 IWM_MVM_UCODE_ALIVE_TIMEOUT);
2946 IWM_LOCK(sc);
2947 if (error) {
2948 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2949 uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2950 if (iwm_nic_lock(sc)) {
2951 a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2952 b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2953 iwm_nic_unlock(sc);
2955 device_printf(sc->sc_dev,
2956 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2957 a, b);
2959 sc->cur_ucode = old_type;
2960 return error;
2963 if (!alive_data.valid) {
2964 device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2965 __func__);
2966 sc->cur_ucode = old_type;
2967 return EIO;
2970 iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
/*
 * Configure and operate the firmware paging mechanism.
 * The driver configures the paging flow only once; the CPU2 paging
 * image is included in the IWM_UCODE_INIT image.
 */
2977 if (fw->paging_mem_size) {
2978 error = iwm_save_fw_paging(sc, fw);
2979 if (error) {
2980 device_printf(sc->sc_dev,
2981 "%s: failed to save the FW paging image\n",
2982 __func__);
2983 return error;
2986 error = iwm_send_paging_cmd(sc, fw);
2987 if (error) {
2988 device_printf(sc->sc_dev,
2989 "%s: failed to send the paging cmd\n", __func__);
2990 iwm_free_fw_paging(sc);
2991 return error;
2995 if (!error)
2996 sc->ucode_loaded = TRUE;
2997 return error;
/*
 * mvm misc bits
 */
3004 static int
3005 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
3007 struct iwm_notification_wait calib_wait;
3008 static const uint16_t init_complete[] = {
3009 IWM_INIT_COMPLETE_NOTIF,
3010 IWM_CALIB_RES_NOTIF_PHY_DB
3012 int ret;
3014 /* do not operate with rfkill switch turned on */
3015 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3016 device_printf(sc->sc_dev,
3017 "radio is disabled by hardware switch\n");
3018 return EPERM;
3021 iwm_init_notification_wait(sc->sc_notif_wait,
3022 &calib_wait,
3023 init_complete,
3024 NELEM(init_complete),
3025 iwm_wait_phy_db_entry,
3026 sc->sc_phy_db);
3028 /* Will also start the device */
3029 ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
3030 if (ret) {
3031 device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
3032 ret);
3033 goto error;
3036 if (justnvm) {
3037 /* Read nvm */
3038 ret = iwm_nvm_init(sc);
3039 if (ret) {
3040 device_printf(sc->sc_dev, "failed to read nvm\n");
3041 goto error;
3043 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
3044 goto error;
3047 ret = iwm_send_bt_init_conf(sc);
3048 if (ret) {
3049 device_printf(sc->sc_dev,
3050 "failed to send bt coex configuration: %d\n", ret);
3051 goto error;
3054 /* Send TX valid antennas before triggering calibrations */
3055 ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
3056 if (ret) {
3057 device_printf(sc->sc_dev,
3058 "failed to send antennas before calibration: %d\n", ret);
3059 goto error;
/*
 * Send the phy configuration command to the init uCode to start
 * the internal calibrations of the 16.0 uCode init image.
 */
3066 ret = iwm_send_phy_cfg_cmd(sc);
3067 if (ret) {
3068 device_printf(sc->sc_dev,
3069 "%s: Failed to run INIT calibrations: %d\n",
3070 __func__, ret);
3071 goto error;
/*
 * Nothing to do but wait for the init complete notification
 * from the firmware.
 */
3078 IWM_UNLOCK(sc);
3079 ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3080 IWM_MVM_UCODE_CALIB_TIMEOUT);
3081 IWM_LOCK(sc);
3084 goto out;
3086 error:
3087 iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3088 out:
3089 return ret;
/*
 * receive side
 */
3096 /* (re)stock rx ring, called at init-time and at runtime */
3097 static int
3098 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3100 struct iwm_rx_ring *ring = &sc->rxq;
3101 struct iwm_rx_data *data = &ring->data[idx];
3102 struct mbuf *m;
3103 bus_dmamap_t dmamap;
3104 bus_dma_segment_t seg;
3105 int nsegs, error;
3107 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3108 if (m == NULL)
3109 return ENOBUFS;
3111 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3112 #if defined(__DragonFly__)
3113 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
3114 m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
3115 #else
3116 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3117 &seg, &nsegs, BUS_DMA_NOWAIT);
3118 #endif
3119 if (error != 0) {
3120 device_printf(sc->sc_dev,
3121 "%s: can't map mbuf, error %d\n", __func__, error);
3122 m_freem(m);
3123 return error;
3126 if (data->m != NULL)
3127 bus_dmamap_unload(ring->data_dmat, data->map);
3129 /* Swap ring->spare_map with data->map */
3130 dmamap = data->map;
3131 data->map = ring->spare_map;
3132 ring->spare_map = dmamap;
3134 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3135 data->m = m;
3137 /* Update RX descriptor. */
3138 KKASSERT((seg.ds_addr & 255) == 0);
3139 ring->desc[idx] = htole32(seg.ds_addr >> 8);
3140 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3141 BUS_DMASYNC_PREWRITE);
3143 return 0;
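/*
 * Note: the RX descriptor stores the DMA address right-shifted by 8,
 * e.g. a buffer at physical 0x01234500 is programmed as 0x00012345;
 * this is why the buffer must be 256-byte aligned (the KKASSERT).
 */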
/*
 * iwm_mvm_get_signal_strength - use the new rx PHY INFO API;
 * values are reported by the fw as positive values - need to negate
 * to obtain their dBm. Account for missing antennas by replacing 0
 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
 */
3152 static int
3153 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3155 int energy_a, energy_b, energy_c, max_energy;
3156 uint32_t val;
3158 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3159 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3160 IWM_RX_INFO_ENERGY_ANT_A_POS;
3161 energy_a = energy_a ? -energy_a : -256;
3162 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3163 IWM_RX_INFO_ENERGY_ANT_B_POS;
3164 energy_b = energy_b ? -energy_b : -256;
3165 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3166 IWM_RX_INFO_ENERGY_ANT_C_POS;
3167 energy_c = energy_c ? -energy_c : -256;
3168 max_energy = MAX(energy_a, energy_b);
3169 max_energy = MAX(max_energy, energy_c);
3171 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3172 "energy In A %d B %d C %d , and max %d\n",
3173 energy_a, energy_b, energy_c, max_energy);
3175 return max_energy;
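/*
 * Example: if the PHY info word reports energies A=40, B=25, C=0,
 * the per-antenna values become -40, -25 and -256 dBm, and the
 * reported signal strength is the maximum, -25 dBm.
 */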
3178 static void
3179 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3181 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3183 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3185 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
/*
 * Retrieve the average noise (in dBm) among receivers.
 */
3191 static int
3192 iwm_get_noise(struct iwm_softc *sc,
3193 const struct iwm_mvm_statistics_rx_non_phy *stats)
3195 int i, total, nbant, noise;
3197 total = nbant = noise = 0;
3198 for (i = 0; i < 3; i++) {
3199 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3200 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3201 __func__, i, noise);
3203 if (noise) {
3204 total += noise;
3205 nbant++;
3209 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3210 __func__, nbant, total);
3211 #if 0
3212 /* There should be at least one antenna but check anyway. */
3213 return (nbant == 0) ? -127 : (total / nbant) - 107;
3214 #else
3215 /* For now, just hard-code it to -96 to be safe */
3216 return (-96);
3217 #endif
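/*
 * Example for the disabled computation above: beacon silence RSSI
 * values of 45, 43 and 0 give nbant = 2, total = 88, and a noise
 * floor of 88 / 2 - 107 = -63 dBm.
 */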
3220 static void
3221 iwm_mvm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3223 struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3225 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3226 sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
/*
 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
 *
 * Handles the actual data of the Rx packet from the fw
 */
3234 static boolean_t
3235 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3236 boolean_t stolen)
3238 struct ieee80211com *ic = &sc->sc_ic;
3239 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3240 struct ieee80211_frame *wh;
3241 struct ieee80211_node *ni;
3242 struct ieee80211_rx_stats rxs;
3243 struct iwm_rx_phy_info *phy_info;
3244 struct iwm_rx_mpdu_res_start *rx_res;
3245 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3246 uint32_t len;
3247 uint32_t rx_pkt_status;
3248 int rssi;
3250 phy_info = &sc->sc_last_phy_info;
3251 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3252 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3253 len = le16toh(rx_res->byte_count);
3254 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3256 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3257 device_printf(sc->sc_dev,
3258 "dsp size out of range [0,20]: %d\n",
3259 phy_info->cfg_phy_cnt);
3260 return FALSE;
3263 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3264 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3265 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3266 "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3267 return FALSE; /* drop */
3270 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
/* Note: RSSI is absolute (i.e. a negative value) */
3272 if (rssi < IWM_MIN_DBM)
3273 rssi = IWM_MIN_DBM;
3274 else if (rssi > IWM_MAX_DBM)
3275 rssi = IWM_MAX_DBM;
/*
 * Map absolute dBm to a value relative to the noise floor,
 * e.g. -60 dBm over a -96 dBm floor yields 36.
 */
3278 rssi = rssi - sc->sc_noise;
3280 /* replenish ring for the buffer we're going to feed to the sharks */
3281 if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3282 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3283 __func__);
3284 return FALSE;
3287 m->m_data = pkt->data + sizeof(*rx_res);
3288 m->m_pkthdr.len = m->m_len = len;
3290 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3291 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3293 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3295 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3296 "%s: phy_info: channel=%d, flags=0x%08x\n",
3297 __func__,
3298 le16toh(phy_info->channel),
3299 le16toh(phy_info->phy_flags));
3302 * Populate an RX state struct with the provided information.
3304 bzero(&rxs, sizeof(rxs));
3305 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3306 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3307 rxs.c_ieee = le16toh(phy_info->channel);
if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3309 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3310 } else {
3311 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
/* RSSI is in 1/2 dB units */
3314 rxs.rssi = rssi * 2;
3315 rxs.nf = sc->sc_noise;
3317 if (ieee80211_radiotap_active_vap(vap)) {
3318 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3320 tap->wr_flags = 0;
3321 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3322 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3323 tap->wr_chan_freq = htole16(rxs.c_freq);
3324 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3325 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3326 tap->wr_dbm_antsignal = (int8_t)rssi;
3327 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3328 tap->wr_tsft = phy_info->system_timestamp;
3329 switch (phy_info->rate) {
3330 /* CCK rates. */
3331 case 10: tap->wr_rate = 2; break;
3332 case 20: tap->wr_rate = 4; break;
3333 case 55: tap->wr_rate = 11; break;
3334 case 110: tap->wr_rate = 22; break;
3335 /* OFDM rates. */
3336 case 0xd: tap->wr_rate = 12; break;
3337 case 0xf: tap->wr_rate = 18; break;
3338 case 0x5: tap->wr_rate = 24; break;
3339 case 0x7: tap->wr_rate = 36; break;
3340 case 0x9: tap->wr_rate = 48; break;
3341 case 0xb: tap->wr_rate = 72; break;
3342 case 0x1: tap->wr_rate = 96; break;
3343 case 0x3: tap->wr_rate = 108; break;
3344 /* Unknown rate: should not happen. */
3345 default: tap->wr_rate = 0;
3349 IWM_UNLOCK(sc);
3350 if (ni != NULL) {
3351 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3352 ieee80211_input_mimo(ni, m, &rxs);
3353 ieee80211_free_node(ni);
3354 } else {
3355 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3356 ieee80211_input_mimo_all(ic, m, &rxs);
3358 IWM_LOCK(sc);
3360 return TRUE;
3363 static int
3364 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3365 struct iwm_node *in)
3367 struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3368 struct ieee80211_node *ni = &in->in_ni;
3369 struct ieee80211vap *vap = ni->ni_vap;
3370 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3371 int failack = tx_resp->failure_frame;
3372 int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3373 boolean_t rate_matched;
3374 uint8_t tx_resp_rate;
3375 int ret;
3377 KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3379 /* Update rate control statistics. */
3380 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3381 __func__,
3382 (int) le16toh(tx_resp->status.status),
3383 (int) le16toh(tx_resp->status.sequence),
3384 tx_resp->frame_count,
3385 tx_resp->bt_kill_count,
3386 tx_resp->failure_rts,
3387 tx_resp->failure_frame,
3388 le32toh(tx_resp->initial_rate),
3389 (int) le16toh(tx_resp->wireless_media_time));
3391 tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3393 /* For rate control, ignore frames sent at different initial rate */
3394 rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3396 if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3397 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3398 "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3399 "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3402 if (status != IWM_TX_STATUS_SUCCESS &&
3403 status != IWM_TX_STATUS_DIRECT_DONE) {
3404 if (rate_matched) {
3405 ieee80211_ratectl_tx_complete(vap, ni,
3406 IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3408 ret = 1;
3409 } else {
3410 if (rate_matched) {
3411 ieee80211_ratectl_tx_complete(vap, ni,
3412 IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3414 ret = 0;
3417 if (rate_matched) {
3418 int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3419 new_rate = vap->iv_bss->ni_txrate;
3420 if (new_rate != 0 && new_rate != cur_rate) {
3421 struct iwm_node *in = IWM_NODE(vap->iv_bss);
3422 iwm_setrates(sc, in, rix);
3423 iwm_mvm_send_lq_cmd(sc, &in->in_lq, FALSE);
3427 return ret;
3430 static void
3431 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3433 struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3434 int idx = cmd_hdr->idx;
3435 int qid = cmd_hdr->qid;
3436 struct iwm_tx_ring *ring = &sc->txq[qid];
3437 struct iwm_tx_data *txd = &ring->data[idx];
3438 struct iwm_node *in = txd->in;
3439 struct mbuf *m = txd->m;
3440 int status;
3442 KASSERT(txd->done == 0, ("txd not done"));
3443 KASSERT(txd->in != NULL, ("txd without node"));
3444 KASSERT(txd->m != NULL, ("txd without mbuf"));
3446 sc->sc_tx_timer = 0;
3448 status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3450 /* Unmap and free mbuf. */
3451 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3452 bus_dmamap_unload(ring->data_dmat, txd->map);
3454 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3455 "free txd %p, in %p\n", txd, txd->in);
3456 txd->done = 1;
3457 txd->m = NULL;
3458 txd->in = NULL;
3460 ieee80211_tx_complete(&in->in_ni, m, status);
3462 if (--ring->queued < IWM_TX_RING_LOMARK) {
3463 sc->qfullmsk &= ~(1 << ring->qid);
3464 if (sc->qfullmsk == 0) {
3465 iwm_start(sc);
/*
 * transmit side
 */
/*
 * Process a "command done" firmware notification. This is where we
 * wake up processes waiting for a synchronous command completion.
 * from if_iwn
 */
3479 static void
3480 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3482 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3483 struct iwm_tx_data *data;
3485 if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3486 return; /* Not a command ack. */
3489 data = &ring->data[pkt->hdr.idx];
3491 /* If the command was mapped in an mbuf, free it. */
3492 if (data->m != NULL) {
3493 bus_dmamap_sync(ring->data_dmat, data->map,
3494 BUS_DMASYNC_POSTWRITE);
3495 bus_dmamap_unload(ring->data_dmat, data->map);
3496 m_freem(data->m);
3497 data->m = NULL;
3499 wakeup(&ring->desc[pkt->hdr.idx]);
3501 if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3502 device_printf(sc->sc_dev,
3503 "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3504 __func__, pkt->hdr.idx, ring->queued, ring->cur);
3505 /* XXX call iwm_force_nmi() */
3508 KKASSERT(ring->queued > 0);
3509 ring->queued--;
3510 if (ring->queued == 0)
3511 iwm_pcie_clear_cmd_in_flight(sc);
3514 #if 0
/*
 * necessary only for block ack mode
 */
3518 void
3519 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3520 uint16_t len)
3522 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3523 uint16_t w_val;
3525 scd_bc_tbl = sc->sched_dma.vaddr;
3527 len += 8; /* magic numbers came naturally from paris */
3528 len = roundup(len, 4) / 4;
3530 w_val = htole16(sta_id << 12 | len);
3532 /* Update TX scheduler. */
3533 scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3534 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3535 BUS_DMASYNC_PREWRITE);
3537 /* I really wonder what this is ?!? */
3538 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3539 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3540 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3541 BUS_DMASYNC_PREWRITE);
3544 #endif
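/*
 * Example of the byte-count encoding above: a 100-byte frame for
 * sta_id 1 gives len = (100 + 8) rounded up to 108, / 4 = 27 dwords,
 * so w_val = htole16(1 << 12 | 27).
 */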
/*
 * Fill in the rate-related information for a transmit command.
 */
3549 static const struct iwm_rate *
3550 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3551 struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
3553 struct ieee80211com *ic = &sc->sc_ic;
3554 struct ieee80211_node *ni = &in->in_ni;
3555 const struct iwm_rate *rinfo;
3556 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3557 int ridx, rate_flags;
3559 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3560 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
/*
 * XXX TODO: everything about the rate selection here is terrible!
 */
3566 if (type == IEEE80211_FC0_TYPE_DATA) {
3567 /* for data frames, use RS table */
3568 ridx = iwm_rate2ridx(sc, ni->ni_txrate);
3569 if (ridx == -1)
3570 ridx = 0;
3572 /* This is the index into the programmed table */
3573 tx->initial_rate_index = 0;
3574 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3575 } else {
/*
 * For non-data, use the lowest supported rate for the given
 * operational mode.
 *
 * Note: there may not be any rate control information available.
 * This driver currently assumes that if we're transmitting data
 * frames, we use the rate control table. Grr.
 *
 * XXX TODO: use the configured rate for the traffic type!
 * XXX TODO: this should be per-vap, not curmode; later on we'll
 * want to handle off-channel stuff (e.g. TDLS).
 */
3588 if (ic->ic_curmode == IEEE80211_MODE_11A) {
/*
 * XXX this assumes the mode is either 11a or not 11a;
 * definitely won't work for 11n.
 */
3593 ridx = IWM_RIDX_OFDM;
3594 } else {
3595 ridx = IWM_RIDX_CCK;
3599 rinfo = &iwm_rates[ridx];
3601 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3602 __func__, ridx,
3603 rinfo->rate,
3604 !! (IWM_RIDX_IS_CCK(ridx))
3607 /* XXX TODO: hard-coded TX antenna? */
3608 rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3609 if (IWM_RIDX_IS_CCK(ridx))
3610 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3611 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3613 return rinfo;
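/*
 * Note on data-frame rates: with IWM_TX_CMD_FLG_STA_RATE set above,
 * the firmware takes the rate from the station's LQ (rate scaling)
 * table, which the driver keeps current via iwm_mvm_send_lq_cmd();
 * initial_rate_index is an index into that table, so 0 selects the
 * table's first (current) entry rather than a fixed rate.
 */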
3616 #define TB0_SIZE 16
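/*
 * TX descriptor layout sketch for iwm_tx() below: TB0 holds the first
 * TB0_SIZE (16) bytes of the command, TB1 the remainder of the command
 * header, the iwm_tx_cmd and the (padded) 802.11 header, and TBs 2..n
 * the payload DMA segments; hence num_tbs = 2 + nsegs.
 */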
3617 static int
3618 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3620 struct ieee80211com *ic = &sc->sc_ic;
3621 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3622 struct iwm_node *in = IWM_NODE(ni);
3623 struct iwm_tx_ring *ring;
3624 struct iwm_tx_data *data;
3625 struct iwm_tfd *desc;
3626 struct iwm_device_cmd *cmd;
3627 struct iwm_tx_cmd *tx;
3628 struct ieee80211_frame *wh;
3629 struct ieee80211_key *k = NULL;
3630 #if !defined(__DragonFly__)
3631 struct mbuf *m1;
3632 #endif
3633 const struct iwm_rate *rinfo;
3634 uint32_t flags;
3635 u_int hdrlen;
3636 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3637 int nsegs;
3638 uint8_t tid, type;
3639 int i, totlen, error, pad;
3641 wh = mtod(m, struct ieee80211_frame *);
3642 hdrlen = ieee80211_anyhdrsize(wh);
3643 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3644 tid = 0;
3645 ring = &sc->txq[ac];
3646 desc = &ring->desc[ring->cur];
3647 memset(desc, 0, sizeof(*desc));
3648 data = &ring->data[ring->cur];
3650 /* Fill out iwm_tx_cmd to send to the firmware */
3651 cmd = &ring->cmd[ring->cur];
3652 cmd->hdr.code = IWM_TX_CMD;
3653 cmd->hdr.flags = 0;
3654 cmd->hdr.qid = ring->qid;
3655 cmd->hdr.idx = ring->cur;
3657 tx = (void *)cmd->data;
3658 memset(tx, 0, sizeof(*tx));
3660 rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
3662 /* Encrypt the frame if need be. */
3663 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3664 /* Retrieve key for TX && do software encryption. */
3665 k = ieee80211_crypto_encap(ni, m);
3666 if (k == NULL) {
3667 m_freem(m);
3668 return (ENOBUFS);
3670 /* 802.11 header may have moved. */
3671 wh = mtod(m, struct ieee80211_frame *);
3674 if (ieee80211_radiotap_active_vap(vap)) {
3675 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3677 tap->wt_flags = 0;
3678 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3679 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3680 tap->wt_rate = rinfo->rate;
3681 if (k != NULL)
3682 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3683 ieee80211_radiotap_tx(vap, m);
3687 totlen = m->m_pkthdr.len;
3689 flags = 0;
3690 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3691 flags |= IWM_TX_CMD_FLG_ACK;
3694 if (type == IEEE80211_FC0_TYPE_DATA
3695 && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3696 && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3697 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3700 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3701 type != IEEE80211_FC0_TYPE_DATA)
3702 tx->sta_id = sc->sc_aux_sta.sta_id;
3703 else
3704 tx->sta_id = IWM_STATION_ID;
3706 if (type == IEEE80211_FC0_TYPE_MGT) {
3707 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3709 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3710 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3711 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3712 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3713 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3714 } else {
3715 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3717 } else {
3718 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3721 if (hdrlen & 3) {
3722 /* First segment length must be a multiple of 4. */
3723 flags |= IWM_TX_CMD_FLG_MH_PAD;
3724 pad = 4 - (hdrlen & 3);
3725 } else
3726 pad = 0;
3728 tx->driver_txop = 0;
3729 tx->next_frame_len = 0;
3731 tx->len = htole16(totlen);
3732 tx->tid_tspec = tid;
3733 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3735 /* Set physical address of "scratch area". */
3736 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3737 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3739 /* Copy 802.11 header in TX command. */
3740 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3742 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3744 tx->sec_ctl = 0;
3745 tx->tx_flags |= htole32(flags);
3747 /* Trim 802.11 header. */
3748 m_adj(m, hdrlen);
3749 #if defined(__DragonFly__)
3750 error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
3751 segs, IWM_MAX_SCATTER - 2,
3752 &nsegs, BUS_DMA_NOWAIT);
3753 #else
3754 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3755 segs, &nsegs, BUS_DMA_NOWAIT);
3756 #endif
3757 if (error != 0) {
3758 #if defined(__DragonFly__)
3759 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3760 error);
3761 m_freem(m);
3762 return error;
3763 #else
3764 if (error != EFBIG) {
3765 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3766 error);
3767 m_freem(m);
3768 return error;
3770 /* Too many DMA segments, linearize mbuf. */
3771 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3772 if (m1 == NULL) {
3773 device_printf(sc->sc_dev,
3774 "%s: could not defrag mbuf\n", __func__);
3775 m_freem(m);
3776 return (ENOBUFS);
3778 m = m1;
3780 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3781 segs, &nsegs, BUS_DMA_NOWAIT);
3782 if (error != 0) {
3783 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3784 error);
3785 m_freem(m);
3786 return error;
3788 #endif
3790 data->m = m;
3791 data->in = in;
3792 data->done = 0;
3794 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3795 "sending txd %p, in %p\n", data, data->in);
3796 KASSERT(data->in != NULL, ("node is NULL"));
3798 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3799 "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3800 ring->qid, ring->cur, totlen, nsegs,
3801 le32toh(tx->tx_flags),
3802 le32toh(tx->rate_n_flags),
3803 tx->initial_rate_index
3806 /* Fill TX descriptor. */
3807 desc->num_tbs = 2 + nsegs;
3809 desc->tbs[0].lo = htole32(data->cmd_paddr);
3810 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3811 (TB0_SIZE << 4);
3812 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3813 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3814 ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3815 + hdrlen + pad - TB0_SIZE) << 4);
3817 /* Other DMA segments are for data payload. */
3818 for (i = 0; i < nsegs; i++) {
3819 seg = &segs[i];
3820 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3821 desc->tbs[i+2].hi_n_len =
3822 htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3823 | ((seg->ds_len) << 4);
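/*
 * A sketch of the TB packing implied by the shifts above (layout
 * assumed, not stated in the source): "lo" carries DMA address bits
 * 0-31, while hi_n_len carries address bits 32-35 in its low nibble
 * and the segment length in bits 4-15. E.g. a 100-byte segment at
 * 0x212345678 would yield lo = 0x12345678 and
 * hi_n_len = htole16(0x2 | (100 << 4)) = 0x642.
 */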
3826 bus_dmamap_sync(ring->data_dmat, data->map,
3827 BUS_DMASYNC_PREWRITE);
3828 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3829 BUS_DMASYNC_PREWRITE);
3830 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3831 BUS_DMASYNC_PREWRITE);
3833 #if 0
3834 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3835 #endif
3837 /* Kick TX ring. */
3838 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3839 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
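/*
 * For example, kicking qid 3 with cur advanced to 17 writes
 * (3 << 8) | 17 == 0x311 to IWM_HBUS_TARG_WRPTR.
 */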
3841 /* Mark TX ring as full if we reach a certain threshold. */
3842 if (++ring->queued > IWM_TX_RING_HIMARK) {
3843 sc->qfullmsk |= 1 << ring->qid;
3846 return 0;
3849 static int
3850 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3851 const struct ieee80211_bpf_params *params)
3853 struct ieee80211com *ic = ni->ni_ic;
3854 struct iwm_softc *sc = ic->ic_softc;
3855 int error = 0;
3857 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3858 "->%s begin\n", __func__);
3860 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3861 m_freem(m);
3862 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3863 "<-%s not RUNNING\n", __func__);
3864 return (ENETDOWN);
3867 IWM_LOCK(sc);
3868 /* XXX fix this */
3869 if (params == NULL) {
3870 error = iwm_tx(sc, m, ni, 0);
3871 } else {
3872 error = iwm_tx(sc, m, ni, 0);
3874 sc->sc_tx_timer = 5;
3875 IWM_UNLOCK(sc);
3877 return (error);
3881 * mvm/tx.c
3885 * Note that there are transports that buffer frames before they reach
3886 * the firmware. This means that after flush_tx_path is called, the
3887 * queue might not be empty. The race-free way to handle this is to:
3888 * 1) set the station as draining
3889 * 2) flush the Tx path
3890 * 3) wait for the transport queues to be empty
3893 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3895 int ret;
3896 struct iwm_tx_path_flush_cmd flush_cmd = {
3897 .queues_ctl = htole32(tfd_msk),
3898 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3901 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3902 sizeof(flush_cmd), &flush_cmd);
3903 if (ret)
3904 device_printf(sc->sc_dev,
3905 "Flushing tx queue failed: %d\n", ret);
3906 return ret;
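#if 0
/*
 * Minimal sketch of the race-free drain sequence described in the
 * comment above; it is not part of this driver, and
 * iwm_mvm_drain_sta() is a hypothetical helper assumed for
 * illustration only.
 */
static int
iwm_mvm_drain_tx_sketch(struct iwm_softc *sc, struct iwm_node *in)
{
	int error;

	/* 1) mark the station as draining (hypothetical helper) */
	if ((error = iwm_mvm_drain_sta(sc, in, TRUE)) != 0)
		return error;
	/* 2) flush the Tx path for the station's queues */
	if ((error = iwm_mvm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC)) != 0)
		return error;
	/* 3) wait for the transport queues to empty, then unmark */
	return iwm_mvm_drain_sta(sc, in, FALSE);
}
#endif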
3909 static int
3910 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3912 struct iwm_time_quota_cmd cmd;
3913 int i, idx, ret, num_active_macs, quota, quota_rem;
3914 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3915 int n_ifs[IWM_MAX_BINDINGS] = {0, };
3916 uint16_t id;
3918 memset(&cmd, 0, sizeof(cmd));
3920 /* currently, PHY ID == binding ID */
3921 if (ivp) {
3922 id = ivp->phy_ctxt->id;
3923 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3924 colors[id] = ivp->phy_ctxt->color;
3926 if (1)
3927 n_ifs[id] = 1;
3931 * The FW's scheduling session consists of
3932 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3933 * equally between all the bindings that require quota
3935 num_active_macs = 0;
3936 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3937 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3938 num_active_macs += n_ifs[i];
3941 quota = 0;
3942 quota_rem = 0;
3943 if (num_active_macs) {
3944 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3945 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3948 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3949 if (colors[i] < 0)
3950 continue;
3952 cmd.quotas[idx].id_and_color =
3953 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3955 if (n_ifs[i] <= 0) {
3956 cmd.quotas[idx].quota = htole32(0);
3957 cmd.quotas[idx].max_duration = htole32(0);
3958 } else {
3959 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3960 cmd.quotas[idx].max_duration = htole32(0);
3962 idx++;
3965 /* Give the remainder of the session to the first binding */
3966 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3968 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3969 sizeof(cmd), &cmd);
3970 if (ret)
3971 device_printf(sc->sc_dev,
3972 "%s: Failed to send quota: %d\n", __func__, ret);
3973 return ret;
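/*
 * Worked example (IWM_MVM_MAX_QUOTA assumed to be 128): with three
 * active bindings each gets 128 / 3 == 42 fragments, and the
 * remainder 128 % 3 == 2 goes to the first binding, i.e. 44/42/42.
 */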
3977 * ieee80211 routines
3981 * Change to AUTH state in 80211 state machine. Roughly matches what
3982 * Linux does in bss_info_changed().
3984 static int
3985 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3987 struct ieee80211_node *ni;
3988 struct iwm_node *in;
3989 struct iwm_vap *iv = IWM_VAP(vap);
3990 uint32_t duration;
3991 int error;
3994 * XXX I have a feeling that the vap node is being
3995 * freed from underneath us. Grr.
3997 ni = ieee80211_ref_node(vap->iv_bss);
3998 in = IWM_NODE(ni);
3999 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4000 "%s: called; vap=%p, bss ni=%p\n",
4001 __func__,
4002 vap,
4003 ni);
4005 in->in_assoc = 0;
4008 * Firmware bug - it'll crash if the beacon interval is less
4009 * than 16. Since we can't avoid connecting altogether, refuse the
4010 * station state change; this will cause net80211 to abandon
4011 * attempts to connect to this AP, and eventually wpa_s will
4012 * blacklist the AP...
4014 if (ni->ni_intval < 16) {
4015 device_printf(sc->sc_dev,
4016 "AP %s beacon interval is %d, refusing due to firmware bug!\n",
4017 ether_sprintf(ni->ni_bssid), ni->ni_intval);
4018 error = EINVAL;
4019 goto out;
4022 error = iwm_allow_mcast(vap, sc);
4023 if (error) {
4024 device_printf(sc->sc_dev,
4025 "%s: failed to set multicast\n", __func__);
4026 goto out;
4030 * This is where it deviates from what Linux does.
4032 * Linux iwlwifi doesn't reset the nic each time, nor does it
4033 * call ctxt_add() here. Instead, it adds it during vap creation,
4034 * and always does a mac_ctx_changed().
4036 * The OpenBSD port doesn't attempt to do that - it resets things
4037 * at odd states and does the add here.
4039 * So, until the state handling is fixed (ie, we never reset
4040 * the NIC except for a firmware failure, which should drag
4041 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4042 * contexts that are required), let's do a dirty hack here.
4044 if (iv->is_uploaded) {
4045 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4046 device_printf(sc->sc_dev,
4047 "%s: failed to update MAC\n", __func__);
4048 goto out;
4050 } else {
4051 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4052 device_printf(sc->sc_dev,
4053 "%s: failed to add MAC\n", __func__);
4054 goto out;
4058 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4059 in->in_ni.ni_chan, 1, 1)) != 0) {
4060 device_printf(sc->sc_dev,
4061 "%s: failed update phy ctxt\n", __func__);
4062 goto out;
4064 iv->phy_ctxt = &sc->sc_phyctxt[0];
4066 if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4067 device_printf(sc->sc_dev,
4068 "%s: binding update cmd\n", __func__);
4069 goto out;
4072 * Authentication becomes unreliable when powersaving is left enabled
4073 * here. Powersaving will be activated again when association has
4074 * finished or is aborted.
4076 iv->ps_disabled = TRUE;
4077 error = iwm_mvm_power_update_mac(sc);
4078 iv->ps_disabled = FALSE;
4079 if (error != 0) {
4080 device_printf(sc->sc_dev,
4081 "%s: failed to update power management\n",
4082 __func__);
4083 goto out;
4085 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4086 device_printf(sc->sc_dev,
4087 "%s: failed to add sta\n", __func__);
4088 goto out;
4092 * Prevent the FW from wandering off channel during association
4093 * by "protecting" the session with a time event.
4095 /* XXX duration is in units of TU, not MS */
4096 duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4097 iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
4098 DELAY(100);
4100 error = 0;
4101 out:
4102 ieee80211_free_node(ni);
4103 return (error);
4106 static int
4107 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4109 uint32_t tfd_msk;
4112 * Ok, so *technically* the proper set of calls for going
4113 * from RUN back to SCAN is:
4115 * iwm_mvm_power_mac_disable(sc, in);
4116 * iwm_mvm_mac_ctxt_changed(sc, vap);
4117 * iwm_mvm_rm_sta(sc, in);
4118 * iwm_mvm_update_quotas(sc, NULL);
4119 * iwm_mvm_mac_ctxt_changed(sc, in);
4120 * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4121 * iwm_mvm_mac_ctxt_remove(sc, in);
4123 * However, that freezes the device no matter which permutations
4124 * and modifications are attempted. Obviously, this driver is missing
4125 * something since it works in the Linux driver, but figuring out what
4126 * is missing is a little more complicated. Now, since we're going
4127 * back to nothing anyway, we'll just do a complete device reset.
4128 * Up yours, device!
4131 * Just using 0xf for the queues mask is fine as long as we only
4132 * get here from RUN state.
4134 tfd_msk = 0xf;
4135 iwm_xmit_queue_drain(sc);
4136 iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4138 * We seem to get away with just synchronously sending the
4139 * IWM_TXPATH_FLUSH command.
4141 // iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4142 iwm_stop_device(sc);
4143 iwm_init_hw(sc);
4144 if (in)
4145 in->in_assoc = 0;
4146 return 0;
4148 #if 0
4149 int error;
4151 iwm_mvm_power_mac_disable(sc, in);
4153 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4154 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4155 return error;
4158 if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4159 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4160 return error;
4162 error = iwm_mvm_rm_sta(sc, in);
4163 in->in_assoc = 0;
4164 iwm_mvm_update_quotas(sc, NULL);
4165 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4166 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4167 return error;
4169 iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4171 iwm_mvm_mac_ctxt_remove(sc, in);
4173 return error;
4174 #endif
4177 static struct ieee80211_node *
4178 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4180 return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4181 M_INTWAIT | M_ZERO);
4184 static uint8_t
4185 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4187 uint8_t plcp = rate_n_flags & 0xff;
4188 int i;
4190 for (i = 0; i <= IWM_RIDX_MAX; i++) {
4191 if (iwm_rates[i].plcp == plcp)
4192 return iwm_rates[i].rate;
4194 return 0;
4197 uint8_t
4198 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4200 int i;
4201 uint8_t rval;
4203 for (i = 0; i < rs->rs_nrates; i++) {
4204 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4205 if (rval == iwm_rates[ridx].rate)
4206 return rs->rs_rates[i];
4209 return 0;
4212 static int
4213 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4215 int i;
4217 for (i = 0; i <= IWM_RIDX_MAX; i++) {
4218 if (iwm_rates[i].rate == rate)
4219 return i;
4222 device_printf(sc->sc_dev,
4223 "%s: WARNING: device rate for %u not found!\n",
4224 __func__, rate);
4226 return -1;
4229 static void
4230 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4232 struct ieee80211_node *ni = &in->in_ni;
4233 struct iwm_lq_cmd *lq = &in->in_lq;
4234 struct ieee80211_rateset *rs = &ni->ni_rates;
4235 int nrates = rs->rs_nrates;
4236 int i, ridx, tab = 0;
4237 int txant = 0;
4239 KKASSERT(rix >= 0 && rix < nrates);
4241 if (nrates > nitems(lq->rs_table)) {
4242 device_printf(sc->sc_dev,
4243 "%s: node supports %d rates, driver handles "
4244 "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4245 return;
4247 if (nrates == 0) {
4248 device_printf(sc->sc_dev,
4249 "%s: node supports 0 rates, odd!\n", __func__);
4250 return;
4252 nrates = imin(rix + 1, nrates);
4254 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4255 "%s: nrates=%d\n", __func__, nrates);
4257 /* then construct a lq_cmd based on those */
4258 memset(lq, 0, sizeof(*lq));
4259 lq->sta_id = IWM_STATION_ID;
4261 /* For HT, always enable RTS/CTS to avoid excessive retries. */
4262 if (ni->ni_flags & IEEE80211_NODE_HT)
4263 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4266 * Are these used? (We don't do SISO or MIMO.)
4267 * They need to be non-zero, though, or we get an error.
4269 lq->single_stream_ant_msk = 1;
4270 lq->dual_stream_ant_msk = 1;
4273 * Build the actual rate selection table.
4274 * The lowest bits are the rates. Additionally,
4275 * CCK needs bit 9 to be set. The rest of the bits
4276 * we add to the table select the tx antenna.
4277 * Note that we add the rates highest rate first
4278 * (the opposite of ni_rates).
4280 for (i = 0; i < nrates; i++) {
4281 int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4282 int nextant;
4284 /* Map 802.11 rate to HW rate index. */
4285 ridx = iwm_rate2ridx(sc, rate);
4286 if (ridx == -1)
4287 continue;
4289 if (txant == 0)
4290 txant = iwm_mvm_get_valid_tx_ant(sc);
4291 nextant = 1<<(ffs(txant)-1);
4292 txant &= ~nextant;
4294 tab = iwm_rates[ridx].plcp;
4295 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4296 if (IWM_RIDX_IS_CCK(ridx))
4297 tab |= IWM_RATE_MCS_CCK_MSK;
4298 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4299 "station rate i=%d, rate=%d, hw=%x\n",
4300 i, iwm_rates[ridx].rate, tab);
4301 lq->rs_table[i] = htole32(tab);
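/*
 * Worked example (PLCP value and bit positions assumed from iwm_rates
 * and the CCK note above): for the 11 Mbit/s CCK rate with PLCP 0x6e,
 * antenna A selected (nextant == 1) and IWM_RATE_MCS_ANT_POS == 14,
 * the entry is 0x6e | (1 << 14) | (1 << 9) == 0x426e.
 */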
4303 /* then fill the rest with the lowest possible rate */
4304 for (i = nrates; i < nitems(lq->rs_table); i++) {
4305 KASSERT(tab != 0, ("invalid tab"));
4306 lq->rs_table[i] = htole32(tab);
4310 static int
4311 iwm_media_change(struct ifnet *ifp)
4313 struct ieee80211vap *vap = ifp->if_softc;
4314 struct ieee80211com *ic = vap->iv_ic;
4315 struct iwm_softc *sc = ic->ic_softc;
4316 int error;
4318 error = ieee80211_media_change(ifp);
4319 if (error != ENETRESET)
4320 return error;
4322 IWM_LOCK(sc);
4323 if (ic->ic_nrunning > 0) {
4324 iwm_stop(sc);
4325 iwm_init(sc);
4327 IWM_UNLOCK(sc);
4328 return error;
4332 static int
4333 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4335 struct iwm_vap *ivp = IWM_VAP(vap);
4336 struct ieee80211com *ic = vap->iv_ic;
4337 struct iwm_softc *sc = ic->ic_softc;
4338 struct iwm_node *in;
4339 int error;
4341 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4342 "switching state %s -> %s\n",
4343 ieee80211_state_name[vap->iv_state],
4344 ieee80211_state_name[nstate]);
4345 IEEE80211_UNLOCK(ic);
4346 IWM_LOCK(sc);
4348 if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4349 iwm_led_blink_stop(sc);
4351 /* disable beacon filtering if we're hopping out of RUN */
4352 if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4353 iwm_mvm_disable_beacon_filter(sc);
4355 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4356 in->in_assoc = 0;
4358 if (nstate == IEEE80211_S_INIT) {
4359 IWM_UNLOCK(sc);
4360 IEEE80211_LOCK(ic);
4361 error = ivp->iv_newstate(vap, nstate, arg);
4362 IEEE80211_UNLOCK(ic);
4363 IWM_LOCK(sc);
4364 iwm_release(sc, NULL);
4365 IWM_UNLOCK(sc);
4366 IEEE80211_LOCK(ic);
4367 return error;
4371 * It's impossible to directly go RUN->SCAN. If we call iwm_release()
4372 * above then the card will be completely reinitialized,
4373 * so the driver must do everything necessary to bring the card
4374 * from INIT to SCAN.
4376 * Additionally, upon receiving deauth frame from AP,
4377 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4378 * state. This will also fail with this driver, so bring the FSM
4379 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4381 * XXX TODO: fix this for FreeBSD!
4383 if (nstate == IEEE80211_S_SCAN ||
4384 nstate == IEEE80211_S_AUTH ||
4385 nstate == IEEE80211_S_ASSOC) {
4386 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4387 "Force transition to INIT; MGT=%d\n", arg);
4388 IWM_UNLOCK(sc);
4389 IEEE80211_LOCK(ic);
4390 /* Always pass arg as -1 since we can't Tx right now. */
4392 * XXX arg is just ignored anyway when transitioning
4393 * to IEEE80211_S_INIT.
4395 vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4396 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4397 "Going INIT->SCAN\n");
4398 nstate = IEEE80211_S_SCAN;
4399 IEEE80211_UNLOCK(ic);
4400 IWM_LOCK(sc);
4404 switch (nstate) {
4405 case IEEE80211_S_INIT:
4406 case IEEE80211_S_SCAN:
4407 if (vap->iv_state == IEEE80211_S_AUTH ||
4408 vap->iv_state == IEEE80211_S_ASSOC) {
4409 int myerr;
4410 IWM_UNLOCK(sc);
4411 IEEE80211_LOCK(ic);
4412 myerr = ivp->iv_newstate(vap, nstate, arg);
4413 IEEE80211_UNLOCK(ic);
4414 IWM_LOCK(sc);
4415 error = iwm_mvm_rm_sta(sc, vap, FALSE);
4416 if (error) {
4417 device_printf(sc->sc_dev,
4418 "%s: Failed to remove station: %d\n",
4419 __func__, error);
4421 error = iwm_mvm_mac_ctxt_changed(sc, vap);
4422 if (error) {
4423 device_printf(sc->sc_dev,
4424 "%s: Failed to change mac context: %d\n",
4425 __func__, error);
4427 error = iwm_mvm_binding_remove_vif(sc, ivp);
4428 if (error) {
4429 device_printf(sc->sc_dev,
4430 "%s: Failed to remove channel ctx: %d\n",
4431 __func__, error);
4433 ivp->phy_ctxt = NULL;
4434 error = iwm_mvm_power_update_mac(sc);
4435 if (error != 0) {
4436 device_printf(sc->sc_dev,
4437 "%s: failed to update power management\n",
4438 __func__);
4440 IWM_UNLOCK(sc);
4441 IEEE80211_LOCK(ic);
4442 return myerr;
4444 break;
4446 case IEEE80211_S_AUTH:
4447 if ((error = iwm_auth(vap, sc)) != 0) {
4448 device_printf(sc->sc_dev,
4449 "%s: could not move to auth state: %d\n",
4450 __func__, error);
4452 break;
4454 case IEEE80211_S_ASSOC:
4456 * EBS may be disabled due to previous failures reported by FW.
4457 * Reset EBS status here, assuming the environment has changed.
4459 sc->last_ebs_successful = TRUE;
4460 break;
4462 case IEEE80211_S_RUN:
4463 in = IWM_NODE(vap->iv_bss);
4464 /* Update the association state, now we have it all */
4465 /* (e.g. the associd comes in at this point) */
4466 error = iwm_mvm_update_sta(sc, in);
4467 if (error != 0) {
4468 device_printf(sc->sc_dev,
4469 "%s: failed to update STA\n", __func__);
4470 IWM_UNLOCK(sc);
4471 IEEE80211_LOCK(ic);
4472 return error;
4474 in->in_assoc = 1;
4475 error = iwm_mvm_mac_ctxt_changed(sc, vap);
4476 if (error != 0) {
4477 device_printf(sc->sc_dev,
4478 "%s: failed to update MAC: %d\n", __func__, error);
4481 iwm_mvm_sf_update(sc, vap, FALSE);
4482 iwm_mvm_enable_beacon_filter(sc, ivp);
4483 iwm_mvm_power_update_mac(sc);
4484 iwm_mvm_update_quotas(sc, ivp);
4485 int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4486 iwm_setrates(sc, in, rix);
4488 if ((error = iwm_mvm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4489 device_printf(sc->sc_dev,
4490 "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4493 iwm_mvm_led_enable(sc);
4494 break;
4496 default:
4497 break;
4499 IWM_UNLOCK(sc);
4500 IEEE80211_LOCK(ic);
4502 return (ivp->iv_newstate(vap, nstate, arg));
4505 void
4506 iwm_endscan_cb(void *arg, int pending)
4508 struct iwm_softc *sc = arg;
4509 struct ieee80211com *ic = &sc->sc_ic;
4511 IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4512 "%s: scan ended\n",
4513 __func__);
4515 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4518 static int
4519 iwm_send_bt_init_conf(struct iwm_softc *sc)
4521 struct iwm_bt_coex_cmd bt_cmd;
4523 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4524 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4526 return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4527 &bt_cmd);
4530 static boolean_t
4531 iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4533 boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4534 boolean_t tlv_lar = fw_has_capa(&sc->ucode_capa,
4535 IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4537 if (iwm_lar_disable)
4538 return FALSE;
4541 * Enable LAR only if it is supported by the FW (TLV) &&
4542 * enabled in the NVM
4544 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4545 return nvm_lar && tlv_lar;
4546 else
4547 return tlv_lar;
4550 static boolean_t
4551 iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4553 return fw_has_api(&sc->ucode_capa,
4554 IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4555 fw_has_capa(&sc->ucode_capa,
4556 IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4559 static int
4560 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4562 struct iwm_mcc_update_cmd mcc_cmd;
4563 struct iwm_host_cmd hcmd = {
4564 .id = IWM_MCC_UPDATE_CMD,
4565 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4566 .data = { &mcc_cmd },
4568 int ret;
4569 #ifdef IWM_DEBUG
4570 struct iwm_rx_packet *pkt;
4571 struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4572 struct iwm_mcc_update_resp *mcc_resp;
4573 int n_channels;
4574 uint16_t mcc;
4575 #endif
4576 int resp_v2 = fw_has_capa(&sc->ucode_capa,
4577 IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4579 if (!iwm_mvm_is_lar_supported(sc)) {
4580 IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4581 __func__);
4582 return 0;
4585 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4586 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4587 if (iwm_mvm_is_wifi_mcc_supported(sc))
4588 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4589 else
4590 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4592 if (resp_v2)
4593 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4594 else
4595 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4597 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4598 "send MCC update to FW with '%c%c' src = %d\n",
4599 alpha2[0], alpha2[1], mcc_cmd.source_id);
4601 ret = iwm_send_cmd(sc, &hcmd);
4602 if (ret)
4603 return ret;
4605 #ifdef IWM_DEBUG
4606 pkt = hcmd.resp_pkt;
4608 /* Extract MCC response */
4609 if (resp_v2) {
4610 mcc_resp = (void *)pkt->data;
4611 mcc = mcc_resp->mcc;
4612 n_channels = le32toh(mcc_resp->n_channels);
4613 } else {
4614 mcc_resp_v1 = (void *)pkt->data;
4615 mcc = mcc_resp_v1->mcc;
4616 n_channels = le32toh(mcc_resp_v1->n_channels);
4619 /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4620 if (mcc == 0)
4621 mcc = 0x3030; /* "00" - world */
4623 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4624 "regulatory domain '%c%c' (%d channels available)\n",
4625 mcc >> 8, mcc & 0xff, n_channels);
4626 #endif
4627 iwm_free_resp(sc, &hcmd);
4629 return 0;
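/*
 * For example, alpha2 "US" packs to ('U' << 8) | 'S' == 0x5553; the
 * world domain "00" packing the same way is why a zero MCC is
 * rewritten to 0x3030 above.
 */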
4632 static void
4633 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4635 struct iwm_host_cmd cmd = {
4636 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4637 .len = { sizeof(uint32_t), },
4638 .data = { &backoff, },
4641 if (iwm_send_cmd(sc, &cmd) != 0) {
4642 device_printf(sc->sc_dev,
4643 "failed to change thermal tx backoff\n");
4647 static int
4648 iwm_init_hw(struct iwm_softc *sc)
4650 struct ieee80211com *ic = &sc->sc_ic;
4651 int error, i, ac;
4653 sc->sf_state = IWM_SF_UNINIT;
4655 if ((error = iwm_start_hw(sc)) != 0) {
4656 kprintf("iwm_start_hw: failed %d\n", error);
4657 return error;
4660 if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4661 kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
4662 return error;
4666 * We should stop and restart the HW since that INIT
4667 * image has just been loaded.
4669 iwm_stop_device(sc);
4670 sc->sc_ps_disabled = FALSE;
4671 if ((error = iwm_start_hw(sc)) != 0) {
4672 device_printf(sc->sc_dev, "could not initialize hardware\n");
4673 return error;
4676 /* Restart, this time with the regular firmware */
4677 error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4678 if (error) {
4679 device_printf(sc->sc_dev, "could not load firmware\n");
4680 goto error;
4683 error = iwm_mvm_sf_update(sc, NULL, FALSE);
4684 if (error)
4685 device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4687 if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4688 device_printf(sc->sc_dev, "bt init conf failed\n");
4689 goto error;
4692 error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4693 if (error != 0) {
4694 device_printf(sc->sc_dev, "antenna config failed\n");
4695 goto error;
4698 /* Send phy db control command and then phy db calibration */
4699 if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4700 goto error;
4702 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4703 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4704 goto error;
4707 /* Add auxiliary station for scanning */
4708 if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4709 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4710 goto error;
4713 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4715 * The channel used here isn't relevant as it's
4716 * going to be overwritten in the other flows.
4717 * For now use the first channel we have.
4719 if ((error = iwm_mvm_phy_ctxt_add(sc,
4720 &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4721 goto error;
4724 /* Initialize tx backoffs to the minimum. */
4725 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4726 iwm_mvm_tt_tx_backoff(sc, 0);
4728 error = iwm_mvm_power_update_device(sc);
4729 if (error)
4730 goto error;
4732 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4733 goto error;
4735 if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4736 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4737 goto error;
4740 /* Enable Tx queues. */
4741 for (ac = 0; ac < WME_NUM_AC; ac++) {
4742 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4743 iwm_mvm_ac_to_tx_fifo[ac]);
4744 if (error)
4745 goto error;
4748 if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4749 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4750 goto error;
4753 return 0;
4755 error:
4756 iwm_stop_device(sc);
4757 return error;
4760 /* Allow multicast from our BSSID. */
4761 static int
4762 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4764 struct ieee80211_node *ni = vap->iv_bss;
4765 struct iwm_mcast_filter_cmd *cmd;
4766 size_t size;
4767 int error;
4769 size = roundup(sizeof(*cmd), 4);
4770 cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4771 if (cmd == NULL)
4772 return ENOMEM;
4773 cmd->filter_own = 1;
4774 cmd->port_id = 0;
4775 cmd->count = 0;
4776 cmd->pass_all = 1;
4777 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4779 error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4780 IWM_CMD_SYNC, size, cmd);
4781 kfree(cmd, M_DEVBUF);
4783 return (error);
4787 * ifnet interfaces
4790 static void
4791 iwm_init(struct iwm_softc *sc)
4793 int error;
4795 if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4796 return;
4798 sc->sc_generation++;
4799 sc->sc_flags &= ~IWM_FLAG_STOPPED;
4801 if ((error = iwm_init_hw(sc)) != 0) {
4802 kprintf("iwm_init_hw failed %d\n", error);
4803 iwm_stop(sc);
4804 return;
4808 * Ok, firmware loaded and we are jogging
4810 sc->sc_flags |= IWM_FLAG_HW_INITED;
4811 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4814 static int
4815 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4817 struct iwm_softc *sc;
4818 int error;
4820 sc = ic->ic_softc;
4822 IWM_LOCK(sc);
4823 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4824 IWM_UNLOCK(sc);
4825 return (ENXIO);
4827 error = mbufq_enqueue(&sc->sc_snd, m);
4828 if (error) {
4829 IWM_UNLOCK(sc);
4830 return (error);
4832 iwm_start(sc);
4833 IWM_UNLOCK(sc);
4834 return (0);
4838 * Dequeue packets from sendq and call send.
4840 static void
4841 iwm_start(struct iwm_softc *sc)
4843 struct ieee80211_node *ni;
4844 struct mbuf *m;
4845 int ac = 0;
4847 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4848 while (sc->qfullmsk == 0 &&
4849 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4850 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4851 if (iwm_tx(sc, m, ni, ac) != 0) {
4852 if_inc_counter(ni->ni_vap->iv_ifp,
4853 IFCOUNTER_OERRORS, 1);
4854 ieee80211_free_node(ni);
4855 continue;
4857 sc->sc_tx_timer = 15;
4859 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4862 static void
4863 iwm_stop(struct iwm_softc *sc)
4866 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4867 sc->sc_flags |= IWM_FLAG_STOPPED;
4868 sc->sc_generation++;
4869 iwm_led_blink_stop(sc);
4870 sc->sc_tx_timer = 0;
4871 iwm_stop_device(sc);
4872 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4875 static void
4876 iwm_watchdog(void *arg)
4878 struct iwm_softc *sc = arg;
4880 if (sc->sc_tx_timer > 0) {
4881 if (--sc->sc_tx_timer == 0) {
4882 device_printf(sc->sc_dev, "device timeout\n");
4883 #ifdef IWM_DEBUG
4884 iwm_nic_error(sc);
4885 #endif
4886 iwm_stop(sc);
4887 #if defined(__DragonFly__)
4888 ++sc->sc_ic.ic_oerrors;
4889 #else
4890 counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4891 #endif
4892 return;
4895 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4898 static void
4899 iwm_parent(struct ieee80211com *ic)
4901 struct iwm_softc *sc = ic->ic_softc;
4902 int startall = 0;
4904 IWM_LOCK(sc);
4905 if (ic->ic_nrunning > 0) {
4906 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4907 iwm_init(sc);
4908 startall = 1;
4910 } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4911 iwm_stop(sc);
4912 IWM_UNLOCK(sc);
4913 if (startall)
4914 ieee80211_start_all(ic);
4918 * The interrupt side of things
4922 * error dumping routines are from iwlwifi/mvm/utils.c
4926 * Note: This structure is read from the device with IO accesses,
4927 * and the reading already does the endian conversion. As it is
4928 * read with uint32_t-sized accesses, any members with a different size
4929 * need to be ordered correctly though!
4931 struct iwm_error_event_table {
4932 uint32_t valid; /* (nonzero) valid, (0) log is empty */
4933 uint32_t error_id; /* type of error */
4934 uint32_t trm_hw_status0; /* TRM HW status */
4935 uint32_t trm_hw_status1; /* TRM HW status */
4936 uint32_t blink2; /* branch link */
4937 uint32_t ilink1; /* interrupt link */
4938 uint32_t ilink2; /* interrupt link */
4939 uint32_t data1; /* error-specific data */
4940 uint32_t data2; /* error-specific data */
4941 uint32_t data3; /* error-specific data */
4942 uint32_t bcon_time; /* beacon timer */
4943 uint32_t tsf_low; /* TSF (timing synchronization function) timer, low word */
4944 uint32_t tsf_hi; /* TSF (timing synchronization function) timer, high word */
4945 uint32_t gp1; /* GP1 timer register */
4946 uint32_t gp2; /* GP2 timer register */
4947 uint32_t fw_rev_type; /* firmware revision type */
4948 uint32_t major; /* uCode version major */
4949 uint32_t minor; /* uCode version minor */
4950 uint32_t hw_ver; /* HW Silicon version */
4951 uint32_t brd_ver; /* HW board version */
4952 uint32_t log_pc; /* log program counter */
4953 uint32_t frame_ptr; /* frame pointer */
4954 uint32_t stack_ptr; /* stack pointer */
4955 uint32_t hcmd; /* last host command header */
4956 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
4957 * rxtx_flag */
4958 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
4959 * host_flag */
4960 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
4961 * enc_flag */
4962 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
4963 * time_flag */
4964 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
4965 * wico interrupt */
4966 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
4967 uint32_t wait_event; /* wait event() caller address */
4968 uint32_t l2p_control; /* L2pControlField */
4969 uint32_t l2p_duration; /* L2pDurationField */
4970 uint32_t l2p_mhvalid; /* L2pMhValidBits */
4971 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
4972 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
4973 * (LMPM_PMG_SEL) */
4974 uint32_t u_timestamp; /* date and time of the uCode
4975 * compilation */
4976 uint32_t flow_handler; /* FH read/write pointers, RX credit */
4977 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4980 * UMAC error struct - relevant starting from family 8000 chip.
4981 * Note: This structure is read from the device with IO accesses,
4982 * and the reading already does the endian conversion. As it is
4983 * read with u32-sized accesses, any members with a different size
4984 * need to be ordered correctly though!
4986 struct iwm_umac_error_event_table {
4987 uint32_t valid; /* (nonzero) valid, (0) log is empty */
4988 uint32_t error_id; /* type of error */
4989 uint32_t blink1; /* branch link */
4990 uint32_t blink2; /* branch link */
4991 uint32_t ilink1; /* interrupt link */
4992 uint32_t ilink2; /* interrupt link */
4993 uint32_t data1; /* error-specific data */
4994 uint32_t data2; /* error-specific data */
4995 uint32_t data3; /* error-specific data */
4996 uint32_t umac_major;
4997 uint32_t umac_minor;
4998 uint32_t frame_pointer; /* core register 27*/
4999 uint32_t stack_pointer; /* core register 28 */
5000 uint32_t cmd_header; /* latest host cmd sent to UMAC */
5001 uint32_t nic_isr_pref; /* ISR status register */
5002 } __packed;
5004 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
5005 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
5007 #ifdef IWM_DEBUG
5008 struct {
5009 const char *name;
5010 uint8_t num;
5011 } advanced_lookup[] = {
5012 { "NMI_INTERRUPT_WDG", 0x34 },
5013 { "SYSASSERT", 0x35 },
5014 { "UCODE_VERSION_MISMATCH", 0x37 },
5015 { "BAD_COMMAND", 0x38 },
5016 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5017 { "FATAL_ERROR", 0x3D },
5018 { "NMI_TRM_HW_ERR", 0x46 },
5019 { "NMI_INTERRUPT_TRM", 0x4C },
5020 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5021 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5022 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5023 { "NMI_INTERRUPT_HOST", 0x66 },
5024 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
5025 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
5026 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5027 { "ADVANCED_SYSASSERT", 0 },
5030 static const char *
5031 iwm_desc_lookup(uint32_t num)
5033 int i;
5035 for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5036 if (advanced_lookup[i].num == num)
5037 return advanced_lookup[i].name;
5039 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5040 return advanced_lookup[i].name;
5043 static void
5044 iwm_nic_umac_error(struct iwm_softc *sc)
5046 struct iwm_umac_error_event_table table;
5047 uint32_t base;
5049 base = sc->umac_error_event_table;
5051 if (base < 0x800000) {
5052 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5053 base);
5054 return;
5057 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5058 device_printf(sc->sc_dev, "reading errlog failed\n");
5059 return;
5062 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5063 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5064 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5065 sc->sc_flags, table.valid);
5068 device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5069 iwm_desc_lookup(table.error_id));
5070 device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5071 device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5072 device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5073 table.ilink1);
5074 device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5075 table.ilink2);
5076 device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5077 device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5078 device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5079 device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5080 device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5081 device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5082 table.frame_pointer);
5083 device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5084 table.stack_pointer);
5085 device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5086 device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5087 table.nic_isr_pref);
5091 * Support for dumping the error log seemed like a good idea ...
5092 * but it's mostly hex junk and the only sensible thing is the
5093 * hw/ucode revision (which we know anyway). Since it's here,
5094 * I'll just leave it in, just in case e.g. the Intel guys want to
5095 * help us decipher some "ADVANCED_SYSASSERT" later.
5097 static void
5098 iwm_nic_error(struct iwm_softc *sc)
5100 struct iwm_error_event_table table;
5101 uint32_t base;
5103 device_printf(sc->sc_dev, "dumping device error log\n");
5104 base = sc->error_event_table;
5105 if (base < 0x800000) {
5106 device_printf(sc->sc_dev,
5107 "Invalid error log pointer 0x%08x\n", base);
5108 return;
5111 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5112 device_printf(sc->sc_dev, "reading errlog failed\n");
5113 return;
5116 if (!table.valid) {
5117 device_printf(sc->sc_dev, "errlog not found, skipping\n");
5118 return;
5121 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5122 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5123 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5124 sc->sc_flags, table.valid);
5127 device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5128 iwm_desc_lookup(table.error_id));
5129 device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5130 table.trm_hw_status0);
5131 device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5132 table.trm_hw_status1);
5133 device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5134 device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5135 device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5136 device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5137 device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5138 device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5139 device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5140 device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5141 device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5142 device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5143 device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5144 device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5145 table.fw_rev_type);
5146 device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5147 device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5148 device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5149 device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5150 device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5151 device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5152 device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5153 device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5154 device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5155 device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5156 device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5157 device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5158 device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5159 device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5160 device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5161 device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5162 device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5163 device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5164 device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5166 if (sc->umac_error_event_table)
5167 iwm_nic_umac_error(sc);
5169 #endif
5171 static void
5172 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5174 struct ieee80211com *ic = &sc->sc_ic;
5175 struct iwm_cmd_response *cresp;
5176 struct mbuf *m1;
5177 uint32_t offset = 0;
5178 uint32_t maxoff = IWM_RBUF_SIZE;
5179 uint32_t nextoff;
5180 boolean_t stolen = FALSE;
5182 #define HAVEROOM(a) \
5183 ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5185 while (HAVEROOM(offset)) {
5186 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5187 offset);
5188 int qid, idx, code, len;
5190 qid = pkt->hdr.qid;
5191 idx = pkt->hdr.idx;
5193 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5196 * We randomly get these from the firmware; no idea why. They
5197 * at least seem harmless, so just ignore them for now.
5199 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5200 pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5201 break;
5204 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5205 "rx packet qid=%d idx=%d type=%x\n",
5206 qid & ~0x80, pkt->hdr.idx, code);
5208 len = iwm_rx_packet_len(pkt);
5209 len += sizeof(uint32_t); /* account for status word */
5210 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
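/*
 * For example, with IWM_FH_RSCSR_FRAME_ALIGN being 0x40 (an
 * assumption here), a 113-byte packet plus the 4-byte status word
 * advances the offset by roundup2(117, 0x40) == 128 bytes.
 */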
5212 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5214 switch (code) {
5215 case IWM_REPLY_RX_PHY_CMD:
5216 iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5217 break;
5219 case IWM_REPLY_RX_MPDU_CMD: {
5221 * If this is the last frame in the RX buffer, we
5222 * can directly feed the mbuf to the sharks here.
5224 struct iwm_rx_packet *nextpkt = mtodoff(m,
5225 struct iwm_rx_packet *, nextoff);
5226 if (!HAVEROOM(nextoff) ||
5227 (nextpkt->hdr.code == 0 &&
5228 (nextpkt->hdr.qid & ~0x80) == 0 &&
5229 nextpkt->hdr.idx == 0) ||
5230 (nextpkt->len_n_flags ==
5231 htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5232 if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5233 stolen = FALSE;
5234 /* Make sure we abort the loop */
5235 nextoff = maxoff;
5237 break;
5241 * Use m_copym instead of m_split, because that
5242 * makes it easier to keep a valid rx buffer in
5243 * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5245 * We need to start m_copym() at offset 0, to get the
5246 * M_PKTHDR flag preserved.
5248 m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5249 if (m1) {
5250 if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5251 stolen = TRUE;
5252 else
5253 m_freem(m1);
5255 break;
5258 case IWM_TX_CMD:
5259 iwm_mvm_rx_tx_cmd(sc, pkt);
5260 break;
5262 case IWM_MISSED_BEACONS_NOTIFICATION: {
5263 struct iwm_missed_beacons_notif *resp;
5264 int missed;
5266 /* XXX look at mac_id to determine interface ID */
5267 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5269 resp = (void *)pkt->data;
5270 missed = le32toh(resp->consec_missed_beacons);
5272 IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5273 "%s: MISSED_BEACON: mac_id=%d, "
5274 "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5275 "num_rx=%d\n",
5276 __func__,
5277 le32toh(resp->mac_id),
5278 le32toh(resp->consec_missed_beacons_since_last_rx),
5279 le32toh(resp->consec_missed_beacons),
5280 le32toh(resp->num_expected_beacons),
5281 le32toh(resp->num_recvd_beacons));
5283 /* Be paranoid */
5284 if (vap == NULL)
5285 break;
5287 /* XXX no net80211 locking? */
5288 if (vap->iv_state == IEEE80211_S_RUN &&
5289 (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5290 if (missed > vap->iv_bmissthreshold) {
5291 /* XXX bad locking; turn into task */
5292 IWM_UNLOCK(sc);
5293 ieee80211_beacon_miss(ic);
5294 IWM_LOCK(sc);
5298 break; }
5300 case IWM_MFUART_LOAD_NOTIFICATION:
5301 break;
5303 case IWM_MVM_ALIVE:
5304 break;
5306 case IWM_CALIB_RES_NOTIF_PHY_DB:
5307 break;
5309 case IWM_STATISTICS_NOTIFICATION:
5310 iwm_mvm_handle_rx_statistics(sc, pkt);
5311 break;
5313 case IWM_NVM_ACCESS_CMD:
5314 case IWM_MCC_UPDATE_CMD:
5315 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5316 memcpy(sc->sc_cmd_resp,
5317 pkt, sizeof(sc->sc_cmd_resp));
5319 break;
5321 case IWM_MCC_CHUB_UPDATE_CMD: {
5322 struct iwm_mcc_chub_notif *notif;
5323 notif = (void *)pkt->data;
5325 sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5326 sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5327 sc->sc_fw_mcc[2] = '\0';
5328 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5329 "fw source %d sent CC '%s'\n",
5330 notif->source_id, sc->sc_fw_mcc);
5331 break;
5334 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5335 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5336 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5337 struct iwm_dts_measurement_notif_v1 *notif;
5339 if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5340 device_printf(sc->sc_dev,
5341 "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5342 break;
5344 notif = (void *)pkt->data;
5345 IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5346 "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5347 notif->temp);
5348 break;
5351 case IWM_PHY_CONFIGURATION_CMD:
5352 case IWM_TX_ANT_CONFIGURATION_CMD:
5353 case IWM_ADD_STA:
5354 case IWM_MAC_CONTEXT_CMD:
5355 case IWM_REPLY_SF_CFG_CMD:
5356 case IWM_POWER_TABLE_CMD:
5357 case IWM_PHY_CONTEXT_CMD:
5358 case IWM_BINDING_CONTEXT_CMD:
5359 case IWM_TIME_EVENT_CMD:
5360 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5361 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5362 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5363 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5364 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5365 case IWM_REPLY_BEACON_FILTERING_CMD:
5366 case IWM_MAC_PM_POWER_TABLE:
5367 case IWM_TIME_QUOTA_CMD:
5368 case IWM_REMOVE_STA:
5369 case IWM_TXPATH_FLUSH:
5370 case IWM_LQ_CMD:
5371 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5372 IWM_FW_PAGING_BLOCK_CMD):
5373 case IWM_BT_CONFIG:
5374 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5375 cresp = (void *)pkt->data;
5376 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5377 memcpy(sc->sc_cmd_resp,
5378 pkt, sizeof(*pkt)+sizeof(*cresp));
5380 break;
5382 /* ignore */
5383 case IWM_PHY_DB_CMD:
5384 break;
5386 case IWM_INIT_COMPLETE_NOTIF:
5387 break;
5389 case IWM_SCAN_OFFLOAD_COMPLETE:
5390 iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5391 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5392 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5393 ieee80211_runtask(ic, &sc->sc_es_task);
5395 break;
5397 case IWM_SCAN_ITERATION_COMPLETE: {
5398 struct iwm_lmac_scan_complete_notif *notif;
5399 notif = (void *)pkt->data;
5400 break;
5403 case IWM_SCAN_COMPLETE_UMAC:
5404 iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5405 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5406 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5407 ieee80211_runtask(ic, &sc->sc_es_task);
5409 break;
5411 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5412 struct iwm_umac_scan_iter_complete_notif *notif;
5413 notif = (void *)pkt->data;
5415 IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5416 "complete, status=0x%x, %d channels scanned\n",
5417 notif->status, notif->scanned_channels);
5418 break;
5421 case IWM_REPLY_ERROR: {
5422 struct iwm_error_resp *resp;
5423 resp = (void *)pkt->data;
5425 device_printf(sc->sc_dev,
5426 "firmware error 0x%x, cmd 0x%x\n",
5427 le32toh(resp->error_type),
5428 resp->cmd_id);
5429 break;
5432 case IWM_TIME_EVENT_NOTIFICATION: {
5433 struct iwm_time_event_notif *notif;
5434 notif = (void *)pkt->data;
5436 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5437 "TE notif status = 0x%x action = 0x%x\n",
5438 notif->status, notif->action);
5439 break;
5443 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5444 * messages. Just ignore them for now.
5446 case IWM_DEBUG_LOG_MSG:
5447 break;
5449 case IWM_MCAST_FILTER_CMD:
5450 break;
5452 case IWM_SCD_QUEUE_CFG: {
5453 struct iwm_scd_txq_cfg_rsp *rsp;
5454 rsp = (void *)pkt->data;
5456 IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5457 "queue cfg token=0x%x sta_id=%d "
5458 "tid=%d scd_queue=%d\n",
5459 rsp->token, rsp->sta_id, rsp->tid,
5460 rsp->scd_queue);
5461 break;
5464 default:
5465 device_printf(sc->sc_dev,
5466 "frame %d/%d %x UNHANDLED (this should "
5467 "not happen)\n", qid & ~0x80, idx,
5468 pkt->len_n_flags);
5469 break;
5473 * Why test bit 0x80? The Linux driver:
5475 * There is one exception: uCode sets bit 15 when it
5476 * originates the response/notification, i.e. when the
5477 * response/notification is not a direct response to a
5478 * command sent by the driver. For example, uCode issues
5479 * IWM_REPLY_RX when it sends a received frame to the driver;
5480 * it is not a direct response to any driver command.
5482 * Ok, so since when is 7 == 15? Well, the Linux driver
5483 * uses a slightly different format for pkt->hdr, and "qid"
5484 * is actually the upper byte of a two-byte field.
5486 if (!(qid & (1 << 7)))
5487 iwm_cmd_done(sc, pkt);
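/*
 * For example, a direct response to a driver command arrives with
 * e.g. qid 0x05 and is completed here, while a firmware-originated
 * notification carries qid 0x85 (bit 7 set) and is not.
 */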
5489 offset = nextoff;
5491 if (stolen)
5492 m_freem(m);
5493 #undef HAVEROOM
5497 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5498 * Basic structure from if_iwn
5500 static void
5501 iwm_notif_intr(struct iwm_softc *sc)
5503 uint16_t hw;
5505 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5506 BUS_DMASYNC_POSTREAD);
5508 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5511 * Process responses
5513 while (sc->rxq.cur != hw) {
5514 struct iwm_rx_ring *ring = &sc->rxq;
5515 struct iwm_rx_data *data = &ring->data[ring->cur];
5517 bus_dmamap_sync(ring->data_dmat, data->map,
5518 BUS_DMASYNC_POSTREAD);
5520 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5521 "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5522 iwm_handle_rxb(sc, data->m);
5524 ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5528 * Tell the firmware that it can reuse the ring entries that
5529 * we have just processed.
5530 * Seems like the hardware gets upset unless we align
5531 * the write pointer to a multiple of 8??
5533 hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5534 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
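/*
 * For example, a closed_rb_num of 37 becomes hw = 36 and we write
 * back rounddown2(36, 8) == 32, keeping the write pointer 8-aligned.
 */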
5537 static void
5538 iwm_intr(void *arg)
5540 struct iwm_softc *sc = arg;
5541 int handled = 0;
5542 int r1, r2, rv = 0;
5543 int isperiodic = 0;
5545 #if defined(__DragonFly__)
5546 if (sc->sc_mem == NULL) {
5547 kprintf("iwm_intr: detached\n");
5548 return;
5550 #endif
5551 IWM_LOCK(sc);
5552 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5554 if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5555 uint32_t *ict = sc->ict_dma.vaddr;
5556 int tmp;
5558 tmp = htole32(ict[sc->ict_cur]);
5559 if (!tmp)
5560 goto out_ena;
5563 * OK, there was something; keep plowing until we have it all.
5565 r1 = r2 = 0;
5566 while (tmp) {
5567 r1 |= tmp;
5568 ict[sc->ict_cur] = 0;
5569 sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5570 tmp = htole32(ict[sc->ict_cur]);
5573 /* this is where the fun begins. don't ask */
5574 if (r1 == 0xffffffff)
5575 r1 = 0;
5577 /* I am not expected to understand this */
5578 if (r1 & 0xc0000)
5579 r1 |= 0x8000;
5580 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
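/*
 * For example, a raw folded ICT value of 0x0102 unfolds to
 * (0x0102 & 0xff) | ((0x0102 & 0xff00) << 16) == 0x01000002.
 */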
5581 } else {
5582 r1 = IWM_READ(sc, IWM_CSR_INT);
5583 /* "hardware gone" (where, fishing?) */
5584 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5585 goto out;
5586 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5588 if (r1 == 0 && r2 == 0) {
5589 goto out_ena;
5592 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5594 /* Safely ignore these bits for debug checks below */
5595 r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5597 if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5598 int i;
5599 struct ieee80211com *ic = &sc->sc_ic;
5600 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5602 #ifdef IWM_DEBUG
5603 iwm_nic_error(sc);
5604 #endif
5605 /* Dump driver status (TX and RX rings) while we're here. */
5606 device_printf(sc->sc_dev, "driver status:\n");
5607 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5608 struct iwm_tx_ring *ring = &sc->txq[i];
5609 device_printf(sc->sc_dev,
5610 " tx ring %2d: qid=%-2d cur=%-3d "
5611 "queued=%-3d\n",
5612 i, ring->qid, ring->cur, ring->queued);
5614 device_printf(sc->sc_dev,
5615 " rx ring: cur=%d\n", sc->rxq.cur);
5616 device_printf(sc->sc_dev,
5617 " 802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5619 /* Don't stop the device; just do a VAP restart */
5620 IWM_UNLOCK(sc);
5622 if (vap == NULL) {
5623 kprintf("%s: null vap\n", __func__);
5624 return;
5627 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5628 "restarting\n", __func__, vap->iv_state);
5630 ieee80211_restart_all(ic);
5631 return;
5634 if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5635 handled |= IWM_CSR_INT_BIT_HW_ERR;
5636 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5637 iwm_stop(sc);
5638 rv = 1;
5639 goto out;
5642 /* firmware chunk loaded */
5643 if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5644 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5645 handled |= IWM_CSR_INT_BIT_FH_TX;
5646 sc->sc_fw_chunk_done = 1;
5647 wakeup(&sc->sc_fw);
5650 if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5651 handled |= IWM_CSR_INT_BIT_RF_KILL;
5652 if (iwm_check_rfkill(sc)) {
5653 device_printf(sc->sc_dev,
5654 "%s: rfkill switch, disabling interface\n",
5655 __func__);
5656 iwm_stop(sc);
5661 * The Linux driver uses periodic interrupts to avoid races.
5662 * We cargo-cult like it's going out of fashion.
5664 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5665 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5666 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5667 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5668 IWM_WRITE_1(sc,
5669 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5670 isperiodic = 1;
5673 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5674 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5675 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5677 iwm_notif_intr(sc);
5679 /* enable periodic interrupt, see above */
5680 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5681 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5682 IWM_CSR_INT_PERIODIC_ENA);
5685 if (__predict_false(r1 & ~handled))
5686 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5687 "%s: unhandled interrupts: %x\n", __func__, r1);
5688 rv = 1;
5690 out_ena:
5691 iwm_restore_interrupts(sc);
5692 out:
5693 IWM_UNLOCK(sc);
5694 return;
5698 * Autoconf glue-sniffing
5700 #define PCI_VENDOR_INTEL 0x8086
5701 #define PCI_PRODUCT_INTEL_WL_3160_1 0x08b3
5702 #define PCI_PRODUCT_INTEL_WL_3160_2 0x08b4
5703 #define PCI_PRODUCT_INTEL_WL_3165_1 0x3165
5704 #define PCI_PRODUCT_INTEL_WL_3165_2 0x3166
5705 #define PCI_PRODUCT_INTEL_WL_7260_1 0x08b1
5706 #define PCI_PRODUCT_INTEL_WL_7260_2 0x08b2
5707 #define PCI_PRODUCT_INTEL_WL_7265_1 0x095a
5708 #define PCI_PRODUCT_INTEL_WL_7265_2 0x095b
5709 #define PCI_PRODUCT_INTEL_WL_8260_1 0x24f3
5710 #define PCI_PRODUCT_INTEL_WL_8260_2 0x24f4
5712 static const struct iwm_devices {
5713 uint16_t device;
5714 const struct iwm_cfg *cfg;
5715 } iwm_devices[] = {
5716 { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5717 { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5718 { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5719 { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5720 { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5721 { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5722 { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5723 { PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5724 { PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5725 { PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5728 static int
5729 iwm_probe(device_t dev)
5731 int i;
5733 for (i = 0; i < nitems(iwm_devices); i++) {
5734 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5735 pci_get_device(dev) == iwm_devices[i].device) {
5736 device_set_desc(dev, iwm_devices[i].cfg->name);
5737 return (BUS_PROBE_DEFAULT);
5741 return (ENXIO);
5744 static int
5745 iwm_dev_check(device_t dev)
5747 struct iwm_softc *sc;
5748 uint16_t devid;
5749 int i;
5751 sc = device_get_softc(dev);
5753 devid = pci_get_device(dev);
5754 for (i = 0; i < NELEM(iwm_devices); i++) {
5755 if (iwm_devices[i].device == devid) {
5756 sc->cfg = iwm_devices[i].cfg;
5757 return (0);
5760 device_printf(dev, "unknown adapter type\n");
5761 return ENXIO;
5764 /* PCI registers */
5765 #define PCI_CFG_RETRY_TIMEOUT 0x041

static int
iwm_pci_attach(device_t dev)
{
	struct iwm_softc *sc;
	int count, error, rid;
	uint16_t reg;
#if defined(__DragonFly__)
	int irq_flags;
#endif

	sc = device_get_softc(dev);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);

	/* Enable bus-mastering and hardware bug workaround. */
	pci_enable_busmaster(dev);
	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
	/* if !MSI */
	if (reg & PCIM_STATUS_INTxSTATE) {
		reg &= ~PCIM_STATUS_INTxSTATE;
	}
	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));

	rid = PCIR_BAR(0);
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(sc->sc_dev, "can't map mem space\n");
		return (ENXIO);
	}
	sc->sc_st = rman_get_bustag(sc->sc_mem);
	sc->sc_sh = rman_get_bushandle(sc->sc_mem);

	/* Install interrupt handler. */
	count = 1;
	rid = 0;
#if defined(__DragonFly__)
	pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
#else
	if (pci_alloc_msi(dev, &count) == 0)
		rid = 1;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
#endif
	if (sc->sc_irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
		return (ENXIO);
	}
#if defined(__DragonFly__)
	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
	    iwm_intr, sc, &sc->sc_ih,
	    &wlan_global_serializer);
#else
	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwm_intr, sc, &sc->sc_ih);
#endif
	if (sc->sc_ih == NULL) {
		device_printf(dev, "can't establish interrupt\n");
#if defined(__DragonFly__)
		pci_release_msi(dev);
#endif
		return (ENXIO);
	}
	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);

	return (0);
}
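
/*
 * Undo iwm_pci_attach() in reverse order: tear down the interrupt
 * handler and MSI resources, then the BAR 0 mapping.  On DragonFly the
 * handles are NULLed out so a repeated detach stays harmless.
 */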

static void
iwm_pci_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
#if defined(__DragonFly__)
		sc->sc_irq = NULL;
#endif
	}
	if (sc->sc_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);
#if defined(__DragonFly__)
		sc->sc_mem = NULL;
#endif
	}
}
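
/*
 * Main attach: allocate the software state and all DMA-able memory
 * (firmware buffers, "Keep Warm" page, ICT table, TX scheduler and
 * TX/RX rings), then defer the firmware-dependent half of the setup to
 * iwm_preinit() through a config_intrhook, which runs once interrupts
 * are available.
 */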

static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
#if defined(__DragonFly__)
	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
#else
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
#endif
	callout_init(&sc->sc_led_blink_to);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	sc->sf_state = IWM_SF_UNINIT;

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	sc->last_ebs_successful = TRUE;

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed, and the revision step now also includes bits 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible, we store it in
	 * the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
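
	/*
	 * A note on the expression above: assuming IWM_CSR_HW_REV_STEP
	 * matches iwlwifi's CSR_HW_REV_STEP, i.e. ((x) >> 2) & 3, the
	 * "<< 2" inside the macro argument makes it pick the step from
	 * bits 1:0 of the raw 8000-family value and re-store it in bits
	 * 3:2, where the old "dash" step lived (e.g. 0x0125 -> 0x0124).
	 */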

	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
				    (IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	sc->sc_preinit_hook.ich_desc = "iwm";
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return (0);

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return (ENXIO);
}

static int
iwm_is_valid_ether_addr(uint8_t *addr)
{
	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
		return (FALSE);

	return (TRUE);
}
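
/*
 * Push net80211's WME/EDCA parameters to the firmware.  net80211 keeps
 * CWmin/CWmax as exponents (ECW); IWM_EXP2 below converts them to the
 * actual window size, e.g. ECWmin = 4 gives CWmin = 2^4 - 1 = 15 slots.
 * The parameters are copied out under the 802.11 lock and applied under
 * the driver lock, so we never hold both at once.
 */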

static int
iwm_wme_update(struct ieee80211com *ic)
{
#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct iwm_softc *sc = ic->ic_softc;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct iwm_node *in;
	struct wmeParams tmp[WME_NUM_AC];
	int aci, error;

	if (vap == NULL)
		return (0);

	IEEE80211_LOCK(ic);
	for (aci = 0; aci < WME_NUM_AC; aci++)
		tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
	IEEE80211_UNLOCK(ic);

	IWM_LOCK(sc);
	for (aci = 0; aci < WME_NUM_AC; aci++) {
		const struct wmeParams *ac = &tmp[aci];
		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
		ivp->queue_params[aci].edca_txop =
		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
	}
	ivp->have_wme = TRUE;
	if (ivp->is_uploaded && vap->iv_bss != NULL) {
		in = IWM_NODE(vap->iv_bss);
		if (in->in_assoc) {
			if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: failed to update MAC\n", __func__);
			}
		}
	}
	IWM_UNLOCK(sc);

	return (0);
#undef IWM_EXP2
}
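
/*
 * Deferred attach, run from the config_intrhook established in
 * iwm_attach() once interrupts work: bring the hardware up far enough
 * to run the init firmware and read the NVM (MAC address and band
 * capabilities), then attach to net80211 and install the driver
 * methods.
 */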

static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_wme_update;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}

/*
 * Attach the interface to 802.11 radiotap.
 */
static void
iwm_radiotap_attach(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s begin\n", __func__);
	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWM_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWM_RX_RADIOTAP_PRESENT);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s end\n", __func__);
}

static struct ieee80211vap *
iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwm_vap *ivp;
	struct ieee80211vap *vap;

	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
		return NULL;
	ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
	vap = &ivp->iv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	vap->iv_bmissthreshold = 10;		/* override default */
	/* Override with driver methods. */
	ivp->iv_newstate = vap->iv_newstate;
	vap->iv_newstate = iwm_newstate;

	ivp->id = IWM_DEFAULT_MACID;
	ivp->color = IWM_DEFAULT_COLOR;

	ivp->have_wme = FALSE;
	ivp->ps_disabled = FALSE;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
	    mac);
	ic->ic_opmode = opmode;

	return vap;
}

static void
iwm_vap_delete(struct ieee80211vap *vap)
{
	struct iwm_vap *ivp = IWM_VAP(vap);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	kfree(ivp, M_80211_VAP);
}
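
/*
 * Drain the software transmit queue.  Per the usual net80211
 * convention, each queued mbuf carries a node reference stashed in
 * m_pkthdr.rcvif, which must be dropped along with the mbuf itself.
 */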

static void
iwm_xmit_queue_drain(struct iwm_softc *sc)
{
	struct mbuf *m;
	struct ieee80211_node *ni;

	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		ieee80211_free_node(ni);
		m_freem(m);
	}
}

static void
iwm_scan_start(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;
	int error;

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/* This should not be possible */
		device_printf(sc->sc_dev,
		    "%s: Previous scan not completed yet\n", __func__);
	}
	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
		error = iwm_mvm_umac_scan(sc);
	else
		error = iwm_mvm_lmac_scan(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not initiate scan\n");
		IWM_UNLOCK(sc);
		ieee80211_cancel_scan(vap);
	} else {
		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
		iwm_led_blink_start(sc);
		IWM_UNLOCK(sc);
	}
}

static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}
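
/*
 * The scan/channel callbacks below are intentionally empty: for this
 * device the scan is offloaded to the firmware, which presumably
 * handles channel switching and dwell timing itself, so there is
 * nothing for the driver to do per channel.
 */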

static void
iwm_update_mcast(struct ieee80211com *ic)
{
}

static void
iwm_set_channel(struct ieee80211com *ic)
{
}

static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}

static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
	return;
}

void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	while (sc->sc_flags & IWM_FLAG_BUSY) {
#if defined(__DragonFly__)
		lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
#else
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
#endif
	}
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
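
/*
 * Suspend/resume glue.  iwm_suspend() stops a running interface and
 * appears to repurpose the IWM_FLAG_SCANNING bit as a "was running
 * before suspend" marker; iwm_resume() restarts the hardware through
 * iwm_init_task() and, when that marker is set, clears it and kicks
 * net80211 with ieee80211_resume_all().
 */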

static int
iwm_resume(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	int do_reinit = 0;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
	iwm_init_task(device_get_softc(dev));

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCANNING) {
		sc->sc_flags &= ~IWM_FLAG_SCANNING;
		do_reinit = 1;
	}
	IWM_UNLOCK(sc);

	if (do_reinit)
		ieee80211_resume_all(&sc->sc_ic);

	return (0);
}

static int
iwm_suspend(device_t dev)
{
	int do_stop = 0;
	struct iwm_softc *sc = device_get_softc(dev);

	do_stop = !! (sc->sc_ic.ic_nrunning > 0);

	ieee80211_suspend_all(&sc->sc_ic);

	if (do_stop) {
		IWM_LOCK(sc);
		iwm_stop(sc);
		sc->sc_flags |= IWM_FLAG_SCANNING;
		IWM_UNLOCK(sc);
	}

	return (0);
}

static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	if (!sc->sc_attached)
		return (0);
	sc->sc_attached = 0;
	if (do_net80211) {
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
	}
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		IWM_LOCK(sc);
		iwm_xmit_queue_drain(sc);
		IWM_UNLOCK(sc);
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	IWM_LOCK_DESTROY(sc);

	return (0);
}

static int
iwm_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	return (iwm_detach_local(sc, 1));
}

static device_method_t iwm_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwm_probe),
	DEVMETHOD(device_attach,	iwm_attach),
	DEVMETHOD(device_detach,	iwm_detach),
	DEVMETHOD(device_suspend,	iwm_suspend),
	DEVMETHOD(device_resume,	iwm_resume),

	DEVMETHOD_END
};

static driver_t iwm_pci_driver = {
	"iwm",
	iwm_pci_methods,
	sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);