if_iwm - Clean up iwm(4) scanning logic a bit.
[dragonfly.git] / sys / dev / netif / iwm / if_iwm.c
blob5817b0b9e2f3be1d1b3036ed830230aec190ec04
1 /* $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $ */
3 /*
4 * Copyright (c) 2014 genua mbh <info@genua.de>
5 * Copyright (c) 2014 Fixup Software Ltd.
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 /*-
21 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22 * which were used as the reference documentation for this implementation.
24 * Driver version we are currently based off of is
25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
27 ***********************************************************************
29 * This file is provided under a dual BSD/GPLv2 license. When using or
30 * redistributing this file, you may do so under either license.
32 * GPL LICENSE SUMMARY
34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
43 * General Public License for more details.
45 * You should have received a copy of the GNU General Public License
46 * along with this program; if not, write to the Free Software
47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48 * USA
50 * The full GNU General Public License is included in this distribution
51 * in the file called COPYING.
53 * Contact Information:
54 * Intel Linux Wireless <ilw@linux.intel.com>
55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
58 * BSD LICENSE
60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61 * All rights reserved.
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
65 * are met:
67 * * Redistributions of source code must retain the above copyright
68 * notice, this list of conditions and the following disclaimer.
69 * * Redistributions in binary form must reproduce the above copyright
70 * notice, this list of conditions and the following disclaimer in
71 * the documentation and/or other materials provided with the
72 * distribution.
73 * * Neither the name Intel Corporation nor the names of its
74 * contributors may be used to endorse or promote products derived
75 * from this software without specific prior written permission.
77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
90 /*-
91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
93 * Permission to use, copy, modify, and distribute this software for any
94 * purpose with or without fee is hereby granted, provided that the above
95 * copyright notice and this permission notice appear in all copies.
97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
106 * DragonFly work
108 * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109 * changes to remove per-device network interface (DragonFly has not
110 * caught up to that yet on the WLAN side).
112 * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113 * malloc -> kmalloc (in particular, changing improper M_NOWAIT
114 * specifications to M_INTWAIT. We still don't
115 * understand why FreeBSD uses M_NOWAIT for
116 * critical must-not-fail kmalloc()s).
117 * free -> kfree
118 * printf -> kprintf
119 * (bug fix) memset in iwm_reset_rx_ring.
120 * (debug) added several kprintf()s on error
122 * header file paths (DFly allows localized path specifications).
123 * minor header file differences.
125 * Comprehensive list of adjustments for DragonFly #ifdef'd:
126 * (safety) added register read-back serialization in iwm_reset_rx_ring().
127 * packet counters
128 * msleep -> iwmsleep (handle deadlocks due to dfly interrupt serializer)
129 * mtx -> lk (mtx functions -> lockmgr functions)
130 * callout differences
131 * taskqueue differences
132 * MSI differences
133 * bus_setup_intr() differences
134 * minor PCI config register naming differences
136 #include <sys/cdefs.h>
137 __FBSDID("$FreeBSD$");
139 #include <sys/param.h>
140 #include <sys/bus.h>
141 #include <sys/endian.h>
142 #include <sys/firmware.h>
143 #include <sys/kernel.h>
144 #include <sys/malloc.h>
145 #include <sys/mbuf.h>
146 #include <sys/mutex.h>
147 #include <sys/module.h>
148 #include <sys/proc.h>
149 #include <sys/rman.h>
150 #include <sys/socket.h>
151 #include <sys/sockio.h>
152 #include <sys/sysctl.h>
153 #include <sys/linker.h>
155 #include <machine/endian.h>
157 #include <bus/pci/pcivar.h>
158 #include <bus/pci/pcireg.h>
160 #include <net/bpf.h>
162 #include <net/if.h>
163 #include <net/if_var.h>
164 #include <net/if_arp.h>
165 #include <net/if_dl.h>
166 #include <net/if_media.h>
167 #include <net/if_types.h>
169 #include <netinet/in.h>
170 #include <netinet/in_systm.h>
171 #include <netinet/if_ether.h>
172 #include <netinet/ip.h>
174 #include <netproto/802_11/ieee80211_var.h>
175 #include <netproto/802_11/ieee80211_regdomain.h>
176 #include <netproto/802_11/ieee80211_ratectl.h>
177 #include <netproto/802_11/ieee80211_radiotap.h>
179 #include "if_iwmreg.h"
180 #include "if_iwmvar.h"
181 #include "if_iwm_debug.h"
182 #include "if_iwm_util.h"
183 #include "if_iwm_binding.h"
184 #include "if_iwm_phy_db.h"
185 #include "if_iwm_mac_ctxt.h"
186 #include "if_iwm_phy_ctxt.h"
187 #include "if_iwm_time_event.h"
188 #include "if_iwm_power.h"
189 #include "if_iwm_scan.h"
190 #include "if_iwm_pcie_trans.h"
191 #include "if_iwm_led.h"
/*
 * IEEE channel numbers the device NVM can enable, one entry per
 * possible channel, listed per band.  NVM channel-flag words are
 * indexed against this table.
 */
193 const uint8_t iwm_nvm_channels[] = {
194 /* 2.4 GHz */
195 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
196 /* 5 GHz */
197 36, 40, 44, 48, 52, 56, 60, 64,
198 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
199 149, 153, 157, 161, 165
/* Count of 2.4 GHz entries at the head of iwm_nvm_channels. */
201 #define IWM_NUM_2GHZ_CHANNELS 14
/* Compile-time guard: the channel table must fit the driver-wide limit. */
203 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
204 "IWM_NUM_CHANNELS is too small");
/*
 * Fixed rate table.  "rate" is the 802.11 rate in units of 500 kb/s
 * (so 2 == 1 Mb/s, 108 == 54 Mb/s, matching the PLCP macro names);
 * "plcp" is the PLCP signal value the firmware expects for that rate.
 * CCK entries come first, OFDM entries start at IWM_RIDX_OFDM.
 */
207 * XXX For now, there's simply a fixed set of rate table entries
208 * that are populated.
210 const struct iwm_rate {
211 uint8_t rate;
212 uint8_t plcp;
213 } iwm_rates[] = {
214 { 2, IWM_RATE_1M_PLCP },
215 { 4, IWM_RATE_2M_PLCP },
216 { 11, IWM_RATE_5M_PLCP },
217 { 22, IWM_RATE_11M_PLCP },
218 { 12, IWM_RATE_6M_PLCP },
219 { 18, IWM_RATE_9M_PLCP },
220 { 24, IWM_RATE_12M_PLCP },
221 { 36, IWM_RATE_18M_PLCP },
222 { 48, IWM_RATE_24M_PLCP },
223 { 72, IWM_RATE_36M_PLCP },
224 { 96, IWM_RATE_48M_PLCP },
225 { 108, IWM_RATE_54M_PLCP },
/* Index helpers: entries 0..3 are CCK, 4 onward are OFDM. */
227 #define IWM_RIDX_CCK 0
228 #define IWM_RIDX_OFDM 4
229 #define IWM_RIDX_MAX (nitems(iwm_rates)-1)
230 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
231 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
233 static int iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
234 static int iwm_firmware_store_section(struct iwm_softc *,
235 enum iwm_ucode_type,
236 const uint8_t *, size_t);
237 static int iwm_set_default_calib(struct iwm_softc *, const void *);
238 static void iwm_fw_info_free(struct iwm_fw_info *);
239 static int iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
240 static void iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
241 static int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
242 bus_size_t, bus_size_t);
243 static void iwm_dma_contig_free(struct iwm_dma_info *);
244 static int iwm_alloc_fwmem(struct iwm_softc *);
245 static void iwm_free_fwmem(struct iwm_softc *);
246 static int iwm_alloc_sched(struct iwm_softc *);
247 static void iwm_free_sched(struct iwm_softc *);
248 static int iwm_alloc_kw(struct iwm_softc *);
249 static void iwm_free_kw(struct iwm_softc *);
250 static int iwm_alloc_ict(struct iwm_softc *);
251 static void iwm_free_ict(struct iwm_softc *);
252 static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
253 static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
254 static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
255 static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
256 int);
257 static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
258 static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
259 static void iwm_enable_interrupts(struct iwm_softc *);
260 static void iwm_restore_interrupts(struct iwm_softc *);
261 static void iwm_disable_interrupts(struct iwm_softc *);
262 static void iwm_ict_reset(struct iwm_softc *);
263 static int iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
264 static void iwm_stop_device(struct iwm_softc *);
265 static void iwm_mvm_nic_config(struct iwm_softc *);
266 static int iwm_nic_rx_init(struct iwm_softc *);
267 static int iwm_nic_tx_init(struct iwm_softc *);
268 static int iwm_nic_init(struct iwm_softc *);
269 static void iwm_enable_txq(struct iwm_softc *, int, int);
270 static int iwm_post_alive(struct iwm_softc *);
271 static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
272 uint16_t, uint8_t *, uint16_t *);
273 static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
274 uint16_t *);
275 static uint32_t iwm_eeprom_channel_flags(uint16_t);
276 static void iwm_add_channel_band(struct iwm_softc *,
277 struct ieee80211_channel[], int, int *, int, int,
278 const uint8_t[]);
279 static void iwm_init_channel_map(struct ieee80211com *, int, int *,
280 struct ieee80211_channel[]);
281 static int iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
282 const uint16_t *, const uint16_t *, uint8_t,
283 uint8_t);
284 struct iwm_nvm_section;
285 static int iwm_parse_nvm_sections(struct iwm_softc *,
286 struct iwm_nvm_section *);
287 static int iwm_nvm_init(struct iwm_softc *);
288 static int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
289 const uint8_t *, uint32_t);
290 static int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
291 static int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
292 static int iwm_fw_alive(struct iwm_softc *, uint32_t);
293 static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
294 static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
295 static int iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
296 enum iwm_ucode_type);
297 static int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
298 static int iwm_rx_addbuf(struct iwm_softc *, int, int);
299 static int iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
300 static int iwm_mvm_get_signal_strength(struct iwm_softc *,
301 struct iwm_rx_phy_info *);
302 static void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
303 struct iwm_rx_packet *,
304 struct iwm_rx_data *);
305 static int iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
306 static void iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
307 struct iwm_rx_data *);
308 static int iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
309 struct iwm_rx_packet *,
310 struct iwm_node *);
311 static void iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
312 struct iwm_rx_data *);
313 static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
314 #if 0
315 static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
316 uint16_t);
317 #endif
318 static const struct iwm_rate *
319 iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
320 struct ieee80211_frame *, struct iwm_tx_cmd *);
321 static int iwm_tx(struct iwm_softc *, struct mbuf *,
322 struct ieee80211_node *, int);
323 static int iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
324 const struct ieee80211_bpf_params *);
325 static void iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
326 struct iwm_mvm_add_sta_cmd_v5 *);
327 static int iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
328 struct iwm_mvm_add_sta_cmd_v6 *,
329 int *);
330 static int iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
331 int);
332 static int iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
333 static int iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
334 static int iwm_mvm_add_int_sta_common(struct iwm_softc *,
335 struct iwm_int_sta *,
336 const uint8_t *, uint16_t, uint16_t);
337 static int iwm_mvm_add_aux_sta(struct iwm_softc *);
338 static int iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
339 static int iwm_auth(struct ieee80211vap *, struct iwm_softc *);
340 static int iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
341 static int iwm_release(struct iwm_softc *, struct iwm_node *);
342 static struct ieee80211_node *
343 iwm_node_alloc(struct ieee80211vap *,
344 const uint8_t[IEEE80211_ADDR_LEN]);
345 static void iwm_setrates(struct iwm_softc *, struct iwm_node *);
346 static int iwm_media_change(struct ifnet *);
347 static int iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
348 static void iwm_endscan_cb(void *, int);
349 static int iwm_init_hw(struct iwm_softc *);
350 static void iwm_init(struct iwm_softc *);
351 static void iwm_start(struct iwm_softc *);
352 static void iwm_stop(struct iwm_softc *);
353 static void iwm_watchdog(void *);
354 static void iwm_parent(struct ieee80211com *);
355 #ifdef IWM_DEBUG
356 static const char *
357 iwm_desc_lookup(uint32_t);
358 static void iwm_nic_error(struct iwm_softc *);
359 #endif
360 static void iwm_notif_intr(struct iwm_softc *);
361 static void iwm_intr(void *);
362 static int iwm_attach(device_t);
363 static void iwm_preinit(void *);
364 static int iwm_detach_local(struct iwm_softc *sc, int);
365 static void iwm_init_task(void *);
366 static void iwm_radiotap_attach(struct iwm_softc *);
367 static struct ieee80211vap *
368 iwm_vap_create(struct ieee80211com *,
369 const char [IFNAMSIZ], int,
370 enum ieee80211_opmode, int,
371 const uint8_t [IEEE80211_ADDR_LEN],
372 const uint8_t [IEEE80211_ADDR_LEN]);
373 static void iwm_vap_delete(struct ieee80211vap *);
374 static void iwm_scan_start(struct ieee80211com *);
375 static void iwm_scan_end(struct ieee80211com *);
376 static void iwm_update_mcast(struct ieee80211com *);
377 static void iwm_set_channel(struct ieee80211com *);
378 static void iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
379 static void iwm_scan_mindwell(struct ieee80211_scan_state *);
380 static int iwm_detach(device_t);
382 #if defined(__DragonFly__)
/* Tunable: allow MSI to be disabled via hw.iwm.msi.enable. */
383 static int iwm_msi_enable = 1;
385 TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
388 * This is a hack due to the wlan_serializer deadlocking sleepers.
390 int iwmsleep(void *chan, struct lock *lk, int flags, const char *wmesg, int to);
/*
 * Sleep on "chan" with lockmgr lock "lk".  If this thread currently
 * holds the wlan serializer it is dropped for the duration of the
 * sleep, since sleeping while serialized can deadlock other wlan
 * threads.  Returns the lksleep() error code.
 */
393 iwmsleep(void *chan, struct lock *lk, int flags, const char *wmesg, int to)
395 int error;
397 if (wlan_is_serialized()) {
398 wlan_serialize_exit();
399 kprintf("%s: have to release serializer for sleeping\n",
400 __func__);
401 error = lksleep(chan, lk, flags, wmesg, to);
/*
 * Reacquisition order: release lk, take the serializer, then
 * retake lk exclusively.  NOTE(review): presumably this keeps
 * the serializer-before-lock ordering used elsewhere -- verify
 * against the other call sites before changing.
 */
402 lockmgr(lk, LK_RELEASE);
403 wlan_serialize_enter();
404 lockmgr(lk, LK_EXCLUSIVE);
405 } else {
406 error = lksleep(chan, lk, flags, wmesg, to);
408 return error;
411 #endif
414 * Firmware parser.
417 static int
418 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
420 const struct iwm_fw_cscheme_list *l = (const void *)data;
422 if (dlen < sizeof(*l) ||
423 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
424 return EINVAL;
426 /* we don't actually store anything for now, always use s/w crypto */
428 return 0;
/*
 * Append one firmware section (from a SEC_RT/SEC_INIT/SEC_WOWLAN TLV)
 * to the per-ucode-type section list in sc->sc_fw.  The first 32 bits
 * of "data" are the device load offset; the remainder is the payload.
 * The payload is NOT copied -- fws_data points into the firmware image,
 * which must remain resident until the sections are consumed.
 * Returns 0, or EINVAL for a bad type, short section, or full table.
 */
431 static int
432 iwm_firmware_store_section(struct iwm_softc *sc,
433 enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
435 struct iwm_fw_sects *fws;
436 struct iwm_fw_onesect *fwone;
438 if (type >= IWM_UCODE_TYPE_MAX)
439 return EINVAL;
/* Need at least the 32-bit device load offset. */
440 if (dlen < sizeof(uint32_t))
441 return EINVAL;
443 fws = &sc->sc_fw.fw_sects[type];
444 if (fws->fw_count >= IWM_UCODE_SECT_MAX)
445 return EINVAL;
447 fwone = &fws->fw_sect[fws->fw_count];
449 /* first 32bit are device load offset */
/* memcpy because "data" may not be 4-byte aligned within the image. */
450 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
452 /* rest is data */
453 fwone->fws_data = data + sizeof(uint32_t);
454 fwone->fws_len = dlen - sizeof(uint32_t);
456 fws->fw_count++;
457 fws->fw_totlen += fwone->fws_len;
459 return 0;
/*
 * Wire layout of an IWM_UCODE_TLV_DEF_CALIB payload: the ucode type it
 * applies to (little-endian in the image) followed by the calibration
 * control words.  __packed because it is overlaid on the raw image.
 */
462 struct iwm_tlv_calib_data {
463 uint32_t ucode_type;
464 struct iwm_tlv_calib_ctrl calib;
465 } __packed;
/*
 * Record the default calibration triggers from a DEF_CALIB TLV into
 * sc->sc_default_calib[], indexed by ucode type.  Returns EINVAL when
 * the TLV names an out-of-range ucode type, else 0.
 */
467 static int
468 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
470 const struct iwm_tlv_calib_data *def_calib = data;
471 uint32_t ucode_type = le32toh(def_calib->ucode_type);
473 if (ucode_type >= IWM_UCODE_TYPE_MAX) {
474 device_printf(sc->sc_dev,
475 "Wrong ucode_type %u for default "
476 "calibration.\n", ucode_type);
477 return EINVAL;
/*
 * Triggers are stored without byte-swapping.  NOTE(review): presumably
 * they are handed back to the firmware verbatim -- confirm in the
 * consumers before adding le32toh() here.
 */
480 sc->sc_default_calib[ucode_type].flow_trigger =
481 def_calib->calib.flow_trigger;
482 sc->sc_default_calib[ucode_type].event_trigger =
483 def_calib->calib.event_trigger;
485 return 0;
488 static void
489 iwm_fw_info_free(struct iwm_fw_info *fw)
491 firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
492 fw->fw_fp = NULL;
493 /* don't touch fw->fw_status */
494 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
/*
 * Load the device firmware via firmware(9) and parse its TLV records
 * into sc->sc_fw.  Concurrent loaders are serialized through
 * fw->fw_status (sleeping on &sc->sc_fw); the softc lock is dropped
 * around firmware_get() since it may sleep.  Returns 0 or an errno;
 * on error the fw state is reset to IWM_FW_STATUS_NONE and any image
 * reference is released.
 */
497 static int
498 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
500 struct iwm_fw_info *fw = &sc->sc_fw;
501 const struct iwm_tlv_ucode_header *uhdr;
502 struct iwm_ucode_tlv tlv;
503 enum iwm_ucode_tlv_type tlv_type;
504 const struct firmware *fwp;
505 const uint8_t *data;
506 int error = 0;
507 size_t len;
/* Already loaded and not (re-)running INIT: nothing to do. */
509 if (fw->fw_status == IWM_FW_STATUS_DONE &&
510 ucode_type != IWM_UCODE_TYPE_INIT)
511 return 0;
/* Wait for an in-flight load by another thread to finish. */
513 while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
514 #if defined(__DragonFly__)
515 iwmsleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
516 #else
517 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
518 #endif
520 fw->fw_status = IWM_FW_STATUS_INPROGRESS;
522 if (fw->fw_fp != NULL)
523 iwm_fw_info_free(fw);
526 * Load firmware into driver memory.
527 * fw_fp will be set.
529 IWM_UNLOCK(sc);
530 fwp = firmware_get(sc->sc_fwname);
531 IWM_LOCK(sc);
532 if (fwp == NULL) {
/* NOTE(review): "error" is still 0 here, so the %d always prints 0. */
533 device_printf(sc->sc_dev,
534 "could not read firmware %s (error %d)\n",
535 sc->sc_fwname, error);
536 goto out;
538 fw->fw_fp = fwp;
541 * Parse firmware contents
544 uhdr = (const void *)fw->fw_fp->data;
/* A valid TLV image starts with a zero word followed by the magic. */
545 if (*(const uint32_t *)fw->fw_fp->data != 0
546 || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
547 device_printf(sc->sc_dev, "invalid firmware %s\n",
548 sc->sc_fwname);
549 error = EINVAL;
550 goto out;
553 sc->sc_fwver = le32toh(uhdr->ver);
554 data = uhdr->data;
555 len = fw->fw_fp->datasize - sizeof(*uhdr);
/* Walk the TLV records: a header, then a payload padded to 4 bytes. */
557 while (len >= sizeof(tlv)) {
558 size_t tlv_len;
559 const void *tlv_data;
561 memcpy(&tlv, data, sizeof(tlv));
562 tlv_len = le32toh(tlv.length);
563 tlv_type = le32toh(tlv.type);
565 len -= sizeof(tlv);
566 data += sizeof(tlv);
567 tlv_data = data;
569 if (len < tlv_len) {
570 device_printf(sc->sc_dev,
571 "firmware too short: %zu bytes\n",
572 len);
573 error = EINVAL;
574 goto parse_out;
577 switch ((int)tlv_type) {
578 case IWM_UCODE_TLV_PROBE_MAX_LEN:
579 if (tlv_len < sizeof(uint32_t)) {
580 device_printf(sc->sc_dev,
581 "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
582 __func__,
583 (int) tlv_len);
584 error = EINVAL;
585 goto parse_out;
587 sc->sc_capa_max_probe_len
588 = le32toh(*(const uint32_t *)tlv_data);
589 /* limit it to something sensible */
590 if (sc->sc_capa_max_probe_len > (1<<16)) {
591 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
592 "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
593 "ridiculous\n", __func__);
594 error = EINVAL;
595 goto parse_out;
597 break;
598 case IWM_UCODE_TLV_PAN:
/* PAN is a zero-length marker TLV; any payload is malformed. */
599 if (tlv_len) {
600 device_printf(sc->sc_dev,
601 "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
602 __func__,
603 (int) tlv_len);
604 error = EINVAL;
605 goto parse_out;
607 sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
608 break;
609 case IWM_UCODE_TLV_FLAGS:
610 if (tlv_len < sizeof(uint32_t)) {
611 device_printf(sc->sc_dev,
612 "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
613 __func__,
614 (int) tlv_len);
615 error = EINVAL;
616 goto parse_out;
619 * Apparently there can be many flags, but Linux driver
620 * parses only the first one, and so do we.
622 * XXX: why does this override IWM_UCODE_TLV_PAN?
623 * Intentional or a bug? Observations from
624 * current firmware file:
625 * 1) TLV_PAN is parsed first
626 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
627 * ==> this resets TLV_PAN to itself... hnnnk
629 sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
630 break;
631 case IWM_UCODE_TLV_CSCHEME:
632 if ((error = iwm_store_cscheme(sc,
633 tlv_data, tlv_len)) != 0) {
634 device_printf(sc->sc_dev,
635 "%s: iwm_store_cscheme(): returned %d\n",
636 __func__,
637 error);
638 goto parse_out;
640 break;
641 case IWM_UCODE_TLV_NUM_OF_CPU:
642 if (tlv_len != sizeof(uint32_t)) {
643 device_printf(sc->sc_dev,
644 "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n",
645 __func__,
646 (int) tlv_len);
647 error = EINVAL;
648 goto parse_out;
650 if (le32toh(*(const uint32_t*)tlv_data) != 1) {
651 device_printf(sc->sc_dev,
652 "%s: driver supports "
653 "only TLV_NUM_OF_CPU == 1",
654 __func__);
655 error = EINVAL;
656 goto parse_out;
658 break;
/* The three SEC_* cases stash section pointers per ucode type. */
659 case IWM_UCODE_TLV_SEC_RT:
660 if ((error = iwm_firmware_store_section(sc,
661 IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
662 device_printf(sc->sc_dev,
663 "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
664 __func__,
665 error);
666 goto parse_out;
668 break;
669 case IWM_UCODE_TLV_SEC_INIT:
670 if ((error = iwm_firmware_store_section(sc,
671 IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
672 device_printf(sc->sc_dev,
673 "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
674 __func__,
675 error);
676 goto parse_out;
678 break;
679 case IWM_UCODE_TLV_SEC_WOWLAN:
680 if ((error = iwm_firmware_store_section(sc,
681 IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
682 device_printf(sc->sc_dev,
683 "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
684 __func__,
685 error);
686 goto parse_out;
688 break;
689 case IWM_UCODE_TLV_DEF_CALIB:
690 if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
691 device_printf(sc->sc_dev,
692 "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
693 __func__,
694 (int) tlv_len,
695 (int) sizeof(struct iwm_tlv_calib_data));
696 error = EINVAL;
697 goto parse_out;
699 if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
700 device_printf(sc->sc_dev,
701 "%s: iwm_set_default_calib() failed: %d\n",
702 __func__,
703 error);
704 goto parse_out;
706 break;
707 case IWM_UCODE_TLV_PHY_SKU:
708 if (tlv_len != sizeof(uint32_t)) {
709 error = EINVAL;
710 device_printf(sc->sc_dev,
711 "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
712 __func__,
713 (int) tlv_len);
714 goto parse_out;
716 sc->sc_fw_phy_config =
717 le32toh(*(const uint32_t *)tlv_data);
718 break;
720 case IWM_UCODE_TLV_API_CHANGES_SET:
721 case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
722 /* ignore, not used by current driver */
723 break;
725 default:
726 device_printf(sc->sc_dev,
727 "%s: unknown firmware section %d, abort\n",
728 __func__, tlv_type);
729 error = EINVAL;
730 goto parse_out;
/* Advance past the payload; records are padded to 4-byte multiples. */
733 len -= roundup(tlv_len, 4);
734 data += roundup(tlv_len, 4);
737 KASSERT(error == 0, ("unhandled error"));
739 parse_out:
740 if (error) {
741 device_printf(sc->sc_dev, "firmware parse error %d, "
742 "section type %d\n", error, tlv_type);
745 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
746 device_printf(sc->sc_dev,
747 "device uses unsupported power ops\n");
748 error = ENOTSUP;
751 out:
/* Publish the final state and wake any waiters sleeping above. */
752 if (error) {
753 fw->fw_status = IWM_FW_STATUS_NONE;
754 if (fw->fw_fp != NULL)
755 iwm_fw_info_free(fw);
756 } else
757 fw->fw_status = IWM_FW_STATUS_DONE;
758 wakeup(&sc->sc_fw);
760 return error;
764 * DMA resource routines
767 static void
768 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
770 if (error != 0)
771 return;
772 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
773 *(bus_addr_t *)arg = segs[0].ds_addr;
/*
 * Allocate a physically contiguous, coherent DMA region of "size"
 * bytes with the given alignment and record tag/map/vaddr/paddr in
 * "dma".  The memory is zeroed and restricted to 32-bit bus
 * addresses.  On failure everything allocated so far is released via
 * iwm_dma_contig_free().  Returns 0 or a bus_dma errno.
 */
776 static int
777 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
778 bus_size_t size, bus_size_t alignment)
780 int error;
782 dma->tag = NULL;
783 dma->size = size;
/* The two branches differ only in the platform's tag-create signature. */
785 #if defined(__DragonFly__)
786 error = bus_dma_tag_create(tag, alignment,
788 BUS_SPACE_MAXADDR_32BIT,
789 BUS_SPACE_MAXADDR,
790 NULL, NULL,
791 size, 1, size,
792 BUS_DMA_NOWAIT, &dma->tag);
793 #else
794 error = bus_dma_tag_create(tag, alignment,
795 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
796 1, size, 0, NULL, NULL, &dma->tag);
797 #endif
798 if (error != 0)
799 goto fail;
801 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
802 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
803 if (error != 0)
804 goto fail;
806 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
807 iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
808 if (error != 0)
809 goto fail;
/* Push the zeroed contents out before the device ever reads it. */
811 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
813 return 0;
815 fail:
816 iwm_dma_contig_free(dma);
818 return error;
/*
 * Tear down a region from iwm_dma_contig_alloc().  Safe on partially
 * constructed state (it doubles as the failure path): each stage is
 * guarded.  Order matters -- sync/unload/free the memory before
 * destroying the map, and destroy the tag last.
 */
821 static void
822 iwm_dma_contig_free(struct iwm_dma_info *dma)
824 if (dma->map != NULL) {
825 if (dma->vaddr != NULL) {
826 bus_dmamap_sync(dma->tag, dma->map,
827 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
828 bus_dmamap_unload(dma->tag, dma->map);
829 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
830 dma->vaddr = NULL;
832 bus_dmamap_destroy(dma->tag, dma->map);
833 dma->map = NULL;
835 if (dma->tag != NULL) {
836 bus_dma_tag_destroy(dma->tag);
837 dma->tag = NULL;
842 /* fwmem is used to load firmware onto the card */
843 static int
844 iwm_alloc_fwmem(struct iwm_softc *sc)
846 /* Must be aligned on a 16-byte boundary. */
847 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
848 sc->sc_fwdmasegsz, 16);
851 static void
852 iwm_free_fwmem(struct iwm_softc *sc)
854 iwm_dma_contig_free(&sc->fw_dma);
857 /* tx scheduler rings. not used? */
858 static int
859 iwm_alloc_sched(struct iwm_softc *sc)
861 int rv;
863 /* TX scheduler rings must be aligned on a 1KB boundary. */
864 rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
865 nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
866 return rv;
869 static void
870 iwm_free_sched(struct iwm_softc *sc)
872 iwm_dma_contig_free(&sc->sched_dma);
875 /* keep-warm page is used internally by the card. see iwl-fh.h for more info */
876 static int
877 iwm_alloc_kw(struct iwm_softc *sc)
879 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
882 static void
883 iwm_free_kw(struct iwm_softc *sc)
885 iwm_dma_contig_free(&sc->kw_dma);
888 /* interrupt cause table */
889 static int
890 iwm_alloc_ict(struct iwm_softc *sc)
892 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
893 IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
896 static void
897 iwm_free_ict(struct iwm_softc *sc)
899 iwm_dma_contig_free(&sc->ict_dma);
/*
 * Allocate everything an RX ring needs: the descriptor ring, the RX
 * status area, a per-buffer DMA tag, and the initial set of RX mbufs
 * (via iwm_rx_addbuf()).  On any failure the partially constructed
 * ring is torn down with iwm_free_rx_ring().  Returns 0 or errno.
 */
902 static int
903 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
905 bus_size_t size;
906 int i, error;
908 ring->cur = 0;
910 /* Allocate RX descriptors (256-byte aligned). */
/* One 32-bit word per ring slot. */
911 size = IWM_RX_RING_COUNT * sizeof(uint32_t);
912 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
913 if (error != 0) {
914 device_printf(sc->sc_dev,
915 "could not allocate RX ring DMA memory\n");
916 goto fail;
918 ring->desc = ring->desc_dma.vaddr;
920 /* Allocate RX status area (16-byte aligned). */
921 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
922 sizeof(*ring->stat), 16);
923 if (error != 0) {
924 device_printf(sc->sc_dev,
925 "could not allocate RX status DMA memory\n");
926 goto fail;
928 ring->stat = ring->stat_dma.vaddr;
930 /* Create RX buffer DMA tag. */
931 #if defined(__DragonFly__)
932 error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
934 BUS_SPACE_MAXADDR_32BIT,
935 BUS_SPACE_MAXADDR,
936 NULL, NULL,
937 IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
938 BUS_DMA_NOWAIT, &ring->data_dmat);
939 #else
940 error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
941 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
942 IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
943 #endif
944 if (error != 0) {
945 device_printf(sc->sc_dev,
946 "%s: could not create RX buf DMA tag, error %d\n",
947 __func__, error);
948 goto fail;
952 * Allocate and map RX buffers.
954 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
955 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
956 goto fail;
959 return 0;
961 fail: iwm_free_rx_ring(sc, ring);
962 return error;
/*
 * Quiesce the RX DMA engine and reset the driver's ring index, then
 * clear the shared-memory status area so the hardware's write pointer
 * and the driver's view restart from the same place.
 */
965 static void
966 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
968 /* XXX conditional nic locks are stupid */
969 /* XXX print out if we can't lock the NIC? */
970 if (iwm_nic_lock(sc)) {
971 /* XXX handle if RX stop doesn't finish? */
972 (void) iwm_pcie_rx_stop(sc);
973 iwm_nic_unlock(sc);
975 /* Reset the ring state */
976 ring->cur = 0;
979 * The hw rx ring index in shared memory must also be cleared,
980 * otherwise the discrepancy can cause reprocessing chaos.
/* NOTE(review): clears sc->rxq.stat, not ring->stat -- equivalent only
 * while there is a single RX ring; verify before adding more rings. */
982 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
/*
 * Free all DMA resources of an RX ring: descriptor ring, status area,
 * each RX mbuf with its map, and finally the per-buffer DMA tag.
 * Safe on partially constructed rings (used as the failure path of
 * iwm_alloc_rx_ring()); each teardown step is guarded.
 */
985 static void
986 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
988 int i;
990 iwm_dma_contig_free(&ring->desc_dma);
991 iwm_dma_contig_free(&ring->stat_dma);
993 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
994 struct iwm_rx_data *data = &ring->data[i];
/* Sync for the CPU before unloading, then release mbuf and map. */
996 if (data->m != NULL) {
997 bus_dmamap_sync(ring->data_dmat, data->map,
998 BUS_DMASYNC_POSTREAD);
999 bus_dmamap_unload(ring->data_dmat, data->map);
1000 m_freem(data->m);
1001 data->m = NULL;
1003 if (data->map != NULL) {
1004 bus_dmamap_destroy(ring->data_dmat, data->map);
1005 data->map = NULL;
/* Tag goes last, after every map created from it is destroyed. */
1008 if (ring->data_dmat != NULL) {
1009 bus_dma_tag_destroy(ring->data_dmat);
1010 ring->data_dmat = NULL;
/*
 * Allocate one TX ring: the TFD descriptor array, and - for the EDCA and
 * command queues only (qid <= IWM_MVM_CMD_QUEUE) - the per-slot command
 * buffers plus a DMA map for each TX slot.  On any failure the partially
 * built ring is torn down via iwm_free_tx_ring() and the error returned.
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
	    0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    MCLBYTES, IWM_MAX_SCATTER - 2, MCLBYTES,
	    BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    IWM_MAX_SCATTER - 2, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/*
	 * Precompute per-slot physical addresses into the command area;
	 * scratch_paddr points at the tx_cmd scratch field inside the
	 * slot's device command.
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
/*
 * Drop all pending mbufs on a TX ring, clear its descriptors, and reset
 * the software ring state (queued count, cursor, qfullmsk bit) so the
 * ring can be reused without reallocating anything.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	/* This queue can no longer be "full". */
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}
1118 static void
1119 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1121 int i;
1123 iwm_dma_contig_free(&ring->desc_dma);
1124 iwm_dma_contig_free(&ring->cmd_dma);
1126 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1127 struct iwm_tx_data *data = &ring->data[i];
1129 if (data->m != NULL) {
1130 bus_dmamap_sync(ring->data_dmat, data->map,
1131 BUS_DMASYNC_POSTWRITE);
1132 bus_dmamap_unload(ring->data_dmat, data->map);
1133 m_freem(data->m);
1134 data->m = NULL;
1136 if (data->map != NULL) {
1137 bus_dmamap_destroy(ring->data_dmat, data->map);
1138 data->map = NULL;
1141 if (ring->data_dmat != NULL) {
1142 bus_dma_tag_destroy(ring->data_dmat);
1143 ring->data_dmat = NULL;
1148 * High-level hardware frobbing routines
/*
 * Enable the full default interrupt set and remember the mask so
 * iwm_restore_interrupts() can re-apply it later.
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
/* Re-apply the last interrupt mask saved by iwm_enable_interrupts(). */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
/* Mask all device interrupts and acknowledge any that are pending. */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
/*
 * Clear the interrupt cause table (ICT), point the device at it, switch
 * the driver into ICT interrupt mode, and re-enable interrupts.
 * Interrupts are disabled for the duration of the reset.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1199 * Since this .. hard-resets things, it's time to actually
1200 * mark the first vap (if any) as having no mac context.
1201 * It's annoying, but since the driver is potentially being
1202 * stop/start'ed whilst active (thanks openbsd port!) we
1203 * have to correctly track this.
/*
 * Full ordered device shutdown: mask interrupts, mark the first vap as
 * not-uploaded, stop the TX scheduler and all FH DMA channels, reset the
 * RX and TX rings, power down the busmaster DMA clocks, stop the APM,
 * and reset the on-board processor - leaving only the RF-kill interrupt
 * armed so a switch change is still noticed.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, ntries;
	int qid;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Poll up to 200 * 20us = 4ms for channel idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
/*
 * Program the HW interface config register from the firmware's PHY
 * configuration (radio type/step/dash) and the MAC revision bits, and
 * apply the early-PCIe-power-off workaround.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
	    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
	    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
/*
 * Program the RX DMA engine: stop it, point it at the RX descriptor ring
 * and status area, then re-enable it with the driver's RB configuration.
 * Returns EBUSY if the NIC cannot be locked.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable RX. */
	/*
	 * Note: Linux driver also sets this:
	 *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	 *
	 * It causes weird behavior.  YMMV.
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
/*
 * Program the TX side of the device: deactivate the scheduler, install
 * the "keep warm" page address, and load the physical address of every
 * TX ring's descriptor array.  Returns EBUSY if the NIC cannot be locked.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}
	iwm_nic_unlock(sc);

	return 0;
}
/*
 * Bring the NIC up far enough to load firmware: APM init, power
 * settings, HW config, then RX and TX DMA initialization.  Also enables
 * shadow registers.  Returns the first error encountered.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
/*
 * Hardware TX FIFO numbers.  BK/BE/VI/VO occupy 0-3; FIFO 5 is used for
 * multicast (4 is skipped here - presumably reserved by firmware, TODO
 * confirm against the firmware API headers).
 */
enum iwm_mvm_tx_fifo {
	IWM_MVM_TX_FIFO_BK = 0,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_MCAST = 5,
};
/*
 * Map an access-category index to its hardware TX FIFO.  Note the order
 * is VO, VI, BE, BK - the reverse of the FIFO numbering above.
 */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
/*
 * Activate TX queue 'qid' on the scheduler and bind it to hardware FIFO
 * 'fifo': deactivate, reset read/write pointers, clear the queue's
 * scheduler context in SRAM, set window size/frame limit, then activate.
 * Silently returns (with an error printf) if the NIC cannot be locked.
 */
static void
iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return; /* XXX return EBUSY */
	}

	/* unactivate before configuration */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

	if (qid != IWM_MVM_CMD_QUEUE) {
		/* Non-command queues use the chain-building mode. */
		iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
	}

	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

	/* Reset write and read pointers to slot 0. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

	iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
	/* Set scheduler window size and frame limit. */
	iwm_write_mem32(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
	    sizeof(uint32_t),
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Activate the queue, bound to the requested FIFO. */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWM_SCD_QUEUE_STTS_REG_MSK);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);
}
/*
 * Post-"alive" firmware bring-up: verify the scheduler SRAM base, reset
 * the ICT table, zero the scheduler context in SRAM, install the
 * scheduler DMA base, enable the command queue and all FH TX DMA
 * channels, and re-enable L1-Active.  Returns EBUSY if the NIC cannot
 * be locked, EINVAL on a scheduler address mismatch, or any write error.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int error, chnl;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch",
		    __func__);
		error = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	error = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (error)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* enable command channel */
	iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
	iwm_nic_unlock(sc);
	return error;
}
1570 * NVM read access and content parsing. We do not support
1571 * external NVM or writing NVM.
1572 * iwlwifi/mvm/nvm.c
/* list of NVM sections we are allowed/need to read */
const int nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
};

/* Default NVM size to read (per chunk; a section may span many chunks) */
#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
#define IWM_MAX_NVM_SECTION_SIZE	7000

/* op_code values for struct iwm_nvm_access_cmd */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0
1590 static int
1591 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1592 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1594 offset = 0;
1595 struct iwm_nvm_access_cmd nvm_access_cmd = {
1596 .offset = htole16(offset),
1597 .length = htole16(length),
1598 .type = htole16(section),
1599 .op_code = IWM_NVM_READ_OPCODE,
1601 struct iwm_nvm_access_resp *nvm_resp;
1602 struct iwm_rx_packet *pkt;
1603 struct iwm_host_cmd cmd = {
1604 .id = IWM_NVM_ACCESS_CMD,
1605 .flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1606 IWM_CMD_SEND_IN_RFKILL,
1607 .data = { &nvm_access_cmd, },
1609 int ret, bytes_read, offset_read;
1610 uint8_t *resp_data;
1612 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1614 ret = iwm_send_cmd(sc, &cmd);
1615 if (ret)
1616 return ret;
1618 pkt = cmd.resp_pkt;
1619 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1620 device_printf(sc->sc_dev,
1621 "%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
1622 __func__, pkt->hdr.flags);
1623 ret = EIO;
1624 goto exit;
1627 /* Extract NVM response */
1628 nvm_resp = (void *)pkt->data;
1630 ret = le16toh(nvm_resp->status);
1631 bytes_read = le16toh(nvm_resp->length);
1632 offset_read = le16toh(nvm_resp->offset);
1633 resp_data = nvm_resp->data;
1634 if (ret) {
1635 device_printf(sc->sc_dev,
1636 "%s: NVM access command failed with status %d\n",
1637 __func__, ret);
1638 ret = EINVAL;
1639 goto exit;
1642 if (offset_read != offset) {
1643 device_printf(sc->sc_dev,
1644 "%s: NVM ACCESS response with invalid offset %d\n",
1645 __func__, offset_read);
1646 ret = EINVAL;
1647 goto exit;
1650 memcpy(data + offset, resp_data, bytes_read);
1651 *len = bytes_read;
1653 exit:
1654 iwm_free_resp(sc, &cmd);
1655 return ret;
1659 * Reads an NVM section completely.
1660 * NICs prior to 7000 family doesn't have a real NVM, but just read
1661 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1662 * by uCode, we need to manually check in this case that we don't
1663 * overflow and try to read more than the EEPROM size.
1664 * For 7000 family NICs, we supply the maximal size we can read, and
1665 * the uCode fills the response with as much data as we can,
1666 * without overflowing, so no check is needed.
/*
 * Read an entire NVM section into 'data' in chunk-sized pieces; *len
 * accumulates the total number of bytes read.  The loop ends on the
 * first short chunk (firmware returned less than requested).
 *
 * NOTE(review): there is no bound check against the caller's buffer -
 * this assumes every section fits in the IWM_OTP_LOW_IMAGE_SIZE buffer
 * allocated by iwm_nvm_init(); verify for new NVM layouts.
 */
static int
iwm_nvm_read_section(struct iwm_softc *sc,
	uint16_t section, uint8_t *data, uint16_t *len)
{
	uint16_t length, seglen;
	int error;

	/* Set nvm section read length */
	length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
	*len = 0;

	/* Read the NVM until exhausted (reading less than requested) */
	while (seglen == length) {
		error = iwm_nvm_read_chunk(sc,
		    section, *len, length, data, &seglen);
		if (error) {
			device_printf(sc->sc_dev,
			    "Cannot read NVM from section "
			    "%d offset %d, length %d\n",
			    section, *len, length);
			return error;
		}
		*len += seglen;
	}

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "NVM section %d read completed\n", section);
	return 0;
}
1699 * BEGIN IWM_NVM_PARSE
/* NVM offsets (in words) definitions */
enum wkp_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	/* Channel flags live at absolute word 0x1E0, relative to SW base. */
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3),
};
/*
 * radio config bits (actual values from NVM definition)
 * Arguments are fully parenthesized so expression arguments
 * (e.g. `a | b`) bind correctly.
 */
#define IWM_NVM_RF_CFG_DASH_MSK(x)	((x) & 0x3)		/* bits 0-1 */
#define IWM_NVM_RF_CFG_STEP_MSK(x)	(((x) >> 2) & 0x3)	/* bits 2-3 */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)	(((x) >> 4) & 0x3)	/* bits 4-5 */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)	(((x) >> 6) & 0x3)	/* bits 6-7 */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x)	(((x) >> 8) & 0xF)	/* bits 8-11 */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x)	(((x) >> 12) & 0xF)	/* bits 12-15 */

/* Default TX power limit (dBm) used when the NVM provides none. */
#define DEFAULT_MAX_TX_POWER	16
1739 * enum iwm_nvm_channel_flags - channel flags in NVM
1740 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1741 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1742 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1743 * @IWM_NVM_CHANNEL_RADAR: radar detection required
1744 * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
1745 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1746 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1747 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1748 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1749 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),
	IWM_NVM_CHANNEL_IBSS = (1 << 1),
	/* bit 2 is skipped - presumably reserved in the NVM layout,
	 * TODO confirm against iwl-nvm-parse.c */
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
	IWM_NVM_CHANNEL_RADAR = (1 << 4),
	IWM_NVM_CHANNEL_DFS = (1 << 7),
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
1764 * Translate EEPROM flags to net80211.
1766 static uint32_t
1767 iwm_eeprom_channel_flags(uint16_t ch_flags)
1769 uint32_t nflags;
1771 nflags = 0;
1772 if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1773 nflags |= IEEE80211_CHAN_PASSIVE;
1774 if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1775 nflags |= IEEE80211_CHAN_NOADHOC;
1776 if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1777 nflags |= IEEE80211_CHAN_DFS;
1778 /* Just in case. */
1779 nflags |= IEEE80211_CHAN_NOADHOC;
1782 return (nflags);
/*
 * Add the NVM channels in [ch_idx, ch_num) to the net80211 channel list,
 * skipping channels whose VALID flag is clear.  Stops early if
 * ieee80211_add_channel() reports the channel array is full.
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
	int maxchans, int *nchans, int ch_idx, int ch_num, const uint8_t bands[])
{
	const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
	uint32_t nflags;
	uint16_t ch_flags;
	uint8_t ieee;
	int error;

	for (; ch_idx < ch_num; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		ieee = iwm_nvm_channels[ch_idx];

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
			    ieee, ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4");
			continue;
		}

		nflags = iwm_eeprom_channel_flags(ch_flags);
		error = ieee80211_add_channel(chans, maxchans, nchans,
		    ieee, 0, 0, nflags, bands);
		if (error != 0)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
		    "Ch. %d Flags %x [%sGHz] - Added\n",
		    ieee, ch_flags,
		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
		    "5.2" : "2.4");
	}
}
/*
 * net80211 ic_getradiocaps callback: populate the channel list from the
 * NVM - 2 GHz 11b/g channels, channel 14 as 11b-only, and the 5 GHz
 * 11a channels when the SKU enables the 5 GHz band.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];

	memset(bands, 0, sizeof(bands));
	/* 1-13: 11b/g channels. */
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
	    IWM_NUM_2GHZ_CHANNELS - 1, bands);

	/* 14: 11b channel only. */
	clrbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans,
	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

	if (data->sku_cap_band_52GHz_enable) {
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		iwm_add_channel_band(sc, chans, maxchans, nchans,
		    IWM_NUM_2GHZ_CHANNELS, nitems(iwm_nvm_channels), bands);
	}
}
/*
 * Decode the raw HW/SW/calibration NVM section words into sc->sc_nvm:
 * version, radio config, SKU capabilities, antenna masks, crystal
 * calibration, MAC address (byte-swapped per 16-bit word), and the
 * per-channel flag array.  Returns EINVAL if the antenna masks are zero.
 *
 * NOTE(review): nvm_calib is dereferenced without a NULL check - the
 * caller must guarantee the calibration section was read successfully.
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc,
	const uint16_t *nvm_hw, const uint16_t *nvm_sw,
	const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[IEEE80211_ADDR_LEN];
	uint16_t radio_cfg, sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);

	sku = le16_to_cpup(nvm_sw + IWM_SKU);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n is force-disabled here regardless of the SKU bit. */
	data->sku_cap_11n_enable = 0;

	if (!data->valid_tx_ant || !data->valid_rx_ant) {
		device_printf(sc->sc_dev,
		    "%s: invalid antennas (0x%x, 0x%x)\n",
		    __func__, data->valid_tx_ant,
		    data->valid_rx_ant);
		return EINVAL;
	}

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
	data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);

	/* The byte order is little endian 16 bit, meaning 214365 */
	IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
	data->hw_addr[0] = hw_addr[1];
	data->hw_addr[1] = hw_addr[0];
	data->hw_addr[2] = hw_addr[3];
	data->hw_addr[3] = hw_addr[2];
	data->hw_addr[4] = hw_addr[5];
	data->hw_addr[5] = hw_addr[4];

	memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
	    sizeof(data->nvm_ch_flags));
	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
1908 * END NVM PARSE
/* One NVM section image as collected by iwm_nvm_init(). */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in 'data' */
	const uint8_t *data;	/* heap copy of the section contents */
};
1916 static int
1917 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
1919 const uint16_t *hw, *sw, *calib;
1921 /* Checking for required sections */
1922 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
1923 !sections[IWM_NVM_SECTION_TYPE_HW].data) {
1924 device_printf(sc->sc_dev,
1925 "%s: Can't parse empty NVM sections\n",
1926 __func__);
1927 return ENOENT;
1930 hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
1931 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
1932 calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
1933 return iwm_parse_nvm_data(sc, hw, sw, calib,
1934 IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
1937 static int
1938 iwm_nvm_init(struct iwm_softc *sc)
1940 struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
1941 int i, section, error;
1942 uint16_t len;
1943 uint8_t *nvm_buffer, *temp;
1945 /* Read From FW NVM */
1946 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1947 "%s: Read NVM\n",
1948 __func__);
1950 /* TODO: find correct NVM max size for a section */
1951 nvm_buffer = kmalloc(IWM_OTP_LOW_IMAGE_SIZE, M_DEVBUF, M_INTWAIT);
1952 if (nvm_buffer == NULL)
1953 return (ENOMEM);
1954 for (i = 0; i < nitems(nvm_to_read); i++) {
1955 section = nvm_to_read[i];
1956 KASSERT(section <= nitems(nvm_sections),
1957 ("too many sections"));
1959 error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
1960 if (error)
1961 break;
1963 temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
1964 if (temp == NULL) {
1965 error = ENOMEM;
1966 break;
1968 memcpy(temp, nvm_buffer, len);
1969 nvm_sections[section].data = temp;
1970 nvm_sections[section].length = len;
1972 kfree(nvm_buffer, M_DEVBUF);
1973 if (error)
1974 return error;
1976 return iwm_parse_nvm_sections(sc, nvm_sections);
1980 * Firmware loading gunk. This is kind of a weird hybrid between the
1981 * iwn driver and the Linux iwlwifi driver.
/*
 * DMA one firmware section to device SRAM at dst_addr via the service
 * channel, then sleep until the "chunk done" interrupt fires (the
 * interrupt handler sets sc_fw_chunk_done and wakes on &sc->sc_fw).
 * Returns EBUSY if the NIC cannot be locked, or the msleep error
 * (e.g. EWOULDBLOCK after the 1s timeout).
 */
static int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
	const uint8_t *section, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	int error;

	/* Copy firmware section into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, byte_cnt);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	sc->sc_fw_chunk_done = 0;

	/* Pause the channel, program source/destination, then enable. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	    << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait 1s for this segment to load */
	error = 0;
	while (!sc->sc_fw_chunk_done) {
#if defined(__DragonFly__)
		error = iwmsleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", hz);
#else
		error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
#endif
		if (error)
			break;
	}

	return error;
}
/*
 * Load every section of the requested ucode image into the device, then
 * release the CPU from reset and wait up to 10 * hz/10 ticks for the
 * "alive" interrupt (which sets sc_uc.uc_intr and wakes &sc->sc_uc).
 * Returns the first chunk-load error, or the final sleep status.
 */
static int
iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_sects *fws;
	int error, i, w;
	const void *data;
	uint32_t dlen;
	uint32_t offset;

	sc->sc_uc.uc_intr = 0;

	fws = &sc->sc_fw.fw_sects[ucode_type];
	for (i = 0; i < fws->fw_count; i++) {
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;
		IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
		    "LOAD FIRMWARE type %d offset %u len %d\n",
		    ucode_type, offset, dlen);
		error = iwm_firmware_load_chunk(sc, offset, data, dlen);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: chunk %u of %u returned error %02d\n",
			    __func__, i, fws->fw_count, error);
			return error;
		}
	}

	/* wait for the firmware to load */
	IWM_WRITE(sc, IWM_CSR_RESET, 0);

	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
#if defined(__DragonFly__)
		error = iwmsleep(&sc->sc_uc, &sc->sc_lk, 0, "iwmuc", hz/10);
#else
		error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
#endif
	}

	return error;
}
/*
 * Initialize the NIC, clear the rfkill handshake bits, enable host
 * interrupts, and load the requested ucode image into the hardware.
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int error;

	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	if ((error = iwm_nic_init(sc)) != 0) {
		device_printf(sc->sc_dev, "unable to init nic\n");
		return error;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more? just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwm_load_firmware(sc, ucode_type);
}
/*
 * Called once the firmware has reported alive.  The sched_base argument
 * is currently unused here; iwm_post_alive() reads sc->sched_base
 * directly.
 */
static int
iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
{
	return iwm_post_alive(sc);
}
2113 static int
2114 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2116 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2117 .valid = htole32(valid_tx_ant),
2120 return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2121 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
/*
 * Send the PHY configuration and the default calibration triggers for
 * the currently running ucode image to the firmware (synchronously).
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->sc_uc_current;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2143 static int
2144 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2145 enum iwm_ucode_type ucode_type)
2147 enum iwm_ucode_type old_type = sc->sc_uc_current;
2148 int error;
2150 if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2151 kprintf("iwm_read_firmweare: failed %d\n",
2152 error);
2153 return error;
2156 sc->sc_uc_current = ucode_type;
2157 error = iwm_start_fw(sc, ucode_type);
2158 if (error) {
2159 kprintf("iwm_start_fw: failed %d\n", error);
2160 sc->sc_uc_current = old_type;
2161 return error;
2164 error = iwm_fw_alive(sc, sc->sched_base);
2165 if (error) {
2166 kprintf("iwm_fw_alive: failed %d\n", error);
2168 return error;
/*
 * mvm misc bits
 */
/*
 * Boot the INIT ucode image and run its initialization sequence.
 *
 * If justnvm is set, only the NVM (MAC address, capabilities) is read
 * and the scan command buffer is allocated; otherwise the antenna and
 * PHY configuration commands are sent and we sleep until the firmware
 * posts its init-complete notification.  Returns 0 or an errno; EPERM
 * if the hardware rfkill switch is on.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int error;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/* Cleared here; set by the init-complete notification handler. */
	sc->sc_init_complete = 0;
	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
	    IWM_UCODE_TYPE_INIT)) != 0)
		return error;

	if (justnvm) {
		if ((error = iwm_nvm_init(sc)) != 0) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			return error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);

		/* Scan command must hold probe request + all channels. */
		sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
		    + sc->sc_capa_max_probe_len
		    + IWM_MAX_NUM_SCAN_CHANNELS
		    * sizeof(struct iwm_scan_channel);
		sc->sc_scan_cmd = kmalloc(sc->sc_scan_cmd_len, M_DEVBUF,
		    M_INTWAIT);
		if (sc->sc_scan_cmd == NULL)
			return (ENOMEM);

		return 0;
	}

	/* Send TX valid antennas before triggering calibrations */
	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0) {
		kprintf("iwm_send_tx_ant_cfg: failed %d\n", error);
		return error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
		device_printf(sc->sc_dev,
		    "%s: failed to run internal calibration: %d\n",
		    __func__, error);
		return error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete) {
#if defined(__DragonFly__)
		error = iwmsleep(&sc->sc_init_complete, &sc->sc_lk,
		    0, "iwminit", 2*hz);
#else
		error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
		    0, "iwminit", 2*hz);
#endif
		if (error) {
			kprintf("init complete failed %d\n",
			    sc->sc_init_complete);
			break;
		}
	}

	return error;
}
/*
 * receive side
 */
2254 /* (re)stock rx ring, called at init-time and at runtime */
2255 static int
2256 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2258 struct iwm_rx_ring *ring = &sc->rxq;
2259 struct iwm_rx_data *data = &ring->data[idx];
2260 struct mbuf *m;
2261 int error;
2262 bus_addr_t paddr;
2264 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2265 if (m == NULL)
2266 return ENOBUFS;
2268 if (data->m != NULL)
2269 bus_dmamap_unload(ring->data_dmat, data->map);
2271 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2272 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2273 if (error != 0) {
2274 device_printf(sc->sc_dev,
2275 "%s: could not create RX buf DMA map, error %d\n",
2276 __func__, error);
2277 goto fail;
2279 data->m = m;
2280 error = bus_dmamap_load(ring->data_dmat, data->map,
2281 mtod(data->m, void *), IWM_RBUF_SIZE, iwm_dma_map_addr,
2282 &paddr, BUS_DMA_NOWAIT);
2283 if (error != 0 && error != EFBIG) {
2284 device_printf(sc->sc_dev,
2285 "%s: can't not map mbuf, error %d\n", __func__,
2286 error);
2287 goto fail;
2289 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2291 /* Update RX descriptor. */
2292 KKASSERT((paddr & 255) == 0);
2293 ring->desc[idx] = htole32(paddr >> 8);
2294 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2295 BUS_DMASYNC_PREWRITE);
2297 return 0;
2298 fail:
2299 return error;
#define IWM_RSSI_OFFSET 50
/*
 * Compute the maximum RSSI (in dBm) over the two receive chains from
 * the legacy (pre energy-API) PHY info words, compensating for the
 * per-chain AGC gain.
 */
static int
iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
{
	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
	uint32_t agc_a, agc_b;
	uint32_t val;

	/* Per-chain AGC values are packed into one LE32 word. */
	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;

	/* Per-chain in-band RSSI values, likewise packed. */
	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;

	/*
	 * dBm = rssi dB - agc dB - constant.
	 * Higher AGC (higher radio gain) means lower signal.
	 */
	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);

	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
	    "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);

	return max_rssi_dbm;
}
/*
 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
 * values are reported by the fw as positive values - need to negate
 * to obtain their dBm.  Account for missing antennas by replacing 0
 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
 */
static int
iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
{
	int energy_a, energy_b, energy_c, max_energy;
	uint32_t val;

	/* All three per-antenna energies are packed into one LE32 word. */
	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
	    IWM_RX_INFO_ENERGY_ANT_A_POS;
	/* 0 means "antenna missing"; substitute -256 dBm. */
	energy_a = energy_a ? -energy_a : -256;
	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
	    IWM_RX_INFO_ENERGY_ANT_B_POS;
	energy_b = energy_b ? -energy_b : -256;
	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
	    IWM_RX_INFO_ENERGY_ANT_C_POS;
	energy_c = energy_c ? -energy_c : -256;
	/* Report the strongest of the three chains. */
	max_energy = MAX(energy_a, energy_b);
	max_energy = MAX(max_energy, energy_c);

	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
	    "energy In A %d B %d C %d , and max %d\n",
	    energy_a, energy_b, energy_c, max_energy);

	return max_energy;
}
/*
 * Handler for IWM_REPLY_RX_PHY_CMD: the firmware delivers the PHY
 * metadata (channel, flags, signal info) separately from the MPDU;
 * stash it in sc_last_phy_info, where the subsequent RX_MPDU handler
 * (iwm_mvm_rx_rx_mpdu) reads it.
 */
static void
iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
	/* Make the DMA'd packet contents visible to the CPU. */
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
2378 * Retrieve the average noise (in dBm) among receivers.
2380 static int
2381 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2383 int i, total, nbant, noise;
2385 total = nbant = noise = 0;
2386 for (i = 0; i < 3; i++) {
2387 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2388 if (noise) {
2389 total += noise;
2390 nbant++;
2394 /* There should be at least one antenna but check anyway. */
2395 return (nbant == 0) ? -127 : (total / nbant) - 107;
/*
 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
 *
 * Handles the actual data of the Rx packet from the fw.  Pairs the MPDU
 * with the PHY info saved by iwm_mvm_rx_rx_phy_cmd(), validates CRC/FIFO
 * status, replenishes the RX ring slot, then hands the frame to net80211
 * (with the driver lock dropped around the input call).
 */
static void
iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_rx_stats rxs;
	struct mbuf *m;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;

	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* PHY metadata arrived in the preceding RX_PHY_CMD notification. */
	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	/* The 32-bit status word immediately follows the frame payload. */
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));

	m = data->m;
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		device_printf(sc->sc_dev,
		    "dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt);
		return;
	}

	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
		return; /* drop */
	}

	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
	} else {
		rssi = iwm_mvm_calc_rssi(sc, phy_info);
	}
	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
	rssi = MIN(rssi, sc->sc_max_rssi);	/* clip to max. 100% */

	/* replenish ring for the buffer we're going to feed to the sharks */
	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
		    __func__);
		return;
	}

	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);

	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
	    "%s: phy_info: channel=%d, flags=0x%08x\n",
	    __func__,
	    le16toh(phy_info->channel),
	    le16toh(phy_info->phy_flags));

	/*
	 * Populate an RX state struct with the provided information.
	 */
	bzero(&rxs, sizeof(rxs));
	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
	rxs.c_ieee = le16toh(phy_info->channel);
	/*
	 * NOTE(review): the band mask is applied to the little-endian
	 * value *before* le16toh(); harmless on LE hosts but the swap
	 * and mask look reversed — confirm for big-endian targets.
	 */
	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
	} else {
		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
	}
	rxs.rssi = rssi - sc->sc_noise;
	rxs.nf = sc->sc_noise;

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq = htole16(rxs.c_freq);
		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		/* Map firmware PLCP rate codes to radiotap 500kbps units. */
		switch (phy_info->rate) {
		/* CCK rates. */
		case  10: tap->wr_rate =   2; break;
		case  20: tap->wr_rate =   4; break;
		case  55: tap->wr_rate =  11; break;
		case 110: tap->wr_rate =  22; break;
		/* OFDM rates. */
		case 0xd: tap->wr_rate =  12; break;
		case 0xf: tap->wr_rate =  18; break;
		case 0x5: tap->wr_rate =  24; break;
		case 0x7: tap->wr_rate =  36; break;
		case 0x9: tap->wr_rate =  48; break;
		case 0xb: tap->wr_rate =  72; break;
		case 0x1: tap->wr_rate =  96; break;
		case 0x3: tap->wr_rate = 108; break;
		/* Unknown rate: should not happen. */
		default:  tap->wr_rate =   0;
		}
	}

	/* Drop the driver lock across the net80211 input path. */
	IWM_UNLOCK(sc);
	if (ni != NULL) {
		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
		ieee80211_input_mimo(ni, m, &rxs);
		ieee80211_free_node(ni);
	} else {
		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
		ieee80211_input_mimo_all(ic, m, &rxs);
	}
	IWM_LOCK(sc);
}
/*
 * Process the TX response for a single transmitted frame: feed the
 * ACK/failure outcome to net80211's rate control.
 * Returns 1 if the frame failed, 0 if it was delivered.
 */
static int
iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
	struct iwm_node *in)
{
	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211vap *vap = ni->ni_vap;
	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
	int failack = tx_resp->failure_frame;

	/* Aggregation is not used; the response must cover one frame. */
	KASSERT(tx_resp->frame_count == 1, ("too many frames"));

	/* Update rate control statistics. */
	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
	    __func__,
	    (int) le16toh(tx_resp->status.status),
	    (int) le16toh(tx_resp->status.sequence),
	    tx_resp->frame_count,
	    tx_resp->bt_kill_count,
	    tx_resp->failure_rts,
	    tx_resp->failure_frame,
	    le32toh(tx_resp->initial_rate),
	    (int) le16toh(tx_resp->wireless_media_time));

	if (status != IWM_TX_STATUS_SUCCESS &&
	    status != IWM_TX_STATUS_DIRECT_DONE) {
		ieee80211_ratectl_tx_complete(vap, ni,
		    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
		return (1);
	} else {
		ieee80211_ratectl_tx_complete(vap, ni,
		    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
		return (0);
	}
}
/*
 * Handler for IWM_TX_CMD responses: complete the transmitted mbuf,
 * update rate control and ring accounting, and restart transmission
 * if the queue-full condition has cleared.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* A response arrived, so the watchdog can stand down. */
	sc->sc_tx_timer = 0;

	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	/* status != 0 means the frame failed (see tx_cmd_single). */
	ieee80211_tx_complete(&in->in_ni, m, status);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			iwm_start(sc);
		}
	}
}
/*
 * transmit side
 */

/*
 * Process a "command done" firmware notification.  This is where we wakeup
 * processes waiting for a synchronous command completion.
 * from if_iwn
 */
/*
 * Complete a firmware command: free the command's mbuf (if any) and
 * wake up any thread sleeping on the corresponding descriptor slot
 * waiting for a synchronous command to finish.
 */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Sleepers wait on the descriptor address (see the send path). */
	wakeup(&ring->desc[pkt->hdr.idx]);
}
#if 0
/*
 * necessary only for block ack mode
 *
 * Compiled out: writes the frame byte count for (qid, idx) into the
 * firmware's scheduler byte-count table and syncs it for the device.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
/*
 * Take an 802.11 (non-n) rate, find the relevant rate
 * table entry.  Return the index into in_ridx[].
 *
 * The caller then uses that index back into in_ridx
 * to figure out the rate index programmed /into/
 * the firmware for this given node.
 */
2684 static int
2685 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
2686 uint8_t rate)
2688 int i;
2689 uint8_t r;
2691 for (i = 0; i < nitems(in->in_ridx); i++) {
2692 r = iwm_rates[in->in_ridx[i]].rate;
2693 if (rate == r)
2694 return (i);
2696 /* XXX Return the first */
2697 /* XXX TODO: have it return the /lowest/ */
2698 return (0);
/*
 * Fill in the rate related information for a transmit command.
 *
 * Data frames take the rate-control-selected entry from the node's
 * programmed rate table; everything else uses the lowest rate for the
 * current operating band.  Returns the chosen iwm_rates[] entry.
 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	/*
	 * XXX TODO: everything about the rate selection here is terrible!
	 */

	if (type == IEEE80211_FC0_TYPE_DATA) {
		int i;
		/* for data frames, use RS table */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
		ridx = in->in_ridx[i];

		/* This is the index into the programmed table */
		tx->initial_rate_index = i;
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
		    "%s: start with i=%d, txrate %d\n",
		    __func__, i, iwm_rates[ridx].rate);
	} else {
		/*
		 * For non-data, use the lowest supported rate for the given
		 * operational mode.
		 *
		 * Note: there may not be any rate control information available.
		 * This driver currently assumes if we're transmitting data
		 * frames, use the rate control table.  Grr.
		 *
		 * XXX TODO: use the configured rate for the traffic type!
		 * XXX TODO: this should be per-vap, not curmode; as we later
		 * on we'll want to handle off-channel stuff (eg TDLS).
		 */
		if (ic->ic_curmode == IEEE80211_MODE_11A) {
			/*
			 * XXX this assumes the mode is either 11a or not 11a;
			 * definitely won't work for 11n.
			 */
			ridx = IWM_RIDX_OFDM;
		} else {
			ridx = IWM_RIDX_CCK;
		}
	}

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
	    __func__, ridx,
	    rinfo->rate,
	    !! (IWM_RIDX_IS_CCK(ridx))
	    );

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
#define TB0_SIZE 16
/*
 * Queue an 802.11 frame for transmission on TX ring 'ac'.
 *
 * Builds the firmware TX command (rate selection, flags, copied 802.11
 * header), optionally encrypts the frame, DMA-maps the payload into the
 * TFD scatter list and kicks the ring's write pointer.  The caller's
 * mbuf is consumed on both success and failure.  Returns 0 or errno.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}

	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames expect an ACK from the peer. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* RTS/CTS protection for large unicast non-data frames. */
	if (type != IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Non-data/multicast goes via the aux station entry. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(3);
		else
			tx->pm_frame_timeout = htole16(2);
	} else {
		tx->pm_frame_timeout = htole16(0);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, m,
					    segs, IWM_MAX_SCATTER - 2,
					    &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		/*
		 * NOTE(review): m_defrag's return value is treated here as
		 * a failure indicator; confirm against this tree's m_defrag
		 * contract (FreeBSD's returns the defragmented mbuf, NULL
		 * on failure, and the result may differ from 'm').
		 */
		if (m_defrag(m, M_NOWAIT)) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
#if defined(__DragonFly__)
		error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, m,
						    segs, IWM_MAX_SCATTER - 2,
						    &nsegs, BUS_DMA_NOWAIT);
#else
		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TB0/TB1 carry the command header + TX command + 802.11 header. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
/*
 * net80211 raw-transmit entry point.
 *
 * Note that both branches below ignore 'params' and queue the frame on
 * AC 0 via iwm_tx() (marked "XXX fix this" upstream).  The mbuf is
 * consumed in all cases.  Returns 0 or an errno.
 */
static int
iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = ic->ic_softc;
	int error = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "->%s begin\n", __func__);

	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
		m_freem(m);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "<-%s not RUNNING\n", __func__);
		return (ENETDOWN);
	}

	IWM_LOCK(sc);
	/* XXX fix this */
	if (params == NULL) {
		error = iwm_tx(sc, m, ni, 0);
	} else {
		error = iwm_tx(sc, m, ni, 0);
	}
	/* Arm the TX watchdog (ticks down in the watchdog callout). */
	sc->sc_tx_timer = 5;
	IWM_UNLOCK(sc);

	return (error);
}
/*
 * mvm/tx.c
 */
#if 0
/*
 * Note that there are transports that buffer frames before they reach
 * the firmware. This means that after flush_tx_path is called, the
 * queue might not be empty. The race-free way to handle this is to:
 * 1) set the station as draining
 * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 *
 * Compiled out: asks the firmware to flush the TX queues in tfd_msk.
 */
int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
		device_printf(sc->sc_dev,
		    "Flushing tx queue failed: %d\n", ret);
	return ret;
}
#endif
3062 static void
3063 iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
3064 struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
3066 memset(cmd_v5, 0, sizeof(*cmd_v5));
3068 cmd_v5->add_modify = cmd_v6->add_modify;
3069 cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
3070 cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
3071 IEEE80211_ADDR_COPY(cmd_v5->addr, cmd_v6->addr);
3072 cmd_v5->sta_id = cmd_v6->sta_id;
3073 cmd_v5->modify_mask = cmd_v6->modify_mask;
3074 cmd_v5->station_flags = cmd_v6->station_flags;
3075 cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
3076 cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
3077 cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
3078 cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
3079 cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
3080 cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
3081 cmd_v5->assoc_id = cmd_v6->assoc_id;
3082 cmd_v5->beamform_flags = cmd_v6->beamform_flags;
3083 cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
/*
 * Send an ADD_STA command, downgrading it to the v5 layout first if the
 * firmware does not advertise the STA_KEY_CMD capability.  The command
 * status is returned through *status.
 */
static int
iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
	struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
{
	struct iwm_mvm_add_sta_cmd_v5 cmd_v5;

	/* New-enough firmware takes the v6 command as-is. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
		return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
		    sizeof(*cmd), cmd, status);
	}

	iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);

	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
	    &cmd_v5, status);
}
3103 /* send station add/update command to firmware */
/*
 * Build and send the ADD_STA command for our station entry.
 *
 * update == 0 adds a fresh entry (address and queue mask included);
 * update != 0 modifies the existing one.  Returns 0 on firmware
 * success, EIO if the firmware rejected the command, or a send errno.
 */
static int
iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
{
	struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
	int ret;
	uint32_t status;

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	add_sta_cmd.sta_id = IWM_STATION_ID;
	add_sta_cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
	        IWM_DEFAULT_COLOR));
	if (!update) {
		/* Fresh add: bind TX queues 0-3 and the BSSID address. */
		add_sta_cmd.tfd_queue_msk = htole32(0xf);
		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
	}
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);

	status = IWM_ADD_STA_SUCCESS;
	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status) {
	case IWM_ADD_STA_SUCCESS:
		break;
	default:
		ret = EIO;
		device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
		break;
	}

	return ret;
}
/*
 * Add the station entry to the firmware.  Thin wrapper around
 * iwm_mvm_sta_send_to_fw() in "add" (non-update) mode.
 */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	/* update == 0: fresh add, not a modify. */
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
/*
 * Push updated parameters for an already-added station to the firmware
 * (iwm_mvm_sta_send_to_fw() with update == 1).
 */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 1);
}
/*
 * Add an "internal" station (one not backed by a real peer, e.g. the
 * auxiliary station) to the firmware.  addr may be NULL.
 * Returns 0 on firmware success, EIO on rejection, or a send errno.
 */
static int
iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
	const uint8_t *addr, uint16_t mac_id, uint16_t color)
{
	struct iwm_mvm_add_sta_cmd_v6 cmd;
	int ret;
	/*
	 * NOTE(review): iwm_mvm_send_add_sta_cmd_status() is prototyped
	 * with 'int *status'; passing &status (uint32_t) relies on the
	 * two types sharing a representation — confirm.
	 */
	uint32_t status;

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));

	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);

	if (addr)
		IEEE80211_ADDR_COPY(cmd.addr, addr);

	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
	if (ret)
		return ret;

	switch (status) {
	case IWM_ADD_STA_SUCCESS:
		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: Internal station added.\n", __func__);
		return 0;
	default:
		device_printf(sc->sc_dev,
		    "%s: Add internal station failed, status=0x%x\n",
		    __func__, status);
		ret = EIO;
		break;
	}
	return ret;
}
/*
 * Add the auxiliary (internal, non-peer) station to the firmware.
 * On failure, sc_aux_sta is wiped so no stale id/queue mask survives.
 */
static int
iwm_mvm_add_aux_sta(struct iwm_softc *sc)
{
	int ret;

	/* sta_id 3 is used for the aux station in this driver. */
	sc->sc_aux_sta.sta_id = 3;
	sc->sc_aux_sta.tfd_queue_msk = 0;

	ret = iwm_mvm_add_int_sta_common(sc,
	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);

	if (ret)
		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
	return ret;
}
/*
 * Recompute and send the firmware time-quota allocation.
 *
 * The firmware's scheduling session (IWM_MVM_MAX_QUOTA fragments) is
 * divided equally among the active bindings; 'in' being non-NULL marks
 * its PHY context's binding as active.  Returns 0 or an errno.
 */
static int
iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_time_quota_cmd cmd;
	int i, idx, ret, num_active_macs, quota, quota_rem;
	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
	int n_ifs[IWM_MAX_BINDINGS] = {0, };
	uint16_t id;

	memset(&cmd, 0, sizeof(cmd));

	/* currently, PHY ID == binding ID */
	if (in) {
		id = in->in_phyctxt->id;
		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
		colors[id] = in->in_phyctxt->color;

		/* Single-interface driver: the binding always counts once. */
		if (1)
			n_ifs[id] = 1;
	}

	/*
	 * The FW's scheduling session consists of
	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
	 * equally between all the bindings that require quota
	 */
	num_active_macs = 0;
	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
		num_active_macs += n_ifs[i];
	}

	quota = 0;
	quota_rem = 0;
	if (num_active_macs) {
		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
	}

	/* Fill a compacted quota slot for each binding with a color. */
	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
		if (colors[i] < 0)
			continue;

		cmd.quotas[idx].id_and_color =
		    htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));

		if (n_ifs[i] <= 0) {
			cmd.quotas[idx].quota = htole32(0);
			cmd.quotas[idx].max_duration = htole32(0);
		} else {
			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
			cmd.quotas[idx].max_duration = htole32(0);
		}
		idx++;
	}

	/* Give the remainder of the session to the first binding */
	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
	    sizeof(cmd), &cmd);
	if (ret)
		device_printf(sc->sc_dev,
		    "%s: Failed to send quota: %d\n", __func__, ret);
	return ret;
}
3280 * ieee80211 routines
3284 * Change to AUTH state in 80211 state machine. Roughly matches what
3285 * Linux does in bss_info_changed().
/*
 * Move the firmware toward the 802.11 AUTH state: set up (or update)
 * the MAC context, PHY context, binding and station for the current
 * BSS, then protect the session with a time event so the firmware
 * stays on-channel during association.  Roughly matches what Linux
 * iwlwifi does in bss_info_changed().
 *
 * Called with the IWM lock held; takes and releases a reference on
 * the vap's BSS node.  Returns 0 on success or a driver/firmware
 * error code.
 */
static int
iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
{
    struct ieee80211_node *ni;
    struct iwm_node *in;
    struct iwm_vap *iv = IWM_VAP(vap);
    uint32_t duration;
    int error;

    /*
     * XXX i have a feeling that the vap node is being
     * freed from underneath us. Grr.
     */
    ni = ieee80211_ref_node(vap->iv_bss);
    in = IWM_NODE(ni);
    IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
        "%s: called; vap=%p, bss ni=%p\n",
        __func__,
        vap,
        ni);

    in->in_assoc = 0;

    error = iwm_allow_mcast(vap, sc);
    if (error) {
        device_printf(sc->sc_dev,
            "%s: failed to set multicast\n", __func__);
        goto out;
    }

    /*
     * This is where it deviates from what Linux does.
     *
     * Linux iwlwifi doesn't reset the nic each time, nor does it
     * call ctxt_add() here. Instead, it adds it during vap creation,
     * and always does a mac_ctx_changed().
     *
     * The openbsd port doesn't attempt to do that - it reset things
     * at odd states and does the add here.
     *
     * So, until the state handling is fixed (ie, we never reset
     * the NIC except for a firmware failure, which should drag
     * the NIC back to IDLE, re-setup and re-add all the mac/phy
     * contexts that are required), let's do a dirty hack here.
     */
    if (iv->is_uploaded) {
        /* Contexts already exist in the firmware: update them. */
        if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
            device_printf(sc->sc_dev,
                "%s: failed to update MAC\n", __func__);
            goto out;
        }
        if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
            in->in_ni.ni_chan, 1, 1)) != 0) {
            device_printf(sc->sc_dev,
                "%s: failed update phy ctxt\n", __func__);
            goto out;
        }
        in->in_phyctxt = &sc->sc_phyctxt[0];

        if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
            device_printf(sc->sc_dev,
                "%s: binding update cmd\n", __func__);
            goto out;
        }
        if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
            device_printf(sc->sc_dev,
                "%s: failed to update sta\n", __func__);
            goto out;
        }
    } else {
        /* First time through: add the contexts to the firmware. */
        if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
            device_printf(sc->sc_dev,
                "%s: failed to add MAC\n", __func__);
            goto out;
        }
        if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
            in->in_ni.ni_chan, 1, 1)) != 0) {
            device_printf(sc->sc_dev,
                "%s: failed add phy ctxt!\n", __func__);
            /* NOTE(review): overwrites the real error with ETIMEDOUT. */
            error = ETIMEDOUT;
            goto out;
        }
        in->in_phyctxt = &sc->sc_phyctxt[0];

        if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
            device_printf(sc->sc_dev,
                "%s: binding add cmd\n", __func__);
            goto out;
        }
        if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
            device_printf(sc->sc_dev,
                "%s: failed to add sta\n", __func__);
            goto out;
        }
    }

    /*
     * Prevent the FW from wandering off channel during association
     * by "protecting" the session with a time event.
     */
    /* XXX duration is in units of TU, not MS */
    duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
    iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
    DELAY(100);

    error = 0;
out:
    ieee80211_free_node(ni);
    return (error);
}
/*
 * Push association state to the firmware: refresh the station entry,
 * mark the node associated, then update the MAC context so the
 * firmware picks up the new association parameters.
 *
 * Called with the IWM lock held.  Returns 0 or a command error.
 */
static int
iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
{
    struct iwm_node *in = IWM_NODE(vap->iv_bss);
    int error;

    if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
        device_printf(sc->sc_dev,
            "%s: failed to update STA\n", __func__);
        return error;
    }

    in->in_assoc = 1;
    if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
        device_printf(sc->sc_dev,
            "%s: failed to update MAC\n", __func__);
        return error;
    }

    return 0;
}
/*
 * Tear down the firmware association state by fully resetting and
 * re-initializing the device.  The graceful teardown sequence (see
 * the comment below and the disabled code at the end) hangs the
 * hardware, so a full reset is used instead.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
    /*
     * Ok, so *technically* the proper set of calls for going
     * from RUN back to SCAN is:
     *
     * iwm_mvm_power_mac_disable(sc, in);
     * iwm_mvm_mac_ctxt_changed(sc, in);
     * iwm_mvm_rm_sta(sc, in);
     * iwm_mvm_update_quotas(sc, NULL);
     * iwm_mvm_mac_ctxt_changed(sc, in);
     * iwm_mvm_binding_remove_vif(sc, in);
     * iwm_mvm_mac_ctxt_remove(sc, in);
     *
     * However, that freezes the device no matter which permutations
     * and modifications are attempted. Obviously, this driver is missing
     * something since it works in the Linux driver, but figuring out what
     * is missing is a little more complicated. Now, since we're going
     * back to nothing anyway, we'll just do a complete device reset.
     */
    //iwm_mvm_flush_tx_path(sc, 0xf, 1);
    iwm_stop_device(sc);
    iwm_init_hw(sc);
    if (in)
        in->in_assoc = 0;
    return 0;

#if 0
    /*
     * Dead code kept for reference: the "proper" teardown sequence.
     * NOTE(review): iwm_mvm_rm_sta() is called twice below -- the
     * second, unchecked call looks like a leftover and should go if
     * this path is ever revived.
     */
    int error;

    iwm_mvm_power_mac_disable(sc, in);

    if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
        device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
        return error;
    }

    if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
        device_printf(sc->sc_dev, "sta remove fail %d\n", error);
        return error;
    }
    error = iwm_mvm_rm_sta(sc, in);
    in->in_assoc = 0;
    iwm_mvm_update_quotas(sc, NULL);
    if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
        device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
        return error;
    }
    iwm_mvm_binding_remove_vif(sc, in);

    iwm_mvm_mac_ctxt_remove(sc, in);

    return error;
#endif
}
3478 static struct ieee80211_node *
3479 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3481 return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
3482 M_INTWAIT | M_ZERO);
/*
 * Build the link-quality (rate selection) command for a node.
 * First maps the node's negotiated 802.11 rates to hardware rate
 * indices (highest rate first), then fills in->in_lq's rs_table
 * with PLCP/antenna words the firmware understands.  The command
 * itself is sent later by the caller (iwm_newstate RUN handling).
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
    struct ieee80211_node *ni = &in->in_ni;
    struct iwm_lq_cmd *lq = &in->in_lq;
    int nrates = ni->ni_rates.rs_nrates;
    int i, ridx, tab = 0;
    int txant = 0;

    if (nrates > nitems(lq->rs_table)) {
        device_printf(sc->sc_dev,
            "%s: node supports %d rates, driver handles "
            "only %zu\n", __func__, nrates, nitems(lq->rs_table));
        return;
    }
    if (nrates == 0) {
        device_printf(sc->sc_dev,
            "%s: node supports 0 rates, odd!\n", __func__);
        return;
    }

    /*
     * XXX .. and most of iwm_node is not initialised explicitly;
     * it's all just 0x0 passed to the firmware.
     */

    /* first figure out which rates we should support */
    /* XXX TODO: this isn't 11n aware /at all/ */
    memset(&in->in_ridx, -1, sizeof(in->in_ridx));
    IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
        "%s: nrates=%d\n", __func__, nrates);

    /*
     * Loop over nrates and populate in_ridx from the highest
     * rate to the lowest rate. Remember, in_ridx[] has
     * IEEE80211_RATE_MAXSIZE entries!
     */
    for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
        int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

        /* Map 802.11 rate to HW rate index. */
        for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
            if (iwm_rates[ridx].rate == rate)
                break;
        if (ridx > IWM_RIDX_MAX) {
            /* NOTE(review): in_ridx[i] stays -1 here; the table
             * construction below would index iwm_rates[-1]. */
            device_printf(sc->sc_dev,
                "%s: WARNING: device rate for %d not found!\n",
                __func__, rate);
        } else {
            IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
                "%s: rate: i: %d, rate=%d, ridx=%d\n",
                __func__,
                i,
                rate,
                ridx);
            in->in_ridx[i] = ridx;
        }
    }

    /* then construct a lq_cmd based on those */
    memset(lq, 0, sizeof(*lq));
    lq->sta_id = IWM_STATION_ID;

    /*
     * are these used? (we don't do SISO or MIMO)
     * need to set them to non-zero, though, or we get an error.
     */
    lq->single_stream_ant_msk = 1;
    lq->dual_stream_ant_msk = 1;

    /*
     * Build the actual rate selection table.
     * The lowest bits are the rates. Additionally,
     * CCK needs bit 9 to be set. The rest of the bits
     * we add to the table select the tx antenna
     * Note that we add the rates in the highest rate first
     * (opposite of ni_rates).
     */
    /*
     * XXX TODO: this should be looping over the min of nrates
     * and LQ_MAX_RETRY_NUM. Sigh.
     */
    for (i = 0; i < nrates; i++) {
        int nextant;

        /* Rotate through the valid TX antennas, refilling when empty. */
        if (txant == 0)
            txant = IWM_FW_VALID_TX_ANT(sc);
        nextant = 1<<(ffs(txant)-1);
        txant &= ~nextant;

        /*
         * Map the rate id into a rate index into
         * our hardware table containing the
         * configuration to use for this rate.
         */
        ridx = in->in_ridx[i];
        tab = iwm_rates[ridx].plcp;
        tab |= nextant << IWM_RATE_MCS_ANT_POS;
        if (IWM_RIDX_IS_CCK(ridx))
            tab |= IWM_RATE_MCS_CCK_MSK;
        IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
            "station rate i=%d, rate=%d, hw=%x\n",
            i, iwm_rates[ridx].rate, tab);
        lq->rs_table[i] = htole32(tab);
    }
    /* then fill the rest with the lowest possible rate */
    for (i = nrates; i < nitems(lq->rs_table); i++) {
        KASSERT(tab != 0, ("invalid tab"));
        lq->rs_table[i] = htole32(tab);
    }
}
/*
 * ifmedia change callback: let net80211 process the media change and,
 * if it requests a reset (ENETRESET), bounce the hardware while the
 * interface is running.  Returns the (possibly ENETRESET) status from
 * ieee80211_media_change() -- callers in net80211 treat that as
 * "handled"; presumably intentional, matches other drivers.
 */
static int
iwm_media_change(struct ifnet *ifp)
{
    struct ieee80211vap *vap = ifp->if_softc;
    struct ieee80211com *ic = vap->iv_ic;
    struct iwm_softc *sc = ic->ic_softc;
    int error;

    error = ieee80211_media_change(ifp);
    if (error != ENETRESET)
        return error;

    IWM_LOCK(sc);
    if (ic->ic_nrunning > 0) {
        iwm_stop(sc);
        iwm_init(sc);
    }
    IWM_UNLOCK(sc);
    return error;
}
/*
 * net80211 state-machine hook.  Drops the net80211 lock and takes the
 * driver lock for the firmware work, then restores the lock order and
 * chains to the stock vap newstate handler.  Because the hardware is
 * fully reset on leaving RUN, transitions RUN -> {SCAN,AUTH,ASSOC}
 * are forced through INIT first (see the long comment below).
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
    struct iwm_vap *ivp = IWM_VAP(vap);
    struct ieee80211com *ic = vap->iv_ic;
    struct iwm_softc *sc = ic->ic_softc;
    struct iwm_node *in;
    int error;

    IWM_DPRINTF(sc, IWM_DEBUG_STATE,
        "switching state %s -> %s\n",
        ieee80211_state_name[vap->iv_state],
        ieee80211_state_name[nstate]);
    IEEE80211_UNLOCK(ic);
    IWM_LOCK(sc);

    if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
        iwm_led_blink_stop(sc);

    /* disable beacon filtering if we're hopping out of RUN */
    if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
        iwm_mvm_disable_beacon_filter(sc);

        if (((in = IWM_NODE(vap->iv_bss)) != NULL))
            in->in_assoc = 0;

        iwm_release(sc, NULL);

        /*
         * It's impossible to directly go RUN->SCAN. If we iwm_release()
         * above then the card will be completely reinitialized,
         * so the driver must do everything necessary to bring the card
         * from INIT to SCAN.
         *
         * Additionally, upon receiving deauth frame from AP,
         * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
         * state. This will also fail with this driver, so bring the FSM
         * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
         *
         * XXX TODO: fix this for FreeBSD!
         */
        if (nstate == IEEE80211_S_SCAN ||
            nstate == IEEE80211_S_AUTH ||
            nstate == IEEE80211_S_ASSOC) {
            IWM_DPRINTF(sc, IWM_DEBUG_STATE,
                "Force transition to INIT; MGT=%d\n", arg);
            /* Reacquire net80211 lock for the recursive newstate call. */
            IWM_UNLOCK(sc);
            IEEE80211_LOCK(ic);
            vap->iv_newstate(vap, IEEE80211_S_INIT, arg);
            IWM_DPRINTF(sc, IWM_DEBUG_STATE,
                "Going INIT->SCAN\n");
            nstate = IEEE80211_S_SCAN;
            IEEE80211_UNLOCK(ic);
            IWM_LOCK(sc);
        }
    }

    switch (nstate) {
    case IEEE80211_S_INIT:
        sc->sc_scanband = 0;
        break;

    case IEEE80211_S_AUTH:
        if ((error = iwm_auth(vap, sc)) != 0) {
            device_printf(sc->sc_dev,
                "%s: could not move to auth state: %d\n",
                __func__, error);
            break;
        }
        break;

    case IEEE80211_S_ASSOC:
        if ((error = iwm_assoc(vap, sc)) != 0) {
            device_printf(sc->sc_dev,
                "%s: failed to associate: %d\n", __func__,
                error);
            break;
        }
        break;

    case IEEE80211_S_RUN:
    {
        struct iwm_host_cmd cmd = {
            .id = IWM_LQ_CMD,
            .len = { sizeof(in->in_lq), },
            .flags = IWM_CMD_SYNC,
        };

        /* Update the association state, now we have it all */
        /* (eg associd comes in at this point */
        error = iwm_assoc(vap, sc);
        if (error != 0) {
            device_printf(sc->sc_dev,
                "%s: failed to update association state: %d\n",
                __func__,
                error);
            break;
        }

        in = IWM_NODE(vap->iv_bss);
        iwm_mvm_power_mac_update_mode(sc, in);
        iwm_mvm_enable_beacon_filter(sc, in);
        iwm_mvm_update_quotas(sc, in);
        iwm_setrates(sc, in);

        /* LQ command failure is logged but not fatal here. */
        cmd.data[0] = &in->in_lq;
        if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
            device_printf(sc->sc_dev,
                "%s: IWM_LQ_CMD failed\n", __func__);
        }

        iwm_mvm_led_enable(sc);
        break;
    }

    default:
        break;
    }
    IWM_UNLOCK(sc);
    IEEE80211_LOCK(ic);

    return (ivp->iv_newstate(vap, nstate, arg));
}
/*
 * Taskqueue callback run when the firmware reports scan completion.
 * If the 2 GHz pass just finished and the NVM says 5 GHz is enabled,
 * kick off the 5 GHz pass; otherwise (or if starting the 5 GHz scan
 * fails) tell net80211 the scan is done.  The driver lock is dropped
 * around ieee80211_scan_done() to respect lock ordering.
 */
void
iwm_endscan_cb(void *arg, int pending)
{
    struct iwm_softc *sc = arg;
    struct ieee80211com *ic = &sc->sc_ic;
    int done;
    int error;

    IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
        "%s: scan ended\n",
        __func__);

    IWM_LOCK(sc);
    if (sc->sc_scanband == IEEE80211_CHAN_2GHZ &&
        sc->sc_nvm.sku_cap_band_52GHz_enable) {
        done = 0;
        if ((error = iwm_mvm_scan_request(sc,
            IEEE80211_CHAN_5GHZ, 0, NULL, 0)) != 0) {
            device_printf(sc->sc_dev,
                "could not initiate 5 GHz scan\n");
            done = 1;
        }
    } else {
        done = 1;
    }

    if (done) {
        IWM_UNLOCK(sc);
        ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
        IWM_LOCK(sc);
        sc->sc_scanband = 0;
    }
    IWM_UNLOCK(sc);
}
/*
 * Bring the hardware all the way up: run the INIT firmware image for
 * calibration, restart the hardware, load the regular runtime image,
 * then push antenna/PHY configuration, add the aux scan station, add
 * the PHY contexts, configure device power, and activate the first
 * four TX queues.  The sequence and its ordering mirror the firmware
 * bring-up protocol; steps after firmware load jump to 'error' which
 * stops the device again.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
    struct ieee80211com *ic = &sc->sc_ic;
    int error, i, qid;

    if ((error = iwm_start_hw(sc)) != 0) {
        kprintf("iwm_start_hw: failed %d\n", error);
        return error;
    }

    if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
        kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
        return error;
    }

    /*
     * should stop and start HW since that INIT
     * image just loaded
     */
    iwm_stop_device(sc);
    if ((error = iwm_start_hw(sc)) != 0) {
        device_printf(sc->sc_dev, "could not initialize hardware\n");
        return error;
    }

    /* omstart, this time with the regular firmware */
    error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
    if (error) {
        device_printf(sc->sc_dev, "could not load firmware\n");
        goto error;
    }

    if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0) {
        device_printf(sc->sc_dev, "antenna config failed\n");
        goto error;
    }

    /* Send phy db control command and then phy db calibration*/
    if ((error = iwm_send_phy_db_data(sc)) != 0) {
        device_printf(sc->sc_dev, "phy_db_data failed\n");
        goto error;
    }

    if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
        device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
        goto error;
    }

    /* Add auxiliary station for scanning */
    if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
        device_printf(sc->sc_dev, "add_aux_sta failed\n");
        goto error;
    }

    for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
        /*
         * The channel used here isn't relevant as it's
         * going to be overwritten in the other flows.
         * For now use the first channel we have.
         */
        if ((error = iwm_mvm_phy_ctxt_add(sc,
            &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
            goto error;
    }

    error = iwm_mvm_power_update_device(sc);
    if (error)
        goto error;

    /* Mark TX rings as active. */
    for (qid = 0; qid < 4; qid++) {
        iwm_enable_txq(sc, qid, qid);
    }

    return 0;

 error:
    iwm_stop_device(sc);
    return error;
}
/*
 * Allow multicast from our BSSID: send an IWM_MCAST_FILTER_CMD with
 * pass_all set so the firmware does not filter multicast frames.
 * The command is heap-allocated because the firmware requires the
 * payload rounded up to a 4-byte boundary.
 */
static int
iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
{
    struct ieee80211_node *ni = vap->iv_bss;
    struct iwm_mcast_filter_cmd *cmd;
    size_t size;
    int error;

    size = roundup(sizeof(*cmd), 4);
    cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
    if (cmd == NULL)
        return ENOMEM;
    cmd->filter_own = 1;
    cmd->port_id = 0;
    cmd->count = 0;      /* no explicit address list; pass_all covers it */
    cmd->pass_all = 1;
    IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);

    error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
        IWM_CMD_SYNC, size, cmd);
    kfree(cmd, M_DEVBUF);

    return (error);
}
3887 * ifnet interfaces
/*
 * Start the interface: bump the generation counter (invalidates
 * in-flight commands from the previous life), bring the hardware up,
 * and arm the watchdog.  No-op if already initialized.  Called with
 * the IWM lock held.
 */
static void
iwm_init(struct iwm_softc *sc)
{
    int error;

    if (sc->sc_flags & IWM_FLAG_HW_INITED) {
        return;
    }
    sc->sc_generation++;
    sc->sc_flags &= ~IWM_FLAG_STOPPED;

    if ((error = iwm_init_hw(sc)) != 0) {
        kprintf("iwm_init_hw failed %d\n", error);
        iwm_stop(sc);
        return;
    }

    /*
     * Ok, firmware loaded and we are jogging
     */
    sc->sc_flags |= IWM_FLAG_HW_INITED;
    callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
}
3914 static int
3915 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
3917 struct iwm_softc *sc;
3918 int error;
3920 sc = ic->ic_softc;
3922 IWM_LOCK(sc);
3923 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3924 IWM_UNLOCK(sc);
3925 return (ENXIO);
3927 error = mbufq_enqueue(&sc->sc_snd, m);
3928 if (error) {
3929 IWM_UNLOCK(sc);
3930 return (error);
3932 iwm_start(sc);
3933 IWM_UNLOCK(sc);
3934 return (0);
/*
 * Dequeue packets from sendq and call send.
 * Stops early if any TX queue is full (qfullmsk non-zero); the node
 * reference travelling in m_pkthdr.rcvif is released on TX failure.
 */
static void
iwm_start(struct iwm_softc *sc)
{
    struct ieee80211_node *ni;
    struct mbuf *m;
    int ac = 0;

    IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
    while (sc->qfullmsk == 0 &&
        (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
        ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
        if (iwm_tx(sc, m, ni, ac) != 0) {
            if_inc_counter(ni->ni_vap->iv_ifp,
                IFCOUNTER_OERRORS, 1);
            ieee80211_free_node(ni);
            continue;
        }
        /* re-arm the watchdog on every successful queue */
        sc->sc_tx_timer = 15;
    }
    IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
}
/*
 * Stop the interface: clear the inited flag, bump the generation so
 * pending command waiters abort, reset scan/watchdog state and shut
 * the device down.  Called with the IWM lock held.
 */
static void
iwm_stop(struct iwm_softc *sc)
{

    sc->sc_flags &= ~IWM_FLAG_HW_INITED;
    sc->sc_flags |= IWM_FLAG_STOPPED;
    sc->sc_generation++;
    sc->sc_scanband = 0;
    iwm_led_blink_stop(sc);
    sc->sc_tx_timer = 0;
    iwm_stop_device(sc);
}
/*
 * Per-second watchdog callout.  If a TX was queued and never
 * completed within sc_tx_timer ticks, dump the firmware error log
 * (debug builds), stop the device and count an output error; the
 * callout is not re-armed in that case.
 */
static void
iwm_watchdog(void *arg)
{
    struct iwm_softc *sc = arg;

    if (sc->sc_tx_timer > 0) {
        if (--sc->sc_tx_timer == 0) {
            device_printf(sc->sc_dev, "device timeout\n");
#ifdef IWM_DEBUG
            iwm_nic_error(sc);
#endif
            iwm_stop(sc);
#if defined(__DragonFly__)
            ++sc->sc_ic.ic_oerrors;
#else
            counter_u64_add(sc->sc_ic.ic_oerrors, 1);
#endif
            return;
        }
    }
    callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
}
3998 static void
3999 iwm_parent(struct ieee80211com *ic)
4001 struct iwm_softc *sc = ic->ic_softc;
4002 int startall = 0;
4004 IWM_LOCK(sc);
4005 if (ic->ic_nrunning > 0) {
4006 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4007 iwm_init(sc);
4008 startall = 1;
4010 } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4011 iwm_stop(sc);
4012 IWM_UNLOCK(sc);
4013 if (startall)
4014 ieee80211_start_all(ic);
4018 * The interrupt side of things
4022 * error dumping routines are from iwlwifi/mvm/utils.c
4026 * Note: This structure is read from the device with IO accesses,
4027 * and the reading already does the endian conversion. As it is
4028 * read with uint32_t-sized accesses, any members with a different size
4029 * need to be ordered correctly though!
/*
 * Firmware error log layout, read from device memory by
 * iwm_nic_error() via uint32_t-sized accesses (so members with a
 * different size would need careful ordering -- all are uint32_t
 * today).  Field meanings come from the Linux iwlwifi driver.
 */
struct iwm_error_event_table {
    uint32_t valid;        /* (nonzero) valid, (0) log is empty */
    uint32_t error_id;     /* type of error */
    uint32_t pc;           /* program counter */
    uint32_t blink1;       /* branch link */
    uint32_t blink2;       /* branch link */
    uint32_t ilink1;       /* interrupt link */
    uint32_t ilink2;       /* interrupt link */
    uint32_t data1;        /* error-specific data */
    uint32_t data2;        /* error-specific data */
    uint32_t data3;        /* error-specific data */
    uint32_t bcon_time;    /* beacon timer */
    uint32_t tsf_low;      /* network timestamp function timer */
    uint32_t tsf_hi;       /* network timestamp function timer */
    uint32_t gp1;          /* GP1 timer register */
    uint32_t gp2;          /* GP2 timer register */
    uint32_t gp3;          /* GP3 timer register */
    uint32_t ucode_ver;    /* uCode version */
    uint32_t hw_ver;       /* HW Silicon version */
    uint32_t brd_ver;      /* HW board version */
    uint32_t log_pc;       /* log program counter */
    uint32_t frame_ptr;    /* frame pointer */
    uint32_t stack_ptr;    /* stack pointer */
    uint32_t hcmd;         /* last host command header */
    uint32_t isr0;         /* isr status register LMPM_NIC_ISR0:
                            * rxtx_flag */
    uint32_t isr1;         /* isr status register LMPM_NIC_ISR1:
                            * host_flag */
    uint32_t isr2;         /* isr status register LMPM_NIC_ISR2:
                            * enc_flag */
    uint32_t isr3;         /* isr status register LMPM_NIC_ISR3:
                            * time_flag */
    uint32_t isr4;         /* isr status register LMPM_NIC_ISR4:
                            * wico interrupt */
    uint32_t isr_pref;     /* isr status register LMPM_NIC_PREF_STAT */
    uint32_t wait_event;   /* wait event() caller address */
    uint32_t l2p_control;  /* L2pControlField */
    uint32_t l2p_duration; /* L2pDurationField */
    uint32_t l2p_mhvalid;  /* L2pMhValidBits */
    uint32_t l2p_addr_match; /* L2pAddrMatchStat */
    uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
                            * (LMPM_PMG_SEL) */
    uint32_t u_timestamp;  /* indicate when the date and time of the
                            * compilation */
    uint32_t flow_handler; /* FH read/write pointers, RX credit */
} __packed;
4078 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
4079 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
4081 #ifdef IWM_DEBUG
4082 struct {
4083 const char *name;
4084 uint8_t num;
4085 } advanced_lookup[] = {
4086 { "NMI_INTERRUPT_WDG", 0x34 },
4087 { "SYSASSERT", 0x35 },
4088 { "UCODE_VERSION_MISMATCH", 0x37 },
4089 { "BAD_COMMAND", 0x38 },
4090 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4091 { "FATAL_ERROR", 0x3D },
4092 { "NMI_TRM_HW_ERR", 0x46 },
4093 { "NMI_INTERRUPT_TRM", 0x4C },
4094 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4095 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4096 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4097 { "NMI_INTERRUPT_HOST", 0x66 },
4098 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
4099 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
4100 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4101 { "ADVANCED_SYSASSERT", 0 },
4104 static const char *
4105 iwm_desc_lookup(uint32_t num)
4107 int i;
4109 for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4110 if (advanced_lookup[i].num == num)
4111 return advanced_lookup[i].name;
4113 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4114 return advanced_lookup[i].name;
/*
 * Support for dumping the error log seemed like a good idea ...
 * but it's mostly hex junk and the only sensible thing is the
 * hw/ucode revision (which we know anyway). Since it's here,
 * I'll just leave it in, just in case e.g. the Intel guys want to
 * help us decipher some "ADVANCED_SYSASSERT" later.
 *
 * Reads the iwm_error_event_table out of device memory at the address
 * the ALIVE response reported and pretty-prints it.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
    struct iwm_error_event_table table;
    uint32_t base;

    device_printf(sc->sc_dev, "dumping device error log\n");
    base = sc->sc_uc.uc_error_event_table;
    /* The log pointer must lie in device SRAM. */
    if (base < 0x800000 || base >= 0x80C000) {
        device_printf(sc->sc_dev,
            "Not valid error log pointer 0x%08x\n", base);
        return;
    }

    /* iwm_read_mem takes a dword count, hence the division. */
    if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
        device_printf(sc->sc_dev, "reading errlog failed\n");
        return;
    }

    if (!table.valid) {
        device_printf(sc->sc_dev, "errlog not found, skipping\n");
        return;
    }

    if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
        device_printf(sc->sc_dev, "Start IWL Error Log Dump:\n");
        device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
            sc->sc_flags, table.valid);
    }

    device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
        iwm_desc_lookup(table.error_id));
    device_printf(sc->sc_dev, "%08X | uPc\n", table.pc);
    device_printf(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
    device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
    device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
    device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
    device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
    device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
    device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
    device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
    device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
    device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
    device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
    device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
    device_printf(sc->sc_dev, "%08X | time gp3\n", table.gp3);
    device_printf(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
    device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
    device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
    device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
    device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
    device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
    device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
    device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
    device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
    device_printf(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
    device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
    device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
    device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
    device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
    device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
    device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
    device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
    device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
}
#endif
/*
 * Helpers for iwm_notif_intr(): sync the RX slot's DMA map for CPU
 * reads and point at the payload that follows the iwm_rx_packet
 * header.  Both rely on 'ring' and 'data' locals in the caller.
 * NOTE(review): SYNC_RESP_PTR never uses its _len_ argument.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Advance the RX ring's consumer index, wrapping at ring size. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
/*
 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
 * Basic structure from if_iwn
 *
 * Walks the RX ring from the current consumer index up to the
 * hardware's closed_rb_num, dispatching each packet by hdr.code,
 * completing synchronous host commands, and finally writing the new
 * (8-aligned) read pointer back to the firmware.
 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
    uint16_t hw;

    bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
        BUS_DMASYNC_POSTREAD);

    hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;

    /*
     * Process responses
     */
    while (sc->rxq.cur != hw) {
        struct iwm_rx_ring *ring = &sc->rxq;
        struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
        struct iwm_rx_packet *pkt;
        struct iwm_cmd_response *cresp;
        int qid, idx;

        bus_dmamap_sync(sc->rxq.data_dmat, data->map,
            BUS_DMASYNC_POSTREAD);
        pkt = mtod(data->m, struct iwm_rx_packet *);

        qid = pkt->hdr.qid & ~0x80;
        idx = pkt->hdr.idx;

        IWM_DPRINTF(sc, IWM_DEBUG_INTR,
            "rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
            pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
            pkt->hdr.code, sc->rxq.cur, hw);

        /*
         * randomly get these from the firmware, no idea why.
         * they at least seem harmless, so just ignore them for now
         */
        if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
            || pkt->len_n_flags == htole32(0x55550000))) {
            ADVANCE_RXQ(sc);
            continue;
        }

        switch (pkt->hdr.code) {
        case IWM_REPLY_RX_PHY_CMD:
            iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
            break;

        case IWM_REPLY_RX_MPDU_CMD:
            iwm_mvm_rx_rx_mpdu(sc, pkt, data);
            break;

        case IWM_TX_CMD:
            iwm_mvm_rx_tx_cmd(sc, pkt, data);
            break;

        case IWM_MISSED_BEACONS_NOTIFICATION: {
            struct iwm_missed_beacons_notif *resp;
            int missed;

            /* XXX look at mac_id to determine interface ID */
            struct ieee80211com *ic = &sc->sc_ic;
            struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

            SYNC_RESP_STRUCT(resp, pkt);
            missed = le32toh(resp->consec_missed_beacons);

            IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
                "%s: MISSED_BEACON: mac_id=%d, "
                "consec_since_last_rx=%d, consec=%d, num_expect=%d "
                "num_rx=%d\n",
                __func__,
                le32toh(resp->mac_id),
                le32toh(resp->consec_missed_beacons_since_last_rx),
                le32toh(resp->consec_missed_beacons),
                le32toh(resp->num_expected_beacons),
                le32toh(resp->num_recvd_beacons));

            /* Be paranoid */
            if (vap == NULL)
                break;

            /* XXX no net80211 locking? */
            if (vap->iv_state == IEEE80211_S_RUN &&
                (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
                if (missed > vap->iv_bmissthreshold) {
                    /* XXX bad locking; turn into task */
                    IWM_UNLOCK(sc);
                    ieee80211_beacon_miss(ic);
                    IWM_LOCK(sc);
                }
            }

            break; }

        case IWM_MVM_ALIVE: {
            struct iwm_mvm_alive_resp *resp;
            SYNC_RESP_STRUCT(resp, pkt);

            /* Record firmware memory layout and wake the loader. */
            sc->sc_uc.uc_error_event_table
                = le32toh(resp->error_event_table_ptr);
            sc->sc_uc.uc_log_event_table
                = le32toh(resp->log_event_table_ptr);
            sc->sched_base = le32toh(resp->scd_base_ptr);
            sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;

            sc->sc_uc.uc_intr = 1;
            wakeup(&sc->sc_uc);
            break; }

        case IWM_CALIB_RES_NOTIF_PHY_DB: {
            struct iwm_calib_res_notif_phy_db *phy_db_notif;
            SYNC_RESP_STRUCT(phy_db_notif, pkt);

            iwm_phy_db_set_section(sc, phy_db_notif);

            break; }

        case IWM_STATISTICS_NOTIFICATION: {
            struct iwm_notif_statistics *stats;
            SYNC_RESP_STRUCT(stats, pkt);
            memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
            sc->sc_noise = iwm_get_noise(&stats->rx.general);
            break; }

        case IWM_NVM_ACCESS_CMD:
            /* Copy the reply out for the synchronous waiter. */
            if (sc->sc_wantresp == ((qid << 16) | idx)) {
                bus_dmamap_sync(sc->rxq.data_dmat, data->map,
                    BUS_DMASYNC_POSTREAD);
                memcpy(sc->sc_cmd_resp,
                    pkt, sizeof(sc->sc_cmd_resp));
            }
            break;

        case IWM_PHY_CONFIGURATION_CMD:
        case IWM_TX_ANT_CONFIGURATION_CMD:
        case IWM_ADD_STA:
        case IWM_MAC_CONTEXT_CMD:
        case IWM_REPLY_SF_CFG_CMD:
        case IWM_POWER_TABLE_CMD:
        case IWM_PHY_CONTEXT_CMD:
        case IWM_BINDING_CONTEXT_CMD:
        case IWM_TIME_EVENT_CMD:
        case IWM_SCAN_REQUEST_CMD:
        case IWM_REPLY_BEACON_FILTERING_CMD:
        case IWM_MAC_PM_POWER_TABLE:
        case IWM_TIME_QUOTA_CMD:
        case IWM_REMOVE_STA:
        case IWM_TXPATH_FLUSH:
        case IWM_LQ_CMD:
            /* Generic command acks: just the status word matters. */
            SYNC_RESP_STRUCT(cresp, pkt);
            if (sc->sc_wantresp == ((qid << 16) | idx)) {
                memcpy(sc->sc_cmd_resp,
                    pkt, sizeof(*pkt)+sizeof(*cresp));
            }
            break;

        /* ignore */
        case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
            break;

        case IWM_INIT_COMPLETE_NOTIF:
            sc->sc_init_complete = 1;
            wakeup(&sc->sc_init_complete);
            break;

        case IWM_SCAN_COMPLETE_NOTIFICATION: {
            struct iwm_scan_complete_notif *notif;
            SYNC_RESP_STRUCT(notif, pkt);
            /* Defer end-of-scan handling to iwm_endscan_cb(). */
            taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
            break; }

        case IWM_REPLY_ERROR: {
            struct iwm_error_resp *resp;
            SYNC_RESP_STRUCT(resp, pkt);

            device_printf(sc->sc_dev,
                "firmware error 0x%x, cmd 0x%x\n",
                le32toh(resp->error_type),
                resp->cmd_id);
            break; }

        case IWM_TIME_EVENT_NOTIFICATION: {
            struct iwm_time_event_notif *notif;
            SYNC_RESP_STRUCT(notif, pkt);

            IWM_DPRINTF(sc, IWM_DEBUG_INTR,
                "TE notif status = 0x%x action = 0x%x\n",
                notif->status, notif->action);
            break; }

        case IWM_MCAST_FILTER_CMD:
            break;

        default:
            device_printf(sc->sc_dev,
                "frame %d/%d %x UNHANDLED (this should "
                "not happen)\n", qid, idx,
                pkt->len_n_flags);
            break;
        }

        /*
         * Why test bit 0x80? The Linux driver:
         *
         * There is one exception: uCode sets bit 15 when it
         * originates the response/notification, i.e. when the
         * response/notification is not a direct response to a
         * command sent by the driver. For example, uCode issues
         * IWM_REPLY_RX when it sends a received frame to the driver;
         * it is not a direct response to any driver command.
         *
         * Ok, so since when is 7 == 15? Well, the Linux driver
         * uses a slightly different format for pkt->hdr, and "qid"
         * is actually the upper byte of a two-byte field.
         */
        if (!(pkt->hdr.qid & (1 << 7))) {
            iwm_cmd_done(sc, pkt);
        }

        ADVANCE_RXQ(sc);
    }

    IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
        IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

    /*
     * Tell the firmware what we have processed.
     * Seems like the hardware gets upset unless we align
     * the write by 8??
     */
    hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
    IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
/*
 * Interrupt handler.  Reads the interrupt cause either from the ICT
 * (interrupt cause table) in host memory or directly from the INT /
 * FH_INT_STATUS registers, acknowledges it, and dispatches to the
 * individual handlers: firmware SW error, fatal HW error, firmware
 * load chunk completion, rfkill, and RX notification processing.
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

#if defined(__DragonFly__)
	/* Guard against a late interrupt after iwm_pci_detach() cleared sc_mem. */
	if (sc->sc_mem == NULL) {
		kprintf("iwm_intr: detached\n");
		return;
	}
#endif
	IWM_LOCK(sc);
	/* Mask all interrupts while we figure out the cause. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/*
		 * NOTE(review): htole32() is used where le32toh() would be
		 * expected to read the little-endian ICT entry; a no-op on
		 * LE hosts — verify on big-endian machines.
		 */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;	/* consume the entry */
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;	/* spurious; just re-enable and leave */
	}

	/* Acknowledge the causes we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* ignored */
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			kprintf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		/* XXX TODO: turn this into a callout/taskqueue */
		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		/* Wake iwm_firmware_load_chunk() waiting on sc_fw. */
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
/*
 * Autoconf glue-sniffing
 */
#define	PCI_VENDOR_INTEL		0x8086
#define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
#define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
#define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
#define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
#define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
#define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b

/* Supported PCI device IDs and their human-readable product names. */
static const struct iwm_devices {
	uint16_t	device;
	const char	*name;
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
	{ PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
};
4627 static int
4628 iwm_probe(device_t dev)
4630 int i;
4632 for (i = 0; i < nitems(iwm_devices); i++) {
4633 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
4634 pci_get_device(dev) == iwm_devices[i].device) {
4635 device_set_desc(dev, iwm_devices[i].name);
4636 return (BUS_PROBE_DEFAULT);
4640 return (ENXIO);
4643 static int
4644 iwm_dev_check(device_t dev)
4646 struct iwm_softc *sc;
4648 sc = device_get_softc(dev);
4650 switch (pci_get_device(dev)) {
4651 case PCI_PRODUCT_INTEL_WL_3160_1:
4652 case PCI_PRODUCT_INTEL_WL_3160_2:
4653 sc->sc_fwname = "iwm3160fw";
4654 sc->host_interrupt_operation_mode = 1;
4655 return (0);
4656 case PCI_PRODUCT_INTEL_WL_7260_1:
4657 case PCI_PRODUCT_INTEL_WL_7260_2:
4658 sc->sc_fwname = "iwm7260fw";
4659 sc->host_interrupt_operation_mode = 1;
4660 return (0);
4661 case PCI_PRODUCT_INTEL_WL_7265_1:
4662 case PCI_PRODUCT_INTEL_WL_7265_2:
4663 sc->sc_fwname = "iwm7265fw";
4664 sc->host_interrupt_operation_mode = 0;
4665 return (0);
4666 default:
4667 device_printf(dev, "unknown adapter type\n");
4668 return ENXIO;
/*
 * Low-level PCI setup: clear the retry-timeout register, enable bus
 * mastering, map BAR0 registers, and allocate/hook up the (MSI)
 * interrupt.  Returns 0 or ENXIO.
 */
static int
iwm_pci_attach(device_t dev)
{
	struct iwm_softc *sc;
	int count, error, rid;
	uint16_t reg;
#if defined(__DragonFly__)
	int irq_flags;
#endif

	sc = device_get_softc(dev);

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_read_config(dev, 0x40, sizeof(reg));
	pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));

	/* Enable bus-mastering and hardware bug workaround. */
	pci_enable_busmaster(dev);
	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
	/* if !MSI */
	if (reg & PCIM_STATUS_INTxSTATE) {
		reg &= ~PCIM_STATUS_INTxSTATE;
	}
	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));

	/* Map the device registers (BAR0). */
	rid = PCIR_BAR(0);
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(sc->sc_dev, "can't map mem space\n");
		return (ENXIO);
	}
	sc->sc_st = rman_get_bustag(sc->sc_mem);
	sc->sc_sh = rman_get_bushandle(sc->sc_mem);

	/* Install interrupt handler. */
	count = 1;
	rid = 0;
#if defined(__DragonFly__)
	pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
#else
	/* Prefer MSI (rid 1); fall back to a shareable legacy IRQ (rid 0). */
	if (pci_alloc_msi(dev, &count) == 0)
		rid = 1;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
#endif
	if (sc->sc_irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
		return (ENXIO);
	}
#if defined(__DragonFly__)
	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
	    iwm_intr, sc, &sc->sc_ih,
	    &wlan_global_serializer);
#else
	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwm_intr, sc, &sc->sc_ih);
#endif
	/*
	 * NOTE(review): only sc_ih is checked; the `error` return of
	 * bus_setup_intr() itself is never examined.
	 */
	if (sc->sc_ih == NULL) {
		device_printf(dev, "can't establish interrupt");
#if defined(__DragonFly__)
		pci_release_msi(dev);
#endif
		return (ENXIO);
	}
	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);

	return (0);
}
/*
 * Undo iwm_pci_attach(): tear down the interrupt handler, then
 * release the IRQ and memory BAR resources.  Tolerates partially
 * allocated state, so it is safe to call from a failed attach.
 */
static void
iwm_pci_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
#if defined(__DragonFly__)
		sc->sc_irq = NULL;
#endif
	}
	if (sc->sc_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);
#if defined(__DragonFly__)
		/* iwm_intr() checks sc_mem to detect a detached device. */
		sc->sc_mem = NULL;
#endif
	}
}
/*
 * Newbus attach: initialize locks and the taskqueue, attach PCI
 * resources, allocate all DMA memory (firmware buffer, keep-warm
 * page, ICT table, scheduler, TX/RX rings), set static net80211
 * capabilities, and defer the firmware-dependent setup to
 * iwm_preinit() via a config intrhook.  On any failure everything is
 * unwound through iwm_detach_local(sc, 0).
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
#if defined(__DragonFly__)
	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
#else
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
#endif
	callout_init(&sc->sc_led_blink_to);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
#if defined(__DragonFly__)
	error = taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON,
	    -1, "iwm_taskq");
#else
	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
#endif
	if (error != 0) {
		device_printf(dev, "can't start threads, error %d\n",
		    error);
		goto fail;
	}

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	/* -1 == no synchronous command response outstanding. */
	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;

	/*
	 * We now start fiddling with the hardware
	 */
	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

	/*
	 * Firmware loading needs the filesystem, which is not available
	 * this early in boot; run the rest of the setup from an intrhook
	 * once interrupts are enabled.
	 */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	sc->sc_preinit_hook.ich_desc = "iwm";
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
4916 static int
4917 iwm_update_edca(struct ieee80211com *ic)
4919 struct iwm_softc *sc = ic->ic_softc;
4921 device_printf(sc->sc_dev, "%s: called\n", __func__);
4922 return (0);
/*
 * Deferred part of attach, run from the config intrhook established
 * in iwm_attach(): start the hardware, run the "init" firmware once
 * to read calibration/NVM data, then attach net80211 and install the
 * driver's callbacks.  On failure, tears everything down via
 * iwm_detach_local(sc, 0).
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/* One-shot run of the init firmware; device is stopped afterwards. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "revision 0x%x, firmware %d.%d (API ver. %d)\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    IWM_UCODE_MAJOR(sc->sc_fwver),
	    IWM_UCODE_MINOR(sc->sc_fwver),
	    IWM_UCODE_API(sc->sc_fwver));

	/* not all hardware can do 5GHz band */
	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_update_edca;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
/*
 * Attach the interface to 802.11 radiotap.
 * Registers the driver's TX and RX radiotap headers with net80211 so
 * bpf listeners can capture per-frame radio information.
 */
static void
iwm_radiotap_attach(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s begin\n", __func__);
	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWM_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWM_RX_RADIOTAP_PRESENT);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s end\n", __func__);
}
/*
 * net80211 VAP creation.  Only a single VAP is supported; allocate
 * the iwm_vap wrapper, let net80211 set it up, then interpose the
 * driver's newstate handler (saving net80211's original so
 * iwm_newstate can chain to it).
 */
static struct ieee80211vap *
iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwm_vap *ivp;
	struct ieee80211vap *vap;

	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
		return NULL;
	ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
	vap = &ivp->iv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	vap->iv_bmissthreshold = 10;		/* override default */
	/* Override with driver methods. */
	ivp->iv_newstate = vap->iv_newstate;	/* save for chaining */
	vap->iv_newstate = iwm_newstate;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
	    mac);
	ic->ic_opmode = opmode;

	return vap;
}
5045 static void
5046 iwm_vap_delete(struct ieee80211vap *vap)
5048 struct iwm_vap *ivp = IWM_VAP(vap);
5050 ieee80211_ratectl_deinit(vap);
5051 ieee80211_vap_detach(vap);
5052 kfree(ivp, M_80211_VAP);
5055 static void
5056 iwm_scan_start(struct ieee80211com *ic)
5058 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5059 struct iwm_softc *sc = ic->ic_softc;
5060 int error;
5062 if (sc->sc_scanband)
5063 return;
5064 IWM_LOCK(sc);
5065 error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ, 0, NULL, 0);
5066 if (error) {
5067 device_printf(sc->sc_dev, "could not initiate 2 GHz scan\n");
5068 IWM_UNLOCK(sc);
5069 ieee80211_cancel_scan(vap);
5070 sc->sc_scanband = 0;
5071 } else {
5072 iwm_led_blink_start(sc);
5073 IWM_UNLOCK(sc);
5077 static void
5078 iwm_scan_end(struct ieee80211com *ic)
5080 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5081 struct iwm_softc *sc = ic->ic_softc;
5083 IWM_LOCK(sc);
5084 iwm_led_blink_stop(sc);
5085 if (vap->iv_state == IEEE80211_S_RUN)
5086 iwm_mvm_led_enable(sc);
5087 IWM_UNLOCK(sc);
/*
 * Intentionally empty net80211 callbacks.  NOTE(review): channel
 * changes and per-channel scan dwell appear to be handled entirely by
 * firmware scan commands elsewhere in the driver — confirm before
 * filling these in.
 */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}

static void
iwm_set_channel(struct ieee80211com *ic)
{
}

static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}

static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
	return;
}
/*
 * (Re)initialization task: serializes device stop/start using the
 * IWM_FLAG_BUSY flag so concurrent callers sleep until the previous
 * cycle finishes.  Restarts the device only if an interface is still
 * marked running.
 */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	/* Wait for any init/stop already in progress. */
	while (sc->sc_flags & IWM_FLAG_BUSY) {
#if defined(__DragonFly__)
		iwmsleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
#else
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
#endif
	}
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	/* Release the next waiter sleeping on sc_flags. */
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
/*
 * Power-management resume: restore the PCI retry-timeout register,
 * run a synchronous stop/(re)init cycle, and if iwm_suspend() stopped
 * the device (IWM_FLAG_DORESUME set), notify net80211 to bring the
 * VAPs back up.
 */
static int
iwm_resume(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	int do_reinit = 0;
	uint16_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_read_config(dev, 0x40, sizeof(reg));
	pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
	/* Runs synchronously; not queued on the taskqueue here. */
	iwm_init_task(device_get_softc(dev));

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_DORESUME) {
		sc->sc_flags &= ~IWM_FLAG_DORESUME;
		do_reinit = 1;
	}
	IWM_UNLOCK(sc);

	if (do_reinit)
		ieee80211_resume_all(&sc->sc_ic);

	return 0;
}
5158 static int
5159 iwm_suspend(device_t dev)
5161 int do_stop = 0;
5162 struct iwm_softc *sc = device_get_softc(dev);
5164 do_stop = !! (sc->sc_ic.ic_nrunning > 0);
5166 ieee80211_suspend_all(&sc->sc_ic);
5168 if (do_stop) {
5169 IWM_LOCK(sc);
5170 iwm_stop(sc);
5171 sc->sc_flags |= IWM_FLAG_DORESUME;
5172 IWM_UNLOCK(sc);
5175 return (0);
/*
 * Common teardown used by both iwm_detach() and failed attach paths.
 * do_net80211 is 0 when attach failed before ieee80211_ifattach()
 * ran, so there is no net80211 state to detach.  Frees the taskqueue,
 * rings, firmware image, and all DMA memory, then releases PCI
 * resources and destroys the lock.  Always returns 0.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	if (sc->sc_tq) {
#if defined(__DragonFly__)
		/* doesn't exist for DFly, DFly drains tasks on free */
#else
		taskqueue_drain_all(sc->sc_tq);
#endif
		taskqueue_free(sc->sc_tq);
#if defined(__DragonFly__)
		sc->sc_tq = NULL;
#endif
	}
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		ieee80211_ifdetach(&sc->sc_ic);
	}

	/* Free descriptor rings */
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_free_sched(sc);
	if (sc->ict_dma.vaddr != NULL)
		iwm_free_ict(sc);
	if (sc->kw_dma.vaddr != NULL)
		iwm_free_kw(sc);
	if (sc->fw_dma.vaddr != NULL)
		iwm_free_fwmem(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	mbufq_drain(&sc->sc_snd);
	IWM_LOCK_DESTROY(sc);

	return (0);
}
5229 static int
5230 iwm_detach(device_t dev)
5232 struct iwm_softc *sc = device_get_softc(dev);
5234 return (iwm_detach_local(sc, 1));
/* Newbus method table and module glue. */
static device_method_t iwm_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwm_probe),
	DEVMETHOD(device_attach,	iwm_attach),
	DEVMETHOD(device_detach,	iwm_detach),
	DEVMETHOD(device_suspend,	iwm_suspend),
	DEVMETHOD(device_resume,	iwm_resume),

	DEVMETHOD_END
};

static driver_t iwm_pci_driver = {
	"iwm",
	iwm_pci_methods,
	sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
/* The driver needs firmware(9) to load iwm*fw images at init time. */
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);