wlan - Sync dev/netif/wpi from FreeBSD
[dragonfly.git] / sys / dev / netif / wpi / if_wpi.c
blob 36079aa735e3b4beea466d6c3d0945183ee03252
1 /*-
2 * Copyright (c) 2006,2007
3 * Damien Bergamini <damien.bergamini@free.fr>
4 * Benjamin Close <Benjamin.Close@clearchain.com>
5 * Copyright (c) 2015 Andriy Voskoboinyk <avos@FreeBSD.org>
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 #include <sys/cdefs.h>
21 __FBSDID("$FreeBSD$");
24 * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters.
26 * The 3945ABG network adapter doesn't use traditional hardware as
27 * many other adapters do. Instead, at run time the eeprom is set into a known
28 * state and told to load boot firmware. The boot firmware loads an init and a
29 * main binary firmware image into SRAM on the card via DMA.
30 * Once the firmware is loaded, the driver/hw then
31 * communicate by way of circular dma rings via the SRAM to the firmware.
33 * There are six memory rings: 1 command ring, 1 rx data ring & 4 tx data rings.
34 * The 4 tx data rings allow for QoS prioritization.
36 * The rx data ring consists of 32 dma buffers. Two registers are used to
37 * indicate where in the ring the driver and the firmware are up to. The
38 * driver sets the initial read index (reg1) and the initial write index (reg2);
39 * the firmware updates the read index (reg1) on rx of a packet and fires an
40 * interrupt. The driver then processes the buffers starting at reg1, indicating
41 * to the firmware which buffers have been accessed by updating reg2 and at the
42 * same time allocating new memory for each processed buffer.
44 * A similar thing happens with the tx rings. The difference is the firmware
45 * stops processing buffers once the queue is full and until confirmation
46 * of a successful transmission (tx_done) has occurred.
48 * The command ring operates in the same manner as the tx queues.
50 * All communication directly to the card (i.e. eeprom) is classed as Stage1
51 * communication.
53 * All communication via the firmware to the card is classed as Stage2.
54 * The firmware consists of 2 parts. A bootstrap firmware and a runtime
55 * firmware. The bootstrap firmware and runtime firmware are loaded
56 * from host memory via dma to the card then told to execute. From this point
57 * on, the majority of communication between the driver and the card goes
58 * via the firmware.
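 *
 * A rough sketch of the rx handshake described above (the index variable
 * and the process()/refill() helpers are illustrative names only; the real
 * work is done in wpi_notif_intr() and sc_update_rx_ring below):
 *
 *	while (cur != hw) {			hw = last index filled by firmware
 *		process(ring->data[cur]);	hand the frame to net80211
 *		refill(ring, cur);		attach a fresh DMA buffer
 *		cur = (cur + 1) % WPI_RX_RING_COUNT;
 *	}
 *	WPI_WRITE(sc, WPI_FH_RX_WPTR, cur & ~7);  tell firmware what was consumed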
61 #include "opt_wlan.h"
62 #include "opt_wpi.h"
64 #include <sys/param.h>
65 #include <sys/sysctl.h>
66 #include <sys/sockio.h>
67 #include <sys/mbuf.h>
68 #include <sys/kernel.h>
69 #include <sys/socket.h>
70 #include <sys/systm.h>
71 #include <sys/malloc.h>
72 #include <sys/queue.h>
73 #include <sys/taskqueue.h>
74 #include <sys/module.h>
75 #include <sys/bus.h>
76 #include <sys/endian.h>
77 #include <sys/linker.h>
78 #include <sys/firmware.h>
80 #if defined(__DragonFly__)
81 /* empty */
82 #else
83 #include <machine/bus.h>
84 #include <machine/resource.h>
85 #endif
86 #include <sys/rman.h>
88 #include <bus/pci/pcireg.h>
89 #include <bus/pci/pcivar.h>
91 #include <net/bpf.h>
92 #include <net/if.h>
93 #include <net/if_var.h>
94 #include <net/if_arp.h>
95 #include <net/ethernet.h>
96 #include <net/if_dl.h>
97 #include <net/if_media.h>
98 #include <net/if_types.h>
100 #include <netinet/in.h>
101 #include <netinet/in_systm.h>
102 #include <netinet/in_var.h>
103 #include <netinet/if_ether.h>
104 #include <netinet/ip.h>
106 #include <netproto/802_11/ieee80211_var.h>
107 #include <netproto/802_11/ieee80211_radiotap.h>
108 #include <netproto/802_11/ieee80211_regdomain.h>
109 #include <netproto/802_11/ieee80211_ratectl.h>
111 #include <dev/netif/wpi/if_wpireg.h>
112 #include <dev/netif/wpi/if_wpivar.h>
113 #include <dev/netif/wpi/if_wpi_debug.h>
115 struct wpi_ident {
116 uint16_t vendor;
117 uint16_t device;
118 uint16_t subdevice;
119 const char *name;
122 static const struct wpi_ident wpi_ident_table[] = {
123 /* The below entries support ABG regardless of the subid */
124 { 0x8086, 0x4222, 0x0, "Intel(R) PRO/Wireless 3945ABG" },
125 { 0x8086, 0x4227, 0x0, "Intel(R) PRO/Wireless 3945ABG" },
126 /* The below entries only support BG */
127 { 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG" },
128 { 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG" },
129 { 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG" },
130 { 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG" },
131 { 0, 0, 0, NULL }
134 static int wpi_probe(device_t);
135 static int wpi_attach(device_t);
136 static void wpi_radiotap_attach(struct wpi_softc *);
137 static void wpi_sysctlattach(struct wpi_softc *);
138 static void wpi_init_beacon(struct wpi_vap *);
139 static struct ieee80211vap *wpi_vap_create(struct ieee80211com *,
140 const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
141 const uint8_t [IEEE80211_ADDR_LEN],
142 const uint8_t [IEEE80211_ADDR_LEN]);
143 static void wpi_vap_delete(struct ieee80211vap *);
144 static int wpi_detach(device_t);
145 static int wpi_shutdown(device_t);
146 static int wpi_suspend(device_t);
147 static int wpi_resume(device_t);
148 static int wpi_nic_lock(struct wpi_softc *);
149 static int wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int);
150 static void wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int);
151 static int wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *,
152 void **, bus_size_t, bus_size_t);
153 static void wpi_dma_contig_free(struct wpi_dma_info *);
154 static int wpi_alloc_shared(struct wpi_softc *);
155 static void wpi_free_shared(struct wpi_softc *);
156 static int wpi_alloc_fwmem(struct wpi_softc *);
157 static void wpi_free_fwmem(struct wpi_softc *);
158 static int wpi_alloc_rx_ring(struct wpi_softc *);
159 static void wpi_update_rx_ring(struct wpi_softc *);
160 static void wpi_update_rx_ring_ps(struct wpi_softc *);
161 static void wpi_reset_rx_ring(struct wpi_softc *);
162 static void wpi_free_rx_ring(struct wpi_softc *);
163 static int wpi_alloc_tx_ring(struct wpi_softc *, struct wpi_tx_ring *,
164 uint8_t);
165 static void wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
166 static void wpi_update_tx_ring_ps(struct wpi_softc *,
167 struct wpi_tx_ring *);
168 static void wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
169 static void wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
170 static int wpi_read_eeprom(struct wpi_softc *,
171 uint8_t macaddr[IEEE80211_ADDR_LEN]);
172 static uint32_t wpi_eeprom_channel_flags(struct wpi_eeprom_chan *);
173 static void wpi_read_eeprom_band(struct wpi_softc *, uint8_t, int, int *,
174 struct ieee80211_channel[]);
175 static int wpi_read_eeprom_channels(struct wpi_softc *, uint8_t);
176 static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *,
177 struct ieee80211_channel *);
178 static void wpi_getradiocaps(struct ieee80211com *, int, int *,
179 struct ieee80211_channel[]);
180 static int wpi_setregdomain(struct ieee80211com *,
181 struct ieee80211_regdomain *, int,
182 struct ieee80211_channel[]);
183 static int wpi_read_eeprom_group(struct wpi_softc *, uint8_t);
184 static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *,
185 const uint8_t mac[IEEE80211_ADDR_LEN]);
186 static void wpi_node_free(struct ieee80211_node *);
187 static void wpi_ibss_recv_mgmt(struct ieee80211_node *, struct mbuf *, int,
188 const struct ieee80211_rx_stats *,
189 int, int);
190 static void wpi_restore_node(void *, struct ieee80211_node *);
191 static void wpi_restore_node_table(struct wpi_softc *, struct wpi_vap *);
192 static int wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int);
193 static void wpi_calib_timeout(void *);
194 static void wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *,
195 struct wpi_rx_data *);
196 static void wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *,
197 struct wpi_rx_data *);
198 static void wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *);
199 static void wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *);
200 static void wpi_notif_intr(struct wpi_softc *);
201 static void wpi_wakeup_intr(struct wpi_softc *);
202 #ifdef WPI_DEBUG
203 static void wpi_debug_registers(struct wpi_softc *);
204 #endif
205 static void wpi_fatal_intr(struct wpi_softc *);
206 static void wpi_intr(void *);
207 static void wpi_free_txfrags(struct wpi_softc *, uint16_t);
208 static int wpi_cmd2(struct wpi_softc *, struct wpi_buf *);
209 static int wpi_tx_data(struct wpi_softc *, struct mbuf *,
210 struct ieee80211_node *);
211 static int wpi_tx_data_raw(struct wpi_softc *, struct mbuf *,
212 struct ieee80211_node *,
213 const struct ieee80211_bpf_params *);
214 static int wpi_raw_xmit(struct ieee80211_node *, struct mbuf *,
215 const struct ieee80211_bpf_params *);
216 static int wpi_transmit(struct ieee80211com *, struct mbuf *);
217 static void wpi_watchdog_rfkill(void *);
218 static void wpi_scan_timeout(void *);
219 static void wpi_tx_timeout(void *);
220 static void wpi_parent(struct ieee80211com *);
221 static int wpi_cmd(struct wpi_softc *, uint8_t, const void *, uint16_t,
222 int);
223 static int wpi_mrr_setup(struct wpi_softc *);
224 static int wpi_add_node(struct wpi_softc *, struct ieee80211_node *);
225 static int wpi_add_broadcast_node(struct wpi_softc *, int);
226 static int wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *);
227 static void wpi_del_node(struct wpi_softc *, struct ieee80211_node *);
228 static int wpi_updateedca(struct ieee80211com *);
229 static void wpi_set_promisc(struct wpi_softc *);
230 static void wpi_update_promisc(struct ieee80211com *);
231 static void wpi_update_mcast(struct ieee80211com *);
232 static void wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, uint8_t);
233 static int wpi_set_timing(struct wpi_softc *, struct ieee80211_node *);
234 static void wpi_power_calibration(struct wpi_softc *);
235 static int wpi_set_txpower(struct wpi_softc *, int);
236 static int wpi_get_power_index(struct wpi_softc *,
237 struct wpi_power_group *, uint8_t, int, int);
238 static int wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int);
239 static int wpi_send_btcoex(struct wpi_softc *);
240 static int wpi_send_rxon(struct wpi_softc *, int, int);
241 static int wpi_config(struct wpi_softc *);
242 static uint16_t wpi_get_active_dwell_time(struct wpi_softc *,
243 struct ieee80211_channel *, uint8_t);
244 static uint16_t wpi_limit_dwell(struct wpi_softc *, uint16_t);
245 static uint16_t wpi_get_passive_dwell_time(struct wpi_softc *,
246 struct ieee80211_channel *);
247 static uint32_t wpi_get_scan_pause_time(uint32_t, uint16_t);
248 static int wpi_scan(struct wpi_softc *, struct ieee80211_channel *);
249 static int wpi_auth(struct wpi_softc *, struct ieee80211vap *);
250 static int wpi_config_beacon(struct wpi_vap *);
251 static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *);
252 static void wpi_update_beacon(struct ieee80211vap *, int);
253 static void wpi_newassoc(struct ieee80211_node *, int);
254 static int wpi_run(struct wpi_softc *, struct ieee80211vap *);
255 static int wpi_load_key(struct ieee80211_node *,
256 const struct ieee80211_key *);
257 static void wpi_load_key_cb(void *, struct ieee80211_node *);
258 static int wpi_set_global_keys(struct ieee80211_node *);
259 static int wpi_del_key(struct ieee80211_node *,
260 const struct ieee80211_key *);
261 static void wpi_del_key_cb(void *, struct ieee80211_node *);
262 static int wpi_process_key(struct ieee80211vap *,
263 const struct ieee80211_key *, int);
264 static int wpi_key_set(struct ieee80211vap *,
265 const struct ieee80211_key *);
266 static int wpi_key_delete(struct ieee80211vap *,
267 const struct ieee80211_key *);
268 static int wpi_post_alive(struct wpi_softc *);
269 static int wpi_load_bootcode(struct wpi_softc *, const uint8_t *,
270 uint32_t);
271 static int wpi_load_firmware(struct wpi_softc *);
272 static int wpi_read_firmware(struct wpi_softc *);
273 static void wpi_unload_firmware(struct wpi_softc *);
274 static int wpi_clock_wait(struct wpi_softc *);
275 static int wpi_apm_init(struct wpi_softc *);
276 static void wpi_apm_stop_master(struct wpi_softc *);
277 static void wpi_apm_stop(struct wpi_softc *);
278 static void wpi_nic_config(struct wpi_softc *);
279 static int wpi_hw_init(struct wpi_softc *);
280 static void wpi_hw_stop(struct wpi_softc *);
281 static void wpi_radio_on(void *, int);
282 static void wpi_radio_off(void *, int);
283 static int wpi_init(struct wpi_softc *);
284 static void wpi_stop_locked(struct wpi_softc *);
285 static void wpi_stop(struct wpi_softc *);
286 static void wpi_scan_start(struct ieee80211com *);
287 static void wpi_scan_end(struct ieee80211com *);
288 static void wpi_set_channel(struct ieee80211com *);
289 static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long);
290 static void wpi_scan_mindwell(struct ieee80211_scan_state *);
292 static device_method_t wpi_methods[] = {
293 /* Device interface */
294 DEVMETHOD(device_probe, wpi_probe),
295 DEVMETHOD(device_attach, wpi_attach),
296 DEVMETHOD(device_detach, wpi_detach),
297 DEVMETHOD(device_shutdown, wpi_shutdown),
298 DEVMETHOD(device_suspend, wpi_suspend),
299 DEVMETHOD(device_resume, wpi_resume),
301 DEVMETHOD_END
304 static driver_t wpi_driver = {
305 "wpi",
306 wpi_methods,
307 sizeof (struct wpi_softc)
309 static devclass_t wpi_devclass;
311 DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL);
313 MODULE_VERSION(wpi, 1);
315 MODULE_DEPEND(wpi, pci, 1, 1, 1);
316 MODULE_DEPEND(wpi, wlan, 1, 1, 1);
317 MODULE_DEPEND(wpi, firmware, 1, 1, 1);
319 static int
320 wpi_probe(device_t dev)
322 const struct wpi_ident *ident;
324 for (ident = wpi_ident_table; ident->name != NULL; ident++) {
325 if (pci_get_vendor(dev) == ident->vendor &&
326 pci_get_device(dev) == ident->device) {
327 device_set_desc(dev, ident->name);
328 return (BUS_PROBE_DEFAULT);
331 return ENXIO;
334 static int
335 wpi_attach(device_t dev)
337 struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev);
338 struct ieee80211com *ic;
339 uint8_t i;
340 int error, rid;
341 #ifdef WPI_DEBUG
342 int supportsa = 1;
343 const struct wpi_ident *ident;
344 #endif
345 #if defined(__DragonFly__)
346 int irq_flags;
347 #endif
349 sc->sc_dev = dev;
351 #ifdef WPI_DEBUG
352 error = resource_int_value(device_get_name(sc->sc_dev),
353 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug));
354 if (error != 0)
355 sc->sc_debug = 0;
356 #else
357 sc->sc_debug = 0;
358 #endif
360 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
363 * Get the offset of the PCI Express Capability Structure in PCI
364 * Configuration Space.
366 #if defined(__DragonFly__)
367 error = pci_find_extcap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
368 #else
369 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
370 #endif
371 if (error != 0) {
372 device_printf(dev, "PCIe capability structure not found!\n");
373 return error;
377 * Some cards only support 802.11b/g, not 802.11a; check to see if
378 * this is one such card. A 0x0 in the subdevice table indicates
379 * the entire subdevice range is to be ignored.
381 #ifdef WPI_DEBUG
382 for (ident = wpi_ident_table; ident->name != NULL; ident++) {
383 if (ident->subdevice &&
384 pci_get_subdevice(dev) == ident->subdevice) {
385 supportsa = 0;
386 break;
389 #endif
391 /* Clear device-specific "PCI retry timeout" register (41h). */
392 pci_write_config(dev, 0x41, 0, 1);
394 /* Enable bus-mastering. */
395 pci_enable_busmaster(dev);
397 rid = PCIR_BAR(0);
398 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
399 RF_ACTIVE);
400 if (sc->mem == NULL) {
401 device_printf(dev, "can't map mem space\n");
402 return ENOMEM;
404 sc->sc_st = rman_get_bustag(sc->mem);
405 sc->sc_sh = rman_get_bushandle(sc->mem);
407 #if defined(__DragonFly__)
408 pci_alloc_1intr(dev, 1, &rid, &irq_flags);
409 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
410 #else
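/*
 * Try to allocate a single MSI vector (resource id 1); fall back to a
 * shared legacy INTx interrupt (resource id 0) if MSI is unavailable.
 */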
411 rid = 1;
412 if (pci_alloc_msi(dev, &rid) == 0)
413 rid = 1;
414 else
415 rid = 0;
416 /* Install interrupt handler. */
417 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
418 (rid != 0 ? 0 : RF_SHAREABLE));
419 #endif
420 if (sc->irq == NULL) {
421 device_printf(dev, "can't map interrupt\n");
422 error = ENOMEM;
423 goto fail;
426 WPI_LOCK_INIT(sc);
427 WPI_TX_LOCK_INIT(sc);
428 WPI_RXON_LOCK_INIT(sc);
429 WPI_NT_LOCK_INIT(sc);
430 WPI_TXQ_LOCK_INIT(sc);
431 WPI_TXQ_STATE_LOCK_INIT(sc);
433 /* Allocate DMA memory for firmware transfers. */
434 if ((error = wpi_alloc_fwmem(sc)) != 0) {
435 device_printf(dev,
436 "could not allocate memory for firmware, error %d\n",
437 error);
438 goto fail;
441 /* Allocate shared page. */
442 if ((error = wpi_alloc_shared(sc)) != 0) {
443 device_printf(dev, "could not allocate shared page\n");
444 goto fail;
447 /* Allocate TX rings - 4 for QoS purposes, 1 for commands. */
448 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) {
449 if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
450 device_printf(dev,
451 "could not allocate TX ring %d, error %d\n", i,
452 error);
453 goto fail;
457 /* Allocate RX ring. */
458 if ((error = wpi_alloc_rx_ring(sc)) != 0) {
459 device_printf(dev, "could not allocate RX ring, error %d\n",
460 error);
461 goto fail;
464 /* Clear pending interrupts. */
465 WPI_WRITE(sc, WPI_INT, 0xffffffff);
467 ic = &sc->sc_ic;
468 ic->ic_softc = sc;
469 ic->ic_name = device_get_nameunit(dev);
470 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
471 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
473 /* Set device capabilities. */
474 ic->ic_caps =
475 IEEE80211_C_STA /* station mode supported */
476 | IEEE80211_C_IBSS /* IBSS mode supported */
477 | IEEE80211_C_HOSTAP /* Host access point mode */
478 | IEEE80211_C_MONITOR /* monitor mode supported */
479 | IEEE80211_C_AHDEMO /* adhoc demo mode */
480 | IEEE80211_C_BGSCAN /* capable of bg scanning */
481 | IEEE80211_C_TXFRAG /* handle tx frags */
482 | IEEE80211_C_TXPMGT /* tx power management */
483 | IEEE80211_C_SHSLOT /* short slot time supported */
484 | IEEE80211_C_WPA /* 802.11i */
485 | IEEE80211_C_SHPREAMBLE /* short preamble supported */
486 | IEEE80211_C_WME /* 802.11e */
487 | IEEE80211_C_PMGT /* Station-side power mgmt */
490 ic->ic_cryptocaps =
491 IEEE80211_CRYPTO_AES_CCM;
494 * Read in the eeprom and also set up the channels for
495 * net80211. We don't set the rates as net80211 does this for us.
497 if ((error = wpi_read_eeprom(sc, ic->ic_macaddr)) != 0) {
498 device_printf(dev, "could not read EEPROM, error %d\n",
499 error);
500 goto fail;
503 #ifdef WPI_DEBUG
504 if (bootverbose) {
505 device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n",
506 sc->domain);
507 device_printf(sc->sc_dev, "Hardware Type: %c\n",
508 sc->type > 1 ? 'B': '?');
509 device_printf(sc->sc_dev, "Hardware Revision: %c\n",
510 ((sc->rev & 0xf0) == 0xd0) ? 'D': '?');
511 device_printf(sc->sc_dev, "SKU %s support 802.11a\n",
512 supportsa ? "does" : "does not");
514 /* XXX hw_config uses the PCIDEV for the Hardware rev. Must
515 check what sc->rev really represents - benjsc 20070615 */
517 #endif
519 ieee80211_ifattach(ic);
520 ic->ic_vap_create = wpi_vap_create;
521 ic->ic_vap_delete = wpi_vap_delete;
522 ic->ic_parent = wpi_parent;
523 ic->ic_raw_xmit = wpi_raw_xmit;
524 ic->ic_transmit = wpi_transmit;
525 ic->ic_node_alloc = wpi_node_alloc;
526 sc->sc_node_free = ic->ic_node_free;
527 ic->ic_node_free = wpi_node_free;
528 ic->ic_wme.wme_update = wpi_updateedca;
529 ic->ic_update_promisc = wpi_update_promisc;
530 ic->ic_update_mcast = wpi_update_mcast;
531 ic->ic_newassoc = wpi_newassoc;
532 ic->ic_scan_start = wpi_scan_start;
533 ic->ic_scan_end = wpi_scan_end;
534 ic->ic_set_channel = wpi_set_channel;
535 ic->ic_scan_curchan = wpi_scan_curchan;
536 ic->ic_scan_mindwell = wpi_scan_mindwell;
537 ic->ic_getradiocaps = wpi_getradiocaps;
538 ic->ic_setregdomain = wpi_setregdomain;
540 sc->sc_update_rx_ring = wpi_update_rx_ring;
541 sc->sc_update_tx_ring = wpi_update_tx_ring;
543 wpi_radiotap_attach(sc);
545 callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0);
546 callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0);
547 callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0);
548 callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0);
549 TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc);
550 TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc);
552 wpi_sysctlattach(sc);
555 * Hook our interrupt after all initialization is complete.
557 #if defined(__DragonFly__)
558 error = bus_setup_intr(dev, sc->irq, INTR_MPSAFE,
559 wpi_intr, sc, &sc->sc_ih, &wlan_global_serializer);
560 #else
561 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
562 NULL, wpi_intr, sc, &sc->sc_ih);
563 #endif
564 if (error != 0) {
565 device_printf(dev, "can't establish interrupt, error %d\n",
566 error);
567 goto fail;
570 if (bootverbose)
571 ieee80211_announce(ic);
573 #ifdef WPI_DEBUG
574 if (sc->sc_debug & WPI_DEBUG_HW)
575 ieee80211_announce_channels(ic);
576 #endif
578 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
579 return 0;
581 fail: wpi_detach(dev);
582 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
583 return error;
587 * Attach the interface to 802.11 radiotap.
589 static void
590 wpi_radiotap_attach(struct wpi_softc *sc)
592 struct wpi_rx_radiotap_header *rxtap = &sc->sc_rxtap;
593 struct wpi_tx_radiotap_header *txtap = &sc->sc_txtap;
595 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
596 ieee80211_radiotap_attach(&sc->sc_ic,
597 &txtap->wt_ihdr, sizeof(*txtap), WPI_TX_RADIOTAP_PRESENT,
598 &rxtap->wr_ihdr, sizeof(*rxtap), WPI_RX_RADIOTAP_PRESENT);
599 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
602 static void
603 wpi_sysctlattach(struct wpi_softc *sc)
605 #ifdef WPI_DEBUG
606 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
607 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
609 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
610 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug,
611 "control debugging printfs");
612 #endif
615 static void
616 wpi_init_beacon(struct wpi_vap *wvp)
618 struct wpi_buf *bcn = &wvp->wv_bcbuf;
619 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data;
621 cmd->id = WPI_ID_BROADCAST;
622 cmd->ofdm_mask = 0xff;
623 cmd->cck_mask = 0x0f;
624 cmd->lifetime = htole32(WPI_LIFETIME_INFINITE);
627 * XXX WPI_TX_AUTO_SEQ seems to be ignored - work around this issue
628 * XXX by using WPI_TX_NEED_ACK instead (with some side effects).
630 cmd->flags = htole32(WPI_TX_NEED_ACK | WPI_TX_INSERT_TSTAMP);
632 bcn->code = WPI_CMD_SET_BEACON;
633 bcn->ac = WPI_CMD_QUEUE_NUM;
634 bcn->size = sizeof(struct wpi_cmd_beacon);
637 static struct ieee80211vap *
638 wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
639 enum ieee80211_opmode opmode, int flags,
640 const uint8_t bssid[IEEE80211_ADDR_LEN],
641 const uint8_t mac[IEEE80211_ADDR_LEN])
643 struct wpi_vap *wvp;
644 struct ieee80211vap *vap;
646 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
647 return NULL;
649 wvp = kmalloc(sizeof(struct wpi_vap), M_80211_VAP, M_WAITOK | M_ZERO);
650 vap = &wvp->wv_vap;
651 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
653 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) {
654 WPI_VAP_LOCK_INIT(wvp);
655 wpi_init_beacon(wvp);
658 /* Override with driver methods. */
659 vap->iv_key_set = wpi_key_set;
660 vap->iv_key_delete = wpi_key_delete;
661 if (opmode == IEEE80211_M_IBSS) {
662 wvp->wv_recv_mgmt = vap->iv_recv_mgmt;
663 vap->iv_recv_mgmt = wpi_ibss_recv_mgmt;
665 wvp->wv_newstate = vap->iv_newstate;
666 vap->iv_newstate = wpi_newstate;
667 vap->iv_update_beacon = wpi_update_beacon;
668 vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1;
670 ieee80211_ratectl_init(vap);
671 /* Complete setup. */
672 ieee80211_vap_attach(vap, ieee80211_media_change,
673 ieee80211_media_status, mac);
674 ic->ic_opmode = opmode;
675 return vap;
678 static void
679 wpi_vap_delete(struct ieee80211vap *vap)
681 struct wpi_vap *wvp = WPI_VAP(vap);
682 struct wpi_buf *bcn = &wvp->wv_bcbuf;
683 enum ieee80211_opmode opmode = vap->iv_opmode;
685 ieee80211_ratectl_deinit(vap);
686 ieee80211_vap_detach(vap);
688 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) {
689 if (bcn->m != NULL)
690 m_freem(bcn->m);
692 WPI_VAP_LOCK_DESTROY(wvp);
695 kfree(wvp, M_80211_VAP);
698 static int
699 wpi_detach(device_t dev)
701 struct wpi_softc *sc = device_get_softc(dev);
702 struct ieee80211com *ic = &sc->sc_ic;
703 uint8_t qid;
705 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
707 if (ic->ic_vap_create == wpi_vap_create) {
708 ieee80211_draintask(ic, &sc->sc_radioon_task);
709 ieee80211_draintask(ic, &sc->sc_radiooff_task);
711 wpi_stop(sc);
713 callout_drain(&sc->watchdog_rfkill);
714 callout_drain(&sc->tx_timeout);
715 callout_drain(&sc->scan_timeout);
716 callout_drain(&sc->calib_to);
717 ieee80211_ifdetach(ic);
720 /* Uninstall interrupt handler. */
721 if (sc->irq != NULL) {
722 bus_teardown_intr(dev, sc->irq, sc->sc_ih);
723 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq),
724 sc->irq);
725 pci_release_msi(dev);
728 if (sc->txq[0].data_dmat) {
729 /* Free DMA resources. */
730 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++)
731 wpi_free_tx_ring(sc, &sc->txq[qid]);
733 wpi_free_rx_ring(sc);
734 wpi_free_shared(sc);
737 if (sc->fw_dma.tag)
738 wpi_free_fwmem(sc);
740 if (sc->mem != NULL)
741 bus_release_resource(dev, SYS_RES_MEMORY,
742 rman_get_rid(sc->mem), sc->mem);
744 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
745 WPI_TXQ_STATE_LOCK_DESTROY(sc);
746 WPI_TXQ_LOCK_DESTROY(sc);
747 WPI_NT_LOCK_DESTROY(sc);
748 WPI_RXON_LOCK_DESTROY(sc);
749 WPI_TX_LOCK_DESTROY(sc);
750 WPI_LOCK_DESTROY(sc);
751 return 0;
754 static int
755 wpi_shutdown(device_t dev)
757 struct wpi_softc *sc = device_get_softc(dev);
759 wpi_stop(sc);
760 return 0;
763 static int
764 wpi_suspend(device_t dev)
766 struct wpi_softc *sc = device_get_softc(dev);
767 struct ieee80211com *ic = &sc->sc_ic;
769 ieee80211_suspend_all(ic);
770 return 0;
773 static int
774 wpi_resume(device_t dev)
776 struct wpi_softc *sc = device_get_softc(dev);
777 struct ieee80211com *ic = &sc->sc_ic;
779 /* Clear device-specific "PCI retry timeout" register (41h). */
780 pci_write_config(dev, 0x41, 0, 1);
782 ieee80211_resume_all(ic);
783 return 0;
787 * Grab exclusive access to NIC memory.
789 static int
790 wpi_nic_lock(struct wpi_softc *sc)
792 int ntries;
794 /* Request exclusive access to NIC. */
795 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
797 /* Spin until we actually get the lock. */
798 for (ntries = 0; ntries < 1000; ntries++) {
799 if ((WPI_READ(sc, WPI_GP_CNTRL) &
800 (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) ==
801 WPI_GP_CNTRL_MAC_ACCESS_ENA)
802 return 0;
803 DELAY(10);
806 device_printf(sc->sc_dev, "could not lock memory\n");
808 return ETIMEDOUT;
812 * Release lock on NIC memory.
814 static __inline void
815 wpi_nic_unlock(struct wpi_softc *sc)
817 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
820 static __inline uint32_t
821 wpi_prph_read(struct wpi_softc *sc, uint32_t addr)
823 WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr);
824 WPI_BARRIER_READ_WRITE(sc);
825 return WPI_READ(sc, WPI_PRPH_RDATA);
828 static __inline void
829 wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data)
831 WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr);
832 WPI_BARRIER_WRITE(sc);
833 WPI_WRITE(sc, WPI_PRPH_WDATA, data);
836 static __inline void
837 wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask)
839 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask);
842 static __inline void
843 wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask)
845 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask);
848 static __inline void
849 wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr,
850 const uint32_t *data, uint32_t count)
852 for (; count != 0; count--, data++, addr += 4)
853 wpi_prph_write(sc, addr, *data);
856 static __inline uint32_t
857 wpi_mem_read(struct wpi_softc *sc, uint32_t addr)
859 WPI_WRITE(sc, WPI_MEM_RADDR, addr);
860 WPI_BARRIER_READ_WRITE(sc);
861 return WPI_READ(sc, WPI_MEM_RDATA);
864 static __inline void
865 wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data,
866 int count)
868 for (; count > 0; count--, addr += 4)
869 *data++ = wpi_mem_read(sc, addr);
872 static int
873 wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count)
875 uint8_t *out = data;
876 uint32_t val;
877 int error, ntries;
879 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
881 if ((error = wpi_nic_lock(sc)) != 0)
882 return error;
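/*
 * Each EEPROM read returns a 16-bit word in the upper half of the 32-bit
 * register, so two output bytes are extracted per loop iteration.
 */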
884 for (; count > 0; count -= 2, addr++) {
885 WPI_WRITE(sc, WPI_EEPROM, addr << 2);
886 for (ntries = 0; ntries < 10; ntries++) {
887 val = WPI_READ(sc, WPI_EEPROM);
888 if (val & WPI_EEPROM_READ_VALID)
889 break;
890 DELAY(5);
892 if (ntries == 10) {
893 device_printf(sc->sc_dev,
894 "timeout reading ROM at 0x%x\n", addr);
895 return ETIMEDOUT;
897 *out++ = val >> 16;
898 if (count > 1)
899 *out++ = val >> 24;
902 wpi_nic_unlock(sc);
904 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
906 return 0;
909 static void
910 wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
912 if (error != 0)
913 return;
914 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
915 *(bus_addr_t *)arg = segs[0].ds_addr;
919 * Allocates a contiguous block of dma memory of the requested size and
920 * alignment.
922 static int
923 wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma,
924 void **kvap, bus_size_t size, bus_size_t alignment)
926 int error;
928 dma->tag = NULL;
929 dma->size = size;
931 #if defined(__DragonFly__)
932 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
933 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
934 1, size, 0, &dma->tag);
935 #else
936 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
937 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
938 1, size, 0, NULL, NULL, &dma->tag);
939 #endif
940 if (error != 0)
941 goto fail;
943 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
944 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
945 if (error != 0)
946 goto fail;
948 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
949 wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
950 if (error != 0)
951 goto fail;
953 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
955 if (kvap != NULL)
956 *kvap = dma->vaddr;
958 return 0;
960 fail: wpi_dma_contig_free(dma);
961 return error;
964 static void
965 wpi_dma_contig_free(struct wpi_dma_info *dma)
967 if (dma->vaddr != NULL) {
968 bus_dmamap_sync(dma->tag, dma->map,
969 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
970 bus_dmamap_unload(dma->tag, dma->map);
971 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
972 dma->vaddr = NULL;
974 if (dma->tag != NULL) {
975 bus_dma_tag_destroy(dma->tag);
976 dma->tag = NULL;
981 * Allocate a shared page between host and NIC.
983 static int
984 wpi_alloc_shared(struct wpi_softc *sc)
986 /* Shared buffer must be aligned on a 4KB boundary. */
987 return wpi_dma_contig_alloc(sc, &sc->shared_dma,
988 (void **)&sc->shared, sizeof (struct wpi_shared), 4096);
991 static void
992 wpi_free_shared(struct wpi_softc *sc)
994 wpi_dma_contig_free(&sc->shared_dma);
998 * Allocate DMA-safe memory for firmware transfer.
1000 static int
1001 wpi_alloc_fwmem(struct wpi_softc *sc)
1003 /* Must be aligned on a 16-byte boundary. */
1004 return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL,
1005 WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16);
1008 static void
1009 wpi_free_fwmem(struct wpi_softc *sc)
1011 wpi_dma_contig_free(&sc->fw_dma);
1014 static int
1015 wpi_alloc_rx_ring(struct wpi_softc *sc)
1017 struct wpi_rx_ring *ring = &sc->rxq;
1018 bus_size_t size;
1019 int i, error;
1021 ring->cur = 0;
1022 ring->update = 0;
1024 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
1026 /* Allocate RX descriptors (16KB aligned). */
1027 size = WPI_RX_RING_COUNT * sizeof (uint32_t);
1028 error = wpi_dma_contig_alloc(sc, &ring->desc_dma,
1029 (void **)&ring->desc, size, WPI_RING_DMA_ALIGN);
1030 if (error != 0) {
1031 device_printf(sc->sc_dev,
1032 "%s: could not allocate RX ring DMA memory, error %d\n",
1033 __func__, error);
1034 goto fail;
1037 /* Create RX buffer DMA tag. */
1038 #if defined(__DragonFly__)
1039 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1040 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1041 MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, &ring->data_dmat);
1042 #else
1043 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1044 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1045 MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL, &ring->data_dmat);
1046 #endif
1047 if (error != 0) {
1048 device_printf(sc->sc_dev,
1049 "%s: could not create RX buf DMA tag, error %d\n",
1050 __func__, error);
1051 goto fail;
1055 * Allocate and map RX buffers.
1057 for (i = 0; i < WPI_RX_RING_COUNT; i++) {
1058 struct wpi_rx_data *data = &ring->data[i];
1059 bus_addr_t paddr;
1061 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1062 if (error != 0) {
1063 device_printf(sc->sc_dev,
1064 "%s: could not create RX buf DMA map, error %d\n",
1065 __func__, error);
1066 goto fail;
1069 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
1070 if (data->m == NULL) {
1071 device_printf(sc->sc_dev,
1072 "%s: could not allocate RX mbuf\n", __func__);
1073 error = ENOBUFS;
1074 goto fail;
1077 error = bus_dmamap_load(ring->data_dmat, data->map,
1078 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr,
1079 &paddr, BUS_DMA_NOWAIT);
1080 if (error != 0 && error != EFBIG) {
1081 device_printf(sc->sc_dev,
1082 "%s: can't map mbuf (error %d)\n", __func__,
1083 error);
1084 goto fail;
1087 /* Set physical address of RX buffer. */
1088 ring->desc[i] = htole32(paddr);
1091 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1092 BUS_DMASYNC_PREWRITE);
1094 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
1096 return 0;
1098 fail: wpi_free_rx_ring(sc);
1100 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
1102 return error;
1105 static void
1106 wpi_update_rx_ring(struct wpi_softc *sc)
1108 WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7);
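/*
 * Power-save aware variant: while the NIC is asleep the write pointer
 * cannot be updated directly, so request a wakeup and defer the update
 * until the INT_WAKEUP interrupt arrives.
 */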
1111 static void
1112 wpi_update_rx_ring_ps(struct wpi_softc *sc)
1114 struct wpi_rx_ring *ring = &sc->rxq;
1116 if (ring->update != 0) {
1117 /* Wait for INT_WAKEUP event. */
1118 return;
1121 WPI_TXQ_LOCK(sc);
1122 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
1123 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) {
1124 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n",
1125 __func__);
1126 ring->update = 1;
1127 } else {
1128 wpi_update_rx_ring(sc);
1129 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
1131 WPI_TXQ_UNLOCK(sc);
1134 static void
1135 wpi_reset_rx_ring(struct wpi_softc *sc)
1137 struct wpi_rx_ring *ring = &sc->rxq;
1138 int ntries;
1140 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
1142 if (wpi_nic_lock(sc) == 0) {
1143 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0);
1144 for (ntries = 0; ntries < 1000; ntries++) {
1145 if (WPI_READ(sc, WPI_FH_RX_STATUS) &
1146 WPI_FH_RX_STATUS_IDLE)
1147 break;
1148 DELAY(10);
1150 wpi_nic_unlock(sc);
1153 ring->cur = 0;
1154 ring->update = 0;
1157 static void
1158 wpi_free_rx_ring(struct wpi_softc *sc)
1160 struct wpi_rx_ring *ring = &sc->rxq;
1161 int i;
1163 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
1165 wpi_dma_contig_free(&ring->desc_dma);
1167 for (i = 0; i < WPI_RX_RING_COUNT; i++) {
1168 struct wpi_rx_data *data = &ring->data[i];
1170 if (data->m != NULL) {
1171 bus_dmamap_sync(ring->data_dmat, data->map,
1172 BUS_DMASYNC_POSTREAD);
1173 bus_dmamap_unload(ring->data_dmat, data->map);
1174 m_freem(data->m);
1175 data->m = NULL;
1177 if (data->map != NULL)
1178 bus_dmamap_destroy(ring->data_dmat, data->map);
1180 if (ring->data_dmat != NULL) {
1181 bus_dma_tag_destroy(ring->data_dmat);
1182 ring->data_dmat = NULL;
1186 static int
1187 wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, uint8_t qid)
1189 bus_addr_t paddr;
1190 bus_size_t size;
1191 int i, error;
1193 ring->qid = qid;
1194 ring->queued = 0;
1195 ring->cur = 0;
1196 ring->pending = 0;
1197 ring->update = 0;
1199 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
1201 /* Allocate TX descriptors (16KB aligned). */
1202 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc);
1203 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1204 size, WPI_RING_DMA_ALIGN);
1205 if (error != 0) {
1206 device_printf(sc->sc_dev,
1207 "%s: could not allocate TX ring DMA memory, error %d\n",
1208 __func__, error);
1209 goto fail;
1212 /* Update shared area with ring physical address. */
1213 sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr);
1214 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map,
1215 BUS_DMASYNC_PREWRITE);
1217 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd);
1218 error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
1219 size, 4);
1220 if (error != 0) {
1221 device_printf(sc->sc_dev,
1222 "%s: could not allocate TX cmd DMA memory, error %d\n",
1223 __func__, error);
1224 goto fail;
1227 #if defined(__DragonFly__)
1228 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1229 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1230 WPI_MAX_SCATTER - 1, MCLBYTES, 0, &ring->data_dmat);
1231 #else
1232 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1233 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1234 WPI_MAX_SCATTER - 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
1235 #endif
1236 if (error != 0) {
1237 device_printf(sc->sc_dev,
1238 "%s: could not create TX buf DMA tag, error %d\n",
1239 __func__, error);
1240 goto fail;
1243 paddr = ring->cmd_dma.paddr;
1244 for (i = 0; i < WPI_TX_RING_COUNT; i++) {
1245 struct wpi_tx_data *data = &ring->data[i];
1247 data->cmd_paddr = paddr;
1248 paddr += sizeof (struct wpi_tx_cmd);
1250 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1251 if (error != 0) {
1252 device_printf(sc->sc_dev,
1253 "%s: could not create TX buf DMA map, error %d\n",
1254 __func__, error);
1255 goto fail;
1259 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
1261 return 0;
1263 fail: wpi_free_tx_ring(sc, ring);
1264 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
1265 return error;
1268 static void
1269 wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring)
1271 WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
1274 static void
1275 wpi_update_tx_ring_ps(struct wpi_softc *sc, struct wpi_tx_ring *ring)
1278 if (ring->update != 0) {
1279 /* Wait for INT_WAKEUP event. */
1280 return;
1283 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
1284 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) {
1285 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n",
1286 __func__, ring->qid);
1287 ring->update = 1;
1288 } else {
1289 wpi_update_tx_ring(sc, ring);
1290 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
1294 static void
1295 wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring)
1297 int i;
1299 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
1301 for (i = 0; i < WPI_TX_RING_COUNT; i++) {
1302 struct wpi_tx_data *data = &ring->data[i];
1304 if (data->m != NULL) {
1305 bus_dmamap_sync(ring->data_dmat, data->map,
1306 BUS_DMASYNC_POSTWRITE);
1307 bus_dmamap_unload(ring->data_dmat, data->map);
1308 m_freem(data->m);
1309 data->m = NULL;
1311 if (data->ni != NULL) {
1312 ieee80211_free_node(data->ni);
1313 data->ni = NULL;
1316 /* Clear TX descriptors. */
1317 memset(ring->desc, 0, ring->desc_dma.size);
1318 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1319 BUS_DMASYNC_PREWRITE);
1320 ring->queued = 0;
1321 ring->cur = 0;
1322 ring->pending = 0;
1323 ring->update = 0;
1326 static void
1327 wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring)
1329 int i;
1331 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
1333 wpi_dma_contig_free(&ring->desc_dma);
1334 wpi_dma_contig_free(&ring->cmd_dma);
1336 for (i = 0; i < WPI_TX_RING_COUNT; i++) {
1337 struct wpi_tx_data *data = &ring->data[i];
1339 if (data->m != NULL) {
1340 bus_dmamap_sync(ring->data_dmat, data->map,
1341 BUS_DMASYNC_POSTWRITE);
1342 bus_dmamap_unload(ring->data_dmat, data->map);
1343 m_freem(data->m);
1345 if (data->map != NULL)
1346 bus_dmamap_destroy(ring->data_dmat, data->map);
1348 if (ring->data_dmat != NULL) {
1349 bus_dma_tag_destroy(ring->data_dmat);
1350 ring->data_dmat = NULL;
1355 * Extract various information from EEPROM.
1357 static int
1358 wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
1360 #define WPI_CHK(res) do { \
1361 if ((error = res) != 0) \
1362 goto fail; \
1363 } while (0)
1364 uint8_t i;
1365 int error;
1367 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
1369 /* Adapter has to be powered on for EEPROM access to work. */
1370 if ((error = wpi_apm_init(sc)) != 0) {
1371 device_printf(sc->sc_dev,
1372 "%s: could not power ON adapter, error %d\n", __func__,
1373 error);
1374 return error;
1377 if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) {
1378 device_printf(sc->sc_dev, "bad EEPROM signature\n");
1379 error = EIO;
1380 goto fail;
1382 /* Clear HW ownership of EEPROM. */
1383 WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER);
1385 /* Read the hardware capabilities, revision and SKU type. */
1386 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap,
1387 sizeof(sc->cap)));
1388 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev,
1389 sizeof(sc->rev)));
1390 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type,
1391 sizeof(sc->type)));
1393 sc->rev = le16toh(sc->rev);
1394 DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap,
1395 sc->rev, sc->type);
1397 /* Read the regulatory domain (4 ASCII characters). */
1398 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain,
1399 sizeof(sc->domain)));
1401 /* Read MAC address. */
1402 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr,
1403 IEEE80211_ADDR_LEN));
1405 /* Read the list of authorized channels. */
1406 for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++)
1407 WPI_CHK(wpi_read_eeprom_channels(sc, i));
1409 /* Read the list of TX power groups. */
1410 for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++)
1411 WPI_CHK(wpi_read_eeprom_group(sc, i));
1413 fail: wpi_apm_stop(sc); /* Power OFF adapter. */
1415 DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END,
1416 __func__);
1418 return error;
1419 #undef WPI_CHK
1423 * Translate EEPROM flags to net80211.
1425 static uint32_t
1426 wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel)
1428 uint32_t nflags;
1430 nflags = 0;
1431 if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0)
1432 nflags |= IEEE80211_CHAN_PASSIVE;
1433 if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0)
1434 nflags |= IEEE80211_CHAN_NOADHOC;
1435 if (channel->flags & WPI_EEPROM_CHAN_RADAR) {
1436 nflags |= IEEE80211_CHAN_DFS;
1437 /* XXX apparently IBSS may still be marked */
1438 nflags |= IEEE80211_CHAN_NOADHOC;
1441 /* XXX HOSTAP uses WPI_MODE_IBSS */
1442 if (nflags & IEEE80211_CHAN_NOADHOC)
1443 nflags |= IEEE80211_CHAN_NOHOSTAP;
1445 return nflags;
1448 static void
1449 wpi_read_eeprom_band(struct wpi_softc *sc, uint8_t n, int maxchans,
1450 int *nchans, struct ieee80211_channel chans[])
1452 struct wpi_eeprom_chan *channels = sc->eeprom_channels[n];
1453 const struct wpi_chan_band *band = &wpi_bands[n];
1454 uint32_t nflags;
1455 uint8_t bands[IEEE80211_MODE_BYTES];
1456 uint8_t chan, i;
1457 int error;
1459 memset(bands, 0, sizeof(bands));
1461 if (n == 0) {
1462 setbit(bands, IEEE80211_MODE_11B);
1463 setbit(bands, IEEE80211_MODE_11G);
1464 } else
1465 setbit(bands, IEEE80211_MODE_11A);
1467 for (i = 0; i < band->nchan; i++) {
1468 if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) {
1469 DPRINTF(sc, WPI_DEBUG_EEPROM,
1470 "Channel Not Valid: %d, band %d\n",
1471 band->chan[i],n);
1472 continue;
1475 chan = band->chan[i];
1476 nflags = wpi_eeprom_channel_flags(&channels[i]);
1477 error = ieee80211_add_channel(chans, maxchans, nchans,
1478 chan, 0, channels[i].maxpwr, nflags, bands);
1479 if (error != 0)
1480 break;
1482 /* Save maximum allowed TX power for this channel. */
1483 sc->maxpwr[chan] = channels[i].maxpwr;
1485 DPRINTF(sc, WPI_DEBUG_EEPROM,
1486 "adding chan %d flags=0x%x maxpwr=%d, offset %d\n",
1487 chan, channels[i].flags, sc->maxpwr[chan], *nchans);
1492 * Read the eeprom to find out what channels are valid for the given
1493 * band and update net80211 with what we find.
1495 static int
1496 wpi_read_eeprom_channels(struct wpi_softc *sc, uint8_t n)
1498 struct ieee80211com *ic = &sc->sc_ic;
1499 const struct wpi_chan_band *band = &wpi_bands[n];
1500 int error;
1502 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
1504 error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n],
1505 band->nchan * sizeof (struct wpi_eeprom_chan));
1506 if (error != 0) {
1507 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
1508 return error;
1511 wpi_read_eeprom_band(sc, n, IEEE80211_CHAN_MAX, &ic->ic_nchans,
1512 ic->ic_channels);
1514 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
1516 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
1518 return 0;
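/*
 * Map a net80211 channel back to its EEPROM entry.  Band 0 holds the
 * 2GHz channels, so the XOR below matches 5GHz (11a) channels only
 * against bands 1 and above.
 */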
1521 static struct wpi_eeprom_chan *
1522 wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c)
1524 int i, j;
1526 for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++)
1527 for (i = 0; i < wpi_bands[j].nchan; i++)
1528 if (wpi_bands[j].chan[i] == c->ic_ieee &&
1529 ((j == 0) ^ IEEE80211_IS_CHAN_A(c)) == 1)
1530 return &sc->eeprom_channels[j][i];
1532 return NULL;
1535 static void
1536 wpi_getradiocaps(struct ieee80211com *ic,
1537 int maxchans, int *nchans, struct ieee80211_channel chans[])
1539 struct wpi_softc *sc = ic->ic_softc;
1540 int i;
1542 /* Parse the list of authorized channels. */
1543 for (i = 0; i < WPI_CHAN_BANDS_COUNT && *nchans < maxchans; i++)
1544 wpi_read_eeprom_band(sc, i, maxchans, nchans, chans);
1548 * Enforce flags read from EEPROM.
1550 static int
1551 wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
1552 int nchan, struct ieee80211_channel chans[])
1554 struct wpi_softc *sc = ic->ic_softc;
1555 int i;
1557 for (i = 0; i < nchan; i++) {
1558 struct ieee80211_channel *c = &chans[i];
1559 struct wpi_eeprom_chan *channel;
1561 channel = wpi_find_eeprom_channel(sc, c);
1562 if (channel == NULL) {
1563 ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n",
1564 __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
1565 return EINVAL;
1567 c->ic_flags |= wpi_eeprom_channel_flags(channel);
1570 return 0;
1573 static int
1574 wpi_read_eeprom_group(struct wpi_softc *sc, uint8_t n)
1576 struct wpi_power_group *group = &sc->groups[n];
1577 struct wpi_eeprom_group rgroup;
1578 int i, error;
1580 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
1582 if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32,
1583 &rgroup, sizeof rgroup)) != 0) {
1584 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
1585 return error;
1588 /* Save TX power group information. */
1589 group->chan = rgroup.chan;
1590 group->maxpwr = rgroup.maxpwr;
1591 /* Retrieve temperature at which the samples were taken. */
1592 group->temp = (int16_t)le16toh(rgroup.temp);
1594 DPRINTF(sc, WPI_DEBUG_EEPROM,
1595 "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan,
1596 group->maxpwr, group->temp);
1598 for (i = 0; i < WPI_SAMPLES_COUNT; i++) {
1599 group->samples[i].index = rgroup.samples[i].index;
1600 group->samples[i].power = rgroup.samples[i].power;
1602 DPRINTF(sc, WPI_DEBUG_EEPROM,
1603 "\tsample %d: index=%d power=%d\n", i,
1604 group->samples[i].index, group->samples[i].power);
1607 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
1609 return 0;
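/*
 * Allocate a node table entry in the IBSS range by finding the first
 * clear bit in sc->nodesmsk; returns WPI_ID_UNDEFINED when the table
 * is full.
 */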
1612 static __inline uint8_t
1613 wpi_add_node_entry_adhoc(struct wpi_softc *sc)
1615 uint8_t newid = WPI_ID_IBSS_MIN;
1617 for (; newid <= WPI_ID_IBSS_MAX; newid++) {
1618 if ((sc->nodesmsk & (1 << newid)) == 0) {
1619 sc->nodesmsk |= 1 << newid;
1620 return newid;
1624 return WPI_ID_UNDEFINED;
1627 static __inline uint8_t
1628 wpi_add_node_entry_sta(struct wpi_softc *sc)
1630 sc->nodesmsk |= 1 << WPI_ID_BSS;
1632 return WPI_ID_BSS;
1635 static __inline int
1636 wpi_check_node_entry(struct wpi_softc *sc, uint8_t id)
1638 if (id == WPI_ID_UNDEFINED)
1639 return 0;
1641 return (sc->nodesmsk >> id) & 1;
1644 static __inline void
1645 wpi_clear_node_table(struct wpi_softc *sc)
1647 sc->nodesmsk = 0;
1650 static __inline void
1651 wpi_del_node_entry(struct wpi_softc *sc, uint8_t id)
1653 sc->nodesmsk &= ~(1 << id);
1656 static struct ieee80211_node *
1657 wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
1659 struct wpi_node *wn;
1661 wn = kmalloc(sizeof (struct wpi_node), M_80211_NODE,
1662 M_INTWAIT | M_ZERO);
1664 if (wn == NULL)
1665 return NULL;
1667 wn->id = WPI_ID_UNDEFINED;
1669 return &wn->ni;
1672 static void
1673 wpi_node_free(struct ieee80211_node *ni)
1675 struct wpi_softc *sc = ni->ni_ic->ic_softc;
1676 struct wpi_node *wn = WPI_NODE(ni);
1678 if (wn->id != WPI_ID_UNDEFINED) {
1679 WPI_NT_LOCK(sc);
1680 if (wpi_check_node_entry(sc, wn->id)) {
1681 wpi_del_node_entry(sc, wn->id);
1682 wpi_del_node(sc, ni);
1684 WPI_NT_UNLOCK(sc);
1687 sc->sc_node_free(ni);
1690 static __inline int
1691 wpi_check_bss_filter(struct wpi_softc *sc)
1693 return (sc->rxon.filter & htole32(WPI_FILTER_BSS)) != 0;
1696 static void
1697 wpi_ibss_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype,
1698 const struct ieee80211_rx_stats *rxs,
1699 int rssi, int nf)
1701 struct ieee80211vap *vap = ni->ni_vap;
1702 struct wpi_softc *sc = vap->iv_ic->ic_softc;
1703 struct wpi_vap *wvp = WPI_VAP(vap);
1704 uint64_t ni_tstamp, rx_tstamp;
1706 wvp->wv_recv_mgmt(ni, m, subtype, rxs, rssi, nf);
1708 if (vap->iv_state == IEEE80211_S_RUN &&
1709 (subtype == IEEE80211_FC0_SUBTYPE_BEACON ||
1710 subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) {
1711 ni_tstamp = le64toh(ni->ni_tstamp.tsf);
1712 rx_tstamp = le64toh(sc->rx_tstamp);
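/*
 * A beacon/probe response whose TSF is ahead of our local timer wins
 * the IBSS merge: adopt that network.
 */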
1714 if (ni_tstamp >= rx_tstamp) {
1715 DPRINTF(sc, WPI_DEBUG_STATE,
1716 "ibss merge, tsf %ju tstamp %ju\n",
1717 (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp);
1718 (void) ieee80211_ibss_merge(ni);
1723 static void
1724 wpi_restore_node(void *arg, struct ieee80211_node *ni)
1726 struct wpi_softc *sc = arg;
1727 struct wpi_node *wn = WPI_NODE(ni);
1728 int error;
1730 WPI_NT_LOCK(sc);
1731 if (wn->id != WPI_ID_UNDEFINED) {
1732 wn->id = WPI_ID_UNDEFINED;
1733 if ((error = wpi_add_ibss_node(sc, ni)) != 0) {
1734 device_printf(sc->sc_dev,
1735 "%s: could not add IBSS node, error %d\n",
1736 __func__, error);
1739 WPI_NT_UNLOCK(sc);
1742 static void
1743 wpi_restore_node_table(struct wpi_softc *sc, struct wpi_vap *wvp)
1745 struct ieee80211com *ic = &sc->sc_ic;
1747 /* Set group keys once. */
1748 WPI_NT_LOCK(sc);
1749 wvp->wv_gtk = 0;
1750 WPI_NT_UNLOCK(sc);
1752 ieee80211_iterate_nodes(&ic->ic_sta, wpi_restore_node, sc);
1753 ieee80211_crypto_reload_keys(ic);
1757 * Called by net80211 whenever there is a change to the 802.11 state machine.
1759 static int
1760 wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
1762 struct wpi_vap *wvp = WPI_VAP(vap);
1763 struct ieee80211com *ic = vap->iv_ic;
1764 struct wpi_softc *sc = ic->ic_softc;
1765 int error = 0;
1767 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
1769 WPI_TXQ_LOCK(sc);
1770 if (nstate > IEEE80211_S_INIT && sc->sc_running == 0) {
1771 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
1772 WPI_TXQ_UNLOCK(sc);
1774 return ENXIO;
1776 WPI_TXQ_UNLOCK(sc);
1778 DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__,
1779 ieee80211_state_name[vap->iv_state],
1780 ieee80211_state_name[nstate]);
1782 if (vap->iv_state == IEEE80211_S_RUN && nstate < IEEE80211_S_RUN) {
1783 if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) {
1784 device_printf(sc->sc_dev,
1785 "%s: could not set power saving level\n",
1786 __func__);
1787 return error;
1790 wpi_set_led(sc, WPI_LED_LINK, 1, 0);
1793 switch (nstate) {
1794 case IEEE80211_S_SCAN:
1795 WPI_RXON_LOCK(sc);
1796 if (wpi_check_bss_filter(sc) != 0) {
1797 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
1798 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
1799 device_printf(sc->sc_dev,
1800 "%s: could not send RXON\n", __func__);
1803 WPI_RXON_UNLOCK(sc);
1804 break;
1806 case IEEE80211_S_ASSOC:
1807 if (vap->iv_state != IEEE80211_S_RUN)
1808 break;
1809 /* FALLTHROUGH */
1810 case IEEE80211_S_AUTH:
1812 * NB: do not optimize AUTH -> AUTH state transitions -
1813 * this will break powersave with non-QoS AP!
1817 * The node must be registered in the firmware before auth.
1818 * Also the associd must be cleared on RUN -> ASSOC
1819 * transitions.
1821 if ((error = wpi_auth(sc, vap)) != 0) {
1822 device_printf(sc->sc_dev,
1823 "%s: could not move to AUTH state, error %d\n",
1824 __func__, error);
1826 break;
1828 case IEEE80211_S_RUN:
1830 * RUN -> RUN transition:
1831 * STA mode: Just restart the timers.
1832 * IBSS mode: Process IBSS merge.
1834 if (vap->iv_state == IEEE80211_S_RUN) {
1835 if (vap->iv_opmode != IEEE80211_M_IBSS) {
1836 WPI_RXON_LOCK(sc);
1837 wpi_calib_timeout(sc);
1838 WPI_RXON_UNLOCK(sc);
1839 break;
1840 } else {
1842 * Drop the BSS_FILTER bit
1843 * (there is no other way to change the bssid).
1845 WPI_RXON_LOCK(sc);
1846 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
1847 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
1848 device_printf(sc->sc_dev,
1849 "%s: could not send RXON\n",
1850 __func__);
1852 WPI_RXON_UNLOCK(sc);
1854 /* Restore everything that was lost. */
1855 wpi_restore_node_table(sc, wvp);
1857 /* XXX set conditionally? */
1858 wpi_updateedca(ic);
1863 * !RUN -> RUN requires setting the association id
1864 * which is done with a firmware cmd. We also defer
1865 * starting the timers until that work is done.
1867 if ((error = wpi_run(sc, vap)) != 0) {
1868 device_printf(sc->sc_dev,
1869 "%s: could not move to RUN state\n", __func__);
1871 break;
1873 default:
1874 break;
1876 if (error != 0) {
1877 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
1878 return error;
1881 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
1883 return wvp->wv_newstate(vap, nstate, arg);
1886 static void
1887 wpi_calib_timeout(void *arg)
1889 struct wpi_softc *sc = arg;
1891 if (wpi_check_bss_filter(sc) == 0)
1892 return;
1894 wpi_power_calibration(sc);
1896 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc);
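/*
 * Rates below are in units of 500kb/s.  OFDM rates map to the 4-bit PLCP
 * signal values used by the hardware; CCK rates map to the rate expressed
 * in units of 100kb/s (e.g. 2 -> 10 for 1Mb/s).
 */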
1899 static __inline uint8_t
1900 rate2plcp(const uint8_t rate)
1902 switch (rate) {
1903 case 12: return 0xd;
1904 case 18: return 0xf;
1905 case 24: return 0x5;
1906 case 36: return 0x7;
1907 case 48: return 0x9;
1908 case 72: return 0xb;
1909 case 96: return 0x1;
1910 case 108: return 0x3;
1911 case 2: return 10;
1912 case 4: return 20;
1913 case 11: return 55;
1914 case 22: return 110;
1915 default: return 0;
1919 static __inline uint8_t
1920 plcp2rate(const uint8_t plcp)
1922 switch (plcp) {
1923 case 0xd: return 12;
1924 case 0xf: return 18;
1925 case 0x5: return 24;
1926 case 0x7: return 36;
1927 case 0x9: return 48;
1928 case 0xb: return 72;
1929 case 0x1: return 96;
1930 case 0x3: return 108;
1931 case 10: return 2;
1932 case 20: return 4;
1933 case 55: return 11;
1934 case 110: return 22;
1935 default: return 0;
1939 /* Quickly determine if a given rate is CCK or OFDM. */
1940 #define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22)
1942 static void
1943 wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc,
1944 struct wpi_rx_data *data)
1946 struct ieee80211com *ic = &sc->sc_ic;
1947 struct wpi_rx_ring *ring = &sc->rxq;
1948 struct wpi_rx_stat *stat;
1949 struct wpi_rx_head *head;
1950 struct wpi_rx_tail *tail;
1951 struct ieee80211_frame *wh;
1952 struct ieee80211_node *ni;
1953 struct mbuf *m, *m1;
1954 bus_addr_t paddr;
1955 uint32_t flags;
1956 uint16_t len;
1957 int error;
1959 stat = (struct wpi_rx_stat *)(desc + 1);
1961 if (__predict_false(stat->len > WPI_STAT_MAXLEN)) {
1962 device_printf(sc->sc_dev, "invalid RX statistic header\n");
1963 goto fail1;
1966 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
1967 head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len);
1968 len = le16toh(head->len);
1969 tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len);
1970 flags = le32toh(tail->flags);
1972 DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d"
1973 " rate %x chan %d tstamp %ju\n", __func__, ring->cur,
1974 le32toh(desc->len), len, (int8_t)stat->rssi,
1975 head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp));
1977 /* Discard frames with a bad FCS early. */
1978 if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) {
1979 DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n",
1980 __func__, flags);
1981 goto fail1;
1983 /* Discard frames that are too short. */
1984 if (len < sizeof (struct ieee80211_frame_ack)) {
1985 DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n",
1986 __func__, len);
1987 goto fail1;
1990 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
1991 if (__predict_false(m1 == NULL)) {
1992 DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n",
1993 __func__);
1994 goto fail1;
1996 bus_dmamap_unload(ring->data_dmat, data->map);
1998 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
1999 MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
2000 if (__predict_false(error != 0 && error != EFBIG)) {
2001 device_printf(sc->sc_dev,
2002 "%s: bus_dmamap_load failed, error %d\n", __func__, error);
2003 m_freem(m1);
2005 /* Try to reload the old mbuf. */
2006 error = bus_dmamap_load(ring->data_dmat, data->map,
2007 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr,
2008 &paddr, BUS_DMA_NOWAIT);
2009 if (error != 0 && error != EFBIG) {
2010 panic("%s: could not load old RX mbuf", __func__);
2012 /* Physical address may have changed. */
2013 ring->desc[ring->cur] = htole32(paddr);
2014 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map,
2015 BUS_DMASYNC_PREWRITE);
2016 goto fail1;
2019 m = data->m;
2020 data->m = m1;
2021 /* Update RX descriptor. */
2022 ring->desc[ring->cur] = htole32(paddr);
2023 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2024 BUS_DMASYNC_PREWRITE);
2026 /* Finalize mbuf. */
2027 m->m_data = (caddr_t)(head + 1);
2028 m->m_pkthdr.len = m->m_len = len;
2030 /* Grab a reference to the source node. */
2031 wh = mtod(m, struct ieee80211_frame *);
2033 if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
2034 (flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) {
2035 /* Check whether decryption was successful or not. */
2036 if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) {
2037 DPRINTF(sc, WPI_DEBUG_RECV,
2038 "CCMP decryption failed 0x%x\n", flags);
2039 goto fail2;
2041 m->m_flags |= M_WEP;
2044 if (len >= sizeof(struct ieee80211_frame_min))
2045 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2046 else
2047 ni = NULL;
2049 sc->rx_tstamp = tail->tstamp;
2051 if (ieee80211_radiotap_active(ic)) {
2052 struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap;
2054 tap->wr_flags = 0;
2055 if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE))
2056 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2057 tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET);
2058 tap->wr_dbm_antnoise = WPI_RSSI_OFFSET;
2059 tap->wr_tsft = tail->tstamp;
2060 tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf;
2061 tap->wr_rate = plcp2rate(head->plcp);
2064 WPI_UNLOCK(sc);
2066 /* Send the frame to the 802.11 layer. */
2067 if (ni != NULL) {
2068 (void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET);
2069 /* Node is no longer needed. */
2070 ieee80211_free_node(ni);
2071 } else
2072 (void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET);
2074 WPI_LOCK(sc);
2076 return;
2078 fail2: m_freem(m);
2080 #if defined(__DragonFly__)
2081 fail1: ; /* not implemented */
2082 #else
2083 fail1: counter_u64_add(ic->ic_ierrors, 1);
2084 #endif
2087 static void
2088 wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc,
2089 struct wpi_rx_data *data)
2091 /* Ignore */
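/*
 * Process a TX_DONE notification: unmap and free the transmitted mbuf,
 * report the retry count to the rate control module and stop the TX
 * watchdog once the ring drains.
 */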
2094 static void
2095 wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc)
2097 struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3];
2098 struct wpi_tx_data *data = &ring->data[desc->idx];
2099 struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1);
2100 struct mbuf *m;
2101 struct ieee80211_node *ni;
2102 struct ieee80211vap *vap;
2103 uint32_t status = le32toh(stat->status);
2104 int ackfailcnt = stat->ackfailcnt / WPI_NTRIES_DEFAULT;
2106 KASSERT(data->ni != NULL, ("no node"));
2107 KASSERT(data->m != NULL, ("no mbuf"));
2109 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
2111 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: "
2112 "qid %d idx %d retries %d btkillcnt %d rate %x duration %d "
2113 "status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt,
2114 stat->btkillcnt, stat->rate, le32toh(stat->duration), status);
2116 /* Unmap and free mbuf. */
2117 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
2118 bus_dmamap_unload(ring->data_dmat, data->map);
2119 m = data->m, data->m = NULL;
2120 ni = data->ni, data->ni = NULL;
2121 vap = ni->ni_vap;
2124 * Update rate control statistics for the node.
2126 if (status & WPI_TX_STATUS_FAIL) {
2127 ieee80211_ratectl_tx_complete(vap, ni,
2128 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
2129 } else
2130 ieee80211_ratectl_tx_complete(vap, ni,
2131 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
2133 ieee80211_tx_complete(ni, m, (status & WPI_TX_STATUS_FAIL) != 0);
2135 WPI_TXQ_STATE_LOCK(sc);
2136 if (--ring->queued > 0)
2137 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc);
2138 else
2139 callout_stop(&sc->tx_timeout);
2140 WPI_TXQ_STATE_UNLOCK(sc);
2142 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
2146 * Process a "command done" firmware notification. This is where we wake up
2147 * processes waiting for a synchronous command completion.
2149 static void
2150 wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc)
2152 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM];
2153 struct wpi_tx_data *data;
2154 struct wpi_tx_cmd *cmd;
2156 DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x "
2157 "type %s len %d\n", desc->qid, desc->idx,
2158 desc->flags, wpi_cmd_str(desc->type),
2159 le32toh(desc->len));
2161 if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM)
2162 return; /* Not a command ack. */
2164 KASSERT(ring->queued == 0, ("ring->queued must be 0"));
2166 data = &ring->data[desc->idx];
2167 cmd = &ring->cmd[desc->idx];
2169 /* If the command was mapped in an mbuf, free it. */
2170 if (data->m != NULL) {
2171 bus_dmamap_sync(ring->data_dmat, data->map,
2172 BUS_DMASYNC_POSTWRITE);
2173 bus_dmamap_unload(ring->data_dmat, data->map);
2174 m_freem(data->m);
2175 data->m = NULL;
2178 wakeup(cmd);
2180 if (desc->type == WPI_CMD_SET_POWER_MODE) {
2181 struct wpi_pmgt_cmd *pcmd = (struct wpi_pmgt_cmd *)cmd->data;
2183 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
2184 BUS_DMASYNC_POSTREAD);
2186 WPI_TXQ_LOCK(sc);
2187 if (le16toh(pcmd->flags) & WPI_PS_ALLOW_SLEEP) {
2188 sc->sc_update_rx_ring = wpi_update_rx_ring_ps;
2189 sc->sc_update_tx_ring = wpi_update_tx_ring_ps;
2190 } else {
2191 sc->sc_update_rx_ring = wpi_update_rx_ring;
2192 sc->sc_update_tx_ring = wpi_update_tx_ring;
2194 WPI_TXQ_UNLOCK(sc);
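/*
 * Walk the RX ring and dispatch every notification the firmware has posted
 * since the last interrupt: received frames, TX completions, statistics,
 * beacon misses, microcode/state changes and scan events.
 */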
2198 static void
2199 wpi_notif_intr(struct wpi_softc *sc)
2201 struct ieee80211com *ic = &sc->sc_ic;
2202 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2203 uint32_t hw;
2205 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map,
2206 BUS_DMASYNC_POSTREAD);
2208 hw = le32toh(sc->shared->next) & 0xfff;
2209 hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1;
2211 while (sc->rxq.cur != hw) {
2212 sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT;
2214 struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2215 struct wpi_rx_desc *desc;
2217 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2218 BUS_DMASYNC_POSTREAD);
2219 desc = mtod(data->m, struct wpi_rx_desc *);
2221 DPRINTF(sc, WPI_DEBUG_NOTIFY,
2222 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n",
2223 __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags,
2224 desc->type, wpi_cmd_str(desc->type), le32toh(desc->len));
2226 if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) {
2227 /* Reply to a command. */
2228 wpi_cmd_done(sc, desc);
2231 switch (desc->type) {
2232 case WPI_RX_DONE:
2233 /* An 802.11 frame has been received. */
2234 wpi_rx_done(sc, desc, data);
2236 if (__predict_false(sc->sc_running == 0)) {
2237 /* wpi_stop() was called. */
2238 return;
2241 break;
2243 case WPI_TX_DONE:
2244 /* An 802.11 frame has been transmitted. */
2245 wpi_tx_done(sc, desc);
2246 break;
2248 case WPI_RX_STATISTICS:
2249 case WPI_BEACON_STATISTICS:
2250 wpi_rx_statistics(sc, desc, data);
2251 break;
2253 case WPI_BEACON_MISSED:
2255 struct wpi_beacon_missed *miss =
2256 (struct wpi_beacon_missed *)(desc + 1);
2257 uint32_t expected, misses, received, threshold;
2259 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2260 BUS_DMASYNC_POSTREAD);
2262 misses = le32toh(miss->consecutive);
2263 expected = le32toh(miss->expected);
2264 received = le32toh(miss->received);
2265 threshold = MAX(2, vap->iv_bmissthreshold);
2267 DPRINTF(sc, WPI_DEBUG_BMISS,
2268 "%s: beacons missed %u(%u) (received %u/%u)\n",
2269 __func__, misses, le32toh(miss->total), received,
2270 expected);
2272 if (misses >= threshold ||
2273 (received == 0 && expected >= threshold)) {
2274 WPI_RXON_LOCK(sc);
2275 if (callout_pending(&sc->scan_timeout)) {
2276 wpi_cmd(sc, WPI_CMD_SCAN_ABORT, NULL,
2277 0, 1);
2279 WPI_RXON_UNLOCK(sc);
2280 if (vap->iv_state == IEEE80211_S_RUN &&
2281 (ic->ic_flags & IEEE80211_F_SCAN) == 0)
2282 ieee80211_beacon_miss(ic);
2285 break;
2287 #ifdef WPI_DEBUG
2288 case WPI_BEACON_SENT:
2290 struct wpi_tx_stat *stat =
2291 (struct wpi_tx_stat *)(desc + 1);
2292 uint64_t *tsf = (uint64_t *)(stat + 1);
2293 uint32_t *mode = (uint32_t *)(tsf + 1);
2295 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2296 BUS_DMASYNC_POSTREAD);
2298 DPRINTF(sc, WPI_DEBUG_BEACON,
2299 "beacon sent: rts %u, ack %u, btkill %u, rate %u, "
2300 "duration %u, status %x, tsf %ju, mode %x\n",
2301 stat->rtsfailcnt, stat->ackfailcnt,
2302 stat->btkillcnt, stat->rate, le32toh(stat->duration),
2303 le32toh(stat->status), le64toh(*tsf),
2304 le32toh(*mode));
2306 break;
2308 #endif
2309 case WPI_UC_READY:
2311 struct wpi_ucode_info *uc =
2312 (struct wpi_ucode_info *)(desc + 1);
2314 /* The microcontroller is ready. */
2315 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2316 BUS_DMASYNC_POSTREAD);
2317 DPRINTF(sc, WPI_DEBUG_RESET,
2318 "microcode alive notification version=%d.%d "
2319 "subtype=%x alive=%x\n", uc->major, uc->minor,
2320 uc->subtype, le32toh(uc->valid));
2322 if (le32toh(uc->valid) != 1) {
2323 device_printf(sc->sc_dev,
2324 "microcontroller initialization failed\n");
2325 wpi_stop_locked(sc);
2326 return;
2328 /* Save the address of the error log in SRAM. */
2329 sc->errptr = le32toh(uc->errptr);
2330 break;
2332 case WPI_STATE_CHANGED:
2334 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2335 BUS_DMASYNC_POSTREAD);
2337 uint32_t *status = (uint32_t *)(desc + 1);
2339 DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n",
2340 le32toh(*status));
2342 if (le32toh(*status) & 1) {
2343 WPI_NT_LOCK(sc);
2344 wpi_clear_node_table(sc);
2345 WPI_NT_UNLOCK(sc);
2346 ieee80211_runtask(ic,
2347 &sc->sc_radiooff_task);
2348 return;
2350 break;
2352 #ifdef WPI_DEBUG
2353 case WPI_START_SCAN:
2355 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2356 BUS_DMASYNC_POSTREAD);
2358 struct wpi_start_scan *scan =
2359 (struct wpi_start_scan *)(desc + 1);
2360 DPRINTF(sc, WPI_DEBUG_SCAN,
2361 "%s: scanning channel %d status %x\n",
2362 __func__, scan->chan, le32toh(scan->status));
2364 break;
2366 #endif
2367 case WPI_STOP_SCAN:
2369 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2370 BUS_DMASYNC_POSTREAD);
2372 struct wpi_stop_scan *scan =
2373 (struct wpi_stop_scan *)(desc + 1);
2375 DPRINTF(sc, WPI_DEBUG_SCAN,
2376 "scan finished nchan=%d status=%d chan=%d\n",
2377 scan->nchan, scan->status, scan->chan);
2379 WPI_RXON_LOCK(sc);
2380 callout_stop(&sc->scan_timeout);
2381 WPI_RXON_UNLOCK(sc);
2382 if (scan->status == WPI_SCAN_ABORTED)
2383 ieee80211_cancel_scan(vap);
2384 else
2385 ieee80211_scan_next(vap);
2386 break;
2390 if (sc->rxq.cur % 8 == 0) {
2391 /* Tell the firmware what we have processed. */
2392 sc->sc_update_rx_ring(sc);
2398 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
2399 * from power-down sleep mode.
2401 static void
2402 wpi_wakeup_intr(struct wpi_softc *sc)
2404 int qid;
2406 DPRINTF(sc, WPI_DEBUG_PWRSAVE,
2407 "%s: ucode wakeup from power-down sleep\n", __func__);
2409 /* Wakeup RX and TX rings. */
2410 if (sc->rxq.update) {
2411 sc->rxq.update = 0;
2412 wpi_update_rx_ring(sc);
2414 WPI_TXQ_LOCK(sc);
2415 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) {
2416 struct wpi_tx_ring *ring = &sc->txq[qid];
2418 if (ring->update) {
2419 ring->update = 0;
2420 wpi_update_tx_ring(sc, ring);
2423 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
2424 WPI_TXQ_UNLOCK(sc);
2428 * This function prints firmware registers
2430 #ifdef WPI_DEBUG
2431 static void
2432 wpi_debug_registers(struct wpi_softc *sc)
2434 size_t i;
2435 static const uint32_t csr_tbl[] = {
2436 WPI_HW_IF_CONFIG,
2437 WPI_INT,
2438 WPI_INT_MASK,
2439 WPI_FH_INT,
2440 WPI_GPIO_IN,
2441 WPI_RESET,
2442 WPI_GP_CNTRL,
2443 WPI_EEPROM,
2444 WPI_EEPROM_GP,
2445 WPI_GIO,
2446 WPI_UCODE_GP1,
2447 WPI_UCODE_GP2,
2448 WPI_GIO_CHICKEN,
2449 WPI_ANA_PLL,
2450 WPI_DBG_HPET_MEM,
2452 static const uint32_t prph_tbl[] = {
2453 WPI_APMG_CLK_CTRL,
2454 WPI_APMG_PS,
2455 WPI_APMG_PCI_STT,
2456 WPI_APMG_RFKILL,
2459 DPRINTF(sc, WPI_DEBUG_REGISTER,"%s","\n");
2461 for (i = 0; i < nitems(csr_tbl); i++) {
2462 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ",
2463 wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i]));
2465 if ((i + 1) % 2 == 0)
2466 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n");
2468 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n");
2470 if (wpi_nic_lock(sc) == 0) {
2471 for (i = 0; i < nitems(prph_tbl); i++) {
2472 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ",
2473 wpi_get_prph_string(prph_tbl[i]),
2474 wpi_prph_read(sc, prph_tbl[i]));
2476 if ((i + 1) % 2 == 0)
2477 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n");
2479 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n");
2480 wpi_nic_unlock(sc);
2481 } else {
2482 DPRINTF(sc, WPI_DEBUG_REGISTER,
2483 "Cannot access internal registers.\n");
2486 #endif
2489 * Dump the error log of the firmware when a firmware panic occurs. Although
2490 * we can't debug the firmware because it is neither open source nor free, it
2491 * can help us to identify certain classes of problems.
2493 static void
2494 wpi_fatal_intr(struct wpi_softc *sc)
2496 struct wpi_fw_dump dump;
2497 uint32_t i, offset, count;
2499 /* Check that the error log address is valid. */
2500 if (sc->errptr < WPI_FW_DATA_BASE ||
2501 sc->errptr + sizeof (dump) >
2502 WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) {
2503 kprintf("%s: bad firmware error log address 0x%08x\n", __func__,
2504 sc->errptr);
2505 return;
2507 if (wpi_nic_lock(sc) != 0) {
2508 kprintf("%s: could not read firmware error log\n", __func__);
2509 return;
2511 /* Read number of entries in the log. */
2512 count = wpi_mem_read(sc, sc->errptr);
2513 if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) {
2514 kprintf("%s: invalid count field (count = %u)\n", __func__,
2515 count);
2516 wpi_nic_unlock(sc);
2517 return;
2519 /* Skip "count" field. */
2520 offset = sc->errptr + sizeof (uint32_t);
2521 kprintf("firmware error log (count = %u):\n", count);
2522 for (i = 0; i < count; i++) {
2523 wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump,
2524 sizeof (dump) / sizeof (uint32_t));
2526 kprintf(" error type = \"%s\" (0x%08X)\n",
2527 (dump.desc < nitems(wpi_fw_errmsg)) ?
2528 wpi_fw_errmsg[dump.desc] : "UNKNOWN",
2529 dump.desc);
2530 kprintf(" error data = 0x%08X\n",
2531 dump.data);
2532 kprintf(" branch link = 0x%08X%08X\n",
2533 dump.blink[0], dump.blink[1]);
2534 kprintf(" interrupt link = 0x%08X%08X\n",
2535 dump.ilink[0], dump.ilink[1]);
2536 kprintf(" time = %u\n", dump.time);
2538 offset += sizeof (dump);
2540 wpi_nic_unlock(sc);
2541 /* Dump driver status (TX and RX rings) while we're here. */
2542 kprintf("driver status:\n");
2543 WPI_TXQ_LOCK(sc);
2544 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) {
2545 struct wpi_tx_ring *ring = &sc->txq[i];
2546 kprintf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
2547 i, ring->qid, ring->cur, ring->queued);
2549 WPI_TXQ_UNLOCK(sc);
2550 kprintf(" rx ring: cur=%d\n", sc->rxq.cur);
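/*
 * Interrupt handler.  Interrupts are masked while the handler runs; a fatal
 * firmware error triggers a full restart, otherwise RX/TX notifications and
 * wakeup events are serviced and interrupts are re-enabled on exit.
 */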
2553 static void
2554 wpi_intr(void *arg)
2556 struct wpi_softc *sc = arg;
2557 uint32_t r1, r2;
2559 WPI_LOCK(sc);
2561 /* Disable interrupts. */
2562 WPI_WRITE(sc, WPI_INT_MASK, 0);
2564 r1 = WPI_READ(sc, WPI_INT);
2566 if (__predict_false(r1 == 0xffffffff ||
2567 (r1 & 0xfffffff0) == 0xa5a5a5a0))
2568 goto end; /* Hardware gone! */
2570 r2 = WPI_READ(sc, WPI_FH_INT);
2572 DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__,
2573 r1, r2);
2575 if (r1 == 0 && r2 == 0)
2576 goto done; /* Interrupt not for us. */
2578 /* Acknowledge interrupts. */
2579 WPI_WRITE(sc, WPI_INT, r1);
2580 WPI_WRITE(sc, WPI_FH_INT, r2);
2582 if (__predict_false(r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR))) {
2583 struct ieee80211com *ic = &sc->sc_ic;
2585 device_printf(sc->sc_dev, "fatal firmware error\n");
2586 #ifdef WPI_DEBUG
2587 wpi_debug_registers(sc);
2588 #endif
2589 wpi_fatal_intr(sc);
2590 DPRINTF(sc, WPI_DEBUG_HW,
2591 "(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" :
2592 "(Hardware Error)");
2593 ieee80211_restart_all(ic);
2594 goto end;
2597 if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) ||
2598 (r2 & WPI_FH_INT_RX))
2599 wpi_notif_intr(sc);
2601 if (r1 & WPI_INT_ALIVE)
2602 wakeup(sc); /* Firmware is alive. */
2604 if (r1 & WPI_INT_WAKEUP)
2605 wpi_wakeup_intr(sc);
2607 done:
2608 /* Re-enable interrupts. */
2609 if (__predict_true(sc->sc_running))
2610 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF);
2612 end: WPI_UNLOCK(sc);
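/*
 * Free the mbufs and node references of fragments that were queued on a TX
 * ring but never handed to the firmware (ring->pending), e.g. after a
 * failure in the middle of a fragment burst.
 */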
2615 static void
2616 wpi_free_txfrags(struct wpi_softc *sc, uint16_t ac)
2618 struct wpi_tx_ring *ring;
2619 struct wpi_tx_data *data;
2620 uint8_t cur;
2622 WPI_TXQ_LOCK(sc);
2623 ring = &sc->txq[ac];
2625 while (ring->pending != 0) {
2626 ring->pending--;
2627 cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT;
2628 data = &ring->data[cur];
2630 bus_dmamap_sync(ring->data_dmat, data->map,
2631 BUS_DMASYNC_POSTWRITE);
2632 bus_dmamap_unload(ring->data_dmat, data->map);
2633 m_freem(data->m);
2634 data->m = NULL;
2636 ieee80211_node_decref(data->ni);
2637 data->ni = NULL;
2640 WPI_TXQ_UNLOCK(sc);
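/*
 * Queue one frame (or fragment) on a TX ring: build the TX firmware command,
 * DMA-map the payload (linearizing the mbuf if it has too many segments) and
 * fill the descriptor.  The ring is only kicked once the last fragment of a
 * burst has been queued.
 */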
2643 static int
2644 wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf)
2646 struct ieee80211_frame *wh;
2647 struct wpi_tx_cmd *cmd;
2648 struct wpi_tx_data *data;
2649 struct wpi_tx_desc *desc;
2650 struct wpi_tx_ring *ring;
2651 struct mbuf *m1;
2652 bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER];
2653 uint8_t cur, pad;
2654 uint16_t hdrlen;
2655 int error, i, nsegs, totlen, frag;
2657 WPI_TXQ_LOCK(sc);
2659 KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow"));
2661 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
2663 if (__predict_false(sc->sc_running == 0)) {
2664 /* wpi_stop() was called */
2665 error = ENETDOWN;
2666 goto end;
2669 wh = mtod(buf->m, struct ieee80211_frame *);
2670 hdrlen = ieee80211_anyhdrsize(wh);
2671 totlen = buf->m->m_pkthdr.len;
2672 frag = ((buf->m->m_flags & (M_FRAG | M_LASTFRAG)) == M_FRAG);
2674 if (__predict_false(totlen < sizeof(struct ieee80211_frame_min))) {
2675 error = EINVAL;
2676 goto end;
2679 if (hdrlen & 3) {
2680 /* First segment length must be a multiple of 4. */
2681 pad = 4 - (hdrlen & 3);
2682 } else
2683 pad = 0;
2685 ring = &sc->txq[buf->ac];
2686 cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT;
2687 desc = &ring->desc[cur];
2688 data = &ring->data[cur];
2690 /* Prepare TX firmware command. */
2691 cmd = &ring->cmd[cur];
2692 cmd->code = buf->code;
2693 cmd->flags = 0;
2694 cmd->qid = ring->qid;
2695 cmd->idx = cur;
2697 memcpy(cmd->data, buf->data, buf->size);
2699 /* Save and trim IEEE802.11 header. */
2700 memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen);
2701 m_adj(buf->m, hdrlen);
2703 #if defined(__DragonFly__)
2704 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, buf->m,
2705 segs, 1, &nsegs, BUS_DMA_NOWAIT);
2706 #else
2707 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m,
2708 segs, &nsegs, BUS_DMA_NOWAIT);
2709 #endif
2710 if (error != 0 && error != EFBIG) {
2711 device_printf(sc->sc_dev,
2712 "%s: can't map mbuf (error %d)\n", __func__, error);
2713 goto end;
2715 if (error != 0) {
2716 /* Too many DMA segments, linearize mbuf. */
2717 #if defined(__DragonFly__)
2718 m1 = m_defrag(buf->m, M_NOWAIT);
2719 #else
2720 m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1);
2721 #endif
2722 if (m1 == NULL) {
2723 device_printf(sc->sc_dev,
2724 "%s: could not defrag mbuf\n", __func__);
2725 error = ENOBUFS;
2726 goto end;
2728 buf->m = m1;
2730 #if defined(__DragonFly__)
2731 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map,
2732 buf->m, segs, 1, &nsegs, BUS_DMA_NOWAIT);
2733 #else
2734 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map,
2735 buf->m, segs, &nsegs, BUS_DMA_NOWAIT);
2736 #endif
2737 if (__predict_false(error != 0)) {
2738 /* XXX fix this (applicable to the iwn(4) too) */
2740 * NB: Do not return error;
2741 * original mbuf does not exist anymore.
2743 device_printf(sc->sc_dev,
2744 "%s: can't map mbuf (error %d)\n", __func__,
2745 error);
2746 if (ring->qid < WPI_CMD_QUEUE_NUM) {
2747 if_inc_counter(buf->ni->ni_vap->iv_ifp,
2748 IFCOUNTER_OERRORS, 1);
2749 if (!frag)
2750 ieee80211_free_node(buf->ni);
2752 m_freem(buf->m);
2753 error = 0;
2754 goto end;
2758 KASSERT(nsegs < WPI_MAX_SCATTER,
2759 ("too many DMA segments, nsegs (%d) should be less than %d",
2760 nsegs, WPI_MAX_SCATTER));
2762 data->m = buf->m;
2763 data->ni = buf->ni;
2765 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
2766 __func__, ring->qid, cur, totlen, nsegs);
2768 /* Fill TX descriptor. */
2769 desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs);
2770 /* First DMA segment is used by the TX command. */
2771 desc->segs[0].addr = htole32(data->cmd_paddr);
2772 desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad);
2773 /* Other DMA segments are for data payload. */
2774 seg = &segs[0];
2775 for (i = 1; i <= nsegs; i++) {
2776 desc->segs[i].addr = htole32(seg->ds_addr);
2777 desc->segs[i].len = htole32(seg->ds_len);
2778 seg++;
2781 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
2782 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
2783 BUS_DMASYNC_PREWRITE);
2784 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2785 BUS_DMASYNC_PREWRITE);
2787 ring->pending += 1;
2789 if (!frag) {
2790 if (ring->qid < WPI_CMD_QUEUE_NUM) {
2791 WPI_TXQ_STATE_LOCK(sc);
2792 ring->queued += ring->pending;
2793 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout,
2794 sc);
2795 WPI_TXQ_STATE_UNLOCK(sc);
2798 /* Kick TX ring. */
2799 ring->cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT;
2800 ring->pending = 0;
2801 sc->sc_update_tx_ring(sc, ring);
2802 } else
2803 ieee80211_node_incref(data->ni);
2805 end: DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END,
2806 __func__);
2808 WPI_TXQ_UNLOCK(sc);
2810 return (error);
2814 * Construct the data packet for a transmit buffer.
2816 static int
2817 wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
2819 const struct ieee80211_txparam *tp;
2820 struct ieee80211vap *vap = ni->ni_vap;
2821 struct ieee80211com *ic = ni->ni_ic;
2822 struct wpi_node *wn = WPI_NODE(ni);
2823 struct ieee80211_channel *chan;
2824 struct ieee80211_frame *wh;
2825 struct ieee80211_key *k = NULL;
2826 struct wpi_buf tx_data;
2827 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data;
2828 uint32_t flags;
2829 uint16_t ac, qos;
2830 uint8_t tid, type, rate;
2831 int swcrypt, ismcast, totlen;
2833 wh = mtod(m, struct ieee80211_frame *);
2834 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2835 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
2836 swcrypt = 1;
2838 /* Select EDCA Access Category and TX ring for this frame. */
2839 if (IEEE80211_QOS_HAS_SEQ(wh)) {
2840 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
2841 tid = qos & IEEE80211_QOS_TID;
2842 } else {
2843 qos = 0;
2844 tid = 0;
2846 ac = M_WME_GETAC(m);
2848 chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ?
2849 ni->ni_chan : ic->ic_curchan;
2850 tp = &vap->iv_txparms[ieee80211_chan2mode(chan)];
2852 /* Choose a TX rate index. */
2853 if (type == IEEE80211_FC0_TYPE_MGT)
2854 rate = tp->mgmtrate;
2855 else if (ismcast)
2856 rate = tp->mcastrate;
2857 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
2858 rate = tp->ucastrate;
2859 else if (m->m_flags & M_EAPOL)
2860 rate = tp->mgmtrate;
2861 else {
2862 /* XXX pass pktlen */
2863 (void) ieee80211_ratectl_rate(ni, NULL, 0);
2864 rate = ni->ni_txrate;
2867 /* Encrypt the frame if need be. */
2868 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
2869 /* Retrieve key for TX. */
2870 k = ieee80211_crypto_encap(ni, m);
2871 if (k == NULL)
2872 return (ENOBUFS);
2874 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT;
2876 /* 802.11 header may have moved. */
2877 wh = mtod(m, struct ieee80211_frame *);
2879 totlen = m->m_pkthdr.len;
2881 if (ieee80211_radiotap_active_vap(vap)) {
2882 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap;
2884 tap->wt_flags = 0;
2885 tap->wt_rate = rate;
2886 if (k != NULL)
2887 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2888 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
2889 tap->wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
2891 ieee80211_radiotap_tx(vap, m);
2894 flags = 0;
2895 if (!ismcast) {
2896 /* Unicast frame, check if an ACK is expected. */
2897 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
2898 IEEE80211_QOS_ACKPOLICY_NOACK)
2899 flags |= WPI_TX_NEED_ACK;
2902 if (!IEEE80211_QOS_HAS_SEQ(wh))
2903 flags |= WPI_TX_AUTO_SEQ;
2904 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
2905 flags |= WPI_TX_MORE_FRAG;
2907 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */
2908 if (!ismcast) {
2909 /* NB: Group frames are sent using CCK in 802.11b/g. */
2910 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
2911 flags |= WPI_TX_NEED_RTS;
2912 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
2913 WPI_RATE_IS_OFDM(rate)) {
2914 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
2915 flags |= WPI_TX_NEED_CTS;
2916 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
2917 flags |= WPI_TX_NEED_RTS;
2920 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS))
2921 flags |= WPI_TX_FULL_TXOP;
2924 memset(tx, 0, sizeof (struct wpi_cmd_data));
2925 if (type == IEEE80211_FC0_TYPE_MGT) {
2926 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2928 /* Tell HW to set timestamp in probe responses. */
2929 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2930 flags |= WPI_TX_INSERT_TSTAMP;
2931 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
2932 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
2933 tx->timeout = htole16(3);
2934 else
2935 tx->timeout = htole16(2);
2938 if (ismcast || type != IEEE80211_FC0_TYPE_DATA)
2939 tx->id = WPI_ID_BROADCAST;
2940 else {
2941 if (wn->id == WPI_ID_UNDEFINED) {
2942 device_printf(sc->sc_dev,
2943 "%s: undefined node id\n", __func__);
2944 return (EINVAL);
2947 tx->id = wn->id;
2950 if (!swcrypt) {
2951 switch (k->wk_cipher->ic_cipher) {
2952 case IEEE80211_CIPHER_AES_CCM:
2953 tx->security = WPI_CIPHER_CCMP;
2954 break;
2956 default:
2957 break;
2960 memcpy(tx->key, k->wk_key, k->wk_keylen);
2963 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
2964 struct mbuf *next = m->m_nextpkt;
2966 tx->lnext = htole16(next->m_pkthdr.len);
2967 tx->fnext = htole32(tx->security |
2968 (flags & WPI_TX_NEED_ACK) |
2969 WPI_NEXT_STA_ID(tx->id));
2972 tx->len = htole16(totlen);
2973 tx->flags = htole32(flags);
2974 tx->plcp = rate2plcp(rate);
2975 tx->tid = tid;
2976 tx->lifetime = htole32(WPI_LIFETIME_INFINITE);
2977 tx->ofdm_mask = 0xff;
2978 tx->cck_mask = 0x0f;
2979 tx->rts_ntries = 7;
2980 tx->data_ntries = tp->maxretry;
2982 tx_data.ni = ni;
2983 tx_data.m = m;
2984 tx_data.size = sizeof(struct wpi_cmd_data);
2985 tx_data.code = WPI_CMD_TX_DATA;
2986 tx_data.ac = ac;
2988 return wpi_cmd2(sc, &tx_data);
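/*
 * Same as wpi_tx_data(), but for frames injected through BPF: the rate,
 * ACK/RTS/CTS policy and retry counts come from the caller-supplied
 * ieee80211_bpf_params instead of the node and WME state.
 */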
2991 static int
2992 wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m,
2993 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
2995 struct ieee80211vap *vap = ni->ni_vap;
2996 struct ieee80211_key *k = NULL;
2997 struct ieee80211_frame *wh;
2998 struct wpi_buf tx_data;
2999 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data;
3000 uint32_t flags;
3001 uint8_t ac, type, rate;
3002 int swcrypt, totlen;
3004 wh = mtod(m, struct ieee80211_frame *);
3005 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3006 swcrypt = 1;
3008 ac = params->ibp_pri & 3;
3010 /* Choose a TX rate index. */
3011 rate = params->ibp_rate0;
3013 flags = 0;
3014 if (!IEEE80211_QOS_HAS_SEQ(wh))
3015 flags |= WPI_TX_AUTO_SEQ;
3016 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
3017 flags |= WPI_TX_NEED_ACK;
3018 if (params->ibp_flags & IEEE80211_BPF_RTS)
3019 flags |= WPI_TX_NEED_RTS;
3020 if (params->ibp_flags & IEEE80211_BPF_CTS)
3021 flags |= WPI_TX_NEED_CTS;
3022 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS))
3023 flags |= WPI_TX_FULL_TXOP;
3025 /* Encrypt the frame if need be. */
3026 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) {
3027 /* Retrieve key for TX. */
3028 k = ieee80211_crypto_encap(ni, m);
3029 if (k == NULL)
3030 return (ENOBUFS);
3032 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT;
3034 /* 802.11 header may have moved. */
3035 wh = mtod(m, struct ieee80211_frame *);
3037 totlen = m->m_pkthdr.len;
3039 if (ieee80211_radiotap_active_vap(vap)) {
3040 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap;
3042 tap->wt_flags = 0;
3043 tap->wt_rate = rate;
3044 if (params->ibp_flags & IEEE80211_BPF_CRYPTO)
3045 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3047 ieee80211_radiotap_tx(vap, m);
3050 memset(tx, 0, sizeof (struct wpi_cmd_data));
3051 if (type == IEEE80211_FC0_TYPE_MGT) {
3052 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3054 /* Tell HW to set timestamp in probe responses. */
3055 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3056 flags |= WPI_TX_INSERT_TSTAMP;
3057 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3058 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3059 tx->timeout = htole16(3);
3060 else
3061 tx->timeout = htole16(2);
3064 if (!swcrypt) {
3065 switch (k->wk_cipher->ic_cipher) {
3066 case IEEE80211_CIPHER_AES_CCM:
3067 tx->security = WPI_CIPHER_CCMP;
3068 break;
3070 default:
3071 break;
3074 memcpy(tx->key, k->wk_key, k->wk_keylen);
3077 tx->len = htole16(totlen);
3078 tx->flags = htole32(flags);
3079 tx->plcp = rate2plcp(rate);
3080 tx->id = WPI_ID_BROADCAST;
3081 tx->lifetime = htole32(WPI_LIFETIME_INFINITE);
3082 tx->rts_ntries = params->ibp_try1;
3083 tx->data_ntries = params->ibp_try0;
3085 tx_data.ni = ni;
3086 tx_data.m = m;
3087 tx_data.size = sizeof(struct wpi_cmd_data);
3088 tx_data.code = WPI_CMD_TX_DATA;
3089 tx_data.ac = ac;
3091 return wpi_cmd2(sc, &tx_data);
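/*
 * Return the number of slots still available on a TX ring before its
 * high-water mark is reached.
 */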
3094 static __inline int
3095 wpi_tx_ring_free_space(struct wpi_softc *sc, uint16_t ac)
3097 struct wpi_tx_ring *ring = &sc->txq[ac];
3098 int retval;
3100 WPI_TXQ_STATE_LOCK(sc);
3101 retval = WPI_TX_RING_HIMARK - ring->queued;
3102 WPI_TXQ_STATE_UNLOCK(sc);
3104 return retval;
3107 static int
3108 wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3109 const struct ieee80211_bpf_params *params)
3111 struct ieee80211com *ic = ni->ni_ic;
3112 struct wpi_softc *sc = ic->ic_softc;
3113 uint16_t ac;
3114 int error = 0;
3116 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
3118 ac = M_WME_GETAC(m);
3120 WPI_TX_LOCK(sc);
3122 /* NB: no fragments here */
3123 if (sc->sc_running == 0 || wpi_tx_ring_free_space(sc, ac) < 1) {
3124 error = sc->sc_running ? ENOBUFS : ENETDOWN;
3125 goto unlock;
3128 if (params == NULL) {
3130 * Legacy path; interpret frame contents to decide
3131 * precisely how to send the frame.
3133 error = wpi_tx_data(sc, m, ni);
3134 } else {
3136 * Caller supplied explicit parameters to use in
3137 * sending the frame.
3139 error = wpi_tx_data_raw(sc, m, ni, params);
3142 unlock: WPI_TX_UNLOCK(sc);
3144 if (error != 0) {
3145 m_freem(m);
3146 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
3148 return error;
3151 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
3153 return 0;
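/*
 * net80211 transmit entry point.  The mbufs in a fragment chain are counted
 * up front so the whole burst is either queued or rejected, then each
 * fragment is fed to wpi_tx_data().
 */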
3156 static int
3157 wpi_transmit(struct ieee80211com *ic, struct mbuf *m)
3159 struct wpi_softc *sc = ic->ic_softc;
3160 struct ieee80211_node *ni;
3161 struct mbuf *mnext;
3162 uint16_t ac;
3163 int error, nmbufs;
3165 WPI_TX_LOCK(sc);
3166 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__);
3168 /* Check if interface is up & running. */
3169 if (__predict_false(sc->sc_running == 0)) {
3170 error = ENXIO;
3171 goto unlock;
3174 nmbufs = 1;
3175 for (mnext = m->m_nextpkt; mnext != NULL; mnext = mnext->m_nextpkt)
3176 nmbufs++;
3178 /* Check for available space. */
3179 ac = M_WME_GETAC(m);
3180 if (wpi_tx_ring_free_space(sc, ac) < nmbufs) {
3181 error = ENOBUFS;
3182 goto unlock;
3185 error = 0;
3186 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3187 do {
3188 mnext = m->m_nextpkt;
3189 if (wpi_tx_data(sc, m, ni) != 0) {
3190 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS,
3191 nmbufs);
3192 wpi_free_txfrags(sc, ac);
3193 ieee80211_free_mbuf(m);
3194 ieee80211_free_node(ni);
3195 break;
3197 } while((m = mnext) != NULL);
3199 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__);
3201 unlock: WPI_TX_UNLOCK(sc);
3203 return (error);
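/*
 * Poll the RF kill switch once per second while the radio is disabled and
 * schedule the radio-on task as soon as the switch is released.
 */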
3206 static void
3207 wpi_watchdog_rfkill(void *arg)
3209 struct wpi_softc *sc = arg;
3210 struct ieee80211com *ic = &sc->sc_ic;
3212 DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n");
3214 /* No need to lock firmware memory. */
3215 if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) {
3216 /* Radio kill switch is still off. */
3217 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill,
3218 sc);
3219 } else
3220 ieee80211_runtask(ic, &sc->sc_radioon_task);
3223 static void
3224 wpi_scan_timeout(void *arg)
3226 struct wpi_softc *sc = arg;
3227 struct ieee80211com *ic = &sc->sc_ic;
3229 ic_printf(ic, "scan timeout\n");
3230 ieee80211_restart_all(ic);
3233 static void
3234 wpi_tx_timeout(void *arg)
3236 struct wpi_softc *sc = arg;
3237 struct ieee80211com *ic = &sc->sc_ic;
3239 ic_printf(ic, "device timeout\n");
3240 ieee80211_restart_all(ic);
3243 static void
3244 wpi_parent(struct ieee80211com *ic)
3246 struct wpi_softc *sc = ic->ic_softc;
3247 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3249 if (ic->ic_nrunning > 0) {
3250 if (wpi_init(sc) == 0) {
3251 ieee80211_notify_radio(ic, 1);
3252 ieee80211_start_all(ic);
3253 } else {
3254 ieee80211_notify_radio(ic, 0);
3255 ieee80211_stop(vap);
3257 } else {
3258 ieee80211_notify_radio(ic, 0);
3259 wpi_stop(sc);
3264 * Send a command to the firmware.
3266 static int
3267 wpi_cmd(struct wpi_softc *sc, uint8_t code, const void *buf, uint16_t size,
3268 int async)
3270 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM];
3271 struct wpi_tx_desc *desc;
3272 struct wpi_tx_data *data;
3273 struct wpi_tx_cmd *cmd;
3274 struct mbuf *m;
3275 bus_addr_t paddr;
3276 uint16_t totlen;
3277 int error;
3279 WPI_TXQ_LOCK(sc);
3281 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
3283 if (__predict_false(sc->sc_running == 0)) {
3284 /* wpi_stop() was called */
3285 if (code == WPI_CMD_SCAN)
3286 error = ENETDOWN;
3287 else
3288 error = 0;
3290 goto fail;
3293 if (async == 0)
3294 WPI_LOCK_ASSERT(sc);
3296 DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %u async %d\n",
3297 __func__, wpi_cmd_str(code), size, async);
3299 desc = &ring->desc[ring->cur];
3300 data = &ring->data[ring->cur];
3301 totlen = 4 + size;
3303 if (size > sizeof cmd->data) {
3304 /* Command is too large to fit in a descriptor. */
3305 if (totlen > MCLBYTES) {
3306 error = EINVAL;
3307 goto fail;
3309 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
3310 if (m == NULL) {
3311 error = ENOMEM;
3312 goto fail;
3314 cmd = mtod(m, struct wpi_tx_cmd *);
3315 error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
3316 totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
3317 if (error != 0) {
3318 m_freem(m);
3319 goto fail;
3321 data->m = m;
3322 } else {
3323 cmd = &ring->cmd[ring->cur];
3324 paddr = data->cmd_paddr;
3327 cmd->code = code;
3328 cmd->flags = 0;
3329 cmd->qid = ring->qid;
3330 cmd->idx = ring->cur;
3331 memcpy(cmd->data, buf, size);
3333 desc->nsegs = 1 + (WPI_PAD32(size) << 4);
3334 desc->segs[0].addr = htole32(paddr);
3335 desc->segs[0].len = htole32(totlen);
3337 if (size > sizeof cmd->data) {
3338 bus_dmamap_sync(ring->data_dmat, data->map,
3339 BUS_DMASYNC_PREWRITE);
3340 } else {
3341 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3342 BUS_DMASYNC_PREWRITE);
3344 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3345 BUS_DMASYNC_PREWRITE);
3347 /* Kick command ring. */
3348 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT;
3349 sc->sc_update_tx_ring(sc, ring);
3351 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
3353 WPI_TXQ_UNLOCK(sc);
3355 #if defined(__DragonFly__)
3356 return async ? 0 : lksleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz);
3357 #else
3358 return async ? 0 : mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz);
3359 #endif
3361 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
3363 WPI_TXQ_UNLOCK(sc);
3365 return error;
3369 * Configure HW multi-rate retries.
3371 static int
3372 wpi_mrr_setup(struct wpi_softc *sc)
3374 struct ieee80211com *ic = &sc->sc_ic;
3375 struct wpi_mrr_setup mrr;
3376 uint8_t i;
3377 int error;
3379 /* CCK rates (not used with 802.11a). */
3380 for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) {
3381 mrr.rates[i].flags = 0;
3382 mrr.rates[i].plcp = wpi_ridx_to_plcp[i];
3383 /* Fall back to the immediately lower CCK rate (if any). */
3384 mrr.rates[i].next =
3385 (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1;
3386 /* Try twice at this rate before falling back to "next". */
3387 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT;
3389 /* OFDM rates (not used with 802.11b). */
3390 for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) {
3391 mrr.rates[i].flags = 0;
3392 mrr.rates[i].plcp = wpi_ridx_to_plcp[i];
3393 /* Fall back to the immediately lower rate (if any). */
3394 /* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. */
3395 mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ?
3396 ((ic->ic_curmode == IEEE80211_MODE_11A) ?
3397 WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) :
3398 i - 1;
3399 /* Try twice at this rate before falling back to "next". */
3400 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT;
3402 /* Setup MRR for control frames. */
3403 mrr.which = htole32(WPI_MRR_CTL);
3404 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0);
3405 if (error != 0) {
3406 device_printf(sc->sc_dev,
3407 "could not setup MRR for control frames\n");
3408 return error;
3410 /* Setup MRR for data frames. */
3411 mrr.which = htole32(WPI_MRR_DATA);
3412 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0);
3413 if (error != 0) {
3414 device_printf(sc->sc_dev,
3415 "could not setup MRR for data frames\n");
3416 return error;
3418 return 0;
3421 static int
3422 wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni)
3424 struct ieee80211com *ic = ni->ni_ic;
3425 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap);
3426 struct wpi_node *wn = WPI_NODE(ni);
3427 struct wpi_node_info node;
3428 int error;
3430 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3432 if (wn->id == WPI_ID_UNDEFINED)
3433 return EINVAL;
3435 memset(&node, 0, sizeof node);
3436 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
3437 node.id = wn->id;
3438 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ?
3439 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1];
3440 node.action = htole32(WPI_ACTION_SET_RATE);
3441 node.antenna = WPI_ANTENNA_BOTH;
3443 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__,
3444 wn->id, ether_sprintf(ni->ni_macaddr));
3446 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1);
3447 if (error != 0) {
3448 device_printf(sc->sc_dev,
3449 "%s: wpi_cmd() call failed with error code %d\n", __func__,
3450 error);
3451 return error;
3454 if (wvp->wv_gtk != 0) {
3455 error = wpi_set_global_keys(ni);
3456 if (error != 0) {
3457 device_printf(sc->sc_dev,
3458 "%s: error while setting global keys\n", __func__);
3459 return ENXIO;
3463 return 0;
3467 * The broadcast node is used to send group-addressed and management frames.
3469 static int
3470 wpi_add_broadcast_node(struct wpi_softc *sc, int async)
3472 struct ieee80211com *ic = &sc->sc_ic;
3473 struct wpi_node_info node;
3475 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3477 memset(&node, 0, sizeof node);
3478 IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr);
3479 node.id = WPI_ID_BROADCAST;
3480 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ?
3481 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1];
3482 node.action = htole32(WPI_ACTION_SET_RATE);
3483 node.antenna = WPI_ANTENNA_BOTH;
3485 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__);
3487 return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async);
3490 static int
3491 wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni)
3493 struct wpi_node *wn = WPI_NODE(ni);
3494 int error;
3496 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3498 wn->id = wpi_add_node_entry_sta(sc);
3500 if ((error = wpi_add_node(sc, ni)) != 0) {
3501 wpi_del_node_entry(sc, wn->id);
3502 wn->id = WPI_ID_UNDEFINED;
3503 return error;
3506 return 0;
3509 static int
3510 wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni)
3512 struct wpi_node *wn = WPI_NODE(ni);
3513 int error;
3515 KASSERT(wn->id == WPI_ID_UNDEFINED,
3516 ("the node %d was added before", wn->id));
3518 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3520 if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) {
3521 device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__);
3522 return ENOMEM;
3525 if ((error = wpi_add_node(sc, ni)) != 0) {
3526 wpi_del_node_entry(sc, wn->id);
3527 wn->id = WPI_ID_UNDEFINED;
3528 return error;
3531 return 0;
3534 static void
3535 wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni)
3537 struct wpi_node *wn = WPI_NODE(ni);
3538 struct wpi_cmd_del_node node;
3539 int error;
3541 KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed"));
3543 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3545 memset(&node, 0, sizeof node);
3546 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
3547 node.count = 1;
3549 DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__,
3550 wn->id, ether_sprintf(ni->ni_macaddr));
3552 error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1);
3553 if (error != 0) {
3554 device_printf(sc->sc_dev,
3555 "%s: could not delete node %u, error %d\n", __func__,
3556 wn->id, error);
3560 static int
3561 wpi_updateedca(struct ieee80211com *ic)
3563 #define WPI_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
3564 struct wpi_softc *sc = ic->ic_softc;
3565 struct wpi_edca_params cmd;
3566 int aci, error;
3568 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
3570 memset(&cmd, 0, sizeof cmd);
3571 cmd.flags = htole32(WPI_EDCA_UPDATE);
3572 for (aci = 0; aci < WME_NUM_AC; aci++) {
3573 const struct wmeParams *ac =
3574 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
3575 cmd.ac[aci].aifsn = ac->wmep_aifsn;
3576 cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin));
3577 cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax));
3578 cmd.ac[aci].txoplimit =
3579 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
3581 DPRINTF(sc, WPI_DEBUG_EDCA,
3582 "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d "
3583 "txoplimit=%d\n", aci, cmd.ac[aci].aifsn,
3584 cmd.ac[aci].cwmin, cmd.ac[aci].cwmax,
3585 cmd.ac[aci].txoplimit);
3587 error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
3589 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
3591 return error;
3592 #undef WPI_EXP2
3595 static void
3596 wpi_set_promisc(struct wpi_softc *sc)
3598 struct ieee80211com *ic = &sc->sc_ic;
3599 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3600 uint32_t promisc_filter;
3602 promisc_filter = WPI_FILTER_CTL;
3603 if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP)
3604 promisc_filter |= WPI_FILTER_PROMISC;
3606 if (ic->ic_promisc > 0)
3607 sc->rxon.filter |= htole32(promisc_filter);
3608 else
3609 sc->rxon.filter &= ~htole32(promisc_filter);
3612 static void
3613 wpi_update_promisc(struct ieee80211com *ic)
3615 struct wpi_softc *sc = ic->ic_softc;
3617 WPI_LOCK(sc);
3618 if (sc->sc_running == 0) {
3619 WPI_UNLOCK(sc);
3620 return;
3622 WPI_UNLOCK(sc);
3624 WPI_RXON_LOCK(sc);
3625 wpi_set_promisc(sc);
3627 if (wpi_send_rxon(sc, 1, 1) != 0) {
3628 device_printf(sc->sc_dev, "%s: could not send RXON\n",
3629 __func__);
3631 WPI_RXON_UNLOCK(sc);
3634 static void
3635 wpi_update_mcast(struct ieee80211com *ic)
3637 /* Ignore */
3640 static void
3641 wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on)
3643 struct wpi_cmd_led led;
3645 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3647 led.which = which;
3648 led.unit = htole32(100000); /* on/off in units of 100 ms */
3649 led.off = off;
3650 led.on = on;
3651 (void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1);
3654 static int
3655 wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni)
3657 struct wpi_cmd_timing cmd;
3658 uint64_t val, mod;
3660 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3662 memset(&cmd, 0, sizeof cmd);
3663 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
3664 cmd.bintval = htole16(ni->ni_intval);
3665 cmd.lintval = htole16(10);
3667 /* Compute remaining time until next beacon. */
3668 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
3669 mod = le64toh(cmd.tstamp) % val;
3670 cmd.binitval = htole32((uint32_t)(val - mod));
3672 DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
3673 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
3675 return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1);
3679 * This function is called periodically (every 60 seconds) to adjust output
3680 * power to temperature changes.
3682 static void
3683 wpi_power_calibration(struct wpi_softc *sc)
3685 int temp;
3687 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3689 /* Update sensor data. */
3690 temp = (int)WPI_READ(sc, WPI_UCODE_GP2);
3691 DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp);
3693 /* Sanity-check read value. */
3694 if (temp < -260 || temp > 25) {
3695 /* This can't be correct, ignore. */
3696 DPRINTF(sc, WPI_DEBUG_TEMP,
3697 "out-of-range temperature reported: %d\n", temp);
3698 return;
3701 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp);
3703 /* Adjust Tx power if need be. */
3704 if (abs(temp - sc->temp) <= 6)
3705 return;
3707 sc->temp = temp;
3709 if (wpi_set_txpower(sc, 1) != 0) {
3710 /* just warn, too bad for the automatic calibration... */
3711 device_printf(sc->sc_dev,"could not adjust Tx power\n");
3716 * Set TX power for current channel.
3718 static int
3719 wpi_set_txpower(struct wpi_softc *sc, int async)
3721 struct wpi_power_group *group;
3722 struct wpi_cmd_txpower cmd;
3723 uint8_t chan;
3724 int idx, is_chan_5ghz, i;
3726 /* Retrieve current channel from last RXON. */
3727 chan = sc->rxon.chan;
3728 is_chan_5ghz = (sc->rxon.flags & htole32(WPI_RXON_24GHZ)) == 0;
3730 /* Find the TX power group to which this channel belongs. */
3731 if (is_chan_5ghz) {
3732 for (group = &sc->groups[1]; group < &sc->groups[4]; group++)
3733 if (chan <= group->chan)
3734 break;
3735 } else
3736 group = &sc->groups[0];
3738 memset(&cmd, 0, sizeof cmd);
3739 cmd.band = is_chan_5ghz ? WPI_BAND_5GHZ : WPI_BAND_2GHZ;
3740 cmd.chan = htole16(chan);
3742 /* Set TX power for all OFDM and CCK rates. */
3743 for (i = 0; i <= WPI_RIDX_MAX ; i++) {
3744 /* Retrieve TX power for this channel/rate. */
3745 idx = wpi_get_power_index(sc, group, chan, is_chan_5ghz, i);
3747 cmd.rates[i].plcp = wpi_ridx_to_plcp[i];
3749 if (is_chan_5ghz) {
3750 cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx];
3751 cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx];
3752 } else {
3753 cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx];
3754 cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx];
3756 DPRINTF(sc, WPI_DEBUG_TEMP,
3757 "chan %d/ridx %d: power index %d\n", chan, i, idx);
3760 return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async);
3764 * Determine Tx power index for a given channel/rate combination.
3765 * This takes into account the regulatory information from EEPROM and the
3766 * current temperature.
3768 static int
3769 wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group,
3770 uint8_t chan, int is_chan_5ghz, int ridx)
3772 /* Fixed-point arithmetic division using an n-bit fractional part. */
3773 #define fdivround(a, b, n) \
3774 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
3776 /* Linear interpolation. */
3777 #define interpolate(x, x1, y1, x2, y2, n) \
3778 ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
3780 struct wpi_power_sample *sample;
3781 int pwr, idx;
3783 /* Default TX power is group maximum TX power minus 3dB. */
3784 pwr = group->maxpwr / 2;
3786 /* Decrease TX power for highest OFDM rates to reduce distortion. */
3787 switch (ridx) {
3788 case WPI_RIDX_OFDM36:
3789 pwr -= is_chan_5ghz ? 5 : 0;
3790 break;
3791 case WPI_RIDX_OFDM48:
3792 pwr -= is_chan_5ghz ? 10 : 7;
3793 break;
3794 case WPI_RIDX_OFDM54:
3795 pwr -= is_chan_5ghz ? 12 : 9;
3796 break;
3799 /* Never exceed the channel maximum allowed TX power. */
3800 pwr = min(pwr, sc->maxpwr[chan]);
3802 /* Retrieve TX power index into gain tables from samples. */
3803 for (sample = group->samples; sample < &group->samples[3]; sample++)
3804 if (pwr > sample[1].power)
3805 break;
3806 /* Fixed-point linear interpolation using a 19-bit fractional part. */
3807 idx = interpolate(pwr, sample[0].power, sample[0].index,
3808 sample[1].power, sample[1].index, 19);
3811 * Adjust power index based on current temperature:
3812 * - if cooler than factory-calibrated: decrease output power
3813 * - if warmer than factory-calibrated: increase output power
3815 idx -= (sc->temp - group->temp) * 11 / 100;
3817 /* Decrease TX power for CCK rates (-5dB). */
3818 if (ridx >= WPI_RIDX_CCK1)
3819 idx += 10;
3821 /* Make sure idx stays in a valid range. */
3822 if (idx < 0)
3823 return 0;
3824 if (idx > WPI_MAX_PWR_INDEX)
3825 return WPI_MAX_PWR_INDEX;
3826 return idx;
3828 #undef interpolate
3829 #undef fdivround
3833 * Set STA mode power saving level (between 0 and 5).
3834 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
3836 static int
3837 wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async)
3839 struct wpi_pmgt_cmd cmd;
3840 const struct wpi_pmgt *pmgt;
3841 uint32_t max, reg;
3842 uint8_t skip_dtim;
3843 int i;
3845 DPRINTF(sc, WPI_DEBUG_PWRSAVE,
3846 "%s: dtim=%d, level=%d, async=%d\n",
3847 __func__, dtim, level, async);
3849 /* Select which PS parameters to use. */
3850 if (dtim <= 10)
3851 pmgt = &wpi_pmgt[0][level];
3852 else
3853 pmgt = &wpi_pmgt[1][level];
3855 memset(&cmd, 0, sizeof cmd);
3856 if (level != 0) /* not CAM */
3857 cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP);
3858 /* Retrieve PCIe Active State Power Management (ASPM). */
3859 #if defined(__DragonFly__)
3860 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINKCTRL, 1);
3861 if (!(reg & PCIEM_LNKCTL_ASPM_L0S)) /* L0s Entry disabled. */
3862 cmd.flags |= htole16(WPI_PS_PCI_PMGT);
3863 #else
3864 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 1);
3865 if (!(reg & PCIEM_LINK_CTL_ASPMC_L0S)) /* L0s Entry disabled. */
3866 cmd.flags |= htole16(WPI_PS_PCI_PMGT);
3867 #endif
3869 cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU);
3870 cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU);
3872 if (dtim == 0) {
3873 dtim = 1;
3874 skip_dtim = 0;
3875 } else
3876 skip_dtim = pmgt->skip_dtim;
3878 if (skip_dtim != 0) {
3879 cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM);
3880 max = pmgt->intval[4];
3881 if (max == (uint32_t)-1)
3882 max = dtim * (skip_dtim + 1);
3883 else if (max > dtim)
3884 max = rounddown(max, dtim);
3885 } else
3886 max = dtim;
3888 for (i = 0; i < 5; i++)
3889 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
3891 return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
3894 static int
3895 wpi_send_btcoex(struct wpi_softc *sc)
3897 struct wpi_bluetooth cmd;
3899 memset(&cmd, 0, sizeof cmd);
3900 cmd.flags = WPI_BT_COEX_MODE_4WIRE;
3901 cmd.lead_time = WPI_BT_LEAD_TIME_DEF;
3902 cmd.max_kill = WPI_BT_MAX_KILL_DEF;
3903 DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
3904 __func__);
3905 return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
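/*
 * Push the current RXON configuration to the firmware.  While associated a
 * lightweight RXON_ASSOC update is used; otherwise a full RXON is sent,
 * which wipes the firmware node table, so the broadcast node and TX power
 * must be set up again afterwards.
 */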
3908 static int
3909 wpi_send_rxon(struct wpi_softc *sc, int assoc, int async)
3911 int error;
3913 if (async)
3914 WPI_RXON_LOCK_ASSERT(sc);
3916 if (assoc && wpi_check_bss_filter(sc) != 0) {
3917 struct wpi_assoc rxon_assoc;
3919 rxon_assoc.flags = sc->rxon.flags;
3920 rxon_assoc.filter = sc->rxon.filter;
3921 rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask;
3922 rxon_assoc.cck_mask = sc->rxon.cck_mask;
3923 rxon_assoc.reserved = 0;
3925 error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc,
3926 sizeof (struct wpi_assoc), async);
3927 if (error != 0) {
3928 device_printf(sc->sc_dev,
3929 "RXON_ASSOC command failed, error %d\n", error);
3930 return error;
3932 } else {
3933 if (async) {
3934 WPI_NT_LOCK(sc);
3935 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon,
3936 sizeof (struct wpi_rxon), async);
3937 if (error == 0)
3938 wpi_clear_node_table(sc);
3939 WPI_NT_UNLOCK(sc);
3940 } else {
3941 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon,
3942 sizeof (struct wpi_rxon), async);
3943 if (error == 0)
3944 wpi_clear_node_table(sc);
3947 if (error != 0) {
3948 device_printf(sc->sc_dev,
3949 "RXON command failed, error %d\n", error);
3950 return error;
3953 /* Add broadcast node. */
3954 error = wpi_add_broadcast_node(sc, async);
3955 if (error != 0) {
3956 device_printf(sc->sc_dev,
3957 "could not add broadcast node, error %d\n", error);
3958 return error;
3962 /* Configuration has changed, set Tx power accordingly. */
3963 if ((error = wpi_set_txpower(sc, async)) != 0) {
3964 device_printf(sc->sc_dev,
3965 "%s: could not set TX power, error %d\n", __func__, error);
3966 return error;
3969 return 0;
3973 * Configure the card to listen on a particular channel; this transitions the
3974 * card into a state where it can receive frames from remote devices.
3976 static int
3977 wpi_config(struct wpi_softc *sc)
3979 struct ieee80211com *ic = &sc->sc_ic;
3980 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3981 struct ieee80211_channel *c = ic->ic_curchan;
3982 int error;
3984 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
3986 /* Set power saving level to CAM during initialization. */
3987 if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) {
3988 device_printf(sc->sc_dev,
3989 "%s: could not set power saving level\n", __func__);
3990 return error;
3993 /* Configure bluetooth coexistence. */
3994 if ((error = wpi_send_btcoex(sc)) != 0) {
3995 device_printf(sc->sc_dev,
3996 "could not configure bluetooth coexistence\n");
3997 return error;
4000 /* Configure adapter. */
4001 memset(&sc->rxon, 0, sizeof (struct wpi_rxon));
4002 IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr);
4004 /* Set default channel. */
4005 sc->rxon.chan = ieee80211_chan2ieee(ic, c);
4006 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
4007 if (IEEE80211_IS_CHAN_2GHZ(c))
4008 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
4010 sc->rxon.filter = WPI_FILTER_MULTICAST;
4011 switch (ic->ic_opmode) {
4012 case IEEE80211_M_STA:
4013 sc->rxon.mode = WPI_MODE_STA;
4014 break;
4015 case IEEE80211_M_IBSS:
4016 sc->rxon.mode = WPI_MODE_IBSS;
4017 sc->rxon.filter |= WPI_FILTER_BEACON;
4018 break;
4019 case IEEE80211_M_HOSTAP:
4020 /* XXX workaround for beaconing */
4021 sc->rxon.mode = WPI_MODE_IBSS;
4022 sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC;
4023 break;
4024 case IEEE80211_M_AHDEMO:
4025 sc->rxon.mode = WPI_MODE_HOSTAP;
4026 break;
4027 case IEEE80211_M_MONITOR:
4028 sc->rxon.mode = WPI_MODE_MONITOR;
4029 break;
4030 default:
4031 device_printf(sc->sc_dev, "unknown opmode %d\n",
4032 ic->ic_opmode);
4033 return EINVAL;
4035 sc->rxon.filter = htole32(sc->rxon.filter);
4036 wpi_set_promisc(sc);
4037 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */
4038 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */
4040 if ((error = wpi_send_rxon(sc, 0, 0)) != 0) {
4041 device_printf(sc->sc_dev, "%s: could not send RXON\n",
4042 __func__);
4043 return error;
4046 /* Set up rate scaling. */
4047 if ((error = wpi_mrr_setup(sc)) != 0) {
4048 device_printf(sc->sc_dev, "could not setup MRR, error %d\n",
4049 error);
4050 return error;
4053 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
4055 return 0;
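/*
 * Compute the active scan dwell time for a channel, based on the band
 * (2GHz vs 5GHz) and the number of probe requests to be sent.
 */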
4058 static uint16_t
4059 wpi_get_active_dwell_time(struct wpi_softc *sc,
4060 struct ieee80211_channel *c, uint8_t n_probes)
4062 /* No channel? Default to 2GHz settings. */
4063 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
4064 return (WPI_ACTIVE_DWELL_TIME_2GHZ +
4065 WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1));
4068 /* 5GHz dwell time. */
4069 return (WPI_ACTIVE_DWELL_TIME_5GHZ +
4070 WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1));
4074 * Limit the total dwell time.
4076 * Returns the dwell time in milliseconds.
4078 static uint16_t
4079 wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time)
4081 struct ieee80211com *ic = &sc->sc_ic;
4082 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4083 uint16_t bintval = 0;
4085 /* bintval is in TU (1.024 ms). */
4086 if (vap != NULL)
4087 bintval = vap->iv_bss->ni_intval;
4090 * If it's non-zero, we should calculate the minimum of
4091 * it and the DWELL_BASE.
4093 * XXX Yes, the math should take into account that bintval
4094 * is 1.024 ms, not 1 ms.
4096 if (bintval > 0) {
4097 DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__,
4098 bintval);
4099 return (MIN(dwell_time, bintval - WPI_CHANNEL_TUNE_TIME * 2));
4102 /* No association context? Default. */
4103 return dwell_time;
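/*
 * Compute the passive scan dwell time for a channel; the result is
 * clamped to the beacon interval when we are associated.
 */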
4106 static uint16_t
4107 wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c)
4109 uint16_t passive;
4111 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c))
4112 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ;
4113 else
4114 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ;
4116 /* Clamp to the beacon interval if we're associated. */
4117 return (wpi_limit_dwell(sc, passive));
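/*
 * Encode a scan pause interval as a number of beacon intervals plus a
 * remainder in TU (clamped to WPI_PAUSE_MAX_TIME), as used for the
 * pause service time of background scans.
 */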
4120 static uint32_t
4121 wpi_get_scan_pause_time(uint32_t time, uint16_t bintval)
4123 uint32_t mod = (time % bintval) * IEEE80211_DUR_TU;
4124 uint32_t nbeacons = time / bintval;
4126 if (mod > WPI_PAUSE_MAX_TIME)
4127 mod = WPI_PAUSE_MAX_TIME;
4129 return WPI_PAUSE_SCAN(nbeacons, mod);
4133 * Send a scan request to the firmware.
4135 static int
4136 wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c)
4138 struct ieee80211com *ic = &sc->sc_ic;
4139 struct ieee80211_scan_state *ss = ic->ic_scan;
4140 struct ieee80211vap *vap = ss->ss_vap;
4141 struct wpi_scan_hdr *hdr;
4142 struct wpi_cmd_data *tx;
4143 struct wpi_scan_essid *essids;
4144 struct wpi_scan_chan *chan;
4145 struct ieee80211_frame *wh;
4146 struct ieee80211_rateset *rs;
4147 uint16_t bintval, buflen, dwell_active, dwell_passive;
4148 uint8_t *buf, *frm, i, nssid;
4149 int bgscan, error;
4151 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
4154 * We are absolutely not allowed to send a scan command when another
4155 * scan command is pending.
4157 if (callout_pending(&sc->scan_timeout)) {
4158 device_printf(sc->sc_dev, "%s: called whilst scanning!\n",
4159 __func__);
4160 error = EAGAIN;
4161 goto fail;
4164 bgscan = wpi_check_bss_filter(sc);
4165 bintval = vap->iv_bss->ni_intval;
4166 if (bgscan != 0 &&
4167 bintval < WPI_QUIET_TIME_DEFAULT + WPI_CHANNEL_TUNE_TIME * 2) {
4168 error = EOPNOTSUPP;
4169 goto fail;
4172 buf = kmalloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_INTWAIT | M_ZERO);
4173 if (buf == NULL) {
4174 device_printf(sc->sc_dev,
4175 "%s: could not allocate buffer for scan command\n",
4176 __func__);
4177 error = ENOMEM;
4178 goto fail;
4180 hdr = (struct wpi_scan_hdr *)buf;
4183 * Move to the next channel if no packets are received within 10 msecs
4184 * after sending the probe request.
4186 hdr->quiet_time = htole16(WPI_QUIET_TIME_DEFAULT);
4187 hdr->quiet_threshold = htole16(1);
4189 if (bgscan != 0) {
4191 * Max needs to be greater than active and passive and quiet!
4192 * It's also in microseconds!
4194 hdr->max_svc = htole32(250 * IEEE80211_DUR_TU);
4195 hdr->pause_svc = htole32(wpi_get_scan_pause_time(100,
4196 bintval));
4199 hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON);
4201 tx = (struct wpi_cmd_data *)(hdr + 1);
4202 tx->flags = htole32(WPI_TX_AUTO_SEQ);
4203 tx->id = WPI_ID_BROADCAST;
4204 tx->lifetime = htole32(WPI_LIFETIME_INFINITE);
4206 if (IEEE80211_IS_CHAN_5GHZ(c)) {
4207 /* Send probe requests at 6Mbps. */
4208 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6];
4209 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
4210 } else {
4211 hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO);
4212 /* Send probe requests at 1Mbps. */
4213 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1];
4214 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
4217 essids = (struct wpi_scan_essid *)(tx + 1);
4218 nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS);
4219 for (i = 0; i < nssid; i++) {
4220 essids[i].id = IEEE80211_ELEMID_SSID;
4221 essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN);
4222 memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len);
4223 #ifdef WPI_DEBUG
4224 if (sc->sc_debug & WPI_DEBUG_SCAN) {
4225 printf("Scanning Essid: ");
4226 ieee80211_print_essid(essids[i].data, essids[i].len);
4227 printf("\n");
4229 #endif
4233 * Build a probe request frame. Most of the following code is a
4234 * copy & paste of what is done in net80211.
4236 wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS);
4237 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4238 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4239 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4240 IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr);
4241 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
4242 IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr);
4244 frm = (uint8_t *)(wh + 1);
4245 frm = ieee80211_add_ssid(frm, NULL, 0);
4246 frm = ieee80211_add_rates(frm, rs);
4247 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
4248 frm = ieee80211_add_xrates(frm, rs);
4250 /* Set length of probe request. */
4251 tx->len = htole16(frm - (uint8_t *)wh);
4254 * Construct information about the channel that we
4255 * want to scan. The firmware expects this to be directly
4256 * after the scan probe request.
4258 chan = (struct wpi_scan_chan *)frm;
4259 chan->chan = ieee80211_chan2ieee(ic, c);
4260 chan->flags = 0;
4261 if (nssid) {
4262 hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT;
4263 chan->flags |= WPI_CHAN_NPBREQS(nssid);
4264 } else
4265 hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER;
4267 if (!IEEE80211_IS_CHAN_PASSIVE(c))
4268 chan->flags |= WPI_CHAN_ACTIVE;
4271 * Calculate the active/passive dwell times.
4273 dwell_active = wpi_get_active_dwell_time(sc, c, nssid);
4274 dwell_passive = wpi_get_passive_dwell_time(sc, c);
4276 /* Make sure they're valid. */
4277 if (dwell_active > dwell_passive)
4278 dwell_active = dwell_passive;
4280 chan->active = htole16(dwell_active);
4281 chan->passive = htole16(dwell_passive);
4283 chan->dsp_gain = 0x6e; /* Default level */
4285 if (IEEE80211_IS_CHAN_5GHZ(c))
4286 chan->rf_gain = 0x3b;
4287 else
4288 chan->rf_gain = 0x28;
4290 DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n",
4291 chan->chan, IEEE80211_IS_CHAN_PASSIVE(c));
4293 hdr->nchan++;
4295 if (hdr->nchan == 1 && sc->rxon.chan == chan->chan) {
4296 /* XXX Force probe request transmission. */
4297 memcpy(chan + 1, chan, sizeof (struct wpi_scan_chan));
4299 chan++;
4301 /* Reduce unnecessary delay. */
4302 chan->flags = 0;
4303 chan->passive = chan->active = hdr->quiet_time;
4305 hdr->nchan++;
4308 chan++;
4310 buflen = (uint8_t *)chan - buf;
4311 hdr->len = htole16(buflen);
4313 DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n",
4314 hdr->nchan);
4315 error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1);
4316 kfree(buf, M_DEVBUF);
4318 if (error != 0)
4319 goto fail;
4321 callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc);
4323 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
4325 return 0;
4327 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
4329 return error;
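/*
 * Update the adapter configuration for the BSS we are joining: program
 * its BSSID, channel, flags and rate masks into the RXON structure and
 * send it to the firmware.
 */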
4332 static int
4333 wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap)
4335 struct ieee80211com *ic = vap->iv_ic;
4336 struct ieee80211_node *ni = vap->iv_bss;
4337 struct ieee80211_channel *c = ni->ni_chan;
4338 int error;
4340 WPI_RXON_LOCK(sc);
4342 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
4344 /* Update adapter configuration. */
4345 sc->rxon.associd = 0;
4346 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
4347 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
4348 sc->rxon.chan = ieee80211_chan2ieee(ic, c);
4349 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
4350 if (IEEE80211_IS_CHAN_2GHZ(c))
4351 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
4352 if (ic->ic_flags & IEEE80211_F_SHSLOT)
4353 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT);
4354 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4355 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE);
4356 if (IEEE80211_IS_CHAN_A(c)) {
4357 sc->rxon.cck_mask = 0;
4358 sc->rxon.ofdm_mask = 0x15;
4359 } else if (IEEE80211_IS_CHAN_B(c)) {
4360 sc->rxon.cck_mask = 0x03;
4361 sc->rxon.ofdm_mask = 0;
4362 } else {
4363 /* Assume 802.11b/g. */
4364 sc->rxon.cck_mask = 0x0f;
4365 sc->rxon.ofdm_mask = 0x15;
4368 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
4369 sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask,
4370 sc->rxon.ofdm_mask);
4372 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
4373 device_printf(sc->sc_dev, "%s: could not send RXON\n",
4374 __func__);
4377 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
4379 WPI_RXON_UNLOCK(sc);
4381 return error;
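/*
 * Send the current beacon frame of the VAP to the firmware.  A duplicate
 * of the beacon mbuf is handed to the firmware command so that the
 * original can be reused by later beacon updates.
 */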
4384 static int
4385 wpi_config_beacon(struct wpi_vap *wvp)
4387 struct ieee80211vap *vap = &wvp->wv_vap;
4388 struct ieee80211com *ic = vap->iv_ic;
4389 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off;
4390 struct wpi_buf *bcn = &wvp->wv_bcbuf;
4391 struct wpi_softc *sc = ic->ic_softc;
4392 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data;
4393 struct ieee80211_tim_ie *tie;
4394 struct mbuf *m;
4395 uint8_t *ptr;
4396 int error;
4398 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4400 WPI_VAP_LOCK_ASSERT(wvp);
4402 cmd->len = htole16(bcn->m->m_pkthdr.len);
4403 cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ?
4404 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1];
4406 /* XXX seems to be unused */
4407 if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) {
4408 tie = (struct ieee80211_tim_ie *) bo->bo_tim;
4409 ptr = mtod(bcn->m, uint8_t *);
4411 cmd->tim = htole16(bo->bo_tim - ptr);
4412 cmd->timsz = tie->tim_len;
4415 /* Necessary for recursion in ieee80211_beacon_update(). */
4416 m = bcn->m;
4417 bcn->m = m_dup(m, M_NOWAIT);
4418 if (bcn->m == NULL) {
4419 device_printf(sc->sc_dev,
4420 "%s: could not copy beacon frame\n", __func__);
4421 error = ENOMEM;
4422 goto end;
4425 if ((error = wpi_cmd2(sc, bcn)) != 0) {
4426 device_printf(sc->sc_dev,
4427 "%s: could not update beacon frame, error %d", __func__,
4428 error);
4429 m_freem(bcn->m);
4432 /* Restore mbuf. */
4433 end: bcn->m = m;
4435 return error;
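/*
 * Allocate a beacon frame for the node and upload it to the firmware.
 */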
4438 static int
4439 wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni)
4441 struct ieee80211vap *vap = ni->ni_vap;
4442 struct wpi_vap *wvp = WPI_VAP(vap);
4443 struct wpi_buf *bcn = &wvp->wv_bcbuf;
4444 struct mbuf *m;
4445 int error;
4447 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4449 if (ni->ni_chan == IEEE80211_CHAN_ANYC)
4450 return EINVAL;
4452 m = ieee80211_beacon_alloc(ni);
4453 if (m == NULL) {
4454 device_printf(sc->sc_dev,
4455 "%s: could not allocate beacon frame\n", __func__);
4456 return ENOMEM;
4459 WPI_VAP_LOCK(wvp);
4460 if (bcn->m != NULL)
4461 m_freem(bcn->m);
4463 bcn->m = m;
4465 error = wpi_config_beacon(wvp);
4466 WPI_VAP_UNLOCK(wvp);
4468 return error;
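/*
 * Regenerate the beacon frame after one of its elements (e.g. the TIM)
 * has changed and push the updated frame to the firmware.
 */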
4471 static void
4472 wpi_update_beacon(struct ieee80211vap *vap, int item)
4474 struct wpi_softc *sc = vap->iv_ic->ic_softc;
4475 struct wpi_vap *wvp = WPI_VAP(vap);
4476 struct wpi_buf *bcn = &wvp->wv_bcbuf;
4477 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off;
4478 struct ieee80211_node *ni = vap->iv_bss;
4479 int mcast = 0;
4481 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
4483 WPI_VAP_LOCK(wvp);
4484 if (bcn->m == NULL) {
4485 bcn->m = ieee80211_beacon_alloc(ni);
4486 if (bcn->m == NULL) {
4487 device_printf(sc->sc_dev,
4488 "%s: could not allocate beacon frame\n", __func__);
4490 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR,
4491 __func__);
4493 WPI_VAP_UNLOCK(wvp);
4494 return;
4497 WPI_VAP_UNLOCK(wvp);
4499 if (item == IEEE80211_BEACON_TIM)
4500 mcast = 1; /* TODO */
4502 setbit(bo->bo_flags, item);
4503 ieee80211_beacon_update(ni, bcn->m, mcast);
4505 WPI_VAP_LOCK(wvp);
4506 wpi_config_beacon(wvp);
4507 WPI_VAP_UNLOCK(wvp);
4509 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
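/*
 * Handle a new association: in non-STA modes, add a firmware node entry
 * for the station if it does not have one yet.
 */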
4512 static void
4513 wpi_newassoc(struct ieee80211_node *ni, int isnew)
4515 struct ieee80211vap *vap = ni->ni_vap;
4516 struct wpi_softc *sc = ni->ni_ic->ic_softc;
4517 struct wpi_node *wn = WPI_NODE(ni);
4518 int error;
4520 WPI_NT_LOCK(sc);
4522 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4524 if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) {
4525 if ((error = wpi_add_ibss_node(sc, ni)) != 0) {
4526 device_printf(sc->sc_dev,
4527 "%s: could not add IBSS node, error %d\n",
4528 __func__, error);
4531 WPI_NT_UNLOCK(sc);
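/*
 * Bring the link up: program the association parameters into the RXON
 * structure, start the periodic calibration timer, set up beaconing
 * (IBSS/HostAP) or add the BSS node (STA), and update the link LED and
 * power-save settings.
 */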
4534 static int
4535 wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap)
4537 struct ieee80211com *ic = vap->iv_ic;
4538 struct ieee80211_node *ni = vap->iv_bss;
4539 struct ieee80211_channel *c = ni->ni_chan;
4540 int error;
4542 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
4544 if (vap->iv_opmode == IEEE80211_M_MONITOR) {
4545 /* Link LED blinks while monitoring. */
4546 wpi_set_led(sc, WPI_LED_LINK, 5, 5);
4547 return 0;
4550 /* XXX kernel panic workaround */
4551 if (c == IEEE80211_CHAN_ANYC) {
4552 device_printf(sc->sc_dev, "%s: incomplete configuration\n",
4553 __func__);
4554 return EINVAL;
4557 if ((error = wpi_set_timing(sc, ni)) != 0) {
4558 device_printf(sc->sc_dev,
4559 "%s: could not set timing, error %d\n", __func__, error);
4560 return error;
4563 /* Update adapter configuration. */
4564 WPI_RXON_LOCK(sc);
4565 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
4566 sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni));
4567 sc->rxon.chan = ieee80211_chan2ieee(ic, c);
4568 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
4569 if (IEEE80211_IS_CHAN_2GHZ(c))
4570 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
4571 if (ic->ic_flags & IEEE80211_F_SHSLOT)
4572 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT);
4573 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4574 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE);
4575 if (IEEE80211_IS_CHAN_A(c)) {
4576 sc->rxon.cck_mask = 0;
4577 sc->rxon.ofdm_mask = 0x15;
4578 } else if (IEEE80211_IS_CHAN_B(c)) {
4579 sc->rxon.cck_mask = 0x03;
4580 sc->rxon.ofdm_mask = 0;
4581 } else {
4582 /* Assume 802.11b/g. */
4583 sc->rxon.cck_mask = 0x0f;
4584 sc->rxon.ofdm_mask = 0x15;
4586 sc->rxon.filter |= htole32(WPI_FILTER_BSS);
4588 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n",
4589 sc->rxon.chan, sc->rxon.flags);
4591 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
4592 device_printf(sc->sc_dev, "%s: could not send RXON\n",
4593 __func__);
4594 return error;
4597 /* Start periodic calibration timer. */
4598 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc);
4600 WPI_RXON_UNLOCK(sc);
4602 if (vap->iv_opmode == IEEE80211_M_IBSS ||
4603 vap->iv_opmode == IEEE80211_M_HOSTAP) {
4604 if ((error = wpi_setup_beacon(sc, ni)) != 0) {
4605 device_printf(sc->sc_dev,
4606 "%s: could not setup beacon, error %d\n", __func__,
4607 error);
4608 return error;
4612 if (vap->iv_opmode == IEEE80211_M_STA) {
4613 /* Add BSS node. */
4614 WPI_NT_LOCK(sc);
4615 error = wpi_add_sta_node(sc, ni);
4616 WPI_NT_UNLOCK(sc);
4617 if (error != 0) {
4618 device_printf(sc->sc_dev,
4619 "%s: could not add BSS node, error %d\n", __func__,
4620 error);
4621 return error;
4625 /* Link LED always on while associated. */
4626 wpi_set_led(sc, WPI_LED_LINK, 0, 1);
4628 /* Enable power-saving mode if requested by user. */
4629 if ((vap->iv_flags & IEEE80211_F_PMGTON) &&
4630 vap->iv_opmode != IEEE80211_M_IBSS)
4631 (void)wpi_set_pslevel(sc, 0, 3, 1);
4633 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
4635 return 0;
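/*
 * Install a CCMP key for a node in the firmware node table.  Returns 1
 * on success, 0 on failure.
 */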
4638 static int
4639 wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k)
4641 const struct ieee80211_cipher *cip = k->wk_cipher;
4642 struct ieee80211vap *vap = ni->ni_vap;
4643 struct wpi_softc *sc = ni->ni_ic->ic_softc;
4644 struct wpi_node *wn = WPI_NODE(ni);
4645 struct wpi_node_info node;
4646 uint16_t kflags;
4647 int error;
4649 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4651 if (wpi_check_node_entry(sc, wn->id) == 0) {
4652 device_printf(sc->sc_dev, "%s: node does not exist\n",
4653 __func__);
4654 return 0;
4657 switch (cip->ic_cipher) {
4658 case IEEE80211_CIPHER_AES_CCM:
4659 kflags = WPI_KFLAG_CCMP;
4660 break;
4662 default:
4663 device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__,
4664 cip->ic_cipher);
4665 return 0;
4668 kflags |= WPI_KFLAG_KID(k->wk_keyix);
4669 if (k->wk_flags & IEEE80211_KEY_GROUP)
4670 kflags |= WPI_KFLAG_MULTICAST;
4672 memset(&node, 0, sizeof node);
4673 node.id = wn->id;
4674 node.control = WPI_NODE_UPDATE;
4675 node.flags = WPI_FLAG_KEY_SET;
4676 node.kflags = htole16(kflags);
4677 memcpy(node.key, k->wk_key, k->wk_keylen);
4678 again:
4679 DPRINTF(sc, WPI_DEBUG_KEY,
4680 "%s: setting %s key id %d for node %d (%s)\n", __func__,
4681 (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix,
4682 node.id, ether_sprintf(ni->ni_macaddr));
4684 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1);
4685 if (error != 0) {
4686 device_printf(sc->sc_dev, "can't update node info, error %d\n",
4687 error);
4688 return !error;
4691 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k &&
4692 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
4693 kflags |= WPI_KFLAG_MULTICAST;
4694 node.kflags = htole16(kflags);
4696 goto again;
4699 return 1;
4702 static void
4703 wpi_load_key_cb(void *arg, struct ieee80211_node *ni)
4705 const struct ieee80211_key *k = arg;
4706 struct ieee80211vap *vap = ni->ni_vap;
4707 struct wpi_softc *sc = ni->ni_ic->ic_softc;
4708 struct wpi_node *wn = WPI_NODE(ni);
4709 int error;
4711 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED)
4712 return;
4714 WPI_NT_LOCK(sc);
4715 error = wpi_load_key(ni, k);
4716 WPI_NT_UNLOCK(sc);
4718 if (error == 0) {
4719 device_printf(sc->sc_dev, "%s: error while setting key\n",
4720 __func__);
4724 static int
4725 wpi_set_global_keys(struct ieee80211_node *ni)
4727 struct ieee80211vap *vap = ni->ni_vap;
4728 struct ieee80211_key *wk = &vap->iv_nw_keys[0];
4729 int error = 1;
4731 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++)
4732 if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
4733 error = wpi_load_key(ni, wk);
4735 return !error;
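/*
 * Remove a key from a node's entry in the firmware node table.  Returns
 * 1 on success, 0 on failure.
 */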
4738 static int
4739 wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k)
4741 struct ieee80211vap *vap = ni->ni_vap;
4742 struct wpi_softc *sc = ni->ni_ic->ic_softc;
4743 struct wpi_node *wn = WPI_NODE(ni);
4744 struct wpi_node_info node;
4745 uint16_t kflags;
4746 int error;
4748 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4750 if (wpi_check_node_entry(sc, wn->id) == 0) {
4751 DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__);
4752 return 1; /* Nothing to do. */
4755 kflags = WPI_KFLAG_KID(k->wk_keyix);
4756 if (k->wk_flags & IEEE80211_KEY_GROUP)
4757 kflags |= WPI_KFLAG_MULTICAST;
4759 memset(&node, 0, sizeof node);
4760 node.id = wn->id;
4761 node.control = WPI_NODE_UPDATE;
4762 node.flags = WPI_FLAG_KEY_SET;
4763 node.kflags = htole16(kflags);
4764 again:
4765 DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n",
4766 __func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast",
4767 k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr));
4769 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1);
4770 if (error != 0) {
4771 device_printf(sc->sc_dev, "can't update node info, error %d\n",
4772 error);
4773 return !error;
4776 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k &&
4777 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
4778 kflags |= WPI_KFLAG_MULTICAST;
4779 node.kflags = htole16(kflags);
4781 goto again;
4784 return 1;
4787 static void
4788 wpi_del_key_cb(void *arg, struct ieee80211_node *ni)
4790 const struct ieee80211_key *k = arg;
4791 struct ieee80211vap *vap = ni->ni_vap;
4792 struct wpi_softc *sc = ni->ni_ic->ic_softc;
4793 struct wpi_node *wn = WPI_NODE(ni);
4794 int error;
4796 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED)
4797 return;
4799 WPI_NT_LOCK(sc);
4800 error = wpi_del_key(ni, k);
4801 WPI_NT_UNLOCK(sc);
4803 if (error == 0) {
4804 device_printf(sc->sc_dev, "%s: error while deleting key\n",
4805 __func__);
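/*
 * Common handler for the key_set/key_delete callbacks: install or remove
 * a hardware CCMP key, iterating over all stations for group keys.
 */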
4809 static int
4810 wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k,
4811 int set)
4813 struct ieee80211com *ic = vap->iv_ic;
4814 struct wpi_softc *sc = ic->ic_softc;
4815 struct wpi_vap *wvp = WPI_VAP(vap);
4816 struct ieee80211_node *ni;
4817 int error, ni_ref = 0;
4819 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4821 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
4822 /* Not for us. */
4823 return 1;
4826 if (!(k->wk_flags & IEEE80211_KEY_RECV)) {
4827 /* XMIT keys are handled in wpi_tx_data(). */
4828 return 1;
4831 /* Handle group keys. */
4832 if (&vap->iv_nw_keys[0] <= k &&
4833 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
4834 WPI_NT_LOCK(sc);
4835 if (set)
4836 wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix);
4837 else
4838 wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix);
4839 WPI_NT_UNLOCK(sc);
4841 if (vap->iv_state == IEEE80211_S_RUN) {
4842 ieee80211_iterate_nodes(&ic->ic_sta,
4843 set ? wpi_load_key_cb : wpi_del_key_cb,
4844 __DECONST(void *, k));
4847 return 1;
4850 switch (vap->iv_opmode) {
4851 case IEEE80211_M_STA:
4852 ni = vap->iv_bss;
4853 break;
4855 case IEEE80211_M_IBSS:
4856 case IEEE80211_M_AHDEMO:
4857 case IEEE80211_M_HOSTAP:
4858 ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr);
4859 if (ni == NULL)
4860 return 0; /* should not happen */
4862 ni_ref = 1;
4863 break;
4865 default:
4866 device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__,
4867 vap->iv_opmode);
4868 return 0;
4871 WPI_NT_LOCK(sc);
4872 if (set)
4873 error = wpi_load_key(ni, k);
4874 else
4875 error = wpi_del_key(ni, k);
4876 WPI_NT_UNLOCK(sc);
4878 if (ni_ref)
4879 ieee80211_node_decref(ni);
4881 return error;
4884 static int
4885 wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
4887 return wpi_process_key(vap, k, 1);
4890 static int
4891 wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
4893 return wpi_process_key(vap, k, 0);
4897 * This function is called after the runtime firmware notifies us of its
4898 * readiness (called in a process context).
4900 static int
4901 wpi_post_alive(struct wpi_softc *sc)
4903 int ntries, error;
4905 /* Check (again) that the radio is not disabled. */
4906 if ((error = wpi_nic_lock(sc)) != 0)
4907 return error;
4909 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4911 /* NB: Runtime firmware must be up and running. */
4912 if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) {
4913 device_printf(sc->sc_dev,
4914 "RF switch: radio disabled (%s)\n", __func__);
4915 wpi_nic_unlock(sc);
4916 return EPERM; /* :-) */
4918 wpi_nic_unlock(sc);
4920 /* Wait for thermal sensor to calibrate. */
4921 for (ntries = 0; ntries < 1000; ntries++) {
4922 if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0)
4923 break;
4924 DELAY(10);
4927 if (ntries == 1000) {
4928 device_printf(sc->sc_dev,
4929 "timeout waiting for thermal sensor calibration\n");
4930 return ETIMEDOUT;
4933 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp);
4934 return 0;
4938 * The firmware boot code is small and is intended to be copied directly into
4939 * the NIC internal memory (no DMA transfer).
4941 static int
4942 wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, uint32_t size)
4944 int error, ntries;
4946 DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size);
4948 size /= sizeof (uint32_t);
4950 if ((error = wpi_nic_lock(sc)) != 0)
4951 return error;
4953 /* Copy microcode image into NIC memory. */
4954 wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE,
4955 (const uint32_t *)ucode, size);
4957 wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0);
4958 wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE);
4959 wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size);
4961 /* Start boot load now. */
4962 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START);
4964 /* Wait for transfer to complete. */
4965 for (ntries = 0; ntries < 1000; ntries++) {
4966 uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS);
4967 DPRINTF(sc, WPI_DEBUG_HW,
4968 "firmware status=0x%x, val=0x%x, result=0x%x\n", status,
4969 WPI_FH_TX_STATUS_IDLE(6),
4970 status & WPI_FH_TX_STATUS_IDLE(6));
4971 if (status & WPI_FH_TX_STATUS_IDLE(6)) {
4972 DPRINTF(sc, WPI_DEBUG_HW,
4973 "Status Match! - ntries = %d\n", ntries);
4974 break;
4976 DELAY(10);
4978 if (ntries == 1000) {
4979 device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
4980 __func__);
4981 wpi_nic_unlock(sc);
4982 return ETIMEDOUT;
4985 /* Enable boot after power up. */
4986 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN);
4988 wpi_nic_unlock(sc);
4989 return 0;
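/*
 * Upload the firmware to the adapter: copy the init image into DMA-safe
 * memory, point the BSM at it, run the boot code and wait for the first
 * "alive" notification, then stage the runtime image the same way.
 */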
4992 static int
4993 wpi_load_firmware(struct wpi_softc *sc)
4995 struct wpi_fw_info *fw = &sc->fw;
4996 struct wpi_dma_info *dma = &sc->fw_dma;
4997 int error;
4999 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5001 /* Copy initialization sections into pre-allocated DMA-safe memory. */
5002 memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
5003 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
5004 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz);
5005 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
5007 /* Tell adapter where to find initialization sections. */
5008 if ((error = wpi_nic_lock(sc)) != 0)
5009 return error;
5010 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr);
5011 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz);
5012 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR,
5013 dma->paddr + WPI_FW_DATA_MAXSZ);
5014 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
5015 wpi_nic_unlock(sc);
5017 /* Load firmware boot code. */
5018 error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
5019 if (error != 0) {
5020 device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
5021 __func__);
5022 return error;
5025 /* Now press "execute". */
5026 WPI_WRITE(sc, WPI_RESET, 0);
5028 /* Wait at most one second for first alive notification. */
5029 #if defined(__DragonFly__)
5030 if ((error = lksleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) {
5031 #else
5032 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) {
5033 #endif
5034 device_printf(sc->sc_dev,
5035 "%s: timeout waiting for adapter to initialize, error %d\n",
5036 __func__, error);
5037 return error;
5040 /* Copy runtime sections into pre-allocated DMA-safe memory. */
5041 memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
5042 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
5043 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz);
5044 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
5046 /* Tell adapter where to find runtime sections. */
5047 if ((error = wpi_nic_lock(sc)) != 0)
5048 return error;
5049 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr);
5050 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz);
5051 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR,
5052 dma->paddr + WPI_FW_DATA_MAXSZ);
5053 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE,
5054 WPI_FW_UPDATED | fw->main.textsz);
5055 wpi_nic_unlock(sc);
5057 return 0;
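/*
 * Load the firmware image via firmware(9), validate its header and
 * record pointers to the boot, init and runtime sections.
 */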
5060 static int
5061 wpi_read_firmware(struct wpi_softc *sc)
5063 const struct firmware *fp;
5064 struct wpi_fw_info *fw = &sc->fw;
5065 const struct wpi_firmware_hdr *hdr;
5066 int error;
5068 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5070 DPRINTF(sc, WPI_DEBUG_FIRMWARE,
5071 "Attempting Loading Firmware from %s module\n", WPI_FW_NAME);
5073 WPI_UNLOCK(sc);
5074 fp = firmware_get(WPI_FW_NAME);
5075 WPI_LOCK(sc);
5077 if (fp == NULL) {
5078 device_printf(sc->sc_dev,
5079 "could not load firmware image '%s'\n", WPI_FW_NAME);
5080 return EINVAL;
5083 sc->fw_fp = fp;
5085 if (fp->datasize < sizeof (struct wpi_firmware_hdr)) {
5086 device_printf(sc->sc_dev,
5087 "firmware file too short: %zu bytes\n", fp->datasize);
5088 error = EINVAL;
5089 goto fail;
5092 fw->size = fp->datasize;
5093 fw->data = (const uint8_t *)fp->data;
5095 /* Extract firmware header information. */
5096 hdr = (const struct wpi_firmware_hdr *)fw->data;
5098 /* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW |
5099 |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */
5101 fw->main.textsz = le32toh(hdr->rtextsz);
5102 fw->main.datasz = le32toh(hdr->rdatasz);
5103 fw->init.textsz = le32toh(hdr->itextsz);
5104 fw->init.datasz = le32toh(hdr->idatasz);
5105 fw->boot.textsz = le32toh(hdr->btextsz);
5106 fw->boot.datasz = 0;
5108 /* Sanity-check firmware header. */
5109 if (fw->main.textsz > WPI_FW_TEXT_MAXSZ ||
5110 fw->main.datasz > WPI_FW_DATA_MAXSZ ||
5111 fw->init.textsz > WPI_FW_TEXT_MAXSZ ||
5112 fw->init.datasz > WPI_FW_DATA_MAXSZ ||
5113 fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ ||
5114 (fw->boot.textsz & 3) != 0) {
5115 device_printf(sc->sc_dev, "invalid firmware header\n");
5116 error = EINVAL;
5117 goto fail;
5120 /* Check that all firmware sections fit. */
5121 if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz +
5122 fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
5123 device_printf(sc->sc_dev,
5124 "firmware file too short: %zu bytes\n", fw->size);
5125 error = EINVAL;
5126 goto fail;
5129 /* Get pointers to firmware sections. */
5130 fw->main.text = (const uint8_t *)(hdr + 1);
5131 fw->main.data = fw->main.text + fw->main.textsz;
5132 fw->init.text = fw->main.data + fw->main.datasz;
5133 fw->init.data = fw->init.text + fw->init.textsz;
5134 fw->boot.text = fw->init.data + fw->init.datasz;
5136 DPRINTF(sc, WPI_DEBUG_FIRMWARE,
5137 "Firmware Version: Major %d, Minor %d, Driver %d, \n"
5138 "runtime (text: %u, data: %u) init (text: %u, data %u) "
5139 "boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver),
5140 fw->main.textsz, fw->main.datasz,
5141 fw->init.textsz, fw->init.datasz, fw->boot.textsz);
5143 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text);
5144 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data);
5145 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text);
5146 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data);
5147 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text);
5149 return 0;
5151 fail: wpi_unload_firmware(sc);
5152 return error;
5156 * Free the referenced firmware image.
5158 static void
5159 wpi_unload_firmware(struct wpi_softc *sc)
5161 if (sc->fw_fp != NULL) {
5162 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
5163 sc->fw_fp = NULL;
5167 static int
5168 wpi_clock_wait(struct wpi_softc *sc)
5170 int ntries;
5172 /* Set "initialization complete" bit. */
5173 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE);
5175 /* Wait for clock stabilization. */
5176 for (ntries = 0; ntries < 2500; ntries++) {
5177 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY)
5178 return 0;
5179 DELAY(100);
5181 device_printf(sc->sc_dev,
5182 "%s: timeout waiting for clock stabilization\n", __func__);
5184 return ETIMEDOUT;
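/*
 * Power up the adapter: apply hardware bug workarounds, mirror the PCIe
 * ASPM L1 setting, wait for the clocks to stabilize and enable the DMA
 * and BSM clocks.
 */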
5187 static int
5188 wpi_apm_init(struct wpi_softc *sc)
5190 uint32_t reg;
5191 int error;
5193 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5195 /* Disable L0s exit timer (NMI bug workaround). */
5196 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER);
5197 /* Don't wait for ICH L0s (ICH bug workaround). */
5198 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX);
5200 /* Set FH wait threshold to max (HW bug under stress workaround). */
5201 WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000);
5203 /* Retrieve PCIe Active State Power Management (ASPM). */
5204 #if defined(__DragonFly__)
5205 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINKCTRL, 1);
5206 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
5207 if (reg & PCIEM_LNKCTL_ASPM_L1) /* L1 Entry enabled. */
5208 #else
5209 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 1);
5210 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
5211 if (reg & PCIEM_LINK_CTL_ASPMC_L1) /* L1 Entry enabled. */
5212 #endif
5213 WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA);
5214 else
5215 WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA);
5217 WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT);
5219 /* Wait for clock stabilization before accessing prph. */
5220 if ((error = wpi_clock_wait(sc)) != 0)
5221 return error;
5223 if ((error = wpi_nic_lock(sc)) != 0)
5224 return error;
5225 /* Cleanup. */
5226 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400);
5227 wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000200);
5229 /* Enable DMA and BSM (Bootstrap State Machine). */
5230 wpi_prph_write(sc, WPI_APMG_CLK_EN,
5231 WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT);
5232 DELAY(20);
5233 /* Disable L1-Active. */
5234 wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS);
5235 wpi_nic_unlock(sc);
5237 return 0;
5240 static void
5241 wpi_apm_stop_master(struct wpi_softc *sc)
5243 int ntries;
5245 /* Stop busmaster DMA activity. */
5246 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER);
5248 if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) ==
5249 WPI_GP_CNTRL_MAC_PS)
5250 return; /* Already asleep. */
5252 for (ntries = 0; ntries < 100; ntries++) {
5253 if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED)
5254 return;
5255 DELAY(10);
5257 device_printf(sc->sc_dev, "%s: timeout waiting for master\n",
5258 __func__);
5261 static void
5262 wpi_apm_stop(struct wpi_softc *sc)
5264 wpi_apm_stop_master(sc);
5266 /* Reset the entire device. */
5267 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW);
5268 DELAY(10);
5269 /* Clear "initialization complete" bit. */
5270 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE);
5273 static void
5274 wpi_nic_config(struct wpi_softc *sc)
5276 uint32_t rev;
5278 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5280 /* voodoo from the Linux "driver".. */
5281 rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1);
5282 if ((rev & 0xc0) == 0x40)
5283 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB);
5284 else if (!(rev & 0x80))
5285 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM);
5287 if (sc->cap == 0x80)
5288 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC);
5290 if ((sc->rev & 0xf0) == 0xd0)
5291 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D);
5292 else
5293 WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D);
5295 if (sc->type > 1)
5296 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B);
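/*
 * Initialize the hardware: power the adapter on, select the VMAIN power
 * source, set up the RX/TX rings and DMA channels, load the firmware and
 * wait for it to report alive.
 */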
5299 static int
5300 wpi_hw_init(struct wpi_softc *sc)
5302 uint8_t chnl;
5303 int ntries, error;
5305 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
5307 /* Clear pending interrupts. */
5308 WPI_WRITE(sc, WPI_INT, 0xffffffff);
5310 if ((error = wpi_apm_init(sc)) != 0) {
5311 device_printf(sc->sc_dev,
5312 "%s: could not power ON adapter, error %d\n", __func__,
5313 error);
5314 return error;
5317 /* Select VMAIN power source. */
5318 if ((error = wpi_nic_lock(sc)) != 0)
5319 return error;
5320 wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK);
5321 wpi_nic_unlock(sc);
5322 /* Spin until VMAIN gets selected. */
5323 for (ntries = 0; ntries < 5000; ntries++) {
5324 if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN)
5325 break;
5326 DELAY(10);
5328 if (ntries == 5000) {
5329 device_printf(sc->sc_dev, "timeout selecting power source\n");
5330 return ETIMEDOUT;
5333 /* Perform adapter initialization. */
5334 wpi_nic_config(sc);
5336 /* Initialize RX ring. */
5337 if ((error = wpi_nic_lock(sc)) != 0)
5338 return error;
5339 /* Set physical address of RX ring. */
5340 WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr);
5341 /* Set physical address of RX read pointer. */
5342 WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr +
5343 offsetof(struct wpi_shared, next));
5344 WPI_WRITE(sc, WPI_FH_RX_WPTR, 0);
5345 /* Enable RX. */
5346 WPI_WRITE(sc, WPI_FH_RX_CONFIG,
5347 WPI_FH_RX_CONFIG_DMA_ENA |
5348 WPI_FH_RX_CONFIG_RDRBD_ENA |
5349 WPI_FH_RX_CONFIG_WRSTATUS_ENA |
5350 WPI_FH_RX_CONFIG_MAXFRAG |
5351 WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) |
5352 WPI_FH_RX_CONFIG_IRQ_DST_HOST |
5353 WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1));
5354 (void)WPI_READ(sc, WPI_FH_RSSR_TBL); /* barrier */
5355 wpi_nic_unlock(sc);
5356 WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7);
5358 /* Initialize TX rings. */
5359 if ((error = wpi_nic_lock(sc)) != 0)
5360 return error;
5361 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2); /* bypass mode */
5362 wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1); /* enable RA0 */
5363 /* Enable all 6 TX rings. */
5364 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f);
5365 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000);
5366 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002);
5367 wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4);
5368 wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5);
5369 /* Set physical address of TX rings. */
5370 WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr);
5371 WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5);
5373 /* Enable all DMA channels. */
5374 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) {
5375 WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0);
5376 WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0);
5377 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008);
5379 wpi_nic_unlock(sc);
5380 (void)WPI_READ(sc, WPI_FH_TX_BASE); /* barrier */
5382 /* Clear "radio off" and "commands blocked" bits. */
5383 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL);
5384 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED);
5386 /* Clear pending interrupts. */
5387 WPI_WRITE(sc, WPI_INT, 0xffffffff);
5388 /* Enable interrupts. */
5389 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF);
5391 /* _Really_ make sure "radio off" bit is cleared! */
5392 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL);
5393 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL);
5395 if ((error = wpi_load_firmware(sc)) != 0) {
5396 device_printf(sc->sc_dev,
5397 "%s: could not load firmware, error %d\n", __func__,
5398 error);
5399 return error;
5401 /* Wait at most one second for firmware alive notification. */
5402 #if defined(__DragonFly__)
5403 if ((error = lksleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) {
5404 #else
5405 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) {
5406 #endif
5407 device_printf(sc->sc_dev,
5408 "%s: timeout waiting for adapter to initialize, error %d\n",
5409 __func__, error);
5410 return error;
5413 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
5415 /* Do post-firmware initialization. */
5416 return wpi_post_alive(sc);
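/*
 * Stop the hardware: disable interrupts, stop the TX scheduler and DMA
 * channels, reset the RX/TX rings and power the adapter off.
 */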
5419 static void
5420 wpi_hw_stop(struct wpi_softc *sc)
5422 uint8_t chnl, qid;
5423 int ntries;
5425 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5427 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP)
5428 wpi_nic_lock(sc);
5430 WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO);
5432 /* Disable interrupts. */
5433 WPI_WRITE(sc, WPI_INT_MASK, 0);
5434 WPI_WRITE(sc, WPI_INT, 0xffffffff);
5435 WPI_WRITE(sc, WPI_FH_INT, 0xffffffff);
5437 /* Make sure we no longer hold the NIC lock. */
5438 wpi_nic_unlock(sc);
5440 if (wpi_nic_lock(sc) == 0) {
5441 /* Stop TX scheduler. */
5442 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0);
5443 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0);
5445 /* Stop all DMA channels. */
5446 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) {
5447 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0);
5448 for (ntries = 0; ntries < 200; ntries++) {
5449 if (WPI_READ(sc, WPI_FH_TX_STATUS) &
5450 WPI_FH_TX_STATUS_IDLE(chnl))
5451 break;
5452 DELAY(10);
5455 wpi_nic_unlock(sc);
5458 /* Stop RX ring. */
5459 wpi_reset_rx_ring(sc);
5461 /* Reset all TX rings. */
5462 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++)
5463 wpi_reset_tx_ring(sc, &sc->txq[qid]);
5465 if (wpi_nic_lock(sc) == 0) {
5466 wpi_prph_write(sc, WPI_APMG_CLK_DIS,
5467 WPI_APMG_CLK_CTRL_DMA_CLK_RQT);
5468 wpi_nic_unlock(sc);
5470 DELAY(5);
5471 /* Power OFF adapter. */
5472 wpi_apm_stop(sc);
5475 static void
5476 wpi_radio_on(void *arg0, int pending)
5478 struct wpi_softc *sc = arg0;
5479 struct ieee80211com *ic = &sc->sc_ic;
5480 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5482 device_printf(sc->sc_dev, "RF switch: radio enabled\n");
5484 WPI_LOCK(sc);
5485 callout_stop(&sc->watchdog_rfkill);
5486 WPI_UNLOCK(sc);
5488 if (vap != NULL)
5489 ieee80211_init(vap);
5492 static void
5493 wpi_radio_off(void *arg0, int pending)
5495 struct wpi_softc *sc = arg0;
5496 struct ieee80211com *ic = &sc->sc_ic;
5497 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5499 device_printf(sc->sc_dev, "RF switch: radio disabled\n");
5501 ieee80211_notify_radio(ic, 0);
5502 wpi_stop(sc);
5503 if (vap != NULL)
5504 ieee80211_stop(vap);
5506 WPI_LOCK(sc);
5507 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc);
5508 WPI_UNLOCK(sc);
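/*
 * Start the adapter: check the hardware RF kill switch, read the firmware
 * from the filesystem, initialize the hardware and send the initial
 * configuration.
 */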
5511 static int
5512 wpi_init(struct wpi_softc *sc)
5514 int error = 0;
5516 WPI_LOCK(sc);
5518 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
5520 if (sc->sc_running != 0)
5521 goto end;
5523 /* Check that the radio is not disabled by hardware switch. */
5524 if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) {
5525 device_printf(sc->sc_dev,
5526 "RF switch: radio disabled (%s)\n", __func__);
5527 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill,
5528 sc);
5529 error = EINPROGRESS;
5530 goto end;
5533 /* Read firmware images from the filesystem. */
5534 if ((error = wpi_read_firmware(sc)) != 0) {
5535 device_printf(sc->sc_dev,
5536 "%s: could not read firmware, error %d\n", __func__,
5537 error);
5538 goto end;
5541 sc->sc_running = 1;
5543 /* Initialize hardware and upload firmware. */
5544 error = wpi_hw_init(sc);
5545 wpi_unload_firmware(sc);
5546 if (error != 0) {
5547 device_printf(sc->sc_dev,
5548 "%s: could not initialize hardware, error %d\n", __func__,
5549 error);
5550 goto fail;
5553 /* Configure adapter now that it is ready. */
5554 if ((error = wpi_config(sc)) != 0) {
5555 device_printf(sc->sc_dev,
5556 "%s: could not configure device, error %d\n", __func__,
5557 error);
5558 goto fail;
5561 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
5563 WPI_UNLOCK(sc);
5565 return 0;
5567 fail: wpi_stop_locked(sc);
5569 end: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
5570 WPI_UNLOCK(sc);
5572 return error;
5575 static void
5576 wpi_stop_locked(struct wpi_softc *sc)
5579 WPI_LOCK_ASSERT(sc);
5581 if (sc->sc_running == 0)
5582 return;
5584 WPI_TX_LOCK(sc);
5585 WPI_TXQ_LOCK(sc);
5586 sc->sc_running = 0;
5587 WPI_TXQ_UNLOCK(sc);
5588 WPI_TX_UNLOCK(sc);
5590 WPI_TXQ_STATE_LOCK(sc);
5591 callout_stop(&sc->tx_timeout);
5592 WPI_TXQ_STATE_UNLOCK(sc);
5594 WPI_RXON_LOCK(sc);
5595 callout_stop(&sc->scan_timeout);
5596 callout_stop(&sc->calib_to);
5597 WPI_RXON_UNLOCK(sc);
5599 /* Power OFF hardware. */
5600 wpi_hw_stop(sc);
5603 static void
5604 wpi_stop(struct wpi_softc *sc)
5606 WPI_LOCK(sc);
5607 wpi_stop_locked(sc);
5608 WPI_UNLOCK(sc);
5612 * Callback from net80211 to start a scan.
5614 static void
5615 wpi_scan_start(struct ieee80211com *ic)
5617 struct wpi_softc *sc = ic->ic_softc;
5619 wpi_set_led(sc, WPI_LED_LINK, 20, 2);
5623 * Callback from net80211 to terminate a scan.
5625 static void
5626 wpi_scan_end(struct ieee80211com *ic)
5628 struct wpi_softc *sc = ic->ic_softc;
5629 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5631 if (vap->iv_state == IEEE80211_S_RUN)
5632 wpi_set_led(sc, WPI_LED_LINK, 0, 1);
5636 * Called by the net80211 framework to indicate to the driver
5637 * that the channel should be changed.
5639 static void
5640 wpi_set_channel(struct ieee80211com *ic)
5642 const struct ieee80211_channel *c = ic->ic_curchan;
5643 struct wpi_softc *sc = ic->ic_softc;
5644 int error;
5646 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5648 WPI_LOCK(sc);
5649 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
5650 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
5651 WPI_UNLOCK(sc);
5652 WPI_TX_LOCK(sc);
5653 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
5654 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
5655 WPI_TX_UNLOCK(sc);
5658 * Only need to set the channel in Monitor mode. AP scanning and auth
5659 * are already taken care of by their respective firmware commands.
5661 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5662 WPI_RXON_LOCK(sc);
5663 sc->rxon.chan = ieee80211_chan2ieee(ic, c);
5664 if (IEEE80211_IS_CHAN_2GHZ(c)) {
5665 sc->rxon.flags |= htole32(WPI_RXON_AUTO |
5666 WPI_RXON_24GHZ);
5667 } else {
5668 sc->rxon.flags &= ~htole32(WPI_RXON_AUTO |
5669 WPI_RXON_24GHZ);
5671 if ((error = wpi_send_rxon(sc, 0, 1)) != 0)
5672 device_printf(sc->sc_dev,
5673 "%s: error %d setting channel\n", __func__,
5674 error);
5675 WPI_RXON_UNLOCK(sc);
5680 * Called by net80211 to indicate that we need to scan the current
5681 * channel. The channel was previously set via the wpi_set_channel
5682 * callback.
5684 static void
5685 wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
5687 struct ieee80211vap *vap = ss->ss_vap;
5688 struct ieee80211com *ic = vap->iv_ic;
5689 struct wpi_softc *sc = ic->ic_softc;
5690 int error;
5692 WPI_RXON_LOCK(sc);
5693 error = wpi_scan(sc, ic->ic_curchan);
5694 WPI_RXON_UNLOCK(sc);
5695 if (error != 0)
5696 ieee80211_cancel_scan(vap);
5700 * Called by the net80211 framework to indicate
5701 * the minimum dwell time has been met and the scan should be terminated.
5702 * We don't actually terminate the scan, as the firmware will notify
5703 * us when it's finished and we have no way to interrupt it.
5705 static void
5706 wpi_scan_mindwell(struct ieee80211_scan_state *ss)
5708 /* NB: don't try to abort scan; wait for firmware to finish */