illumos-gate: usr/src/uts/common/io/arn/arn_main.c
1 /*
2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 * Use is subject to license terms.
4 */
6 /*
7 * Copyright (c) 2008 Atheros Communications Inc.
9 * Permission to use, copy, modify, and/or distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 #include <sys/sysmacros.h>
23 #include <sys/param.h>
24 #include <sys/types.h>
25 #include <sys/signal.h>
26 #include <sys/stream.h>
27 #include <sys/termio.h>
28 #include <sys/errno.h>
29 #include <sys/file.h>
30 #include <sys/cmn_err.h>
31 #include <sys/stropts.h>
32 #include <sys/strsubr.h>
33 #include <sys/strtty.h>
34 #include <sys/kbio.h>
35 #include <sys/cred.h>
36 #include <sys/stat.h>
37 #include <sys/consdev.h>
38 #include <sys/kmem.h>
39 #include <sys/modctl.h>
40 #include <sys/ddi.h>
41 #include <sys/sunddi.h>
42 #include <sys/pci.h>
43 #include <sys/errno.h>
44 #include <sys/mac_provider.h>
45 #include <sys/dlpi.h>
46 #include <sys/ethernet.h>
47 #include <sys/list.h>
48 #include <sys/byteorder.h>
49 #include <sys/strsun.h>
50 #include <sys/policy.h>
51 #include <inet/common.h>
52 #include <inet/nd.h>
53 #include <inet/mi.h>
54 #include <inet/wifi_ioctl.h>
55 #include <sys/mac_wifi.h>
56 #include <sys/net80211.h>
57 #include <sys/net80211_proto.h>
58 #include <sys/net80211_ht.h>
61 #include "arn_ath9k.h"
62 #include "arn_core.h"
63 #include "arn_reg.h"
64 #include "arn_hw.h"
66 #define ARN_MAX_RSSI 45 /* max rssi */
69 * Default 11n rates supported by this station.
71 extern struct ieee80211_htrateset ieee80211_rateset_11n;
74 * PIO access attributes for registers
76 static ddi_device_acc_attr_t arn_reg_accattr = {
77 DDI_DEVICE_ATTR_V0,
78 DDI_STRUCTURE_LE_ACC,
79 DDI_STRICTORDER_ACC,
80 DDI_DEFAULT_ACC
84 * DMA access attributes for descriptors: NOT to be byte swapped.
86 static ddi_device_acc_attr_t arn_desc_accattr = {
87 DDI_DEVICE_ATTR_V0,
88 DDI_STRUCTURE_LE_ACC,
89 DDI_STRICTORDER_ACC,
90 DDI_DEFAULT_ACC
94 * Describes the chip's DMA engine
96 static ddi_dma_attr_t arn_dma_attr = {
97 DMA_ATTR_V0, /* version number */
98 0, /* low address */
99 0xffffffffU, /* high address */
100 0x3ffffU, /* counter register max */
101 1, /* alignment */
102 0xFFF, /* burst sizes */
103 1, /* minimum transfer size */
104 0x3ffffU, /* max transfer size */
105 0xffffffffU, /* address register max */
106 1, /* no scatter-gather */
107 1, /* granularity of device */
108 0, /* DMA flags */
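/*
 * Note: arn_dma_attr above covers the per-packet tx/rx data buffers
 * (byte alignment, 0x3ffff max transfer), while arn_desc_dma_attr
 * below is used for the shared descriptor block and requires
 * 0x1000-byte alignment. Both request a single DMA cookie
 * (no scatter-gather).
 */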
111 static ddi_dma_attr_t arn_desc_dma_attr = {
112 DMA_ATTR_V0, /* version number */
113 0, /* low address */
114 0xffffffffU, /* high address */
115 0xffffffffU, /* counter register max */
116 0x1000, /* alignment */
117 0xFFF, /* burst sizes */
118 1, /* minimum transfer size */
119 0xffffffffU, /* max transfer size */
120 0xffffffffU, /* address register max */
121 1, /* no scatter-gather */
122 1, /* granularity of device */
123 0, /* DMA flags */
126 #define ATH_DEF_CACHE_BYTES 32 /* default cache line size */
128 static kmutex_t arn_loglock;
129 static void *arn_soft_state_p = NULL;
130 static int arn_dwelltime = 200; /* scan interval */
132 static int arn_m_stat(void *, uint_t, uint64_t *);
133 static int arn_m_start(void *);
134 static void arn_m_stop(void *);
135 static int arn_m_promisc(void *, boolean_t);
136 static int arn_m_multicst(void *, boolean_t, const uint8_t *);
137 static int arn_m_unicst(void *, const uint8_t *);
138 static mblk_t *arn_m_tx(void *, mblk_t *);
139 static void arn_m_ioctl(void *, queue_t *, mblk_t *);
140 static int arn_m_setprop(void *, const char *, mac_prop_id_t,
141 uint_t, const void *);
142 static int arn_m_getprop(void *, const char *, mac_prop_id_t,
143 uint_t, void *);
144 static void arn_m_propinfo(void *, const char *, mac_prop_id_t,
145 mac_prop_info_handle_t);
147 /* MAC Callback Functions */
148 static mac_callbacks_t arn_m_callbacks = {
149 MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
150 arn_m_stat,
151 arn_m_start,
152 arn_m_stop,
153 arn_m_promisc,
154 arn_m_multicst,
155 arn_m_unicst,
156 arn_m_tx,
157 NULL,
158 arn_m_ioctl,
159 NULL,
160 NULL,
161 NULL,
162 arn_m_setprop,
163 arn_m_getprop,
164 arn_m_propinfo
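/*
 * The mac_callbacks_t above is this driver's GLDv3 entry-point table;
 * the MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO flags advertise
 * which of the optional callbacks are implemented.
 */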
168 * ARN_DBG_HW
169 * ARN_DBG_REG_IO
170 * ARN_DBG_QUEUE
171 * ARN_DBG_EEPROM
172 * ARN_DBG_XMIT
173 * ARN_DBG_RECV
174 * ARN_DBG_CALIBRATE
175 * ARN_DBG_CHANNEL
176 * ARN_DBG_INTERRUPT
177 * ARN_DBG_REGULATORY
178 * ARN_DBG_ANI
179 * ARN_DBG_POWER_MGMT
180 * ARN_DBG_KEYCACHE
181 * ARN_DBG_BEACON
182 * ARN_DBG_RATE
183 * ARN_DBG_INIT
184 * ARN_DBG_ATTACH
185 * ARN_DBG_DEATCH
186 * ARN_DBG_AGGR
187 * ARN_DBG_RESET
188 * ARN_DBG_FATAL
189 * ARN_DBG_ANY
190 * ARN_DBG_ALL
192 uint32_t arn_dbg_mask = 0;
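/*
 * Debug output is disabled by default; OR any of the ARN_DBG_* flags
 * listed above into arn_dbg_mask to enable the corresponding
 * arn_dbg() messages.
 */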
195 * Exception/warning cases not leading to panic.
197 void
198 arn_problem(const int8_t *fmt, ...)
200 va_list args;
202 mutex_enter(&arn_loglock);
204 va_start(args, fmt);
205 vcmn_err(CE_WARN, fmt, args);
206 va_end(args);
208 mutex_exit(&arn_loglock);
212 * Normal log information independent of debug.
214 void
215 arn_log(const int8_t *fmt, ...)
217 va_list args;
219 mutex_enter(&arn_loglock);
221 va_start(args, fmt);
222 vcmn_err(CE_CONT, fmt, args);
223 va_end(args);
225 mutex_exit(&arn_loglock);
228 void
229 arn_dbg(uint32_t dbg_flags, const int8_t *fmt, ...)
231 va_list args;
233 if (dbg_flags & arn_dbg_mask) {
234 mutex_enter(&arn_loglock);
235 va_start(args, fmt);
236 vcmn_err(CE_CONT, fmt, args);
237 va_end(args);
238 mutex_exit(&arn_loglock);
243 * Register reads and writes share the same lock. We do this to serialize
244 * reads and writes on Atheros 802.11n PCI devices only. This is required
245 * because the FIFO on these devices can sanely accept only 2 requests;
246 * beyond that the device misbehaves. Serializing the reads/writes
247 * prevents this from happening.
249 void
250 arn_iowrite32(struct ath_hal *ah, uint32_t reg_offset, uint32_t val)
252 struct arn_softc *sc = ah->ah_sc;
253 if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
254 mutex_enter(&sc->sc_serial_rw);
255 ddi_put32(sc->sc_io_handle,
256 (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)), val);
257 mutex_exit(&sc->sc_serial_rw);
258 } else {
259 ddi_put32(sc->sc_io_handle,
260 (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)), val);
264 unsigned int
265 arn_ioread32(struct ath_hal *ah, uint32_t reg_offset)
267 uint32_t val;
268 struct arn_softc *sc = ah->ah_sc;
269 if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
270 mutex_enter(&sc->sc_serial_rw);
271 val = ddi_get32(sc->sc_io_handle,
272 (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)));
273 mutex_exit(&sc->sc_serial_rw);
274 } else {
275 val = ddi_get32(sc->sc_io_handle,
276 (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)));
279 return (val);
283 * Allocate an area of memory and a DMA handle for accessing it
285 static int
286 arn_alloc_dma_mem(dev_info_t *devinfo, ddi_dma_attr_t *dma_attr, size_t memsize,
287 ddi_device_acc_attr_t *attr_p, uint_t alloc_flags,
288 uint_t bind_flags, dma_area_t *dma_p)
290 int err;
293 * Allocate handle
295 err = ddi_dma_alloc_handle(devinfo, dma_attr,
296 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
297 if (err != DDI_SUCCESS)
298 return (DDI_FAILURE);
301 * Allocate memory
303 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
304 alloc_flags, DDI_DMA_SLEEP, NULL, &dma_p->mem_va,
305 &dma_p->alength, &dma_p->acc_hdl);
306 if (err != DDI_SUCCESS)
307 return (DDI_FAILURE);
310 * Bind the two together
312 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
313 dma_p->mem_va, dma_p->alength, bind_flags,
314 DDI_DMA_SLEEP, NULL, &dma_p->cookie, &dma_p->ncookies);
315 if (err != DDI_DMA_MAPPED)
316 return (DDI_FAILURE);
318 dma_p->nslots = ~0U;
319 dma_p->size = ~0U;
320 dma_p->token = ~0U;
321 dma_p->offset = 0;
322 return (DDI_SUCCESS);
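/*
 * Note: because both DMA attribute structures request a single
 * scatter-gather segment, the bind above is expected to yield exactly
 * one cookie, so dma_p->cookie.dmac_address covers the whole buffer.
 */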
326 * Free one allocated area of DMAable memory
328 static void
329 arn_free_dma_mem(dma_area_t *dma_p)
331 if (dma_p->dma_hdl != NULL) {
332 (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
333 if (dma_p->acc_hdl != NULL) {
334 ddi_dma_mem_free(&dma_p->acc_hdl);
335 dma_p->acc_hdl = NULL;
337 ddi_dma_free_handle(&dma_p->dma_hdl);
338 dma_p->ncookies = 0;
339 dma_p->dma_hdl = NULL;
344 * Initialize tx, rx, or beacon buffer list. Allocate DMA memory for
345 * each buffer.
347 static int
348 arn_buflist_setup(dev_info_t *devinfo,
349 struct arn_softc *sc,
350 list_t *bflist,
351 struct ath_buf **pbf,
352 struct ath_desc **pds,
353 int nbuf,
354 uint_t dmabflags,
355 uint32_t buflen)
357 int i, err;
358 struct ath_buf *bf = *pbf;
359 struct ath_desc *ds = *pds;
361 list_create(bflist, sizeof (struct ath_buf),
362 offsetof(struct ath_buf, bf_node));
363 for (i = 0; i < nbuf; i++, bf++, ds++) {
364 bf->bf_desc = ds;
365 bf->bf_daddr = sc->sc_desc_dma.cookie.dmac_address +
366 ((uintptr_t)ds - (uintptr_t)sc->sc_desc);
367 list_insert_tail(bflist, bf);
369 /* alloc DMA memory */
370 err = arn_alloc_dma_mem(devinfo, &arn_dma_attr,
371 buflen, &arn_desc_accattr, DDI_DMA_STREAMING,
372 dmabflags, &bf->bf_dma);
373 if (err != DDI_SUCCESS)
374 return (err);
376 *pbf = bf;
377 *pds = ds;
379 return (DDI_SUCCESS);
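/*
 * Each ath_buf above is given the virtual address of its descriptor
 * (bf_desc) and the matching device-visible address (bf_daddr),
 * computed as the descriptor DMA cookie base plus the descriptor's
 * offset within the shared descriptor block.
 */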
383 * Destroy tx, rx or beacon buffer list. Free DMA memory.
385 static void
386 arn_buflist_cleanup(list_t *buflist)
388 struct ath_buf *bf;
390 if (!buflist)
391 return;
393 bf = list_head(buflist);
394 while (bf != NULL) {
395 if (bf->bf_m != NULL) {
396 freemsg(bf->bf_m);
397 bf->bf_m = NULL;
399 /* Free DMA buffer */
400 arn_free_dma_mem(&bf->bf_dma);
401 if (bf->bf_in != NULL) {
402 ieee80211_free_node(bf->bf_in);
403 bf->bf_in = NULL;
405 list_remove(buflist, bf);
406 bf = list_head(buflist);
408 list_destroy(buflist);
411 static void
412 arn_desc_free(struct arn_softc *sc)
414 arn_buflist_cleanup(&sc->sc_txbuf_list);
415 arn_buflist_cleanup(&sc->sc_rxbuf_list);
416 #ifdef ARN_IBSS
417 arn_buflist_cleanup(&sc->sc_bcbuf_list);
418 #endif
420 /* Free descriptor DMA buffer */
421 arn_free_dma_mem(&sc->sc_desc_dma);
423 kmem_free((void *)sc->sc_vbufptr, sc->sc_vbuflen);
424 sc->sc_vbufptr = NULL;
427 static int
428 arn_desc_alloc(dev_info_t *devinfo, struct arn_softc *sc)
430 int err;
431 size_t size;
432 struct ath_desc *ds;
433 struct ath_buf *bf;
435 #ifdef ARN_IBSS
436 size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF);
437 #else
438 size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF);
439 #endif
441 err = arn_alloc_dma_mem(devinfo, &arn_desc_dma_attr, size,
442 &arn_desc_accattr, DDI_DMA_CONSISTENT,
443 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &sc->sc_desc_dma);
445 /* virtual address of the first descriptor */
446 sc->sc_desc = (struct ath_desc *)sc->sc_desc_dma.mem_va;
448 ds = sc->sc_desc;
449 ARN_DBG((ARN_DBG_INIT, "arn: arn_desc_alloc(): DMA map: "
450 "%p (%d) -> %p\n",
451 sc->sc_desc, sc->sc_desc_dma.alength,
452 sc->sc_desc_dma.cookie.dmac_address));
454 /* allocate data structures to describe TX/RX DMA buffers */
455 #ifdef ARN_IBSS
456 sc->sc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF +
457 ATH_BCBUF);
458 #else
459 sc->sc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF);
460 #endif
461 bf = (struct ath_buf *)kmem_zalloc(sc->sc_vbuflen, KM_SLEEP);
462 sc->sc_vbufptr = bf;
464 /* DMA buffer size for each TX/RX packet */
465 #ifdef ARN_TX_AGGREGATION
466 sc->tx_dmabuf_size =
467 roundup((IEEE80211_MAX_MPDU_LEN + 3840 * 2),
468 min(sc->sc_cachelsz, (uint16_t)64));
469 #else
470 sc->tx_dmabuf_size =
471 roundup(IEEE80211_MAX_MPDU_LEN, min(sc->sc_cachelsz, (uint16_t)64));
472 #endif
473 sc->rx_dmabuf_size =
474 roundup(IEEE80211_MAX_MPDU_LEN, min(sc->sc_cachelsz, (uint16_t)64));
476 /* create RX buffer list */
477 err = arn_buflist_setup(devinfo, sc, &sc->sc_rxbuf_list, &bf, &ds,
478 ATH_RXBUF, DDI_DMA_READ | DDI_DMA_STREAMING, sc->rx_dmabuf_size);
479 if (err != DDI_SUCCESS) {
480 arn_desc_free(sc);
481 return (err);
484 /* create TX buffer list */
485 err = arn_buflist_setup(devinfo, sc, &sc->sc_txbuf_list, &bf, &ds,
486 ATH_TXBUF, DDI_DMA_STREAMING, sc->tx_dmabuf_size);
487 if (err != DDI_SUCCESS) {
488 arn_desc_free(sc);
489 return (err);
492 /* create beacon buffer list */
493 #ifdef ARN_IBSS
494 err = arn_buflist_setup(devinfo, sc, &sc->sc_bcbuf_list, &bf, &ds,
495 ATH_BCBUF, DDI_DMA_STREAMING);
496 if (err != DDI_SUCCESS) {
497 arn_desc_free(sc);
498 return (err);
500 #endif
502 return (DDI_SUCCESS);
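/*
 * Summary of arn_desc_alloc(): one contiguous block of descriptors is
 * allocated for all rx/tx (and, under ARN_IBSS, beacon) buffers, an
 * array of ath_buf bookkeeping structures is zalloc'ed, and a separate
 * streaming DMA buffer is allocated per packet, rounded up to a
 * multiple of the smaller of the cache line size and 64 bytes.
 */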
505 static struct ath_rate_table *
506 /* LINTED E_STATIC_UNUSED */
507 arn_get_ratetable(struct arn_softc *sc, uint32_t mode)
509 struct ath_rate_table *rate_table = NULL;
511 switch (mode) {
512 case IEEE80211_MODE_11A:
513 rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
514 break;
515 case IEEE80211_MODE_11B:
516 rate_table = sc->hw_rate_table[ATH9K_MODE_11B];
517 break;
518 case IEEE80211_MODE_11G:
519 rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
520 break;
521 #ifdef ARN_11N
522 case IEEE80211_MODE_11NA_HT20:
523 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
524 break;
525 case IEEE80211_MODE_11NG_HT20:
526 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
527 break;
528 case IEEE80211_MODE_11NA_HT40PLUS:
529 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
530 break;
531 case IEEE80211_MODE_11NA_HT40MINUS:
532 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
533 break;
534 case IEEE80211_MODE_11NG_HT40PLUS:
535 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
536 break;
537 case IEEE80211_MODE_11NG_HT40MINUS:
538 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
539 break;
540 #endif
541 default:
542 ARN_DBG((ARN_DBG_FATAL, "arn: arn_get_ratetable(): "
543 "invalid mode %u\n", mode));
544 return (NULL);
547 return (rate_table);
551 static void
552 arn_setcurmode(struct arn_softc *sc, enum wireless_mode mode)
554 struct ath_rate_table *rt;
555 int i;
557 for (i = 0; i < sizeof (sc->asc_rixmap); i++)
558 sc->asc_rixmap[i] = 0xff;
560 rt = sc->hw_rate_table[mode];
561 ASSERT(rt != NULL);
563 for (i = 0; i < rt->rate_cnt; i++)
564 sc->asc_rixmap[rt->info[i].dot11rate &
565 IEEE80211_RATE_VAL] = (uint8_t)i; /* LINT */
567 sc->sc_currates = rt;
568 sc->sc_curmode = mode;
571 * All protection frames are transmitted at 2Mb/s for
572 * 11g, otherwise at 1Mb/s.
573 * XXX select protection rate index from rate table.
575 sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
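/*
 * asc_rixmap translates an 802.11 rate code (IEEE80211_RATE_VAL bits)
 * back into an index of the current rate table; entries left at 0xff
 * are rates the selected table does not provide.
 */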
578 static enum wireless_mode
579 arn_chan2mode(struct ath9k_channel *chan)
581 if (chan->chanmode == CHANNEL_A)
582 return (ATH9K_MODE_11A);
583 else if (chan->chanmode == CHANNEL_G)
584 return (ATH9K_MODE_11G);
585 else if (chan->chanmode == CHANNEL_B)
586 return (ATH9K_MODE_11B);
587 else if (chan->chanmode == CHANNEL_A_HT20)
588 return (ATH9K_MODE_11NA_HT20);
589 else if (chan->chanmode == CHANNEL_G_HT20)
590 return (ATH9K_MODE_11NG_HT20);
591 else if (chan->chanmode == CHANNEL_A_HT40PLUS)
592 return (ATH9K_MODE_11NA_HT40PLUS);
593 else if (chan->chanmode == CHANNEL_A_HT40MINUS)
594 return (ATH9K_MODE_11NA_HT40MINUS);
595 else if (chan->chanmode == CHANNEL_G_HT40PLUS)
596 return (ATH9K_MODE_11NG_HT40PLUS);
597 else if (chan->chanmode == CHANNEL_G_HT40MINUS)
598 return (ATH9K_MODE_11NG_HT40MINUS);
600 return (ATH9K_MODE_11B);
603 static void
604 arn_update_txpow(struct arn_softc *sc)
606 struct ath_hal *ah = sc->sc_ah;
607 uint32_t txpow;
609 if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
610 (void) ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
611 /* read back in case value is clamped */
612 (void) ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
613 sc->sc_curtxpow = (uint32_t)txpow;
617 uint8_t
618 parse_mpdudensity(uint8_t mpdudensity)
621 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
622 * 0 for no restriction
623 * 1 for 1/4 us
624 * 2 for 1/2 us
625 * 3 for 1 us
626 * 4 for 2 us
627 * 5 for 4 us
628 * 6 for 8 us
629 * 7 for 16 us
631 switch (mpdudensity) {
632 case 0:
633 return (0);
634 case 1:
635 case 2:
636 case 3:
638 * Our lower layer calculations limit our
639 * precision to 1 microsecond
641 return (1);
642 case 4:
643 return (2);
644 case 5:
645 return (4);
646 case 6:
647 return (8);
648 case 7:
649 return (16);
650 default:
651 return (0);
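/*
 * parse_mpdudensity() converts the HT "Minimum MPDU Start Spacing"
 * code into microseconds; the sub-microsecond spacings (1/4 and
 * 1/2 us) are rounded up to 1 us, as noted above.
 */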
655 static void
656 arn_setup_rates(struct arn_softc *sc, uint32_t mode)
658 int i, maxrates;
659 struct ath_rate_table *rate_table = NULL;
660 struct ieee80211_rateset *rateset;
661 ieee80211com_t *ic = (ieee80211com_t *)sc;
663 /* rate_table = arn_get_ratetable(sc, mode); */
664 switch (mode) {
665 case IEEE80211_MODE_11A:
666 rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
667 break;
668 case IEEE80211_MODE_11B:
669 rate_table = sc->hw_rate_table[ATH9K_MODE_11B];
670 break;
671 case IEEE80211_MODE_11G:
672 rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
673 break;
674 #ifdef ARN_11N
675 case IEEE80211_MODE_11NA_HT20:
676 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
677 break;
678 case IEEE80211_MODE_11NG_HT20:
679 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
680 break;
681 case IEEE80211_MODE_11NA_HT40PLUS:
682 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
683 break;
684 case IEEE80211_MODE_11NA_HT40MINUS:
685 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
686 break;
687 case IEEE80211_MODE_11NG_HT40PLUS:
688 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
689 break;
690 case IEEE80211_MODE_11NG_HT40MINUS:
691 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
692 break;
693 #endif
694 default:
695 ARN_DBG((ARN_DBG_RATE, "arn: arn_get_ratetable(): "
696 "invalid mode %u\n", mode));
697 break;
699 if (rate_table == NULL)
700 return;
701 if (rate_table->rate_cnt > ATH_RATE_MAX) {
702 ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
703 "rate table too small (%u > %u)\n",
704 rate_table->rate_cnt, IEEE80211_RATE_MAXSIZE));
705 maxrates = ATH_RATE_MAX;
706 } else
707 maxrates = rate_table->rate_cnt;
709 ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
710 "maxrates is %d\n", maxrates));
712 rateset = &ic->ic_sup_rates[mode];
713 for (i = 0; i < maxrates; i++) {
714 rateset->ir_rates[i] = rate_table->info[i].dot11rate;
715 ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
716 "%d\n", rate_table->info[i].dot11rate));
718 rateset->ir_nrates = (uint8_t)maxrates; /* ??? */
721 static int
722 arn_setup_channels(struct arn_softc *sc)
724 struct ath_hal *ah = sc->sc_ah;
725 ieee80211com_t *ic = (ieee80211com_t *)sc;
726 int nchan, i, index;
727 uint8_t regclassids[ATH_REGCLASSIDS_MAX];
728 uint32_t nregclass = 0;
729 struct ath9k_channel *c;
731 /* Fill in ah->ah_channels */
732 if (!ath9k_regd_init_channels(ah, ATH_CHAN_MAX, (uint32_t *)&nchan,
733 regclassids, ATH_REGCLASSIDS_MAX, &nregclass, CTRY_DEFAULT,
734 B_FALSE, 1)) {
735 uint32_t rd = ah->ah_currentRD;
736 ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_setup_channels(): "
737 "unable to collect channel list; "
738 "regdomain likely %u country code %u\n",
739 rd, CTRY_DEFAULT));
740 return (EINVAL);
743 ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_setup_channels(): "
744 "number of channel is %d\n", nchan));
746 for (i = 0; i < nchan; i++) {
747 c = &ah->ah_channels[i];
748 uint32_t flags;
749 index = ath9k_hw_mhz2ieee(ah, c->channel, c->channelFlags);
751 if (index > IEEE80211_CHAN_MAX) {
752 ARN_DBG((ARN_DBG_CHANNEL,
753 "arn: arn_setup_channels(): "
754 "bad hal channel %d (%u/%x) ignored\n",
755 index, c->channel, c->channelFlags));
756 continue;
758 /* NB: flags are known to be compatible */
759 if (index < 0) {
761 * can't handle frequency <2400MHz (negative
762 * channels) right now
764 ARN_DBG((ARN_DBG_CHANNEL,
765 "arn: arn_setup_channels(): "
766 "hal channel %d (%u/%x) "
767 "cannot be handled, ignored\n",
768 index, c->channel, c->channelFlags));
769 continue;
773 * Calculate net80211 flags; most are compatible
774 * but some need massaging. Note the static turbo
775 * conversion can be removed once net80211 is updated
776 * to understand static vs. dynamic turbo.
779 flags = c->channelFlags & (CHANNEL_ALL | CHANNEL_PASSIVE);
781 if (ic->ic_sup_channels[index].ich_freq == 0) {
782 ic->ic_sup_channels[index].ich_freq = c->channel;
783 ic->ic_sup_channels[index].ich_flags = flags;
784 } else {
785 /* channels overlap; e.g. 11g and 11b */
786 ic->ic_sup_channels[index].ich_flags |= flags;
788 if ((c->channelFlags & CHANNEL_G) == CHANNEL_G) {
789 sc->sc_have11g = 1;
790 ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
791 IEEE80211_C_SHSLOT; /* short slot time */
795 return (0);
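/*
 * Channels reported by the HAL are merged into ic_sup_channels by IEEE
 * channel number, so overlapping entries (e.g. 11b and 11g on the same
 * frequency) share one slot with their flags OR-ed together.
 */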
798 uint32_t
799 arn_chan2flags(ieee80211com_t *isc, struct ieee80211_channel *chan)
801 uint32_t channel_mode = 0;
802 switch (ieee80211_chan2mode(isc, chan)) {
803 case IEEE80211_MODE_11NA:
804 if (chan->ich_flags & IEEE80211_CHAN_HT40U)
805 channel_mode = CHANNEL_A_HT40PLUS;
806 else if (chan->ich_flags & IEEE80211_CHAN_HT40D)
807 channel_mode = CHANNEL_A_HT40MINUS;
808 else
809 channel_mode = CHANNEL_A_HT20;
810 break;
811 case IEEE80211_MODE_11NG:
812 if (chan->ich_flags & IEEE80211_CHAN_HT40U)
813 channel_mode = CHANNEL_G_HT40PLUS;
814 else if (chan->ich_flags & IEEE80211_CHAN_HT40D)
815 channel_mode = CHANNEL_G_HT40MINUS;
816 else
817 channel_mode = CHANNEL_G_HT20;
818 break;
819 case IEEE80211_MODE_TURBO_G:
820 case IEEE80211_MODE_STURBO_A:
821 case IEEE80211_MODE_TURBO_A:
822 channel_mode = 0;
823 break;
824 case IEEE80211_MODE_11A:
825 channel_mode = CHANNEL_A;
826 break;
827 case IEEE80211_MODE_11G:
828 channel_mode = CHANNEL_B;
829 break;
830 case IEEE80211_MODE_11B:
831 channel_mode = CHANNEL_G;
832 break;
833 case IEEE80211_MODE_FH:
834 channel_mode = 0;
835 break;
836 default:
837 break;
840 return (channel_mode);
844 * Update internal state after a channel change.
846 void
847 arn_chan_change(struct arn_softc *sc, struct ieee80211_channel *chan)
849 struct ieee80211com *ic = &sc->sc_isc;
850 enum ieee80211_phymode mode;
851 enum wireless_mode wlmode;
854 * Change channels and update the h/w rate map
855 * if we're switching; e.g. 11a to 11b/g.
857 mode = ieee80211_chan2mode(ic, chan);
858 switch (mode) {
859 case IEEE80211_MODE_11A:
860 wlmode = ATH9K_MODE_11A;
861 break;
862 case IEEE80211_MODE_11B:
863 wlmode = ATH9K_MODE_11B;
864 break;
865 case IEEE80211_MODE_11G:
866 wlmode = ATH9K_MODE_11B;
867 break;
868 default:
869 break;
871 if (wlmode != sc->sc_curmode)
872 arn_setcurmode(sc, wlmode);
877 * Set/change channels. If the channel is really being changed, it's done
878 * by resetting the chip. To accomplish this we must first clean up any pending
879 * DMA, then restart stuff.
881 static int
882 arn_set_channel(struct arn_softc *sc, struct ath9k_channel *hchan)
884 struct ath_hal *ah = sc->sc_ah;
885 ieee80211com_t *ic = &sc->sc_isc;
886 boolean_t fastcc = B_TRUE;
887 boolean_t stopped;
888 struct ieee80211_channel chan;
889 enum wireless_mode curmode;
891 if (sc->sc_flags & SC_OP_INVALID)
892 return (EIO);
894 if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
895 hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
896 (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
897 (sc->sc_flags & SC_OP_FULL_RESET)) {
898 int status;
901 * This is only performed if the channel settings have
902 * actually changed.
904 * To switch channels clear any pending DMA operations;
905 * wait long enough for the RX fifo to drain, reset the
906 * hardware at the new frequency, and then re-enable
907 * the relevant bits of the h/w.
909 (void) ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
910 arn_draintxq(sc, B_FALSE); /* clear pending tx frames */
911 stopped = arn_stoprecv(sc); /* turn off frame recv */
914 * XXX: do not flush receive queue here. We don't want
915 * to flush data frames already in queue because of
916 * changing channel.
919 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
920 fastcc = B_FALSE;
922 ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_set_channel(): "
923 "(%u MHz) -> (%u MHz), cflags:%x, chanwidth: %d\n",
924 sc->sc_ah->ah_curchan->channel,
925 hchan->channel, hchan->channelFlags, sc->tx_chan_width));
927 if (!ath9k_hw_reset(ah, hchan, sc->tx_chan_width,
928 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
929 sc->sc_ht_extprotspacing, fastcc, &status)) {
930 ARN_DBG((ARN_DBG_FATAL, "arn: arn_set_channel(): "
931 "unable to reset channel %u (%uMhz) "
932 "flags 0x%x hal status %u\n",
933 ath9k_hw_mhz2ieee(ah, hchan->channel,
934 hchan->channelFlags),
935 hchan->channel, hchan->channelFlags, status));
936 return (EIO);
939 sc->sc_curchan = *hchan;
941 sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
942 sc->sc_flags &= ~SC_OP_FULL_RESET;
944 if (arn_startrecv(sc) != 0) {
945 arn_problem("arn: arn_set_channel(): "
946 "unable to restart recv logic\n");
947 return (EIO);
950 chan.ich_freq = hchan->channel;
951 chan.ich_flags = hchan->channelFlags;
952 ic->ic_ibss_chan = &chan;
955 * Change channels and update the h/w rate map
956 * if we're switching; e.g. 11a to 11b/g.
958 curmode = arn_chan2mode(hchan);
959 if (curmode != sc->sc_curmode)
960 arn_setcurmode(sc, arn_chan2mode(hchan));
962 arn_update_txpow(sc);
964 (void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
967 return (0);
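/*
 * Note: the fast channel change path (fastcc) is abandoned whenever
 * receive could not be stopped cleanly or a full reset was requested,
 * in which case ath9k_hw_reset() falls back to a full chip reset.
 */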
971 * This routine performs the periodic noise floor calibration function
972 * that is used to adjust and optimize the chip performance. This
973 * takes environmental changes (location, temperature) into account.
974 * When the task is complete, it reschedules itself depending on the
975 * appropriate interval that was calculated.
977 static void
978 arn_ani_calibrate(void *arg)
981 ieee80211com_t *ic = (ieee80211com_t *)arg;
982 struct arn_softc *sc = (struct arn_softc *)ic;
983 struct ath_hal *ah = sc->sc_ah;
984 boolean_t longcal = B_FALSE;
985 boolean_t shortcal = B_FALSE;
986 boolean_t aniflag = B_FALSE;
987 unsigned int timestamp = drv_hztousec(ddi_get_lbolt())/1000;
988 uint32_t cal_interval;
991 * don't calibrate when we're scanning.
992 * we are most likely not on our home channel.
994 if (ic->ic_state != IEEE80211_S_RUN)
995 goto settimer;
997 /* Long calibration runs independently of short calibration. */
998 if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) {
999 longcal = B_TRUE;
1000 ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1001 "%s: longcal @%lu\n", __func__, drv_hztousec));
1002 sc->sc_ani.sc_longcal_timer = timestamp;
1005 /* Short calibration applies only while sc_caldone is FALSE */
1006 if (!sc->sc_ani.sc_caldone) {
1007 if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
1008 ATH_SHORT_CALINTERVAL) {
1009 shortcal = B_TRUE;
1010 ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1011 "%s: shortcal @%lu\n",
1012 __func__, drv_hztousec));
1013 sc->sc_ani.sc_shortcal_timer = timestamp;
1014 sc->sc_ani.sc_resetcal_timer = timestamp;
1016 } else {
1017 if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
1018 ATH_RESTART_CALINTERVAL) {
1019 ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
1020 &sc->sc_ani.sc_caldone);
1021 if (sc->sc_ani.sc_caldone)
1022 sc->sc_ani.sc_resetcal_timer = timestamp;
1026 /* Verify whether we must check ANI */
1027 if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
1028 ATH_ANI_POLLINTERVAL) {
1029 aniflag = B_TRUE;
1030 sc->sc_ani.sc_checkani_timer = timestamp;
1033 /* Skip all processing if there's nothing to do. */
1034 if (longcal || shortcal || aniflag) {
1035 /* Call ANI routine if necessary */
1036 if (aniflag)
1037 ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
1038 ah->ah_curchan);
1040 /* Perform calibration if necessary */
1041 if (longcal || shortcal) {
1042 boolean_t iscaldone = B_FALSE;
1044 if (ath9k_hw_calibrate(ah, ah->ah_curchan,
1045 sc->sc_rx_chainmask, longcal, &iscaldone)) {
1046 if (longcal)
1047 sc->sc_ani.sc_noise_floor =
1048 ath9k_hw_getchan_noise(ah,
1049 ah->ah_curchan);
1051 ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1052 "%s: calibrate chan %u/%x nf: %d\n",
1053 __func__,
1054 ah->ah_curchan->channel,
1055 ah->ah_curchan->channelFlags,
1056 sc->sc_ani.sc_noise_floor));
1057 } else {
1058 ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1059 "%s: calibrate chan %u/%x failed\n",
1060 __func__,
1061 ah->ah_curchan->channel,
1062 ah->ah_curchan->channelFlags));
1064 sc->sc_ani.sc_caldone = iscaldone;
1068 settimer:
1070 * Set timer interval based on previous results.
1071 * The interval must be the shortest necessary to satisfy ANI,
1072 * short calibration and long calibration.
1074 cal_interval = ATH_LONG_CALINTERVAL;
1075 if (sc->sc_ah->ah_config.enable_ani)
1076 cal_interval =
1077 min(cal_interval, (uint32_t)ATH_ANI_POLLINTERVAL);
1079 if (!sc->sc_ani.sc_caldone)
1080 cal_interval = min(cal_interval,
1081 (uint32_t)ATH_SHORT_CALINTERVAL);
1083 sc->sc_scan_timer = 0;
1084 sc->sc_scan_timer = timeout(arn_ani_calibrate, (void *)sc,
1085 drv_usectohz(cal_interval * 1000));
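/*
 * The reschedule interval above is the shortest interval still needed:
 * ATH_LONG_CALINTERVAL by default, tightened to ATH_ANI_POLLINTERVAL
 * when ANI is enabled and to ATH_SHORT_CALINTERVAL while short
 * calibration is still outstanding.
 */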
1088 static void
1089 arn_stop_caltimer(struct arn_softc *sc)
1091 timeout_id_t tmp_id = 0;
1093 while ((sc->sc_cal_timer != 0) && (tmp_id != sc->sc_cal_timer)) {
1094 tmp_id = sc->sc_cal_timer;
1095 (void) untimeout(tmp_id);
1097 sc->sc_cal_timer = 0;
1100 static uint_t
1101 arn_isr(caddr_t arg)
1103 /* LINTED E_BAD_PTR_CAST_ALIGN */
1104 struct arn_softc *sc = (struct arn_softc *)arg;
1105 struct ath_hal *ah = sc->sc_ah;
1106 enum ath9k_int status;
1107 ieee80211com_t *ic = (ieee80211com_t *)sc;
1109 ARN_LOCK(sc);
1111 if (sc->sc_flags & SC_OP_INVALID) {
1113 * The hardware is not ready/present, don't
1114 * touch anything. Note this can happen early
1115 * on if the IRQ is shared.
1117 ARN_UNLOCK(sc);
1118 return (DDI_INTR_UNCLAIMED);
1120 if (!ath9k_hw_intrpend(ah)) { /* shared irq, not for us */
1121 ARN_UNLOCK(sc);
1122 return (DDI_INTR_UNCLAIMED);
1126 * Figure out the reason(s) for the interrupt. Note
1127 * that the hal returns a pseudo-ISR that may include
1128 * bits we haven't explicitly enabled so we mask the
1129 * value to ensure we only process bits we requested.
1131 (void) ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
1133 status &= sc->sc_imask; /* discard unasked-for bits */
1136 * If there are no status bits set, then this interrupt was not
1137 * for me (should have been caught above).
1139 if (!status) {
1140 ARN_UNLOCK(sc);
1141 return (DDI_INTR_UNCLAIMED);
1144 sc->sc_intrstatus = status;
1146 if (status & ATH9K_INT_FATAL) {
1147 /* need a chip reset */
1148 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1149 "ATH9K_INT_FATAL\n"));
1150 goto reset;
1151 } else if (status & ATH9K_INT_RXORN) {
1152 /* need a chip reset */
1153 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1154 "ATH9K_INT_RXORN\n"));
1155 goto reset;
1156 } else {
1157 if (status & ATH9K_INT_RXEOL) {
1159 * NB: the hardware should re-read the link when
1160 * RXE bit is written, but it doesn't work
1161 * at least on older hardware revs.
1163 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1164 "ATH9K_INT_RXEOL\n"));
1165 sc->sc_rxlink = NULL;
1167 if (status & ATH9K_INT_TXURN) {
1168 /* bump tx trigger level */
1169 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1170 "ATH9K_INT_TXURN\n"));
1171 (void) ath9k_hw_updatetxtriglevel(ah, B_TRUE);
1173 /* XXX: optimize this */
1174 if (status & ATH9K_INT_RX) {
1175 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1176 "ATH9K_INT_RX\n"));
1177 sc->sc_rx_pend = 1;
1178 ddi_trigger_softintr(sc->sc_softint_id);
1180 if (status & ATH9K_INT_TX) {
1181 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1182 "ATH9K_INT_TX\n"));
1183 if (ddi_taskq_dispatch(sc->sc_tq,
1184 arn_tx_int_proc, sc, DDI_NOSLEEP) !=
1185 DDI_SUCCESS) {
1186 arn_problem("arn: arn_isr(): "
1187 "No memory for tx taskq\n");
1190 #ifdef ARN_ATH9K_INT_MIB
1191 if (status & ATH9K_INT_MIB) {
1193 * Disable interrupts until we service the MIB
1194 * interrupt; otherwise it will continue to
1195 * fire.
1197 (void) ath9k_hw_set_interrupts(ah, 0);
1199 * Let the hal handle the event. We assume
1200 * it will clear whatever condition caused
1201 * the interrupt.
1203 ath9k_hw_procmibevent(ah, &sc->sc_halstats);
1204 (void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
1205 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1206 "ATH9K_INT_MIB\n"));
1208 #endif
1210 #ifdef ARN_ATH9K_INT_TIM_TIMER
1211 if (status & ATH9K_INT_TIM_TIMER) {
1212 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1213 "ATH9K_INT_TIM_TIMER\n"));
1214 if (!(ah->ah_caps.hw_caps &
1215 ATH9K_HW_CAP_AUTOSLEEP)) {
1217 * Clear RxAbort bit so that we can
1218 * receive frames
1220 ath9k_hw_setrxabort(ah, 0);
1221 goto reset;
1224 #endif
1226 if (status & ATH9K_INT_BMISS) {
1227 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1228 "ATH9K_INT_BMISS\n"));
1229 #ifdef ARN_HW_BEACON_MISS_HANDLE
1230 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1231 "handle beacon mmiss by H/W mechanism\n"));
1232 if (ddi_taskq_dispatch(sc->sc_tq, arn_bmiss_proc,
1233 sc, DDI_NOSLEEP) != DDI_SUCCESS) {
1234 arn_problem("arn: arn_isr(): "
1235 "No memory available for bmiss taskq\n");
1237 #else
1238 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1239 "handle beacon mmiss by S/W mechanism\n"));
1240 #endif /* ARN_HW_BEACON_MISS_HANDLE */
1243 ARN_UNLOCK(sc);
1245 #ifdef ARN_ATH9K_INT_CST
1246 /* carrier sense timeout */
1247 if (status & ATH9K_INT_CST) {
1248 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1249 "ATH9K_INT_CST\n"));
1250 return (DDI_INTR_CLAIMED);
1252 #endif
1254 if (status & ATH9K_INT_SWBA) {
1255 ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
1256 "ATH9K_INT_SWBA\n"));
1257 /* This will occur only in Host-AP or Ad-Hoc mode */
1258 return (DDI_INTR_CLAIMED);
1262 return (DDI_INTR_CLAIMED);
1263 reset:
1264 ARN_DBG((ARN_DBG_INTERRUPT, "Reset for fatal error\n"));
1265 (void) arn_reset(ic);
1266 ARN_UNLOCK(sc);
1267 return (DDI_INTR_CLAIMED);
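/*
 * Interrupt dispatch summary: fatal errors and rx overruns trigger a
 * chip reset; rx completions are deferred to the soft interrupt
 * (sc_softint_id); tx completions (and, when ARN_HW_BEACON_MISS_HANDLE
 * is defined, beacon-miss handling) are dispatched to the driver
 * taskq; the remaining status bits are handled inline above.
 */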
1270 static int
1271 arn_get_channel(struct arn_softc *sc, struct ieee80211_channel *chan)
1273 int i;
1275 for (i = 0; i < sc->sc_ah->ah_nchan; i++) {
1276 if (sc->sc_ah->ah_channels[i].channel == chan->ich_freq)
1277 return (i);
1280 return (-1);
1284 arn_reset(ieee80211com_t *ic)
1286 struct arn_softc *sc = (struct arn_softc *)ic;
1287 struct ath_hal *ah = sc->sc_ah;
1288 int status;
1289 int error = 0;
1291 (void) ath9k_hw_set_interrupts(ah, 0);
1292 arn_draintxq(sc, 0);
1293 (void) arn_stoprecv(sc);
1295 if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan, sc->tx_chan_width,
1296 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1297 sc->sc_ht_extprotspacing, B_FALSE, &status)) {
1298 ARN_DBG((ARN_DBG_RESET, "arn: arn_reset(): "
1299 "unable to reset hardware; hal status %u\n", status));
1300 error = EIO;
1303 if (arn_startrecv(sc) != 0)
1304 ARN_DBG((ARN_DBG_RESET, "arn: arn_reset(): "
1305 "unable to start recv logic\n"));
1308 * We may be doing a reset in response to a request
1309 * that changes the channel so update any state that
1310 * might change as a result.
1312 arn_setcurmode(sc, arn_chan2mode(sc->sc_ah->ah_curchan));
1314 arn_update_txpow(sc);
1316 if (sc->sc_flags & SC_OP_BEACONS)
1317 arn_beacon_config(sc); /* restart beacons */
1319 (void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
1321 return (error);
1325 arn_get_hal_qnum(uint16_t queue, struct arn_softc *sc)
1327 int qnum;
1329 switch (queue) {
1330 case WME_AC_VO:
1331 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
1332 break;
1333 case WME_AC_VI:
1334 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
1335 break;
1336 case WME_AC_BE:
1337 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1338 break;
1339 case WME_AC_BK:
1340 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
1341 break;
1342 default:
1343 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1344 break;
1347 return (qnum);
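/*
 * arn_get_hal_qnum() maps a WME access category to the hardware tx
 * queue recorded in sc_haltype2q (presumably populated when the tx
 * queues are set up); unknown categories fall back to best effort.
 */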
1350 static struct {
1351 uint32_t version;
1352 const char *name;
1353 } ath_mac_bb_names[] = {
1354 { AR_SREV_VERSION_5416_PCI, "5416" },
1355 { AR_SREV_VERSION_5416_PCIE, "5418" },
1356 { AR_SREV_VERSION_9100, "9100" },
1357 { AR_SREV_VERSION_9160, "9160" },
1358 { AR_SREV_VERSION_9280, "9280" },
1359 { AR_SREV_VERSION_9285, "9285" }
1362 static struct {
1363 uint16_t version;
1364 const char *name;
1365 } ath_rf_names[] = {
1366 { 0, "5133" },
1367 { AR_RAD5133_SREV_MAJOR, "5133" },
1368 { AR_RAD5122_SREV_MAJOR, "5122" },
1369 { AR_RAD2133_SREV_MAJOR, "2133" },
1370 { AR_RAD2122_SREV_MAJOR, "2122" }
1374 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
1377 static const char *
1378 arn_mac_bb_name(uint32_t mac_bb_version)
1380 int i;
1382 for (i = 0; i < ARRAY_SIZE(ath_mac_bb_names); i++) {
1383 if (ath_mac_bb_names[i].version == mac_bb_version) {
1384 return (ath_mac_bb_names[i].name);
1388 return ("????");
1392 * Return the RF name. "????" is returned if the RF is unknown.
1395 static const char *
1396 arn_rf_name(uint16_t rf_version)
1398 int i;
1400 for (i = 0; i < ARRAY_SIZE(ath_rf_names); i++) {
1401 if (ath_rf_names[i].version == rf_version) {
1402 return (ath_rf_names[i].name);
1406 return ("????");
1409 static void
1410 arn_next_scan(void *arg)
1412 ieee80211com_t *ic = arg;
1413 struct arn_softc *sc = (struct arn_softc *)ic;
1415 sc->sc_scan_timer = 0;
1416 if (ic->ic_state == IEEE80211_S_SCAN) {
1417 sc->sc_scan_timer = timeout(arn_next_scan, (void *)sc,
1418 drv_usectohz(arn_dwelltime * 1000));
1419 ieee80211_next_scan(ic);
1423 static void
1424 arn_stop_scantimer(struct arn_softc *sc)
1426 timeout_id_t tmp_id = 0;
1428 while ((sc->sc_scan_timer != 0) && (tmp_id != sc->sc_scan_timer)) {
1429 tmp_id = sc->sc_scan_timer;
1430 (void) untimeout(tmp_id);
1432 sc->sc_scan_timer = 0;
1435 static int32_t
1436 arn_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1438 struct arn_softc *sc = (struct arn_softc *)ic;
1439 struct ath_hal *ah = sc->sc_ah;
1440 struct ieee80211_node *in;
1441 int32_t i, error;
1442 uint8_t *bssid;
1443 uint32_t rfilt;
1444 enum ieee80211_state ostate;
1445 struct ath9k_channel *channel;
1446 int pos;
1448 /* Should set up & init LED here */
1450 if (sc->sc_flags & SC_OP_INVALID)
1451 return (0);
1453 ostate = ic->ic_state;
1454 ARN_DBG((ARN_DBG_INIT, "arn: arn_newstate(): "
1455 "%x -> %x!\n", ostate, nstate));
1457 ARN_LOCK(sc);
1459 if (nstate != IEEE80211_S_SCAN)
1460 arn_stop_scantimer(sc);
1461 if (nstate != IEEE80211_S_RUN)
1462 arn_stop_caltimer(sc);
1464 /* Should set LED here */
1466 if (nstate == IEEE80211_S_INIT) {
1467 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1469 * Disable interrupts.
1471 (void) ath9k_hw_set_interrupts
1472 (ah, sc->sc_imask &~ ATH9K_INT_GLOBAL);
1474 #ifdef ARN_IBSS
1475 if (ic->ic_opmode == IEEE80211_M_IBSS) {
1476 (void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
1477 arn_beacon_return(sc);
1479 #endif
1480 ARN_UNLOCK(sc);
1481 ieee80211_stop_watchdog(ic);
1482 goto done;
1484 in = ic->ic_bss;
1486 pos = arn_get_channel(sc, ic->ic_curchan);
1488 if (pos == -1) {
1489 ARN_DBG((ARN_DBG_FATAL, "arn: "
1490 "%s: Invalid channel\n", __func__));
1491 error = EINVAL;
1492 ARN_UNLOCK(sc);
1493 goto bad;
1496 if (in->in_htcap & IEEE80211_HTCAP_CHWIDTH40) {
1497 arn_update_chainmask(sc);
1498 sc->tx_chan_width = ATH9K_HT_MACMODE_2040;
1499 } else
1500 sc->tx_chan_width = ATH9K_HT_MACMODE_20;
1502 sc->sc_ah->ah_channels[pos].chanmode =
1503 arn_chan2flags(ic, ic->ic_curchan);
1504 channel = &sc->sc_ah->ah_channels[pos];
1505 if (channel == NULL) {
1506 arn_problem("arn_newstate(): channel == NULL");
1507 ARN_UNLOCK(sc);
1508 goto bad;
1510 error = arn_set_channel(sc, channel);
1511 if (error != 0) {
1512 if (nstate != IEEE80211_S_SCAN) {
1513 ARN_UNLOCK(sc);
1514 ieee80211_reset_chan(ic);
1515 goto bad;
1520 * Get the receive filter according to the
1521 * operating mode and state
1523 rfilt = arn_calcrxfilter(sc);
1525 if (nstate == IEEE80211_S_SCAN)
1526 bssid = ic->ic_macaddr;
1527 else
1528 bssid = in->in_bssid;
1530 ath9k_hw_setrxfilter(ah, rfilt);
1532 if (nstate == IEEE80211_S_RUN && ic->ic_opmode != IEEE80211_M_IBSS)
1533 ath9k_hw_write_associd(ah, bssid, in->in_associd);
1534 else
1535 ath9k_hw_write_associd(ah, bssid, 0);
1537 /* Check for WLAN_CAPABILITY_PRIVACY ? */
1538 if (ic->ic_flags & IEEE80211_F_PRIVACY) {
1539 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1540 if (ath9k_hw_keyisvalid(ah, (uint16_t)i))
1541 (void) ath9k_hw_keysetmac(ah, (uint16_t)i,
1542 bssid);
1546 if (nstate == IEEE80211_S_RUN) {
1547 switch (ic->ic_opmode) {
1548 #ifdef ARN_IBSS
1549 case IEEE80211_M_IBSS:
1551 * Allocate and setup the beacon frame.
1552 * Stop any previous beacon DMA.
1554 (void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
1555 arn_beacon_return(sc);
1556 error = arn_beacon_alloc(sc, in);
1557 if (error != 0) {
1558 ARN_UNLOCK(sc);
1559 goto bad;
1562 * If joining an adhoc network defer beacon timer
1563 * configuration to the next beacon frame so we
1564 * have a current TSF to use. Otherwise we're
1565 * starting an ibss/bss so there's no need to delay.
1567 if (ic->ic_opmode == IEEE80211_M_IBSS &&
1568 ic->ic_bss->in_tstamp.tsf != 0) {
1569 sc->sc_bsync = 1;
1570 } else {
1571 arn_beacon_config(sc);
1573 break;
1574 #endif /* ARN_IBSS */
1575 case IEEE80211_M_STA:
1576 if (ostate != IEEE80211_S_RUN) {
1578 * Defer beacon timer configuration to the next
1579 * beacon frame so we have a current TSF to use.
1580 * Any TSF collected when scanning is likely old
1582 #ifdef ARN_IBSS
1583 sc->sc_bsync = 1;
1584 #else
1585 /* Configure the beacon and sleep timers. */
1586 arn_beacon_config(sc);
1587 /* Reset rssi stats */
1588 sc->sc_halstats.ns_avgbrssi =
1589 ATH_RSSI_DUMMY_MARKER;
1590 sc->sc_halstats.ns_avgrssi =
1591 ATH_RSSI_DUMMY_MARKER;
1592 sc->sc_halstats.ns_avgtxrssi =
1593 ATH_RSSI_DUMMY_MARKER;
1594 sc->sc_halstats.ns_avgtxrate =
1595 ATH_RATE_DUMMY_MARKER;
1596 /* end */
1598 #endif /* ARN_IBSS */
1600 break;
1601 default:
1602 break;
1604 } else {
1605 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1606 (void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
1610 * Reset the rate control state.
1612 arn_rate_ctl_reset(sc, nstate);
1614 ARN_UNLOCK(sc);
1615 done:
1617 * Invoke the parent method to complete the work.
1619 error = sc->sc_newstate(ic, nstate, arg);
1622 * Finally, start any timers.
1624 if (nstate == IEEE80211_S_RUN) {
1625 ieee80211_start_watchdog(ic, 1);
1626 ASSERT(sc->sc_cal_timer == 0);
1627 sc->sc_cal_timer = timeout(arn_ani_calibrate, (void *)sc,
1628 drv_usectohz(100 * 1000));
1629 } else if ((nstate == IEEE80211_S_SCAN) && (ostate != nstate)) {
1630 /* start ap/neighbor scan timer */
1631 /* ASSERT(sc->sc_scan_timer == 0); */
1632 if (sc->sc_scan_timer != 0) {
1633 (void) untimeout(sc->sc_scan_timer);
1634 sc->sc_scan_timer = 0;
1636 sc->sc_scan_timer = timeout(arn_next_scan, (void *)sc,
1637 drv_usectohz(arn_dwelltime * 1000));
1640 bad:
1641 return (error);
1644 static void
1645 arn_watchdog(void *arg)
1647 struct arn_softc *sc = arg;
1648 ieee80211com_t *ic = &sc->sc_isc;
1649 int ntimer = 0;
1651 ARN_LOCK(sc);
1652 ic->ic_watchdog_timer = 0;
1653 if (sc->sc_flags & SC_OP_INVALID) {
1654 ARN_UNLOCK(sc);
1655 return;
1658 if (ic->ic_state == IEEE80211_S_RUN) {
1660 * Start the background rate control thread if we
1661 * are not configured to use a fixed xmit rate.
1663 #ifdef ARN_LEGACY_RC
1664 if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
1665 sc->sc_stats.ast_rate_calls ++;
1666 if (ic->ic_opmode == IEEE80211_M_STA)
1667 arn_rate_ctl(ic, ic->ic_bss);
1668 else
1669 ieee80211_iterate_nodes(&ic->ic_sta,
1670 arn_rate_ctl, sc);
1672 #endif /* ARN_LEGACY_RC */
1674 #ifdef ARN_HW_BEACON_MISS_HANDLE
1675 /* nothing to do here */
1676 #else
1677 /* currently set 10 seconds as beacon miss threshold */
1678 if (ic->ic_beaconmiss++ > 100) {
1679 ARN_DBG((ARN_DBG_BEACON, "arn_watchdog(): "
1680 "Beacon missed for 10 seconds, run "
1681 "ieee80211_new_state(ic, IEEE80211_S_INIT, -1)\n"));
1682 ARN_UNLOCK(sc);
1683 (void) ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
1684 return;
1686 #endif /* ARN_HW_BEACON_MISS_HANDLE */
1688 ntimer = 1;
1690 ARN_UNLOCK(sc);
1692 ieee80211_watchdog(ic);
1693 if (ntimer != 0)
1694 ieee80211_start_watchdog(ic, ntimer);
1697 /* ARGSUSED */
1698 static struct ieee80211_node *
1699 arn_node_alloc(ieee80211com_t *ic)
1701 struct ath_node *an;
1702 #ifdef ARN_TX_AGGREGATION
1703 struct arn_softc *sc = (struct arn_softc *)ic;
1704 #endif
1706 an = kmem_zalloc(sizeof (struct ath_node), KM_SLEEP);
1708 /* legacy rate control */
1709 #ifdef ARN_LEGACY_RC
1710 arn_rate_update(sc, &an->an_node, 0);
1711 #endif
1713 #ifdef ARN_TX_AGGREGATION
1714 if (sc->sc_flags & SC_OP_TXAGGR) {
1715 arn_tx_node_init(sc, an);
1717 #endif /* ARN_TX_AGGREGATION */
1719 an->last_rssi = ATH_RSSI_DUMMY_MARKER;
1721 return ((an != NULL) ? &an->an_node : NULL);
1724 static void
1725 arn_node_free(struct ieee80211_node *in)
1727 ieee80211com_t *ic = in->in_ic;
1728 struct arn_softc *sc = (struct arn_softc *)ic;
1729 struct ath_buf *bf;
1730 struct ath_txq *txq;
1731 int32_t i;
1733 #ifdef ARN_TX_AGGREGATION
1734 if (sc->sc_flags & SC_OP_TXAGGR)
1735 arn_tx_node_cleanup(sc, in);
1736 #endif /* TX_AGGREGATION */
1738 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1739 if (ARN_TXQ_SETUP(sc, i)) {
1740 txq = &sc->sc_txq[i];
1741 mutex_enter(&txq->axq_lock);
1742 bf = list_head(&txq->axq_list);
1743 while (bf != NULL) {
1744 if (bf->bf_in == in) {
1745 bf->bf_in = NULL;
1747 bf = list_next(&txq->axq_list, bf);
1749 mutex_exit(&txq->axq_lock);
1753 ic->ic_node_cleanup(in);
1755 if (in->in_wpa_ie != NULL)
1756 ieee80211_free(in->in_wpa_ie);
1758 if (in->in_wme_ie != NULL)
1759 ieee80211_free(in->in_wme_ie);
1761 if (in->in_htcap_ie != NULL)
1762 ieee80211_free(in->in_htcap_ie);
1764 kmem_free(in, sizeof (struct ath_node));
1768 * Allocate tx/rx key slots for TKIP. We allocate one slot for
1769 * each key. MIC is right after the decrypt/encrypt key.
1771 static uint16_t
1772 arn_key_alloc_pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
1773 ieee80211_keyix *rxkeyix)
1775 uint16_t i, keyix;
1777 ASSERT(!sc->sc_splitmic);
1778 for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
1779 uint8_t b = sc->sc_keymap[i];
1780 if (b == 0xff)
1781 continue;
1782 for (keyix = i * NBBY; keyix < (i + 1) * NBBY;
1783 keyix++, b >>= 1) {
1784 if ((b & 1) || is_set(keyix+64, sc->sc_keymap)) {
1785 /* full pair unavailable */
1786 continue;
1788 set_bit(keyix, sc->sc_keymap);
1789 set_bit(keyix+64, sc->sc_keymap);
1790 ARN_DBG((ARN_DBG_KEYCACHE,
1791 "arn_key_alloc_pair(): key pair %u,%u\n",
1792 keyix, keyix+64));
1793 *txkeyix = *rxkeyix = keyix;
1794 return (1);
1797 ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_pair():"
1798 " out of pair space\n"));
1800 return (0);
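/*
 * Key cache layout assumed by these allocators: for a TKIP key at
 * slot i the hardware expects the MIC key at slot i+64, and with split
 * tx/rx MIC (sc_splitmic) the rx key additionally occupies slots i+32
 * and i+32+64, which is why the keymap bitmap is probed in those
 * groups.
 */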
1804 * Allocate tx/rx key slots for TKIP. We allocate two slots for
1805 * each key, one for decrypt/encrypt and the other for the MIC.
1807 static int
1808 arn_key_alloc_2pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
1809 ieee80211_keyix *rxkeyix)
1811 uint16_t i, keyix;
1813 ASSERT(sc->sc_splitmic);
1814 for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
1815 uint8_t b = sc->sc_keymap[i];
1816 if (b != 0xff) {
1818 * One or more slots in this byte are free.
1820 keyix = i*NBBY;
1821 while (b & 1) {
1822 again:
1823 keyix++;
1824 b >>= 1;
1826 /* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
1827 if (is_set(keyix+32, sc->sc_keymap) ||
1828 is_set(keyix+64, sc->sc_keymap) ||
1829 is_set(keyix+32+64, sc->sc_keymap)) {
1830 /* full pair unavailable */
1831 if (keyix == (i+1)*NBBY) {
1832 /* no slots were appropriate, advance */
1833 continue;
1835 goto again;
1837 set_bit(keyix, sc->sc_keymap);
1838 set_bit(keyix+64, sc->sc_keymap);
1839 set_bit(keyix+32, sc->sc_keymap);
1840 set_bit(keyix+32+64, sc->sc_keymap);
1841 ARN_DBG((ARN_DBG_KEYCACHE,
1842 "arn_key_alloc_2pair(): key pair %u,%u %u,%u\n",
1843 keyix, keyix+64,
1844 keyix+32, keyix+32+64));
1845 *txkeyix = *rxkeyix = keyix;
1846 return (1);
1849 ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_2pair(): "
1850 " out of pair space\n"));
1852 return (0);
1855 * Allocate a single key cache slot.
1857 static int
1858 arn_key_alloc_single(struct arn_softc *sc, ieee80211_keyix *txkeyix,
1859 ieee80211_keyix *rxkeyix)
1861 uint16_t i, keyix;
1863 /* try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
1864 for (i = 0; i < ARRAY_SIZE(sc->sc_keymap); i++) {
1865 uint8_t b = sc->sc_keymap[i];
1867 if (b != 0xff) {
1869 * One or more slots are free.
1871 keyix = i*NBBY;
1872 while (b & 1)
1873 keyix++, b >>= 1;
1874 set_bit(keyix, sc->sc_keymap);
1875 ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_single(): "
1876 "key %u\n", keyix));
1877 *txkeyix = *rxkeyix = keyix;
1878 return (1);
1881 return (0);
1885 * Allocate one or more key cache slots for a unicast key. The
1886 * key itself is needed only to identify the cipher. For hardware
1887 * TKIP with split cipher+MIC keys we allocate two key cache slot
1888 * pairs so that we can setup separate TX and RX MIC keys. Note
1889 * that the MIC key for a TKIP key at slot i is assumed by the
1890 * hardware to be at slot i+64. This limits TKIP keys to the first
1891 * 64 entries.
1893 /* ARGSUSED */
1895 arn_key_alloc(ieee80211com_t *ic, const struct ieee80211_key *k,
1896 ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1898 struct arn_softc *sc = (struct arn_softc *)ic;
1901 * We allocate two pairs for TKIP when using the h/w to do
1902 * the MIC. For everything else, including software crypto,
1903 * we allocate a single entry. Note that s/w crypto requires
1904 * a pass-through slot on the 5211 and 5212. The 5210 does
1905 * not support pass-through cache entries and we map all
1906 * those requests to slot 0.
1908 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
1909 return (arn_key_alloc_single(sc, keyix, rxkeyix));
1910 } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
1911 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
1912 if (sc->sc_splitmic)
1913 return (arn_key_alloc_2pair(sc, keyix, rxkeyix));
1914 else
1915 return (arn_key_alloc_pair(sc, keyix, rxkeyix));
1916 } else {
1917 return (arn_key_alloc_single(sc, keyix, rxkeyix));
1922 * Delete an entry in the key cache allocated by ath_key_alloc.
1925 arn_key_delete(ieee80211com_t *ic, const struct ieee80211_key *k)
1927 struct arn_softc *sc = (struct arn_softc *)ic;
1928 struct ath_hal *ah = sc->sc_ah;
1929 const struct ieee80211_cipher *cip = k->wk_cipher;
1930 ieee80211_keyix keyix = k->wk_keyix;
1932 ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_delete():"
1933 " delete key %u ic_cipher=0x%x\n", keyix, cip->ic_cipher));
1935 (void) ath9k_hw_keyreset(ah, keyix);
1937 * Handle split tx/rx keying required for TKIP with h/w MIC.
1939 if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
1940 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
1941 (void) ath9k_hw_keyreset(ah, keyix+32); /* RX key */
1943 if (keyix >= IEEE80211_WEP_NKID) {
1945 * Don't touch keymap entries for global keys so
1946 * they are never considered for dynamic allocation.
1948 clr_bit(keyix, sc->sc_keymap);
1949 if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
1950 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
1952 * If splitmic is true +64 is TX key MIC,
1953 * else +64 is RX key + RX key MIC.
1955 clr_bit(keyix+64, sc->sc_keymap);
1956 if (sc->sc_splitmic) {
1957 /* Rx key */
1958 clr_bit(keyix+32, sc->sc_keymap);
1959 /* RX key MIC */
1960 clr_bit(keyix+32+64, sc->sc_keymap);
1964 return (1);
1968 * Set a TKIP key into the hardware. This handles the
1969 * potential distribution of key state to multiple key
1970 * cache slots for TKIP.
1972 static int
1973 arn_keyset_tkip(struct arn_softc *sc, const struct ieee80211_key *k,
1974 struct ath9k_keyval *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
1976 uint8_t *key_rxmic = NULL;
1977 uint8_t *key_txmic = NULL;
1978 uint8_t *key = (uint8_t *)&(k->wk_key[0]);
1979 struct ath_hal *ah = sc->sc_ah;
1981 key_txmic = key + 16;
1982 key_rxmic = key + 24;
1984 if (mac == NULL) {
1985 /* Group key installation */
1986 (void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
1987 return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,
1988 mac, B_FALSE));
1990 if (!sc->sc_splitmic) {
1992 * data key goes at first index,
1993 * the hal handles the MIC keys at index+64.
1995 (void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
1996 (void) memcpy(hk->kv_txmic, key_txmic, sizeof (hk->kv_txmic));
1997 return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,
1998 mac, B_FALSE));
2001 * TX key goes at first index, RX key at +32.
2002 * The hal handles the MIC keys at index+64.
2004 (void) memcpy(hk->kv_mic, key_txmic, sizeof (hk->kv_mic));
2005 if (!(ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, NULL,
2006 B_FALSE))) {
2007 /* Txmic entry failed. No need to proceed further */
2008 ARN_DBG((ARN_DBG_KEYCACHE,
2009 "%s Setting TX MIC Key Failed\n", __func__));
2010 return (0);
2013 (void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
2015 /* XXX delete tx key on failure? */
2016 return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, mac, B_FALSE));
2021 arn_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
2022 const uint8_t mac[IEEE80211_ADDR_LEN])
2024 struct arn_softc *sc = (struct arn_softc *)ic;
2025 const struct ieee80211_cipher *cip = k->wk_cipher;
2026 struct ath9k_keyval hk;
2028 /* cipher table */
2029 static const uint8_t ciphermap[] = {
2030 ATH9K_CIPHER_WEP, /* IEEE80211_CIPHER_WEP */
2031 ATH9K_CIPHER_TKIP, /* IEEE80211_CIPHER_TKIP */
2032 ATH9K_CIPHER_AES_OCB, /* IEEE80211_CIPHER_AES_OCB */
2033 ATH9K_CIPHER_AES_CCM, /* IEEE80211_CIPHER_AES_CCM */
2034 ATH9K_CIPHER_CKIP, /* IEEE80211_CIPHER_CKIP */
2035 ATH9K_CIPHER_CLR, /* IEEE80211_CIPHER_NONE */
2038 bzero(&hk, sizeof (hk));
2041 * Software crypto uses a "clear key" so non-crypto
2042 * state kept in the key cache is maintained so that
2043 * rx frames have an entry to match.
2045 if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
2046 ASSERT(cip->ic_cipher < 6);
2047 hk.kv_type = ciphermap[cip->ic_cipher];
2048 hk.kv_len = k->wk_keylen;
2049 bcopy(k->wk_key, hk.kv_val, k->wk_keylen);
2050 } else {
2051 hk.kv_type = ATH9K_CIPHER_CLR;
2054 if (hk.kv_type == ATH9K_CIPHER_TKIP &&
2055 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2056 return (arn_keyset_tkip(sc, k, &hk, mac));
2057 } else {
2058 return (ath9k_hw_set_keycache_entry(sc->sc_ah,
2059 k->wk_keyix, &hk, mac, B_FALSE));
2064 * Enable/Disable short slot timing
2066 void
2067 arn_set_shortslot(ieee80211com_t *ic, int onoff)
2069 struct ath_hal *ah = ((struct arn_softc *)ic)->sc_ah;
2071 if (onoff)
2072 (void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_9);
2073 else
2074 (void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_20);
2077 static int
2078 arn_open(struct arn_softc *sc)
2080 ieee80211com_t *ic = (ieee80211com_t *)sc;
2081 struct ieee80211_channel *curchan = ic->ic_curchan;
2082 struct ath9k_channel *init_channel;
2083 int error = 0, pos, status;
2085 ARN_LOCK_ASSERT(sc);
2087 pos = arn_get_channel(sc, curchan);
2088 if (pos == -1) {
2089 ARN_DBG((ARN_DBG_FATAL, "arn: "
2090 "%s: Invalid channel\n", __func__));
2091 error = EINVAL;
2092 goto error;
2095 sc->tx_chan_width = ATH9K_HT_MACMODE_20;
2097 if (sc->sc_curmode == ATH9K_MODE_11A) {
2098 sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_A;
2099 } else {
2100 sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_G;
2103 init_channel = &sc->sc_ah->ah_channels[pos];
2105 /* Reset SERDES registers */
2106 ath9k_hw_configpcipowersave(sc->sc_ah, 0);
2109 * The basic interface to setting the hardware in a good
2110 * state is ``reset''. On return the hardware is known to
2111 * be powered up and with interrupts disabled. This must
2112 * be followed by initialization of the appropriate bits
2113 * and then setup of the interrupt mask.
2115 if (!ath9k_hw_reset(sc->sc_ah, init_channel,
2116 sc->tx_chan_width, sc->sc_tx_chainmask,
2117 sc->sc_rx_chainmask, sc->sc_ht_extprotspacing,
2118 B_FALSE, &status)) {
2119 ARN_DBG((ARN_DBG_FATAL, "arn: "
2120 "%s: unable to reset hardware; hal status %u "
2121 "(freq %u flags 0x%x)\n", __func__, status,
2122 init_channel->channel, init_channel->channelFlags));
2124 error = EIO;
2125 goto error;
2129 * This is needed only to setup initial state
2130 * but it's best done after a reset.
2132 arn_update_txpow(sc);
2135 * Setup the hardware after reset:
2136 * The receive engine is set going.
2137 * Frame transmit is handled entirely
2138 * in the frame output path; there's nothing to do
2139 * here except setup the interrupt mask.
2141 if (arn_startrecv(sc) != 0) {
2142 ARN_DBG((ARN_DBG_INIT, "arn: "
2143 "%s: unable to start recv logic\n", __func__));
2144 error = EIO;
2145 goto error;
2148 /* Setup our intr mask. */
2149 sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX |
2150 ATH9K_INT_RXEOL | ATH9K_INT_RXORN |
2151 ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
2152 #ifdef ARN_ATH9K_HW_CAP_GTT
2153 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
2154 sc->sc_imask |= ATH9K_INT_GTT;
2155 #endif
2157 #ifdef ARN_ATH9K_HW_CAP_GTT
2158 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
2159 sc->sc_imask |= ATH9K_INT_CST;
2160 #endif
2163 * Enable MIB interrupts when there are hardware phy counters.
2164 * Note we only do this (at the moment) for station and IBSS modes.
2166 #ifdef ARN_ATH9K_INT_MIB
2167 if (ath9k_hw_phycounters(sc->sc_ah) &&
2168 ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
2169 (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
2170 sc->sc_imask |= ATH9K_INT_MIB;
2171 #endif
2173 * Some hardware processes the TIM IE and fires an
2174 * interrupt when the TIM bit is set. For hardware
2175 * that does, if not overridden by configuration,
2176 * enable the TIM interrupt when operating as station.
2178 #ifdef ARN_ATH9K_INT_TIM
2179 if ((sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
2180 (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
2181 !sc->sc_config.swBeaconProcess)
2182 sc->sc_imask |= ATH9K_INT_TIM;
2183 #endif
2184 if (arn_chan2mode(init_channel) != sc->sc_curmode)
2185 arn_setcurmode(sc, arn_chan2mode(init_channel));
2186 ARN_DBG((ARN_DBG_INIT, "arn: "
2187 "%s: current mode after arn_setcurmode is %d\n",
2188 __func__, sc->sc_curmode));
2190 sc->sc_isrunning = 1;
2192 /* Disable BMISS interrupt when we're not associated */
2193 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
2194 (void) ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
2196 return (0);
2198 error:
2199 return (error);
2202 static void
2203 arn_close(struct arn_softc *sc)
2205 ieee80211com_t *ic = (ieee80211com_t *)sc;
2206 struct ath_hal *ah = sc->sc_ah;
2208 ARN_LOCK_ASSERT(sc);
2210 if (!sc->sc_isrunning)
2211 return;
2214 * Shutdown the hardware and driver
2215 * Note that some of this work is not possible if the
2216 * hardware is gone (invalid).
2218 ARN_UNLOCK(sc);
2219 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2220 ieee80211_stop_watchdog(ic);
2221 ARN_LOCK(sc);
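/*
 * Note: the lock is dropped around the net80211 state change above
 * since the state machine may call back into the driver, which may
 * take driver locks.
 */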
2224 * make sure h/w will not generate any interrupt
2225 * before setting the invalid flag.
2227 (void) ath9k_hw_set_interrupts(ah, 0);
2229 if (!(sc->sc_flags & SC_OP_INVALID)) {
2230 arn_draintxq(sc, 0);
2231 (void) arn_stoprecv(sc);
2232 (void) ath9k_hw_phy_disable(ah);
2233 } else {
2234 sc->sc_rxlink = NULL;
2237 sc->sc_isrunning = 0;
2241 * MAC callback functions
2243 static int
2244 arn_m_stat(void *arg, uint_t stat, uint64_t *val)
2246 struct arn_softc *sc = arg;
2247 ieee80211com_t *ic = (ieee80211com_t *)sc;
2248 struct ieee80211_node *in;
2249 struct ieee80211_rateset *rs;
2251 ARN_LOCK(sc);
2252 switch (stat) {
2253 case MAC_STAT_IFSPEED:
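/*
 * in_txrate indexes the negotiated rate set; rates are stored in
 * 500 kb/s units with a flag bit masked off by IEEE80211_RATE_VAL,
 * so dividing by 2 and multiplying by 1000000 yields bits/second.
 */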
2254 in = ic->ic_bss;
2255 rs = &in->in_rates;
2256 *val = (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL) / 2 *
2257 1000000ull;
2258 break;
2259 case MAC_STAT_NOXMTBUF:
2260 *val = sc->sc_stats.ast_tx_nobuf +
2261 sc->sc_stats.ast_tx_nobufmgt;
2262 break;
2263 case MAC_STAT_IERRORS:
2264 *val = sc->sc_stats.ast_rx_tooshort;
2265 break;
2266 case MAC_STAT_RBYTES:
2267 *val = ic->ic_stats.is_rx_bytes;
2268 break;
2269 case MAC_STAT_IPACKETS:
2270 *val = ic->ic_stats.is_rx_frags;
2271 break;
2272 case MAC_STAT_OBYTES:
2273 *val = ic->ic_stats.is_tx_bytes;
2274 break;
2275 case MAC_STAT_OPACKETS:
2276 *val = ic->ic_stats.is_tx_frags;
2277 break;
2278 case MAC_STAT_OERRORS:
2279 case WIFI_STAT_TX_FAILED:
2280 *val = sc->sc_stats.ast_tx_fifoerr +
2281 sc->sc_stats.ast_tx_xretries +
2282 sc->sc_stats.ast_tx_discard;
2283 break;
2284 case WIFI_STAT_TX_RETRANS:
2285 *val = sc->sc_stats.ast_tx_xretries;
2286 break;
2287 case WIFI_STAT_FCS_ERRORS:
2288 *val = sc->sc_stats.ast_rx_crcerr;
2289 break;
2290 case WIFI_STAT_WEP_ERRORS:
2291 *val = sc->sc_stats.ast_rx_badcrypt;
2292 break;
2293 case WIFI_STAT_TX_FRAGS:
2294 case WIFI_STAT_MCAST_TX:
2295 case WIFI_STAT_RTS_SUCCESS:
2296 case WIFI_STAT_RTS_FAILURE:
2297 case WIFI_STAT_ACK_FAILURE:
2298 case WIFI_STAT_RX_FRAGS:
2299 case WIFI_STAT_MCAST_RX:
2300 case WIFI_STAT_RX_DUPS:
2301 ARN_UNLOCK(sc);
2302 return (ieee80211_stat(ic, stat, val));
2303 default:
2304 ARN_UNLOCK(sc);
2305 return (ENOTSUP);
2307 ARN_UNLOCK(sc);
2309 return (0);
2313 arn_m_start(void *arg)
2315 struct arn_softc *sc = arg;
2316 int err = 0;
2318 ARN_LOCK(sc);
2321 * Stop anything previously setup. This is safe
2322 * whether this is the first time through or not.
2325 arn_close(sc);
2327 if ((err = arn_open(sc)) != 0) {
2328 ARN_UNLOCK(sc);
2329 return (err);
2332 /* H/W is ready now */
2333 sc->sc_flags &= ~SC_OP_INVALID;
2335 ARN_UNLOCK(sc);
2337 return (0);
2340 static void
2341 arn_m_stop(void *arg)
2343 struct arn_softc *sc = arg;
2345 ARN_LOCK(sc);
2346 arn_close(sc);
2348 /* disable HAL and put h/w to sleep */
2349 (void) ath9k_hw_disable(sc->sc_ah);
2350 ath9k_hw_configpcipowersave(sc->sc_ah, 1);
2352 /* XXX: hardware will not be ready in suspend state */
2353 sc->sc_flags |= SC_OP_INVALID;
2354 ARN_UNLOCK(sc);
2357 static int
2358 arn_m_promisc(void *arg, boolean_t on)
2360 struct arn_softc *sc = arg;
2361 struct ath_hal *ah = sc->sc_ah;
2362 uint32_t rfilt;
2364 ARN_LOCK(sc);
2366 rfilt = ath9k_hw_getrxfilter(ah);
2367 if (on)
2368 rfilt |= ATH9K_RX_FILTER_PROM;
2369 else
2370 rfilt &= ~ATH9K_RX_FILTER_PROM;
2371 sc->sc_promisc = on;
2372 ath9k_hw_setrxfilter(ah, rfilt);
2374 ARN_UNLOCK(sc);
2376 return (0);
2379 static int
2380 arn_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2382 struct arn_softc *sc = arg;
2383 struct ath_hal *ah = sc->sc_ah;
2384 uint32_t val, index, bit;
2385 uint8_t pos;
2386 uint32_t *mfilt = sc->sc_mcast_hash;
2388 ARN_LOCK(sc);
2390 /* calculate XOR of eight 6bit values */
2391 val = ARN_LE_READ_32(mca + 0);
2392 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2393 val = ARN_LE_READ_32(mca + 3);
2394 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2395 pos &= 0x3f;
2396 index = pos / 32;
2397 bit = 1 << (pos % 32);
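/*
 * pos is a 6-bit hash of the multicast address, selecting one of
 * 64 filter bits; mfilt[0] holds bits 0-31 and mfilt[1] bits 32-63,
 * matching the two 32-bit values passed to ath9k_hw_setmcastfilter().
 */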
2399 if (add) { /* enable multicast */
2400 sc->sc_mcast_refs[pos]++;
2401 mfilt[index] |= bit;
2402 } else { /* disable multicast */
2403 if (--sc->sc_mcast_refs[pos] == 0)
2404 mfilt[index] &= ~bit;
2406 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
2408 ARN_UNLOCK(sc);
2409 return (0);
2412 static int
2413 arn_m_unicst(void *arg, const uint8_t *macaddr)
2415 struct arn_softc *sc = arg;
2416 struct ath_hal *ah = sc->sc_ah;
2417 ieee80211com_t *ic = (ieee80211com_t *)sc;
2419 ARN_DBG((ARN_DBG_XMIT, "arn: arn_m_unicst(): "
2420 "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
2421 macaddr[0], macaddr[1], macaddr[2],
2422 macaddr[3], macaddr[4], macaddr[5]));
2424 ARN_LOCK(sc);
2425 IEEE80211_ADDR_COPY(sc->sc_isc.ic_macaddr, macaddr);
2426 (void) ath9k_hw_setmac(ah, sc->sc_isc.ic_macaddr);
2427 (void) arn_reset(ic);
2428 ARN_UNLOCK(sc);
2429 return (0);
2432 static mblk_t *
2433 arn_m_tx(void *arg, mblk_t *mp)
2435 struct arn_softc *sc = arg;
2436 int error = 0;
2437 mblk_t *next;
2438 ieee80211com_t *ic = (ieee80211com_t *)sc;
2441 * No data frames go out unless we're associated; this
2442 * should not happen as the 802.11 layer does not enable
2443 * the xmit queue until we enter the RUN state.
2445 if (ic->ic_state != IEEE80211_S_RUN) {
2446 ARN_DBG((ARN_DBG_XMIT, "arn: arn_m_tx(): "
2447 "discard, state %u\n", ic->ic_state));
2448 sc->sc_stats.ast_tx_discard++;
2449 freemsgchain(mp);
2450 return (NULL);
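/*
 * Walk the message chain.  On ENOMEM the remainder of the chain
 * (including the failed mblk) is handed back to the MAC layer so it
 * can be retried later; any other error drops the chain.
 */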
2453 while (mp != NULL) {
2454 next = mp->b_next;
2455 mp->b_next = NULL;
2456 error = arn_tx(ic, mp, IEEE80211_FC0_TYPE_DATA);
2457 if (error != 0) {
2458 mp->b_next = next;
2459 if (error == ENOMEM) {
2460 break;
2461 } else {
2462 freemsgchain(mp);
2463 return (NULL);
2466 mp = next;
2469 return (mp);
2472 static void
2473 arn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2475 struct arn_softc *sc = arg;
2476 int32_t err;
2478 err = ieee80211_ioctl(&sc->sc_isc, wq, mp);
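/*
 * ENETRESET from net80211 means the change requires a restart:
 * bounce the interface (if it is up) and kick off a new scan.
 */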
2480 ARN_LOCK(sc);
2481 if (err == ENETRESET) {
2482 if (!(sc->sc_flags & SC_OP_INVALID)) {
2483 ARN_UNLOCK(sc);
2485 (void) arn_m_start(sc);
2487 (void) ieee80211_new_state(&sc->sc_isc,
2488 IEEE80211_S_SCAN, -1);
2489 ARN_LOCK(sc);
2492 ARN_UNLOCK(sc);
2495 static int
2496 arn_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2497 uint_t wldp_length, const void *wldp_buf)
2499 struct arn_softc *sc = arg;
2500 int err;
2502 err = ieee80211_setprop(&sc->sc_isc, pr_name, wldp_pr_num,
2503 wldp_length, wldp_buf);
2505 ARN_LOCK(sc);
2507 if (err == ENETRESET) {
2508 if (!(sc->sc_flags & SC_OP_INVALID)) {
2509 ARN_UNLOCK(sc);
2510 (void) arn_m_start(sc);
2511 (void) ieee80211_new_state(&sc->sc_isc,
2512 IEEE80211_S_SCAN, -1);
2513 ARN_LOCK(sc);
2515 err = 0;
2518 ARN_UNLOCK(sc);
2520 return (err);
2523 /* ARGSUSED */
2524 static int
2525 arn_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2526 uint_t wldp_length, void *wldp_buf)
2528 struct arn_softc *sc = arg;
2529 int err = 0;
2531 err = ieee80211_getprop(&sc->sc_isc, pr_name, wldp_pr_num,
2532 wldp_length, wldp_buf);
2534 return (err);
2537 static void
2538 arn_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2539 mac_prop_info_handle_t prh)
2541 struct arn_softc *sc = arg;
2543 ieee80211_propinfo(&sc->sc_isc, pr_name, wldp_pr_num, prh);
2546 /* Set up the bus cache line size (the PCI register is in 4-byte word units) */
2547 static void
2548 arn_pci_config_cachesize(struct arn_softc *sc)
2550 uint8_t csz;
2553 * Cache line size is used to size and align various
2554 * structures used to communicate with the hardware.
2556 csz = pci_config_get8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ);
2557 if (csz == 0) {
2559 * We must have this setup properly for rx buffer
2560 * DMA to work so force a reasonable value here if it
2561 * comes up zero.
2563 csz = ATH_DEF_CACHE_BYTES / sizeof (uint32_t);
2564 pci_config_put8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ,
2565 csz);
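/*
 * The cache line size register counts 4-byte words; sc_cachelsz
 * is kept in bytes (csz << 2).
 */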
2567 sc->sc_cachelsz = csz << 2;
2570 static int
2571 arn_pci_setup(struct arn_softc *sc)
2573 uint16_t command;
2576 * Enable memory mapping and bus mastering
2578 ASSERT(sc != NULL);
2579 command = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_COMM);
2580 command |= PCI_COMM_MAE | PCI_COMM_ME;
2581 pci_config_put16(sc->sc_cfg_handle, PCI_CONF_COMM, command);
2582 command = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_COMM);
2583 if ((command & PCI_COMM_MAE) == 0) {
2584 arn_problem("arn: arn_pci_setup(): "
2585 "failed to enable memory mapping\n");
2586 return (EIO);
2588 if ((command & PCI_COMM_ME) == 0) {
2589 arn_problem("arn: arn_pci_setup(): "
2590 "failed to enable bus mastering\n");
2591 return (EIO);
2593 ARN_DBG((ARN_DBG_INIT, "arn: arn_pci_setup(): "
2594 "set command reg to 0x%x \n", command));
2596 return (0);
2599 static void
2600 arn_get_hw_encap(struct arn_softc *sc)
2602 ieee80211com_t *ic;
2603 struct ath_hal *ah;
2605 ic = (ieee80211com_t *)sc;
2606 ah = sc->sc_ah;
2608 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2609 ATH9K_CIPHER_AES_CCM, NULL))
2610 ic->ic_caps |= IEEE80211_C_AES_CCM;
2611 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2612 ATH9K_CIPHER_AES_OCB, NULL))
2613 ic->ic_caps |= IEEE80211_C_AES;
2614 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2615 ATH9K_CIPHER_TKIP, NULL))
2616 ic->ic_caps |= IEEE80211_C_TKIP;
2617 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2618 ATH9K_CIPHER_WEP, NULL))
2619 ic->ic_caps |= IEEE80211_C_WEP;
2620 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2621 ATH9K_CIPHER_MIC, NULL))
2622 ic->ic_caps |= IEEE80211_C_TKIPMIC;
2625 static void
2626 arn_setup_ht_cap(struct arn_softc *sc)
2628 #define ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */
2629 #define ATH9K_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */
2631 /* LINTED E_FUNC_SET_NOT_USED */
2632 uint8_t tx_streams;
2633 uint8_t rx_streams;
2635 arn_ht_conf *ht_info = &sc->sc_ht_conf;
2637 ht_info->ht_supported = B_TRUE;
2639 /* Todo: IEEE80211_HTCAP_SMPS */
2640 ht_info->cap = IEEE80211_HTCAP_CHWIDTH40|
2641 IEEE80211_HTCAP_SHORTGI40 |
2642 IEEE80211_HTCAP_DSSSCCK40;
2644 ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536;
2645 ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8;
2647 /* set up supported mcs set */
2648 (void) memset(&ht_info->rx_mcs_mask, 0, sizeof (ht_info->rx_mcs_mask));
2649 tx_streams = ISP2(sc->sc_ah->ah_caps.tx_chainmask) ? 1 : 2;
2650 rx_streams = ISP2(sc->sc_ah->ah_caps.rx_chainmask) ? 1 : 2;
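/*
 * A power-of-two chainmask has a single bit set, i.e. one chain,
 * so only one spatial stream; otherwise assume two streams and
 * advertise MCS 8-15 as well below.
 */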
2652 ht_info->rx_mcs_mask[0] = 0xff;
2653 if (rx_streams >= 2)
2654 ht_info->rx_mcs_mask[1] = 0xff;
2657 /* XXX: should this be used for HT rate set negotiation? */
2658 static void
2659 arn_overwrite_11n_rateset(struct arn_softc *sc)
2661 uint8_t *ht_rs = sc->sc_ht_conf.rx_mcs_mask;
2662 int mcs_idx, mcs_count = 0;
2663 int i, j;
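/*
 * Walk the rx MCS bitmap: a set bit j in byte i corresponds to
 * MCS index i * 8 + j, which is recorded directly as the rate value.
 */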
2665 (void) memset(&ieee80211_rateset_11n, 0,
2666 sizeof (ieee80211_rateset_11n));
2667 for (i = 0; i < 10; i++) {
2668 for (j = 0; j < 8; j++) {
2669 if (ht_rs[i] & (1 << j)) {
2670 mcs_idx = i * 8 + j;
2671 if (mcs_idx >= IEEE80211_HTRATE_MAXSIZE) {
2672 break;
2675 ieee80211_rateset_11n.rs_rates[mcs_idx] =
2676 (uint8_t)mcs_idx;
2677 mcs_count++;
2682 ieee80211_rateset_11n.rs_nrates = (uint8_t)mcs_count;
2684 ARN_DBG((ARN_DBG_RATE, "arn_overwrite_11n_rateset(): "
2685 "MCS rate set supported by this station is as follows:\n"));
2687 for (i = 0; i < ieee80211_rateset_11n.rs_nrates; i++) {
2688 ARN_DBG((ARN_DBG_RATE, "MCS rate %d is %d\n",
2689 i, ieee80211_rateset_11n.rs_rates[i]));
2695 * Update WME parameters for a transmit queue.
2697 static int
2698 arn_tx_queue_update(struct arn_softc *sc, int ac)
2700 #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1)
2701 #define ATH_TXOP_TO_US(v) (v<<5)
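/*
 * WME advertises CWmin/CWmax as exponents and the TXOP limit in
 * 32-microsecond units; the macros above convert them to the raw
 * contention-window values and microsecond burst time programmed
 * into the hardware queue.
 */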
2702 ieee80211com_t *ic = (ieee80211com_t *)sc;
2703 struct ath_txq *txq;
2704 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
2705 struct ath_hal *ah = sc->sc_ah;
2706 struct ath9k_tx_queue_info qi;
2708 txq = &sc->sc_txq[arn_get_hal_qnum(ac, sc)];
2709 (void) ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi);
2712 * TXQ_FLAG_TXOKINT_ENABLE = 0x0001
2713 * TXQ_FLAG_TXERRINT_ENABLE = 0x0001
2714 * TXQ_FLAG_TXDESCINT_ENABLE = 0x0002
2715 * TXQ_FLAG_TXEOLINT_ENABLE = 0x0004
2716 * TXQ_FLAG_TXURNINT_ENABLE = 0x0008
2717 * TXQ_FLAG_BACKOFF_DISABLE = 0x0010
2718 * TXQ_FLAG_COMPRESSION_ENABLE = 0x0020
2719 * TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE = 0x0040
2720 * TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE = 0x0080
2723 /* XXX: should these flags be updated here? */
2724 #if 0
2725 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
2726 TXQ_FLAG_TXERRINT_ENABLE |
2727 TXQ_FLAG_TXDESCINT_ENABLE |
2728 TXQ_FLAG_TXURNINT_ENABLE;
2729 #endif
2731 qi.tqi_aifs = wmep->wmep_aifsn;
2732 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2733 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2734 qi.tqi_readyTime = 0;
2735 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
2737 ARN_DBG((ARN_DBG_INIT,
2738 "%s:"
2739 "Q%u"
2740 "qflags 0x%x"
2741 "aifs %u"
2742 "cwmin %u"
2743 "cwmax %u"
2744 "burstTime %u\n",
2745 __func__,
2746 txq->axq_qnum,
2747 qi.tqi_qflags,
2748 qi.tqi_aifs,
2749 qi.tqi_cwmin,
2750 qi.tqi_cwmax,
2751 qi.tqi_burstTime));
2753 if (!ath9k_hw_set_txq_props(ah, txq->axq_qnum, &qi)) {
2754 arn_problem("unable to update hardware queue "
2755 "parameters for %s traffic!\n",
2756 ieee80211_wme_acnames[ac]);
2757 return (0);
2758 } else {
2759 /* push to H/W */
2760 (void) ath9k_hw_resettxqueue(ah, txq->axq_qnum);
2761 return (1);
2764 #undef ATH_TXOP_TO_US
2765 #undef ATH_EXPONENT_TO_VALUE
2768 /* Update WME parameters */
2769 static int
2770 arn_wme_update(ieee80211com_t *ic)
2772 struct arn_softc *sc = (struct arn_softc *)ic;
2774 /* Update each access category; report EIO if any queue update fails. */
2775 return (!arn_tx_queue_update(sc, WME_AC_BE) ||
2776 !arn_tx_queue_update(sc, WME_AC_BK) ||
2777 !arn_tx_queue_update(sc, WME_AC_VI) ||
2778 !arn_tx_queue_update(sc, WME_AC_VO) ? EIO : 0);
2782 * Update tx/rx chainmask. For legacy associations,
2783 * hard-code the chainmask to 1x1; for 11n associations, use
2784 * the chainmask configuration.
2786 void
2787 arn_update_chainmask(struct arn_softc *sc)
2789 boolean_t is_ht = B_FALSE;
2790 sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
2792 is_ht = sc->sc_ht_conf.ht_supported;
2793 if (is_ht) {
2794 sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
2795 sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
2796 } else {
2797 sc->sc_tx_chainmask = 1;
2798 sc->sc_rx_chainmask = 1;
2801 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_update_chainmask(): "
2802 "tx_chainmask = %d, rx_chainmask = %d\n",
2803 sc->sc_tx_chainmask, sc->sc_rx_chainmask));
2806 static int
2807 arn_resume(dev_info_t *devinfo)
2809 struct arn_softc *sc;
2810 int ret = DDI_SUCCESS;
2812 sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
2813 if (sc == NULL) {
2814 ARN_DBG((ARN_DBG_INIT, "arn: arn_resume(): "
2815 "failed to get soft state\n"));
2816 return (DDI_FAILURE);
2819 ARN_LOCK(sc);
2821 * Set up config space command register(s). Refuse
2822 * to resume on failure.
2824 if (arn_pci_setup(sc) != 0) {
2825 ARN_DBG((ARN_DBG_INIT, "arn: arn_resume(): "
2826 "arn_pci_setup() failed\n"));
2827 ARN_UNLOCK(sc);
2828 return (DDI_FAILURE);
2831 if (!(sc->sc_flags & SC_OP_INVALID))
2832 ret = arn_open(sc);
2833 ARN_UNLOCK(sc);
2835 return (ret);
2838 static int
2839 arn_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
2841 struct arn_softc *sc;
2842 int instance;
2843 int status;
2844 int32_t err;
2845 uint16_t vendor_id;
2846 uint16_t device_id;
2847 uint32_t i;
2848 uint32_t val;
2849 char strbuf[32];
2850 ieee80211com_t *ic;
2851 struct ath_hal *ah;
2852 wifi_data_t wd = { 0 };
2853 mac_register_t *macp;
2855 switch (cmd) {
2856 case DDI_ATTACH:
2857 break;
2858 case DDI_RESUME:
2859 return (arn_resume(devinfo));
2860 default:
2861 return (DDI_FAILURE);
2864 instance = ddi_get_instance(devinfo);
2865 if (ddi_soft_state_zalloc(arn_soft_state_p, instance) != DDI_SUCCESS) {
2866 ARN_DBG((ARN_DBG_ATTACH, "arn: "
2867 "%s: Unable to alloc softstate\n", __func__));
2868 return (DDI_FAILURE);
2871 sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
2872 ic = (ieee80211com_t *)sc;
2873 sc->sc_dev = devinfo;
2875 mutex_init(&sc->sc_genlock, NULL, MUTEX_DRIVER, NULL);
2876 mutex_init(&sc->sc_serial_rw, NULL, MUTEX_DRIVER, NULL);
2877 mutex_init(&sc->sc_txbuflock, NULL, MUTEX_DRIVER, NULL);
2878 mutex_init(&sc->sc_rxbuflock, NULL, MUTEX_DRIVER, NULL);
2879 mutex_init(&sc->sc_resched_lock, NULL, MUTEX_DRIVER, NULL);
2880 #ifdef ARN_IBSS
2881 mutex_init(&sc->sc_bcbuflock, NULL, MUTEX_DRIVER, NULL);
2882 #endif
2884 sc->sc_flags |= SC_OP_INVALID;
2886 err = pci_config_setup(devinfo, &sc->sc_cfg_handle);
2887 if (err != DDI_SUCCESS) {
2888 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2889 "pci_config_setup() failed"));
2890 goto attach_fail0;
2893 if (arn_pci_setup(sc) != 0)
2894 goto attach_fail1;
2896 /* Cache line size set up */
2897 arn_pci_config_cachesize(sc);
2899 vendor_id = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_VENID);
2900 device_id = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_DEVID);
2901 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): vendor 0x%x, "
2902 "device id 0x%x, cache size %d\n",
2903 vendor_id, device_id,
2904 pci_config_get8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ)));
2906 pci_config_put8(sc->sc_cfg_handle, PCI_CONF_LATENCY_TIMER, 0xa8);
2907 val = pci_config_get32(sc->sc_cfg_handle, 0x40);
2908 if ((val & 0x0000ff00) != 0)
2909 pci_config_put32(sc->sc_cfg_handle, 0x40, val & 0xffff00ff);
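/*
 * Clearing bits 15:8 of config offset 0x40 appears to disable the
 * chip's PCI retry timeout; the equivalent ath9k code does this to
 * keep retries from interfering with power-state transitions.
 */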
2911 err = ddi_regs_map_setup(devinfo, 1,
2912 &sc->mem, 0, 0, &arn_reg_accattr, &sc->sc_io_handle);
2913 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2914 "regs map1 = %x err=%d\n", sc->mem, err));
2915 if (err != DDI_SUCCESS) {
2916 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2917 "ddi_regs_map_setup() failed"));
2918 goto attach_fail1;
2921 ah = ath9k_hw_attach(device_id, sc, sc->mem, &status);
2922 if (ah == NULL) {
2923 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2924 "unable to attach hw: H/W status %u\n",
2925 status));
2926 goto attach_fail2;
2928 sc->sc_ah = ah;
2930 ath9k_hw_getmac(ah, ic->ic_macaddr);
2932 /* Get the hardware key cache size. */
2933 sc->sc_keymax = ah->ah_caps.keycache_size;
2934 if (sc->sc_keymax > ATH_KEYMAX) {
2935 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2936 "Warning, using only %u entries in %u key cache\n",
2937 ATH_KEYMAX, sc->sc_keymax));
2938 sc->sc_keymax = ATH_KEYMAX;
2942 * Reset the key cache since some parts do not
2943 * reset the contents on initial power up.
2945 for (i = 0; i < sc->sc_keymax; i++)
2946 (void) ath9k_hw_keyreset(ah, (uint16_t)i);
2948 * Mark key cache slots associated with global keys
2949 * as in use. If we knew TKIP was not to be used we
2950 * could leave the +32, +64, and +32+64 slots free.
2951 * XXX only for splitmic.
2953 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
2954 set_bit(i, sc->sc_keymap);
2955 set_bit(i + 32, sc->sc_keymap);
2956 set_bit(i + 64, sc->sc_keymap);
2957 set_bit(i + 32 + 64, sc->sc_keymap);
2960 /* Collect the channel list using the default country code */
2961 err = arn_setup_channels(sc);
2962 if (err == EINVAL) {
2963 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2964 "ERR:arn_setup_channels\n"));
2965 goto attach_fail3;
2968 /* default to STA mode */
2969 sc->sc_ah->ah_opmode = ATH9K_M_STA;
2971 /* Setup rate tables */
2972 arn_rate_attach(sc);
2973 arn_setup_rates(sc, IEEE80211_MODE_11A);
2974 arn_setup_rates(sc, IEEE80211_MODE_11B);
2975 arn_setup_rates(sc, IEEE80211_MODE_11G);
2977 /* Setup current mode here */
2978 arn_setcurmode(sc, ATH9K_MODE_11G);
2980 /* 802.11g features */
2981 if (sc->sc_have11g)
2982 ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
2983 IEEE80211_C_SHSLOT; /* short slot time */
2985 /* Temp workaround */
2986 sc->sc_mrretry = 1;
2987 sc->sc_config.ath_aggr_prot = 0;
2989 /* Setup tx/rx descriptors */
2990 err = arn_desc_alloc(devinfo, sc);
2991 if (err != DDI_SUCCESS) {
2992 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2993 "failed to allocate descriptors: %d\n", err));
2994 goto attach_fail3;
2997 if ((sc->sc_tq = ddi_taskq_create(devinfo, "ath_taskq", 1,
2998 TASKQ_DEFAULTPRI, 0)) == NULL) {
2999 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3000 "ERR:ddi_taskq_create\n"));
3001 goto attach_fail4;
3005 * Allocate hardware transmit queues: one queue for
3006 * beacon frames and one data queue for each QoS
3007 * priority. Note that the hal handles resetting
3008 * these queues at the needed time.
3010 #ifdef ARN_IBSS
3011 sc->sc_beaconq = arn_beaconq_setup(ah);
3012 if (sc->sc_beaconq == (-1)) {
3013 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3014 "unable to setup a beacon xmit queue\n"));
3015 goto attach_fail4;
3017 #endif
3018 #ifdef ARN_HOSTAP
3019 sc->sc_cabq = arn_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
3020 if (sc->sc_cabq == NULL) {
3021 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3022 "unable to setup CAB xmit queue\n"));
3023 goto attach_fail4;
3026 sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
3027 ath_cabq_update(sc);
3028 #endif
3030 for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
3031 sc->sc_haltype2q[i] = -1;
3033 /* Setup data queues */
3034 /* NB: ensure BK queue is the lowest priority h/w queue */
3035 if (!arn_tx_setup(sc, ATH9K_WME_AC_BK)) {
3036 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3037 "unable to setup xmit queue for BK traffic\n"));
3038 goto attach_fail4;
3040 if (!arn_tx_setup(sc, ATH9K_WME_AC_BE)) {
3041 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3042 "unable to setup xmit queue for BE traffic\n"));
3043 goto attach_fail4;
3045 if (!arn_tx_setup(sc, ATH9K_WME_AC_VI)) {
3046 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3047 "unable to setup xmit queue for VI traffic\n"));
3048 goto attach_fail4;
3050 if (!arn_tx_setup(sc, ATH9K_WME_AC_VO)) {
3051 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3052 "unable to setup xmit queue for VO traffic\n"));
3053 goto attach_fail4;
3057 * Initializes the noise floor to a reasonable default value.
3058 * Later on this will be updated during ANI processing.
3061 sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;
3064 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3065 ATH9K_CIPHER_TKIP, NULL)) {
3067 * Whether we should enable h/w TKIP MIC.
3068 * XXX: if we don't support WME TKIP MIC, then we wouldn't
3069 * report WMM capable, so it's always safe to turn on
3070 * TKIP MIC in this case.
3072 (void) ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
3073 0, 1, NULL);
3076 /* Get cipher-related capability information */
3077 arn_get_hw_encap(sc);
3080 * Check whether the separate key cache entries
3081 * are required to handle both tx+rx MIC keys.
3082 * With split mic keys the number of stations is limited
3083 * to 27 otherwise 59.
3085 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3086 ATH9K_CIPHER_TKIP, NULL) &&
3087 ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3088 ATH9K_CIPHER_MIC, NULL) &&
3089 ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
3090 0, NULL))
3091 sc->sc_splitmic = 1;
3093 /* turn on mcast key search if possible */
3094 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
3095 (void) ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
3096 1, NULL);
3098 sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
3099 sc->sc_config.txpowlimit_override = 0;
3101 /* 11n Capabilities */
3102 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
3103 sc->sc_flags |= SC_OP_TXAGGR;
3104 sc->sc_flags |= SC_OP_RXAGGR;
3105 arn_setup_ht_cap(sc);
3106 arn_overwrite_11n_rateset(sc);
3109 sc->sc_tx_chainmask = 1;
3110 sc->sc_rx_chainmask = 1;
3111 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3112 "tx_chainmask = %d, rx_chainmask = %d\n",
3113 sc->sc_tx_chainmask, sc->sc_rx_chainmask));
3115 /* arn_update_chainmask(sc); */
3117 (void) ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, B_TRUE, NULL);
3118 sc->sc_defant = ath9k_hw_getdefantenna(ah);
3120 ath9k_hw_getmac(ah, sc->sc_myaddr);
3121 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
3122 ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
3123 ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
3124 (void) ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
3127 /* set default value to short slot time */
3128 sc->sc_slottime = ATH9K_SLOT_TIME_9;
3129 (void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_9);
3131 /* initialize beacon slots */
3132 for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
3133 sc->sc_bslot[i] = ATH_IF_ID_ANY;
3135 /* Save MISC configurations */
3136 sc->sc_config.swBeaconProcess = 1;
3138 /* Support QoS/WME */
3139 ic->ic_caps |= IEEE80211_C_WME;
3140 ic->ic_wme.wme_update = arn_wme_update;
3142 /* Support 802.11n/HT */
3143 if (sc->sc_ht_conf.ht_supported) {
3144 ic->ic_htcaps =
3145 IEEE80211_HTCAP_CHWIDTH40 |
3146 IEEE80211_HTCAP_SHORTGI40 |
3147 IEEE80211_HTCAP_DSSSCCK40 |
3148 IEEE80211_HTCAP_MAXAMSDU_7935 |
3149 IEEE80211_HTC_HT |
3150 IEEE80211_HTC_AMSDU |
3151 IEEE80211_HTCAP_RXSTBC_2STREAM;
3153 #ifdef ARN_TX_AGGREGATION
3154 ic->ic_htcaps |= IEEE80211_HTC_AMPDU;
3155 #endif
3158 /* Header padding requested by driver */
3159 ic->ic_flags |= IEEE80211_F_DATAPAD;
3160 /* Support WPA/WPA2 */
3161 ic->ic_caps |= IEEE80211_C_WPA;
3162 #if 0
3163 ic->ic_caps |= IEEE80211_C_TXFRAG; /* handle tx frags */
3164 ic->ic_caps |= IEEE80211_C_BGSCAN; /* capable of bg scanning */
3165 #endif
3166 ic->ic_phytype = IEEE80211_T_HT;
3167 ic->ic_opmode = IEEE80211_M_STA;
3168 ic->ic_state = IEEE80211_S_INIT;
3169 ic->ic_maxrssi = ARN_MAX_RSSI;
3170 ic->ic_set_shortslot = arn_set_shortslot;
3171 ic->ic_xmit = arn_tx;
3172 ieee80211_attach(ic);
3174 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3175 "ic->ic_curchan->ich_freq: %d\n", ic->ic_curchan->ich_freq));
3177 /* each instance has its own WPA door */
3178 (void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
3179 ddi_driver_name(devinfo),
3180 ddi_get_instance(devinfo));
3182 if (sc->sc_ht_conf.ht_supported) {
3183 sc->sc_recv_action = ic->ic_recv_action;
3184 ic->ic_recv_action = arn_ampdu_recv_action;
3185 /* sc->sc_send_action = ic->ic_send_action; */
3186 /* ic->ic_send_action = arn_ampdu_send_action; */
3188 ic->ic_ampdu_rxmax = sc->sc_ht_conf.ampdu_factor;
3189 ic->ic_ampdu_density = sc->sc_ht_conf.ampdu_density;
3190 ic->ic_ampdu_limit = ic->ic_ampdu_rxmax;
3193 /* Override 80211 default routines */
3194 sc->sc_newstate = ic->ic_newstate;
3195 ic->ic_newstate = arn_newstate;
3196 #ifdef ARN_IBSS
3197 sc->sc_recv_mgmt = ic->ic_recv_mgmt;
3198 ic->ic_recv_mgmt = arn_recv_mgmt;
3199 #endif
3200 ic->ic_watchdog = arn_watchdog;
3201 ic->ic_node_alloc = arn_node_alloc;
3202 ic->ic_node_free = arn_node_free;
3203 ic->ic_crypto.cs_key_alloc = arn_key_alloc;
3204 ic->ic_crypto.cs_key_delete = arn_key_delete;
3205 ic->ic_crypto.cs_key_set = arn_key_set;
3207 ieee80211_media_init(ic);
3210 * initialize default tx key
3212 ic->ic_def_txkey = 0;
3214 sc->sc_rx_pend = 0;
3215 (void) ath9k_hw_set_interrupts(sc->sc_ah, 0);
3216 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW,
3217 &sc->sc_softint_id, NULL, 0, arn_softint_handler, (caddr_t)sc);
3218 if (err != DDI_SUCCESS) {
3219 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3220 "ddi_add_softintr() failed....\n"));
3221 goto attach_fail5;
3224 if (ddi_get_iblock_cookie(devinfo, 0, &sc->sc_iblock)
3225 != DDI_SUCCESS) {
3226 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3227 "Can not get iblock cookie for INT\n"));
3228 goto attach_fail6;
3231 if (ddi_add_intr(devinfo, 0, NULL, NULL, arn_isr,
3232 (caddr_t)sc) != DDI_SUCCESS) {
3233 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3234 "Can not set intr for ARN driver\n"));
3235 goto attach_fail6;
3239 * Provide initial settings for the WiFi plugin; whenever this
3240 * information changes, we need to call mac_plugindata_update()
3242 wd.wd_opmode = ic->ic_opmode;
3243 wd.wd_secalloc = WIFI_SEC_NONE;
3244 IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid);
3246 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3247 "IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid)"
3248 "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
3249 wd.wd_bssid[0], wd.wd_bssid[1], wd.wd_bssid[2],
3250 wd.wd_bssid[3], wd.wd_bssid[4], wd.wd_bssid[5]));
3252 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
3253 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3254 "MAC version mismatch\n"));
3255 goto attach_fail7;
3258 macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
3259 macp->m_driver = sc;
3260 macp->m_dip = devinfo;
3261 macp->m_src_addr = ic->ic_macaddr;
3262 macp->m_callbacks = &arn_m_callbacks;
3263 macp->m_min_sdu = 0;
3264 macp->m_max_sdu = IEEE80211_MTU;
3265 macp->m_pdata = &wd;
3266 macp->m_pdata_size = sizeof (wd);
3268 err = mac_register(macp, &ic->ic_mach);
3269 mac_free(macp);
3270 if (err != 0) {
3271 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3272 "mac_register err %x\n", err));
3273 goto attach_fail7;
3276 /* Create minor node of type DDI_NT_NET_WIFI */
3277 (void) snprintf(strbuf, sizeof (strbuf), "%s%d",
3278 ARN_NODENAME, instance);
3279 err = ddi_create_minor_node(devinfo, strbuf, S_IFCHR,
3280 instance + 1, DDI_NT_NET_WIFI, 0);
3281 if (err != DDI_SUCCESS)
3282 ARN_DBG((ARN_DBG_ATTACH, "WARN: arn: arn_attach(): "
3283 "Create minor node failed - %d\n", err));
3285 /* Notify link is down now */
3286 mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
3288 sc->sc_promisc = B_FALSE;
3289 bzero(sc->sc_mcast_refs, sizeof (sc->sc_mcast_refs));
3290 bzero(sc->sc_mcast_hash, sizeof (sc->sc_mcast_hash));
3292 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3293 "Atheros AR%s MAC/BB Rev:%x "
3294 "AR%s RF Rev:%x: mem=0x%lx\n",
3295 arn_mac_bb_name(ah->ah_macVersion),
3296 ah->ah_macRev,
3297 arn_rf_name((ah->ah_analog5GhzRev & AR_RADIO_SREV_MAJOR)),
3298 ah->ah_phyRev,
3299 (unsigned long)sc->mem));
3301 /* XXX: hardware will not be ready until arn_open() is called */
3302 sc->sc_flags |= SC_OP_INVALID;
3303 sc->sc_isrunning = 0;
3305 return (DDI_SUCCESS);
3307 attach_fail7:
3308 ddi_remove_intr(devinfo, 0, sc->sc_iblock);
3309 attach_fail6:
3310 ddi_remove_softintr(sc->sc_softint_id);
3311 attach_fail5:
3312 (void) ieee80211_detach(ic);
3313 attach_fail4:
3314 arn_desc_free(sc);
3315 if (sc->sc_tq)
3316 ddi_taskq_destroy(sc->sc_tq);
3317 attach_fail3:
3318 ath9k_hw_detach(ah);
3319 attach_fail2:
3320 ddi_regs_map_free(&sc->sc_io_handle);
3321 attach_fail1:
3322 pci_config_teardown(&sc->sc_cfg_handle);
3323 attach_fail0:
3324 sc->sc_flags |= SC_OP_INVALID;
3325 /* cleanup tx queues */
3326 mutex_destroy(&sc->sc_txbuflock);
3327 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
3328 if (ARN_TXQ_SETUP(sc, i)) {
3329 /* arn_tx_cleanupq(asc, &asc->sc_txq[i]); */
3330 mutex_destroy(&((&sc->sc_txq[i])->axq_lock));
3333 mutex_destroy(&sc->sc_rxbuflock);
3334 mutex_destroy(&sc->sc_serial_rw);
3335 mutex_destroy(&sc->sc_genlock);
3336 mutex_destroy(&sc->sc_resched_lock);
3337 #ifdef ARN_IBSS
3338 mutex_destroy(&sc->sc_bcbuflock);
3339 #endif
3341 ddi_soft_state_free(arn_soft_state_p, instance);
3343 return (DDI_FAILURE);
3348 * Suspend transmit/receive for powerdown
3350 static int
3351 arn_suspend(struct arn_softc *sc)
3353 ARN_LOCK(sc);
3354 arn_close(sc);
3355 ARN_UNLOCK(sc);
3357 return (DDI_SUCCESS);
3360 static int32_t
3361 arn_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
3363 struct arn_softc *sc;
3364 int i;
3366 sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
3367 ASSERT(sc != NULL);
3369 switch (cmd) {
3370 case DDI_DETACH:
3371 break;
3373 case DDI_SUSPEND:
3374 return (arn_suspend(sc));
3376 default:
3377 return (DDI_FAILURE);
3380 if (mac_disable(sc->sc_isc.ic_mach) != 0)
3381 return (DDI_FAILURE);
3383 arn_stop_scantimer(sc);
3384 arn_stop_caltimer(sc);
3386 /* disable interrupts */
3387 (void) ath9k_hw_set_interrupts(sc->sc_ah, 0);
3390 * Unregister from the MAC layer subsystem
3392 (void) mac_unregister(sc->sc_isc.ic_mach);
3394 /* free interrupt resources */
3395 ddi_remove_intr(devinfo, 0, sc->sc_iblock);
3396 ddi_remove_softintr(sc->sc_softint_id);
3399 * NB: the order of these is important:
3400 * o call the 802.11 layer before detaching the hal to
3401 * ensure callbacks into the driver to delete global
3402 * key cache entries can be handled
3403 * o reclaim the tx queue data structures after calling
3404 * the 802.11 layer as we'll get called back to reclaim
3405 * node state and potentially want to use them
3406 * o the hal is called to clean up the tx queues, so detach
3407 * it last
3409 ieee80211_detach(&sc->sc_isc);
3411 arn_desc_free(sc);
3413 ddi_taskq_destroy(sc->sc_tq);
3415 if (!(sc->sc_flags & SC_OP_INVALID))
3416 (void) ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
3418 /* cleanup tx queues */
3419 mutex_destroy(&sc->sc_txbuflock);
3420 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
3421 if (ARN_TXQ_SETUP(sc, i)) {
3422 arn_tx_cleanupq(sc, &sc->sc_txq[i]);
3423 mutex_destroy(&((&sc->sc_txq[i])->axq_lock));
3427 ath9k_hw_detach(sc->sc_ah);
3429 /* free io handle */
3430 ddi_regs_map_free(&sc->sc_io_handle);
3431 pci_config_teardown(&sc->sc_cfg_handle);
3433 /* destroy locks */
3434 mutex_destroy(&sc->sc_genlock);
3435 mutex_destroy(&sc->sc_serial_rw);
3436 mutex_destroy(&sc->sc_rxbuflock);
3437 mutex_destroy(&sc->sc_resched_lock);
3438 #ifdef ARN_IBSS
3439 mutex_destroy(&sc->sc_bcbuflock);
3440 #endif
3442 ddi_remove_minor_node(devinfo, NULL);
3443 ddi_soft_state_free(arn_soft_state_p, ddi_get_instance(devinfo));
3445 return (DDI_SUCCESS);
3449 * quiesce(9E) entry point.
3451 * This function is called when the system is single-threaded at high
3452 * PIL with preemption disabled. Therefore, this function must not
3453 * block.
3455 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
3456 * DDI_FAILURE indicates an error condition and should almost never happen.
3458 static int32_t
3459 arn_quiesce(dev_info_t *devinfo)
3461 struct arn_softc *sc;
3462 int i;
3463 struct ath_hal *ah;
3465 sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
3467 if (sc == NULL || (ah = sc->sc_ah) == NULL)
3468 return (DDI_FAILURE);
3471 * Disable interrupts
3473 (void) ath9k_hw_set_interrupts(ah, 0);
3476 * Disable TX HW
3478 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
3479 if (ARN_TXQ_SETUP(sc, i))
3480 (void) ath9k_hw_stoptxdma(ah, sc->sc_txq[i].axq_qnum);
3484 * Disable RX HW
3486 ath9k_hw_stoppcurecv(ah);
3487 ath9k_hw_setrxfilter(ah, 0);
3488 (void) ath9k_hw_stopdmarecv(ah);
3489 drv_usecwait(3000);
3492 * Power down HW
3494 (void) ath9k_hw_phy_disable(ah);
3496 return (DDI_SUCCESS);
3499 DDI_DEFINE_STREAM_OPS(arn_dev_ops, nulldev, nulldev, arn_attach, arn_detach,
3500 nodev, NULL, D_MP, NULL, arn_quiesce);
3502 static struct modldrv arn_modldrv = {
3503 &mod_driverops, /* Type of module. This one is a driver */
3504 "arn-Atheros 9000 series driver:2.0", /* short description */
3505 &arn_dev_ops /* driver specific ops */
3508 static struct modlinkage modlinkage = {
3509 MODREV_1, (void *)&arn_modldrv, NULL
3513 _info(struct modinfo *modinfop)
3515 return (mod_info(&modlinkage, modinfop));
3519 _init(void)
3521 int status;
3523 status = ddi_soft_state_init
3524 (&arn_soft_state_p, sizeof (struct arn_softc), 1);
3525 if (status != 0)
3526 return (status);
3528 mutex_init(&arn_loglock, NULL, MUTEX_DRIVER, NULL);
3529 mac_init_ops(&arn_dev_ops, "arn");
3530 status = mod_install(&modlinkage);
3531 if (status != 0) {
3532 mac_fini_ops(&arn_dev_ops);
3533 mutex_destroy(&arn_loglock);
3534 ddi_soft_state_fini(&arn_soft_state_p);
3537 return (status);
3541 _fini(void)
3543 int status;
3545 status = mod_remove(&modlinkage);
3546 if (status == 0) {
3547 mac_fini_ops(&arn_dev_ops);
3548 mutex_destroy(&arn_loglock);
3549 ddi_soft_state_fini(&arn_soft_state_p);
3551 return (status);