2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 * Use is subject to license terms.
7 * Copyright (c) 2008 Atheros Communications Inc.
9 * Permission to use, copy, modify, and/or distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 #include <sys/sysmacros.h>
23 #include <sys/param.h>
24 #include <sys/types.h>
25 #include <sys/signal.h>
26 #include <sys/stream.h>
27 #include <sys/termio.h>
28 #include <sys/errno.h>
30 #include <sys/cmn_err.h>
31 #include <sys/stropts.h>
32 #include <sys/strsubr.h>
33 #include <sys/strtty.h>
37 #include <sys/consdev.h>
39 #include <sys/modctl.h>
41 #include <sys/sunddi.h>
43 #include <sys/errno.h>
44 #include <sys/mac_provider.h>
46 #include <sys/ethernet.h>
48 #include <sys/byteorder.h>
49 #include <sys/strsun.h>
50 #include <sys/policy.h>
51 #include <inet/common.h>
54 #include <inet/wifi_ioctl.h>
55 #include <sys/mac_wifi.h>
56 #include <sys/net80211.h>
57 #include <sys/net80211_proto.h>
58 #include <sys/net80211_ht.h>
61 #include "arn_ath9k.h"
66 #define ARN_MAX_RSSI 45 /* max rssi */
69 * Default 11n rates supported by this station.
71 extern struct ieee80211_htrateset ieee80211_rateset_11n
;
74 * PIO access attributes for registers
76 static ddi_device_acc_attr_t arn_reg_accattr
= {
84 * DMA access attributes for descriptors: NOT to be byte swapped.
86 static ddi_device_acc_attr_t arn_desc_accattr
= {
94 * Describes the chip's DMA engine
96 static ddi_dma_attr_t arn_dma_attr
= {
97 DMA_ATTR_V0
, /* version number */
99 0xffffffffU
, /* high address */
100 0x3ffffU
, /* counter register max */
102 0xFFF, /* burst sizes */
103 1, /* minimum transfer size */
104 0x3ffffU
, /* max transfer size */
105 0xffffffffU
, /* address register max */
106 1, /* no scatter-gather */
107 1, /* granularity of device */
111 static ddi_dma_attr_t arn_desc_dma_attr
= {
112 DMA_ATTR_V0
, /* version number */
114 0xffffffffU
, /* high address */
115 0xffffffffU
, /* counter register max */
116 0x1000, /* alignment */
117 0xFFF, /* burst sizes */
118 1, /* minimum transfer size */
119 0xffffffffU
, /* max transfer size */
120 0xffffffffU
, /* address register max */
121 1, /* no scatter-gather */
122 1, /* granularity of device */
126 #define ATH_DEF_CACHE_BYTES 32 /* default cache line size */
128 static kmutex_t arn_loglock
;
129 static void *arn_soft_state_p
= NULL
;
130 static int arn_dwelltime
= 200; /* scan interval */
132 static int arn_m_stat(void *, uint_t
, uint64_t *);
133 static int arn_m_start(void *);
134 static void arn_m_stop(void *);
135 static int arn_m_promisc(void *, boolean_t
);
136 static int arn_m_multicst(void *, boolean_t
, const uint8_t *);
137 static int arn_m_unicst(void *, const uint8_t *);
138 static mblk_t
*arn_m_tx(void *, mblk_t
*);
139 static void arn_m_ioctl(void *, queue_t
*, mblk_t
*);
140 static int arn_m_setprop(void *, const char *, mac_prop_id_t
,
141 uint_t
, const void *);
142 static int arn_m_getprop(void *, const char *, mac_prop_id_t
,
144 static void arn_m_propinfo(void *, const char *, mac_prop_id_t
,
145 mac_prop_info_handle_t
);
147 /* MAC Callcack Functions */
148 static mac_callbacks_t arn_m_callbacks
= {
149 MC_IOCTL
| MC_SETPROP
| MC_GETPROP
| MC_PROPINFO
,
192 uint32_t arn_dbg_mask
= 0;
195 * Exception/warning cases not leading to panic.
198 arn_problem(const int8_t *fmt
, ...)
202 mutex_enter(&arn_loglock
);
205 vcmn_err(CE_WARN
, fmt
, args
);
208 mutex_exit(&arn_loglock
);
212 * Normal log information independent of debug.
215 arn_log(const int8_t *fmt
, ...)
219 mutex_enter(&arn_loglock
);
222 vcmn_err(CE_CONT
, fmt
, args
);
225 mutex_exit(&arn_loglock
);
229 arn_dbg(uint32_t dbg_flags
, const int8_t *fmt
, ...)
233 if (dbg_flags
& arn_dbg_mask
) {
234 mutex_enter(&arn_loglock
);
236 vcmn_err(CE_CONT
, fmt
, args
);
238 mutex_exit(&arn_loglock
);
243 * Read and write, they both share the same lock. We do this to serialize
244 * reads and writes on Atheros 802.11n PCI devices only. This is required
245 * as the FIFO on these devices can only accept sanely 2 requests. After
246 * that the device goes bananas. Serializing the reads/writes prevents this
250 arn_iowrite32(struct ath_hal
*ah
, uint32_t reg_offset
, uint32_t val
)
252 struct arn_softc
*sc
= ah
->ah_sc
;
253 if (ah
->ah_config
.serialize_regmode
== SER_REG_MODE_ON
) {
254 mutex_enter(&sc
->sc_serial_rw
);
255 ddi_put32(sc
->sc_io_handle
,
256 (uint32_t *)((uintptr_t)(sc
->mem
) + (reg_offset
)), val
);
257 mutex_exit(&sc
->sc_serial_rw
);
259 ddi_put32(sc
->sc_io_handle
,
260 (uint32_t *)((uintptr_t)(sc
->mem
) + (reg_offset
)), val
);
265 arn_ioread32(struct ath_hal
*ah
, uint32_t reg_offset
)
268 struct arn_softc
*sc
= ah
->ah_sc
;
269 if (ah
->ah_config
.serialize_regmode
== SER_REG_MODE_ON
) {
270 mutex_enter(&sc
->sc_serial_rw
);
271 val
= ddi_get32(sc
->sc_io_handle
,
272 (uint32_t *)((uintptr_t)(sc
->mem
) + (reg_offset
)));
273 mutex_exit(&sc
->sc_serial_rw
);
275 val
= ddi_get32(sc
->sc_io_handle
,
276 (uint32_t *)((uintptr_t)(sc
->mem
) + (reg_offset
)));
283 * Allocate an area of memory and a DMA handle for accessing it
286 arn_alloc_dma_mem(dev_info_t
*devinfo
, ddi_dma_attr_t
*dma_attr
, size_t memsize
,
287 ddi_device_acc_attr_t
*attr_p
, uint_t alloc_flags
,
288 uint_t bind_flags
, dma_area_t
*dma_p
)
295 err
= ddi_dma_alloc_handle(devinfo
, dma_attr
,
296 DDI_DMA_SLEEP
, NULL
, &dma_p
->dma_hdl
);
297 if (err
!= DDI_SUCCESS
)
298 return (DDI_FAILURE
);
303 err
= ddi_dma_mem_alloc(dma_p
->dma_hdl
, memsize
, attr_p
,
304 alloc_flags
, DDI_DMA_SLEEP
, NULL
, &dma_p
->mem_va
,
305 &dma_p
->alength
, &dma_p
->acc_hdl
);
306 if (err
!= DDI_SUCCESS
)
307 return (DDI_FAILURE
);
310 * Bind the two together
312 err
= ddi_dma_addr_bind_handle(dma_p
->dma_hdl
, NULL
,
313 dma_p
->mem_va
, dma_p
->alength
, bind_flags
,
314 DDI_DMA_SLEEP
, NULL
, &dma_p
->cookie
, &dma_p
->ncookies
);
315 if (err
!= DDI_DMA_MAPPED
)
316 return (DDI_FAILURE
);
322 return (DDI_SUCCESS
);
326 * Free one allocated area of DMAable memory
329 arn_free_dma_mem(dma_area_t
*dma_p
)
331 if (dma_p
->dma_hdl
!= NULL
) {
332 (void) ddi_dma_unbind_handle(dma_p
->dma_hdl
);
333 if (dma_p
->acc_hdl
!= NULL
) {
334 ddi_dma_mem_free(&dma_p
->acc_hdl
);
335 dma_p
->acc_hdl
= NULL
;
337 ddi_dma_free_handle(&dma_p
->dma_hdl
);
339 dma_p
->dma_hdl
= NULL
;
344 * Initialize tx, rx. or beacon buffer list. Allocate DMA memory for
348 arn_buflist_setup(dev_info_t
*devinfo
,
349 struct arn_softc
*sc
,
351 struct ath_buf
**pbf
,
352 struct ath_desc
**pds
,
358 struct ath_buf
*bf
= *pbf
;
359 struct ath_desc
*ds
= *pds
;
361 list_create(bflist
, sizeof (struct ath_buf
),
362 offsetof(struct ath_buf
, bf_node
));
363 for (i
= 0; i
< nbuf
; i
++, bf
++, ds
++) {
365 bf
->bf_daddr
= sc
->sc_desc_dma
.cookie
.dmac_address
+
366 ((uintptr_t)ds
- (uintptr_t)sc
->sc_desc
);
367 list_insert_tail(bflist
, bf
);
369 /* alloc DMA memory */
370 err
= arn_alloc_dma_mem(devinfo
, &arn_dma_attr
,
371 buflen
, &arn_desc_accattr
, DDI_DMA_STREAMING
,
372 dmabflags
, &bf
->bf_dma
);
373 if (err
!= DDI_SUCCESS
)
379 return (DDI_SUCCESS
);
383 * Destroy tx, rx or beacon buffer list. Free DMA memory.
386 arn_buflist_cleanup(list_t
*buflist
)
393 bf
= list_head(buflist
);
395 if (bf
->bf_m
!= NULL
) {
399 /* Free DMA buffer */
400 arn_free_dma_mem(&bf
->bf_dma
);
401 if (bf
->bf_in
!= NULL
) {
402 ieee80211_free_node(bf
->bf_in
);
405 list_remove(buflist
, bf
);
406 bf
= list_head(buflist
);
408 list_destroy(buflist
);
412 arn_desc_free(struct arn_softc
*sc
)
414 arn_buflist_cleanup(&sc
->sc_txbuf_list
);
415 arn_buflist_cleanup(&sc
->sc_rxbuf_list
);
417 arn_buflist_cleanup(&sc
->sc_bcbuf_list
);
420 /* Free descriptor DMA buffer */
421 arn_free_dma_mem(&sc
->sc_desc_dma
);
423 kmem_free((void *)sc
->sc_vbufptr
, sc
->sc_vbuflen
);
424 sc
->sc_vbufptr
= NULL
;
428 arn_desc_alloc(dev_info_t
*devinfo
, struct arn_softc
*sc
)
436 size
= sizeof (struct ath_desc
) * (ATH_TXBUF
+ ATH_RXBUF
+ ATH_BCBUF
);
438 size
= sizeof (struct ath_desc
) * (ATH_TXBUF
+ ATH_RXBUF
);
441 err
= arn_alloc_dma_mem(devinfo
, &arn_desc_dma_attr
, size
,
442 &arn_desc_accattr
, DDI_DMA_CONSISTENT
,
443 DDI_DMA_RDWR
| DDI_DMA_CONSISTENT
, &sc
->sc_desc_dma
);
445 /* virtual address of the first descriptor */
446 sc
->sc_desc
= (struct ath_desc
*)sc
->sc_desc_dma
.mem_va
;
449 ARN_DBG((ARN_DBG_INIT
, "arn: arn_desc_alloc(): DMA map: "
451 sc
->sc_desc
, sc
->sc_desc_dma
.alength
,
452 sc
->sc_desc_dma
.cookie
.dmac_address
));
454 /* allocate data structures to describe TX/RX DMA buffers */
456 sc
->sc_vbuflen
= sizeof (struct ath_buf
) * (ATH_TXBUF
+ ATH_RXBUF
+
459 sc
->sc_vbuflen
= sizeof (struct ath_buf
) * (ATH_TXBUF
+ ATH_RXBUF
);
461 bf
= (struct ath_buf
*)kmem_zalloc(sc
->sc_vbuflen
, KM_SLEEP
);
464 /* DMA buffer size for each TX/RX packet */
465 #ifdef ARN_TX_AGGREGRATION
467 roundup((IEEE80211_MAX_MPDU_LEN
+ 3840 * 2),
468 min(sc
->sc_cachelsz
, (uint16_t)64));
471 roundup(IEEE80211_MAX_MPDU_LEN
, min(sc
->sc_cachelsz
, (uint16_t)64));
474 roundup(IEEE80211_MAX_MPDU_LEN
, min(sc
->sc_cachelsz
, (uint16_t)64));
476 /* create RX buffer list */
477 err
= arn_buflist_setup(devinfo
, sc
, &sc
->sc_rxbuf_list
, &bf
, &ds
,
478 ATH_RXBUF
, DDI_DMA_READ
| DDI_DMA_STREAMING
, sc
->rx_dmabuf_size
);
479 if (err
!= DDI_SUCCESS
) {
484 /* create TX buffer list */
485 err
= arn_buflist_setup(devinfo
, sc
, &sc
->sc_txbuf_list
, &bf
, &ds
,
486 ATH_TXBUF
, DDI_DMA_STREAMING
, sc
->tx_dmabuf_size
);
487 if (err
!= DDI_SUCCESS
) {
492 /* create beacon buffer list */
494 err
= arn_buflist_setup(devinfo
, sc
, &sc
->sc_bcbuf_list
, &bf
, &ds
,
495 ATH_BCBUF
, DDI_DMA_STREAMING
);
496 if (err
!= DDI_SUCCESS
) {
502 return (DDI_SUCCESS
);
505 static struct ath_rate_table
*
506 /* LINTED E_STATIC_UNUSED */
507 arn_get_ratetable(struct arn_softc
*sc
, uint32_t mode
)
509 struct ath_rate_table
*rate_table
= NULL
;
512 case IEEE80211_MODE_11A
:
513 rate_table
= sc
->hw_rate_table
[ATH9K_MODE_11A
];
515 case IEEE80211_MODE_11B
:
516 rate_table
= sc
->hw_rate_table
[ATH9K_MODE_11B
];
518 case IEEE80211_MODE_11G
:
519 rate_table
= sc
->hw_rate_table
[ATH9K_MODE_11G
];
522 case IEEE80211_MODE_11NA_HT20
:
523 rate_table
= sc
->hw_rate_table
[ATH9K_MODE_11NA_HT20
];
525 case IEEE80211_MODE_11NG_HT20
:
526 rate_table
= sc
->hw_rate_table
[ATH9K_MODE_11NG_HT20
];
528 case IEEE80211_MODE_11NA_HT40PLUS
:
529 rate_table
= sc
->hw_rate_table
[ATH9K_MODE_11NA_HT40PLUS
];
531 case IEEE80211_MODE_11NA_HT40MINUS
:
532 rate_table
= sc
->hw_rate_table
[ATH9K_MODE_11NA_HT40MINUS
];
534 case IEEE80211_MODE_11NG_HT40PLUS
:
535 rate_table
= sc
->hw_rate_table
[ATH9K_MODE_11NG_HT40PLUS
];
537 case IEEE80211_MODE_11NG_HT40MINUS
:
538 rate_table
= sc
->hw_rate_table
[ATH9K_MODE_11NG_HT40MINUS
];
542 ARN_DBG((ARN_DBG_FATAL
, "arn: arn_get_ratetable(): "
543 "invalid mode %u\n", mode
));
552 arn_setcurmode(struct arn_softc
*sc
, enum wireless_mode mode
)
554 struct ath_rate_table
*rt
;
557 for (i
= 0; i
< sizeof (sc
->asc_rixmap
); i
++)
558 sc
->asc_rixmap
[i
] = 0xff;
560 rt
= sc
->hw_rate_table
[mode
];
563 for (i
= 0; i
< rt
->rate_cnt
; i
++)
564 sc
->asc_rixmap
[rt
->info
[i
].dot11rate
&
565 IEEE80211_RATE_VAL
] = (uint8_t)i
; /* LINT */
567 sc
->sc_currates
= rt
;
568 sc
->sc_curmode
= mode
;
571 * All protection frames are transmited at 2Mb/s for
572 * 11g, otherwise at 1Mb/s.
573 * XXX select protection rate index from rate table.
575 sc
->sc_protrix
= (mode
== ATH9K_MODE_11G
? 1 : 0);
578 static enum wireless_mode
579 arn_chan2mode(struct ath9k_channel
*chan
)
581 if (chan
->chanmode
== CHANNEL_A
)
582 return (ATH9K_MODE_11A
);
583 else if (chan
->chanmode
== CHANNEL_G
)
584 return (ATH9K_MODE_11G
);
585 else if (chan
->chanmode
== CHANNEL_B
)
586 return (ATH9K_MODE_11B
);
587 else if (chan
->chanmode
== CHANNEL_A_HT20
)
588 return (ATH9K_MODE_11NA_HT20
);
589 else if (chan
->chanmode
== CHANNEL_G_HT20
)
590 return (ATH9K_MODE_11NG_HT20
);
591 else if (chan
->chanmode
== CHANNEL_A_HT40PLUS
)
592 return (ATH9K_MODE_11NA_HT40PLUS
);
593 else if (chan
->chanmode
== CHANNEL_A_HT40MINUS
)
594 return (ATH9K_MODE_11NA_HT40MINUS
);
595 else if (chan
->chanmode
== CHANNEL_G_HT40PLUS
)
596 return (ATH9K_MODE_11NG_HT40PLUS
);
597 else if (chan
->chanmode
== CHANNEL_G_HT40MINUS
)
598 return (ATH9K_MODE_11NG_HT40MINUS
);
600 return (ATH9K_MODE_11B
);
604 arn_update_txpow(struct arn_softc
*sc
)
606 struct ath_hal
*ah
= sc
->sc_ah
;
609 if (sc
->sc_curtxpow
!= sc
->sc_config
.txpowlimit
) {
610 (void) ath9k_hw_set_txpowerlimit(ah
, sc
->sc_config
.txpowlimit
);
611 /* read back in case value is clamped */
612 (void) ath9k_hw_getcapability(ah
, ATH9K_CAP_TXPOW
, 1, &txpow
);
613 sc
->sc_curtxpow
= (uint32_t)txpow
;
618 parse_mpdudensity(uint8_t mpdudensity
)
621 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
622 * 0 for no restriction
631 switch (mpdudensity
) {
638 * Our lower layer calculations limit our
639 * precision to 1 microsecond
656 arn_setup_rates(struct arn_softc
*sc
, uint32_t mode
)
659 struct ath_rate_table
*rate_table
= NULL
;
660 struct ieee80211_rateset
*rateset
;
661 ieee80211com_t
*ic
= (ieee80211com_t
*)sc
;
663 /* rate_table = arn_get_ratetable(sc, mode); */
665 case IEEE80211_MODE_11A
:
666 rate_table
= sc
->hw_rate_table
[ATH9K_MODE_11A
];
668 case IEEE80211_MODE_11B
:
669 rate_table
= sc
->hw_rate_table
[ATH9K_MODE_11B
];
671 case IEEE80211_MODE_11G
:
672 rate_table
= sc
->hw_rate_table
[ATH9K_MODE_11G
];
675 case IEEE80211_MODE_11NA_HT20
:
676 rate_table
= sc
->hw_rate_table
[ATH9K_MODE_11NA_HT20
];
678 case IEEE80211_MODE_11NG_HT20
:
679 rate_table
= sc
->hw_rate_table
[ATH9K_MODE_11NG_HT20
];
681 case IEEE80211_MODE_11NA_HT40PLUS
:
682 rate_table
= sc
->hw_rate_table
[ATH9K_MODE_11NA_HT40PLUS
];
684 case IEEE80211_MODE_11NA_HT40MINUS
:
685 rate_table
= sc
->hw_rate_table
[ATH9K_MODE_11NA_HT40MINUS
];
687 case IEEE80211_MODE_11NG_HT40PLUS
:
688 rate_table
= sc
->hw_rate_table
[ATH9K_MODE_11NG_HT40PLUS
];
690 case IEEE80211_MODE_11NG_HT40MINUS
:
691 rate_table
= sc
->hw_rate_table
[ATH9K_MODE_11NG_HT40MINUS
];
695 ARN_DBG((ARN_DBG_RATE
, "arn: arn_get_ratetable(): "
696 "invalid mode %u\n", mode
));
699 if (rate_table
== NULL
)
701 if (rate_table
->rate_cnt
> ATH_RATE_MAX
) {
702 ARN_DBG((ARN_DBG_RATE
, "arn: arn_rate_setup(): "
703 "rate table too small (%u > %u)\n",
704 rate_table
->rate_cnt
, IEEE80211_RATE_MAXSIZE
));
705 maxrates
= ATH_RATE_MAX
;
707 maxrates
= rate_table
->rate_cnt
;
709 ARN_DBG((ARN_DBG_RATE
, "arn: arn_rate_setup(): "
710 "maxrates is %d\n", maxrates
));
712 rateset
= &ic
->ic_sup_rates
[mode
];
713 for (i
= 0; i
< maxrates
; i
++) {
714 rateset
->ir_rates
[i
] = rate_table
->info
[i
].dot11rate
;
715 ARN_DBG((ARN_DBG_RATE
, "arn: arn_rate_setup(): "
716 "%d\n", rate_table
->info
[i
].dot11rate
));
718 rateset
->ir_nrates
= (uint8_t)maxrates
; /* ??? */
722 arn_setup_channels(struct arn_softc
*sc
)
724 struct ath_hal
*ah
= sc
->sc_ah
;
725 ieee80211com_t
*ic
= (ieee80211com_t
*)sc
;
727 uint8_t regclassids
[ATH_REGCLASSIDS_MAX
];
728 uint32_t nregclass
= 0;
729 struct ath9k_channel
*c
;
731 /* Fill in ah->ah_channels */
732 if (!ath9k_regd_init_channels(ah
, ATH_CHAN_MAX
, (uint32_t *)&nchan
,
733 regclassids
, ATH_REGCLASSIDS_MAX
, &nregclass
, CTRY_DEFAULT
,
735 uint32_t rd
= ah
->ah_currentRD
;
736 ARN_DBG((ARN_DBG_CHANNEL
, "arn: arn_setup_channels(): "
737 "unable to collect channel list; "
738 "regdomain likely %u country code %u\n",
743 ARN_DBG((ARN_DBG_CHANNEL
, "arn: arn_setup_channels(): "
744 "number of channel is %d\n", nchan
));
746 for (i
= 0; i
< nchan
; i
++) {
747 c
= &ah
->ah_channels
[i
];
749 index
= ath9k_hw_mhz2ieee(ah
, c
->channel
, c
->channelFlags
);
751 if (index
> IEEE80211_CHAN_MAX
) {
752 ARN_DBG((ARN_DBG_CHANNEL
,
753 "arn: arn_setup_channels(): "
754 "bad hal channel %d (%u/%x) ignored\n",
755 index
, c
->channel
, c
->channelFlags
));
758 /* NB: flags are known to be compatible */
761 * can't handle frequency <2400MHz (negative
762 * channels) right now
764 ARN_DBG((ARN_DBG_CHANNEL
,
765 "arn: arn_setup_channels(): "
766 "hal channel %d (%u/%x) "
767 "cannot be handled, ignored\n",
768 index
, c
->channel
, c
->channelFlags
));
773 * Calculate net80211 flags; most are compatible
774 * but some need massaging. Note the static turbo
775 * conversion can be removed once net80211 is updated
776 * to understand static vs. dynamic turbo.
779 flags
= c
->channelFlags
& (CHANNEL_ALL
| CHANNEL_PASSIVE
);
781 if (ic
->ic_sup_channels
[index
].ich_freq
== 0) {
782 ic
->ic_sup_channels
[index
].ich_freq
= c
->channel
;
783 ic
->ic_sup_channels
[index
].ich_flags
= flags
;
785 /* channels overlap; e.g. 11g and 11b */
786 ic
->ic_sup_channels
[index
].ich_flags
|= flags
;
788 if ((c
->channelFlags
& CHANNEL_G
) == CHANNEL_G
) {
790 ic
->ic_caps
|= IEEE80211_C_SHPREAMBLE
|
791 IEEE80211_C_SHSLOT
; /* short slot time */
799 arn_chan2flags(ieee80211com_t
*isc
, struct ieee80211_channel
*chan
)
801 uint32_t channel_mode
;
802 switch (ieee80211_chan2mode(isc
, chan
)) {
803 case IEEE80211_MODE_11NA
:
804 if (chan
->ich_flags
& IEEE80211_CHAN_HT40U
)
805 channel_mode
= CHANNEL_A_HT40PLUS
;
806 else if (chan
->ich_flags
& IEEE80211_CHAN_HT40D
)
807 channel_mode
= CHANNEL_A_HT40MINUS
;
809 channel_mode
= CHANNEL_A_HT20
;
811 case IEEE80211_MODE_11NG
:
812 if (chan
->ich_flags
& IEEE80211_CHAN_HT40U
)
813 channel_mode
= CHANNEL_G_HT40PLUS
;
814 else if (chan
->ich_flags
& IEEE80211_CHAN_HT40D
)
815 channel_mode
= CHANNEL_G_HT40MINUS
;
817 channel_mode
= CHANNEL_G_HT20
;
819 case IEEE80211_MODE_TURBO_G
:
820 case IEEE80211_MODE_STURBO_A
:
821 case IEEE80211_MODE_TURBO_A
:
824 case IEEE80211_MODE_11A
:
825 channel_mode
= CHANNEL_A
;
827 case IEEE80211_MODE_11G
:
828 channel_mode
= CHANNEL_B
;
830 case IEEE80211_MODE_11B
:
831 channel_mode
= CHANNEL_G
;
833 case IEEE80211_MODE_FH
:
840 return (channel_mode
);
844 * Update internal state after a channel change.
847 arn_chan_change(struct arn_softc
*sc
, struct ieee80211_channel
*chan
)
849 struct ieee80211com
*ic
= &sc
->sc_isc
;
850 enum ieee80211_phymode mode
;
851 enum wireless_mode wlmode
;
854 * Change channels and update the h/w rate map
855 * if we're switching; e.g. 11a to 11b/g.
857 mode
= ieee80211_chan2mode(ic
, chan
);
859 case IEEE80211_MODE_11A
:
860 wlmode
= ATH9K_MODE_11A
;
862 case IEEE80211_MODE_11B
:
863 wlmode
= ATH9K_MODE_11B
;
865 case IEEE80211_MODE_11G
:
866 wlmode
= ATH9K_MODE_11B
;
871 if (wlmode
!= sc
->sc_curmode
)
872 arn_setcurmode(sc
, wlmode
);
877 * Set/change channels. If the channel is really being changed, it's done
878 * by reseting the chip. To accomplish this we must first cleanup any pending
879 * DMA, then restart stuff.
882 arn_set_channel(struct arn_softc
*sc
, struct ath9k_channel
*hchan
)
884 struct ath_hal
*ah
= sc
->sc_ah
;
885 ieee80211com_t
*ic
= &sc
->sc_isc
;
886 boolean_t fastcc
= B_TRUE
;
888 struct ieee80211_channel chan
;
889 enum wireless_mode curmode
;
891 if (sc
->sc_flags
& SC_OP_INVALID
)
894 if (hchan
->channel
!= sc
->sc_ah
->ah_curchan
->channel
||
895 hchan
->channelFlags
!= sc
->sc_ah
->ah_curchan
->channelFlags
||
896 (sc
->sc_flags
& SC_OP_CHAINMASK_UPDATE
) ||
897 (sc
->sc_flags
& SC_OP_FULL_RESET
)) {
901 * This is only performed if the channel settings have
904 * To switch channels clear any pending DMA operations;
905 * wait long enough for the RX fifo to drain, reset the
906 * hardware at the new frequency, and then re-enable
907 * the relevant bits of the h/w.
909 (void) ath9k_hw_set_interrupts(ah
, 0); /* disable interrupts */
910 arn_draintxq(sc
, B_FALSE
); /* clear pending tx frames */
911 stopped
= arn_stoprecv(sc
); /* turn off frame recv */
914 * XXX: do not flush receive queue here. We don't want
915 * to flush data frames already in queue because of
919 if (!stopped
|| (sc
->sc_flags
& SC_OP_FULL_RESET
))
922 ARN_DBG((ARN_DBG_CHANNEL
, "arn: arn_set_channel(): "
923 "(%u MHz) -> (%u MHz), cflags:%x, chanwidth: %d\n",
924 sc
->sc_ah
->ah_curchan
->channel
,
925 hchan
->channel
, hchan
->channelFlags
, sc
->tx_chan_width
));
927 if (!ath9k_hw_reset(ah
, hchan
, sc
->tx_chan_width
,
928 sc
->sc_tx_chainmask
, sc
->sc_rx_chainmask
,
929 sc
->sc_ht_extprotspacing
, fastcc
, &status
)) {
930 ARN_DBG((ARN_DBG_FATAL
, "arn: arn_set_channel(): "
931 "unable to reset channel %u (%uMhz) "
932 "flags 0x%x hal status %u\n",
933 ath9k_hw_mhz2ieee(ah
, hchan
->channel
,
934 hchan
->channelFlags
),
935 hchan
->channel
, hchan
->channelFlags
, status
));
939 sc
->sc_curchan
= *hchan
;
941 sc
->sc_flags
&= ~SC_OP_CHAINMASK_UPDATE
;
942 sc
->sc_flags
&= ~SC_OP_FULL_RESET
;
944 if (arn_startrecv(sc
) != 0) {
945 arn_problem("arn: arn_set_channel(): "
946 "unable to restart recv logic\n");
950 chan
.ich_freq
= hchan
->channel
;
951 chan
.ich_flags
= hchan
->channelFlags
;
952 ic
->ic_ibss_chan
= &chan
;
955 * Change channels and update the h/w rate map
956 * if we're switching; e.g. 11a to 11b/g.
958 curmode
= arn_chan2mode(hchan
);
959 if (curmode
!= sc
->sc_curmode
)
960 arn_setcurmode(sc
, arn_chan2mode(hchan
));
962 arn_update_txpow(sc
);
964 (void) ath9k_hw_set_interrupts(ah
, sc
->sc_imask
);
971 * This routine performs the periodic noise floor calibration function
972 * that is used to adjust and optimize the chip performance. This
973 * takes environmental changes (location, temperature) into account.
974 * When the task is complete, it reschedules itself depending on the
975 * appropriate interval that was calculated.
978 arn_ani_calibrate(void *arg
)
981 ieee80211com_t
*ic
= (ieee80211com_t
*)arg
;
982 struct arn_softc
*sc
= (struct arn_softc
*)ic
;
983 struct ath_hal
*ah
= sc
->sc_ah
;
984 boolean_t longcal
= B_FALSE
;
985 boolean_t shortcal
= B_FALSE
;
986 boolean_t aniflag
= B_FALSE
;
987 unsigned int timestamp
= drv_hztousec(ddi_get_lbolt())/1000;
988 uint32_t cal_interval
;
991 * don't calibrate when we're scanning.
992 * we are most likely not on our home channel.
994 if (ic
->ic_state
!= IEEE80211_S_RUN
)
997 /* Long calibration runs independently of short calibration. */
998 if ((timestamp
- sc
->sc_ani
.sc_longcal_timer
) >= ATH_LONG_CALINTERVAL
) {
1000 ARN_DBG((ARN_DBG_CALIBRATE
, "arn: "
1001 "%s: longcal @%lu\n", __func__
, drv_hztousec
));
1002 sc
->sc_ani
.sc_longcal_timer
= timestamp
;
1005 /* Short calibration applies only while sc_caldone is FALSE */
1006 if (!sc
->sc_ani
.sc_caldone
) {
1007 if ((timestamp
- sc
->sc_ani
.sc_shortcal_timer
) >=
1008 ATH_SHORT_CALINTERVAL
) {
1010 ARN_DBG((ARN_DBG_CALIBRATE
, "arn: "
1011 "%s: shortcal @%lu\n",
1012 __func__
, drv_hztousec
));
1013 sc
->sc_ani
.sc_shortcal_timer
= timestamp
;
1014 sc
->sc_ani
.sc_resetcal_timer
= timestamp
;
1017 if ((timestamp
- sc
->sc_ani
.sc_resetcal_timer
) >=
1018 ATH_RESTART_CALINTERVAL
) {
1019 ath9k_hw_reset_calvalid(ah
, ah
->ah_curchan
,
1020 &sc
->sc_ani
.sc_caldone
);
1021 if (sc
->sc_ani
.sc_caldone
)
1022 sc
->sc_ani
.sc_resetcal_timer
= timestamp
;
1026 /* Verify whether we must check ANI */
1027 if ((timestamp
- sc
->sc_ani
.sc_checkani_timer
) >=
1028 ATH_ANI_POLLINTERVAL
) {
1030 sc
->sc_ani
.sc_checkani_timer
= timestamp
;
1033 /* Skip all processing if there's nothing to do. */
1034 if (longcal
|| shortcal
|| aniflag
) {
1035 /* Call ANI routine if necessary */
1037 ath9k_hw_ani_monitor(ah
, &sc
->sc_halstats
,
1040 /* Perform calibration if necessary */
1041 if (longcal
|| shortcal
) {
1042 boolean_t iscaldone
= B_FALSE
;
1044 if (ath9k_hw_calibrate(ah
, ah
->ah_curchan
,
1045 sc
->sc_rx_chainmask
, longcal
, &iscaldone
)) {
1047 sc
->sc_ani
.sc_noise_floor
=
1048 ath9k_hw_getchan_noise(ah
,
1051 ARN_DBG((ARN_DBG_CALIBRATE
, "arn: "
1052 "%s: calibrate chan %u/%x nf: %d\n",
1054 ah
->ah_curchan
->channel
,
1055 ah
->ah_curchan
->channelFlags
,
1056 sc
->sc_ani
.sc_noise_floor
));
1058 ARN_DBG((ARN_DBG_CALIBRATE
, "arn: "
1059 "%s: calibrate chan %u/%x failed\n",
1061 ah
->ah_curchan
->channel
,
1062 ah
->ah_curchan
->channelFlags
));
1064 sc
->sc_ani
.sc_caldone
= iscaldone
;
1070 * Set timer interval based on previous results.
1071 * The interval must be the shortest necessary to satisfy ANI,
1072 * short calibration and long calibration.
1074 cal_interval
= ATH_LONG_CALINTERVAL
;
1075 if (sc
->sc_ah
->ah_config
.enable_ani
)
1077 min(cal_interval
, (uint32_t)ATH_ANI_POLLINTERVAL
);
1079 if (!sc
->sc_ani
.sc_caldone
)
1080 cal_interval
= min(cal_interval
,
1081 (uint32_t)ATH_SHORT_CALINTERVAL
);
1083 sc
->sc_scan_timer
= 0;
1084 sc
->sc_scan_timer
= timeout(arn_ani_calibrate
, (void *)sc
,
1085 drv_usectohz(cal_interval
* 1000));
1089 arn_stop_caltimer(struct arn_softc
*sc
)
1091 timeout_id_t tmp_id
= 0;
1093 while ((sc
->sc_cal_timer
!= 0) && (tmp_id
!= sc
->sc_cal_timer
)) {
1094 tmp_id
= sc
->sc_cal_timer
;
1095 (void) untimeout(tmp_id
);
1097 sc
->sc_cal_timer
= 0;
1101 arn_isr(caddr_t arg
)
1103 /* LINTED E_BAD_PTR_CAST_ALIGN */
1104 struct arn_softc
*sc
= (struct arn_softc
*)arg
;
1105 struct ath_hal
*ah
= sc
->sc_ah
;
1106 enum ath9k_int status
;
1107 ieee80211com_t
*ic
= (ieee80211com_t
*)sc
;
1111 if (sc
->sc_flags
& SC_OP_INVALID
) {
1113 * The hardware is not ready/present, don't
1114 * touch anything. Note this can happen early
1115 * on if the IRQ is shared.
1118 return (DDI_INTR_UNCLAIMED
);
1120 if (!ath9k_hw_intrpend(ah
)) { /* shared irq, not for us */
1122 return (DDI_INTR_UNCLAIMED
);
1126 * Figure out the reason(s) for the interrupt. Note
1127 * that the hal returns a pseudo-ISR that may include
1128 * bits we haven't explicitly enabled so we mask the
1129 * value to insure we only process bits we requested.
1131 (void) ath9k_hw_getisr(ah
, &status
); /* NB: clears ISR too */
1133 status
&= sc
->sc_imask
; /* discard unasked-for bits */
1136 * If there are no status bits set, then this interrupt was not
1137 * for me (should have been caught above).
1141 return (DDI_INTR_UNCLAIMED
);
1144 sc
->sc_intrstatus
= status
;
1146 if (status
& ATH9K_INT_FATAL
) {
1147 /* need a chip reset */
1148 ARN_DBG((ARN_DBG_INTERRUPT
, "arn: arn_isr(): "
1149 "ATH9K_INT_FATAL\n"));
1151 } else if (status
& ATH9K_INT_RXORN
) {
1152 /* need a chip reset */
1153 ARN_DBG((ARN_DBG_INTERRUPT
, "arn: arn_isr(): "
1154 "ATH9K_INT_RXORN\n"));
1157 if (status
& ATH9K_INT_RXEOL
) {
1159 * NB: the hardware should re-read the link when
1160 * RXE bit is written, but it doesn't work
1161 * at least on older hardware revs.
1163 ARN_DBG((ARN_DBG_INTERRUPT
, "arn: arn_isr(): "
1164 "ATH9K_INT_RXEOL\n"));
1165 sc
->sc_rxlink
= NULL
;
1167 if (status
& ATH9K_INT_TXURN
) {
1168 /* bump tx trigger level */
1169 ARN_DBG((ARN_DBG_INTERRUPT
, "arn: arn_isr(): "
1170 "ATH9K_INT_TXURN\n"));
1171 (void) ath9k_hw_updatetxtriglevel(ah
, B_TRUE
);
1173 /* XXX: optimize this */
1174 if (status
& ATH9K_INT_RX
) {
1175 ARN_DBG((ARN_DBG_INTERRUPT
, "arn: arn_isr(): "
1178 ddi_trigger_softintr(sc
->sc_softint_id
);
1180 if (status
& ATH9K_INT_TX
) {
1181 ARN_DBG((ARN_DBG_INTERRUPT
, "arn: arn_isr(): "
1183 if (ddi_taskq_dispatch(sc
->sc_tq
,
1184 arn_tx_int_proc
, sc
, DDI_NOSLEEP
) !=
1186 arn_problem("arn: arn_isr(): "
1187 "No memory for tx taskq\n");
1190 #ifdef ARN_ATH9K_INT_MIB
1191 if (status
& ATH9K_INT_MIB
) {
1193 * Disable interrupts until we service the MIB
1194 * interrupt; otherwise it will continue to
1197 (void) ath9k_hw_set_interrupts(ah
, 0);
1199 * Let the hal handle the event. We assume
1200 * it will clear whatever condition caused
1203 ath9k_hw_procmibevent(ah
, &sc
->sc_halstats
);
1204 (void) ath9k_hw_set_interrupts(ah
, sc
->sc_imask
);
1205 ARN_DBG((ARN_DBG_INTERRUPT
, "arn: arn_isr(): "
1206 "ATH9K_INT_MIB\n"));
1210 #ifdef ARN_ATH9K_INT_TIM_TIMER
1211 if (status
& ATH9K_INT_TIM_TIMER
) {
1212 ARN_DBG((ARN_DBG_INTERRUPT
, "arn: arn_isr(): "
1213 "ATH9K_INT_TIM_TIMER\n"));
1214 if (!(ah
->ah_caps
.hw_caps
&
1215 ATH9K_HW_CAP_AUTOSLEEP
)) {
1217 * Clear RxAbort bit so that we can
1220 ath9k_hw_setrxabort(ah
, 0);
1226 if (status
& ATH9K_INT_BMISS
) {
1227 ARN_DBG((ARN_DBG_INTERRUPT
, "arn: arn_isr(): "
1228 "ATH9K_INT_BMISS\n"));
1229 #ifdef ARN_HW_BEACON_MISS_HANDLE
1230 ARN_DBG((ARN_DBG_INTERRUPT
, "arn: arn_isr(): "
1231 "handle beacon mmiss by H/W mechanism\n"));
1232 if (ddi_taskq_dispatch(sc
->sc_tq
, arn_bmiss_proc
,
1233 sc
, DDI_NOSLEEP
) != DDI_SUCCESS
) {
1234 arn_problem("arn: arn_isr(): "
1235 "No memory available for bmiss taskq\n");
1238 ARN_DBG((ARN_DBG_INTERRUPT
, "arn: arn_isr(): "
1239 "handle beacon mmiss by S/W mechanism\n"));
1240 #endif /* ARN_HW_BEACON_MISS_HANDLE */
1245 #ifdef ARN_ATH9K_INT_CST
1246 /* carrier sense timeout */
1247 if (status
& ATH9K_INT_CST
) {
1248 ARN_DBG((ARN_DBG_INTERRUPT
, "arn: arn_isr(): "
1249 "ATH9K_INT_CST\n"));
1250 return (DDI_INTR_CLAIMED
);
1254 if (status
& ATH9K_INT_SWBA
) {
1255 ARN_DBG((ARN_DBG_INTERRUPT
, "arn: arn_isr(): "
1256 "ATH9K_INT_SWBA\n"));
1257 /* This will occur only in Host-AP or Ad-Hoc mode */
1258 return (DDI_INTR_CLAIMED
);
1262 return (DDI_INTR_CLAIMED
);
1264 ARN_DBG((ARN_DBG_INTERRUPT
, "Rset for fatal err\n"));
1265 (void) arn_reset(ic
);
1267 return (DDI_INTR_CLAIMED
);
1271 arn_get_channel(struct arn_softc
*sc
, struct ieee80211_channel
*chan
)
1275 for (i
= 0; i
< sc
->sc_ah
->ah_nchan
; i
++) {
1276 if (sc
->sc_ah
->ah_channels
[i
].channel
== chan
->ich_freq
)
1284 arn_reset(ieee80211com_t
*ic
)
1286 struct arn_softc
*sc
= (struct arn_softc
*)ic
;
1287 struct ath_hal
*ah
= sc
->sc_ah
;
1291 (void) ath9k_hw_set_interrupts(ah
, 0);
1292 arn_draintxq(sc
, 0);
1293 (void) arn_stoprecv(sc
);
1295 if (!ath9k_hw_reset(ah
, sc
->sc_ah
->ah_curchan
, sc
->tx_chan_width
,
1296 sc
->sc_tx_chainmask
, sc
->sc_rx_chainmask
,
1297 sc
->sc_ht_extprotspacing
, B_FALSE
, &status
)) {
1298 ARN_DBG((ARN_DBG_RESET
, "arn: arn_reset(): "
1299 "unable to reset hardware; hal status %u\n", status
));
1303 if (arn_startrecv(sc
) != 0)
1304 ARN_DBG((ARN_DBG_RESET
, "arn: arn_reset(): "
1305 "unable to start recv logic\n"));
1308 * We may be doing a reset in response to a request
1309 * that changes the channel so update any state that
1310 * might change as a result.
1312 arn_setcurmode(sc
, arn_chan2mode(sc
->sc_ah
->ah_curchan
));
1314 arn_update_txpow(sc
);
1316 if (sc
->sc_flags
& SC_OP_BEACONS
)
1317 arn_beacon_config(sc
); /* restart beacons */
1319 (void) ath9k_hw_set_interrupts(ah
, sc
->sc_imask
);
1325 arn_get_hal_qnum(uint16_t queue
, struct arn_softc
*sc
)
1331 qnum
= sc
->sc_haltype2q
[ATH9K_WME_AC_VO
];
1334 qnum
= sc
->sc_haltype2q
[ATH9K_WME_AC_VI
];
1337 qnum
= sc
->sc_haltype2q
[ATH9K_WME_AC_BE
];
1340 qnum
= sc
->sc_haltype2q
[ATH9K_WME_AC_BK
];
1343 qnum
= sc
->sc_haltype2q
[ATH9K_WME_AC_BE
];
1353 } ath_mac_bb_names
[] = {
1354 { AR_SREV_VERSION_5416_PCI
, "5416" },
1355 { AR_SREV_VERSION_5416_PCIE
, "5418" },
1356 { AR_SREV_VERSION_9100
, "9100" },
1357 { AR_SREV_VERSION_9160
, "9160" },
1358 { AR_SREV_VERSION_9280
, "9280" },
1359 { AR_SREV_VERSION_9285
, "9285" }
1365 } ath_rf_names
[] = {
1367 { AR_RAD5133_SREV_MAJOR
, "5133" },
1368 { AR_RAD5122_SREV_MAJOR
, "5122" },
1369 { AR_RAD2133_SREV_MAJOR
, "2133" },
1370 { AR_RAD2122_SREV_MAJOR
, "2122" }
1374 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
1378 arn_mac_bb_name(uint32_t mac_bb_version
)
1382 for (i
= 0; i
< ARRAY_SIZE(ath_mac_bb_names
); i
++) {
1383 if (ath_mac_bb_names
[i
].version
== mac_bb_version
) {
1384 return (ath_mac_bb_names
[i
].name
);
1392 * Return the RF name. "????" is returned if the RF is unknown.
1396 arn_rf_name(uint16_t rf_version
)
1400 for (i
= 0; i
< ARRAY_SIZE(ath_rf_names
); i
++) {
1401 if (ath_rf_names
[i
].version
== rf_version
) {
1402 return (ath_rf_names
[i
].name
);
1410 arn_next_scan(void *arg
)
1412 ieee80211com_t
*ic
= arg
;
1413 struct arn_softc
*sc
= (struct arn_softc
*)ic
;
1415 sc
->sc_scan_timer
= 0;
1416 if (ic
->ic_state
== IEEE80211_S_SCAN
) {
1417 sc
->sc_scan_timer
= timeout(arn_next_scan
, (void *)sc
,
1418 drv_usectohz(arn_dwelltime
* 1000));
1419 ieee80211_next_scan(ic
);
1424 arn_stop_scantimer(struct arn_softc
*sc
)
1426 timeout_id_t tmp_id
= 0;
1428 while ((sc
->sc_scan_timer
!= 0) && (tmp_id
!= sc
->sc_scan_timer
)) {
1429 tmp_id
= sc
->sc_scan_timer
;
1430 (void) untimeout(tmp_id
);
1432 sc
->sc_scan_timer
= 0;
/*
 * net80211 state-change callback: steps the hardware through
 * INIT/SCAN/AUTH/ASSOC/RUN transitions (channel change, rx filter,
 * BSSID programming, beacon setup, rate-control reset), then chains
 * to the saved net80211 handler sc->sc_newstate() and finally starts
 * the calibration or scan timers as appropriate.
 *
 * NOTE(review): this extraction dropped many lines (returns, else
 * branches, closing braces, several local declarations such as
 * pos/error/rfilt/bssid/i).  The surviving token stream is preserved
 * verbatim below — reconcile with the original source before use.
 */
arn_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
	struct arn_softc *sc = (struct arn_softc *)ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_node *in;
	enum ieee80211_state ostate;
	struct ath9k_channel *channel;

	/* Should set up & init LED here */

	/* device gone/suspended: nothing to do (body dropped in extraction) */
	if (sc->sc_flags & SC_OP_INVALID)

	ostate = ic->ic_state;
	ARN_DBG((ARN_DBG_INIT, "arn: arn_newstate(): "
	    "%x -> %x!\n", ostate, nstate));

	/* timers only run while in their own state */
	if (nstate != IEEE80211_S_SCAN)
		arn_stop_scantimer(sc);
	if (nstate != IEEE80211_S_RUN)
		arn_stop_caltimer(sc);

	/* Should set LED here */

	if (nstate == IEEE80211_S_INIT) {
		sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
		/*
		 * Disable interrupts.
		 */
		(void) ath9k_hw_set_interrupts
		    (ah, sc->sc_imask &~ ATH9K_INT_GLOBAL);
		if (ic->ic_opmode == IEEE80211_M_IBSS) {
			/* stop beacon DMA and release beacon resources */
			(void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
			arn_beacon_return(sc);
		ieee80211_stop_watchdog(ic);

	pos = arn_get_channel(sc, ic->ic_curchan);
		ARN_DBG((ARN_DBG_FATAL, "arn: "
		    "%s: Invalid channel\n", __func__));

	if (in->in_htcap & IEEE80211_HTCAP_CHWIDTH40) {
		arn_update_chainmask(sc);
		sc->tx_chan_width = ATH9K_HT_MACMODE_2040;
		/* else branch dropped in extraction */
		sc->tx_chan_width = ATH9K_HT_MACMODE_20;

	sc->sc_ah->ah_channels[pos].chanmode =
	    arn_chan2flags(ic, ic->ic_curchan);
	channel = &sc->sc_ah->ah_channels[pos];
	if (channel == NULL) {
		arn_problem("arn_newstate(): channel == NULL");
	error = arn_set_channel(sc, channel);

	if (nstate != IEEE80211_S_SCAN) {
		ieee80211_reset_chan(ic);

	/*
	 * Get the receive filter according to the
	 * operating mode and state
	 */
	rfilt = arn_calcrxfilter(sc);

	if (nstate == IEEE80211_S_SCAN)
		bssid = ic->ic_macaddr;
	/* else branch dropped in extraction */
		bssid = in->in_bssid;

	ath9k_hw_setrxfilter(ah, rfilt);

	if (nstate == IEEE80211_S_RUN && ic->ic_opmode != IEEE80211_M_IBSS)
		ath9k_hw_write_associd(ah, bssid, in->in_associd);
	/* else branch dropped in extraction */
		ath9k_hw_write_associd(ah, bssid, 0);

	/* Check for WLAN_CAPABILITY_PRIVACY ? */
	if (ic->ic_flags & IEEE80211_F_PRIVACY) {
		for (i = 0; i < IEEE80211_WEP_NKID; i++) {
			if (ath9k_hw_keyisvalid(ah, (uint16_t)i))
				(void) ath9k_hw_keysetmac(ah, (uint16_t)i,

	if (nstate == IEEE80211_S_RUN) {
		switch (ic->ic_opmode) {
		case IEEE80211_M_IBSS:
			/*
			 * Allocate and setup the beacon frame.
			 * Stop any previous beacon DMA.
			 */
			(void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
			arn_beacon_return(sc);
			error = arn_beacon_alloc(sc, in);

			/*
			 * If joining an adhoc network defer beacon timer
			 * configuration to the next beacon frame so we
			 * have a current TSF to use. Otherwise we're
			 * starting an ibss/bss so there's no need to delay.
			 */
			if (ic->ic_opmode == IEEE80211_M_IBSS &&
			    ic->ic_bss->in_tstamp.tsf != 0) {
				arn_beacon_config(sc);
#endif	/* ARN_IBSS */
		case IEEE80211_M_STA:
			if (ostate != IEEE80211_S_RUN) {
				/*
				 * Defer beacon timer configuration to the next
				 * beacon frame so we have a current TSF to use.
				 * Any TSF collected when scanning is likely old
				 */
				/* Configure the beacon and sleep timers. */
				arn_beacon_config(sc);
				/* Reset rssi stats */
				sc->sc_halstats.ns_avgbrssi =
				    ATH_RSSI_DUMMY_MARKER;
				sc->sc_halstats.ns_avgrssi =
				    ATH_RSSI_DUMMY_MARKER;
				sc->sc_halstats.ns_avgtxrssi =
				    ATH_RSSI_DUMMY_MARKER;
				sc->sc_halstats.ns_avgtxrate =
				    ATH_RATE_DUMMY_MARKER;
#endif	/* ARN_IBSS */

	/* not RUN: mask beacon interrupts */
	sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
	(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);

	/*
	 * Reset the rate control state.
	 */
	arn_rate_ctl_reset(sc, nstate);

	/*
	 * Invoke the parent method to complete the work.
	 */
	error = sc->sc_newstate(ic, nstate, arg);

	/*
	 * Finally, start any timers.
	 */
	if (nstate == IEEE80211_S_RUN) {
		ieee80211_start_watchdog(ic, 1);
		ASSERT(sc->sc_cal_timer == 0);
		/* periodic ANI calibration every 100ms */
		sc->sc_cal_timer = timeout(arn_ani_calibrate, (void *)sc,
		    drv_usectohz(100 * 1000));
	} else if ((nstate == IEEE80211_S_SCAN) && (ostate != nstate)) {
		/* start ap/neighbor scan timer */
		/* ASSERT(sc->sc_scan_timer == 0); */
		if (sc->sc_scan_timer != 0) {
			(void) untimeout(sc->sc_scan_timer);
			sc->sc_scan_timer = 0;
		sc->sc_scan_timer = timeout(arn_next_scan, (void *)sc,
		    drv_usectohz(arn_dwelltime * 1000));
/*
 * Per-second watchdog: while associated (RUN state) it optionally runs
 * the legacy rate-control pass and the software beacon-miss detector,
 * then chains to ieee80211_watchdog() and re-arms itself via
 * ieee80211_start_watchdog().
 *
 * NOTE(review): this extraction dropped lines (locking, ntimer setup,
 * closing braces); the surviving token stream is preserved verbatim.
 */
arn_watchdog(void *arg)
	struct arn_softc *sc = arg;
	ieee80211com_t *ic = &sc->sc_isc;

	ic->ic_watchdog_timer = 0;
	/* device invalid: do not re-arm (body dropped in extraction) */
	if (sc->sc_flags & SC_OP_INVALID) {

	if (ic->ic_state == IEEE80211_S_RUN) {
		/*
		 * Start the background rate control thread if we
		 * are not configured to use a fixed xmit rate.
		 */
#ifdef ARN_LEGACY_RC
		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
			sc->sc_stats.ast_rate_calls++;
			if (ic->ic_opmode == IEEE80211_M_STA)
				arn_rate_ctl(ic, ic->ic_bss);
			/* else iterate over all stations (tail dropped) */
				ieee80211_iterate_nodes(&ic->ic_sta,
#endif /* ARN_LEGACY_RC */

#ifdef ARN_HW_BEACON_MISS_HANDLE
		/* nothing to do here */
		/* currently set 10 seconds as beacon miss threshold */
		if (ic->ic_beaconmiss++ > 100) {
			ARN_DBG((ARN_DBG_BEACON, "arn_watchdog():"
			    "Beacon missed for 10 seconds, run"
			    "ieee80211_new_state(ic, IEEE80211_S_INIT, -1)\n"));
			(void) ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
#endif /* ARN_HW_BEACON_MISS_HANDLE */

	ieee80211_watchdog(ic);

	/* re-arm the watchdog (ntimer computed above; lines dropped) */
	ieee80211_start_watchdog(ic, ntimer);
1698 static struct ieee80211_node
*
1699 arn_node_alloc(ieee80211com_t
*ic
)
1701 struct ath_node
*an
;
1702 #ifdef ARN_TX_AGGREGATION
1703 struct arn_softc
*sc
= (struct arn_softc
*)ic
;
1706 an
= kmem_zalloc(sizeof (struct ath_node
), KM_SLEEP
);
1708 /* legacy rate control */
1709 #ifdef ARN_LEGACY_RC
1710 arn_rate_update(sc
, &an
->an_node
, 0);
1713 #ifdef ARN_TX_AGGREGATION
1714 if (sc
->sc_flags
& SC_OP_TXAGGR
) {
1715 arn_tx_node_init(sc
, an
);
1717 #endif /* ARN_TX_AGGREGATION */
1719 an
->last_rssi
= ATH_RSSI_DUMMY_MARKER
;
1721 return ((an
!= NULL
) ? &an
->an_node
: NULL
);
/*
 * net80211 callback: free a per-station node.  Walks every active tx
 * queue to reclaim buffers still addressed to this node, runs the
 * generic net80211 cleanup, frees any cached IEs and finally releases
 * the enclosing struct ath_node.
 *
 * NOTE(review): this extraction dropped lines (declarations of i/bf,
 * the buffer-reclaim body inside the inner if, closing braces); the
 * surviving token stream is preserved verbatim.
 */
arn_node_free(struct ieee80211_node *in)
	ieee80211com_t *ic = in->in_ic;
	struct arn_softc *sc = (struct arn_softc *)ic;
	struct ath_txq *txq;

#ifdef ARN_TX_AGGREGATION
	if (sc->sc_flags & SC_OP_TXAGGR)
		arn_tx_node_cleanup(sc, in);
#endif /* TX_AGGREGATION */

	/* scan all configured tx queues for buffers owned by this node */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ARN_TXQ_SETUP(sc, i)) {
			txq = &sc->sc_txq[i];
			mutex_enter(&txq->axq_lock);
			bf = list_head(&txq->axq_list);
			while (bf != NULL) {
				/* buffer belongs to the dying node
				 * (reclaim body dropped in extraction) */
				if (bf->bf_in == in) {
				bf = list_next(&txq->axq_list, bf);
			mutex_exit(&txq->axq_lock);

	ic->ic_node_cleanup(in);

	if (in->in_wpa_ie != NULL)
		ieee80211_free(in->in_wpa_ie);

	if (in->in_wme_ie != NULL)
		ieee80211_free(in->in_wme_ie);

	if (in->in_htcap_ie != NULL)
		ieee80211_free(in->in_htcap_ie);

	/* the ieee80211_node is embedded in struct ath_node */
	kmem_free(in, sizeof (struct ath_node));
/*
 * Allocate tx/rx key slots for TKIP. We allocate one slot for
 * each key. MIC is right after the decrypt/encrypt key.
 *
 * Used when the hardware does NOT require split MIC keys
 * (ASSERT(!sc->sc_splitmic)); slot i holds the key and slot i+64 its
 * MIC, so both bits must be free together.
 *
 * NOTE(review): extraction dropped the inner-loop increment, the
 * success/failure returns and closing braces; tokens preserved as-is.
 */
arn_key_alloc_pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
    ieee80211_keyix *rxkeyix)
	ASSERT(!sc->sc_splitmic);
	/* scan the keymap one byte (8 slots) at a time */
	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
		uint8_t b = sc->sc_keymap[i];

		for (keyix = i * NBBY; keyix < (i + 1) * NBBY;
			if ((b & 1) || is_set(keyix+64, sc->sc_keymap)) {
				/* full pair unavailable */
			set_bit(keyix, sc->sc_keymap);
			set_bit(keyix+64, sc->sc_keymap);
			ARN_DBG((ARN_DBG_KEYCACHE,
			    "arn_key_alloc_pair(): key pair %u,%u\n",
			*txkeyix = *rxkeyix = keyix;

	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_pair():"
	    " out of pair space\n"));
/*
 * Allocate tx/rx key slots for TKIP. We allocate two slots for
 * each key, one for decrypt/encrypt and the other for the MIC.
 *
 * Split-MIC variant (ASSERT(sc->sc_splitmic)): a single key consumes
 * four slots — keyix (TX key), keyix+32 (RX key), keyix+64 (TX MIC)
 * and keyix+32+64 (RX MIC); all four bits must be free together.
 *
 * NOTE(review): extraction dropped the inner-loop header, returns and
 * closing braces; tokens preserved as-is.
 */
arn_key_alloc_2pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
    ieee80211_keyix *rxkeyix)
	ASSERT(sc->sc_splitmic);
	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
		uint8_t b = sc->sc_keymap[i];
		/*
		 * One or more slots in this byte are free.
		 */
			/* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
			if (is_set(keyix+32, sc->sc_keymap) ||
			    is_set(keyix+64, sc->sc_keymap) ||
			    is_set(keyix+32+64, sc->sc_keymap)) {
				/* full pair unavailable */
				if (keyix == (i+1)*NBBY) {
					/* no slots were appropriate, advance */
			set_bit(keyix, sc->sc_keymap);
			set_bit(keyix+64, sc->sc_keymap);
			set_bit(keyix+32, sc->sc_keymap);
			set_bit(keyix+32+64, sc->sc_keymap);
			ARN_DBG((ARN_DBG_KEYCACHE,
			    "arn_key_alloc_2pair(): key pair %u,%u %u,%u\n",
			    keyix+32, keyix+32+64));
			*txkeyix = *rxkeyix = keyix;

	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_2pair(): "
	    " out of pair space\n"));
/*
 * Allocate a single key cache slot.
 *
 * Scans the keymap byte-wise and claims the first free slot bit.
 *
 * NOTE(review): extraction dropped the inner slot-search loop, the
 * returns and closing braces; tokens preserved as-is.
 */
arn_key_alloc_single(struct arn_softc *sc, ieee80211_keyix *txkeyix,
    ieee80211_keyix *rxkeyix)
	/* try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap); i++) {
		uint8_t b = sc->sc_keymap[i];
		/*
		 * One or more slots are free.
		 */
			set_bit(keyix, sc->sc_keymap);
			ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_single(): "
			    "key %u\n", keyix));
			*txkeyix = *rxkeyix = keyix;
1885 * Allocate one or more key cache slots for a unicast key. The
1886 * key itself is needed only to identify the cipher. For hardware
1887 * TKIP with split cipher+MIC keys we allocate two key cache slot
1888 * pairs so that we can setup separate TX and RX MIC keys. Note
1889 * that the MIC key for a TKIP key at slot i is assumed by the
1890 * hardware to be at slot i+64. This limits TKIP keys to the first
1895 arn_key_alloc(ieee80211com_t
*ic
, const struct ieee80211_key
*k
,
1896 ieee80211_keyix
*keyix
, ieee80211_keyix
*rxkeyix
)
1898 struct arn_softc
*sc
= (struct arn_softc
*)ic
;
1901 * We allocate two pair for TKIP when using the h/w to do
1902 * the MIC. For everything else, including software crypto,
1903 * we allocate a single entry. Note that s/w crypto requires
1904 * a pass-through slot on the 5211 and 5212. The 5210 does
1905 * not support pass-through cache entries and we map all
1906 * those requests to slot 0.
1908 if (k
->wk_flags
& IEEE80211_KEY_SWCRYPT
) {
1909 return (arn_key_alloc_single(sc
, keyix
, rxkeyix
));
1910 } else if (k
->wk_cipher
->ic_cipher
== IEEE80211_CIPHER_TKIP
&&
1911 (k
->wk_flags
& IEEE80211_KEY_SWMIC
) == 0) {
1912 if (sc
->sc_splitmic
)
1913 return (arn_key_alloc_2pair(sc
, keyix
, rxkeyix
));
1915 return (arn_key_alloc_pair(sc
, keyix
, rxkeyix
));
1917 return (arn_key_alloc_single(sc
, keyix
, rxkeyix
));
1922 * Delete an entry in the key cache allocated by ath_key_alloc.
1925 arn_key_delete(ieee80211com_t
*ic
, const struct ieee80211_key
*k
)
1927 struct arn_softc
*sc
= (struct arn_softc
*)ic
;
1928 struct ath_hal
*ah
= sc
->sc_ah
;
1929 const struct ieee80211_cipher
*cip
= k
->wk_cipher
;
1930 ieee80211_keyix keyix
= k
->wk_keyix
;
1932 ARN_DBG((ARN_DBG_KEYCACHE
, "arn_key_delete():"
1933 " delete key %u ic_cipher=0x%x\n", keyix
, cip
->ic_cipher
));
1935 (void) ath9k_hw_keyreset(ah
, keyix
);
1937 * Handle split tx/rx keying required for TKIP with h/w MIC.
1939 if (cip
->ic_cipher
== IEEE80211_CIPHER_TKIP
&&
1940 (k
->wk_flags
& IEEE80211_KEY_SWMIC
) == 0 && sc
->sc_splitmic
)
1941 (void) ath9k_hw_keyreset(ah
, keyix
+32); /* RX key */
1943 if (keyix
>= IEEE80211_WEP_NKID
) {
1945 * Don't touch keymap entries for global keys so
1946 * they are never considered for dynamic allocation.
1948 clr_bit(keyix
, sc
->sc_keymap
);
1949 if (cip
->ic_cipher
== IEEE80211_CIPHER_TKIP
&&
1950 (k
->wk_flags
& IEEE80211_KEY_SWMIC
) == 0) {
1952 * If splitmic is true +64 is TX key MIC,
1953 * else +64 is RX key + RX key MIC.
1955 clr_bit(keyix
+64, sc
->sc_keymap
);
1956 if (sc
->sc_splitmic
) {
1958 clr_bit(keyix
+32, sc
->sc_keymap
);
1960 clr_bit(keyix
+32+64, sc
->sc_keymap
);
/*
 * Set a TKIP key into the hardware. This handles the
 * potential distribution of key state to multiple key
 * cache slots for TKIP.
 *
 * Key layout per net80211: bytes 0..15 cipher key, 16..23 TX MIC,
 * 24..31 RX MIC.
 *
 * NOTE(review): extraction dropped the group-key branch condition,
 * trailing call arguments, returns and closing braces; the surviving
 * token stream is preserved verbatim.
 */
arn_keyset_tkip(struct arn_softc *sc, const struct ieee80211_key *k,
    struct ath9k_keyval *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
	uint8_t *key_rxmic = NULL;
	uint8_t *key_txmic = NULL;
	uint8_t *key = (uint8_t *)&(k->wk_key[0]);
	struct ath_hal *ah = sc->sc_ah;

	key_txmic = key + 16;
	key_rxmic = key + 24;

		/* Group key installation */
		(void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
		return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,

	if (!sc->sc_splitmic) {
		/*
		 * data key goes at first index,
		 * the hal handles the MIC keys at index+64.
		 */
		(void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
		(void) memcpy(hk->kv_txmic, key_txmic, sizeof (hk->kv_txmic));
		return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,

	/*
	 * TX key goes at first index, RX key at +32.
	 * The hal handles the MIC keys at index+64.
	 */
	(void) memcpy(hk->kv_mic, key_txmic, sizeof (hk->kv_mic));
	if (!(ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, NULL,
		/* Txmic entry failed. No need to proceed further */
		ARN_DBG((ARN_DBG_KEYCACHE,
		    "%s Setting TX MIC Key Failed\n", __func__));

	(void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));

	/* XXX delete tx key on failure? */
	return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, mac, B_FALSE));
/*
 * net80211 callback: install a key into the hardware key cache.
 * Maps the net80211 cipher to the ath9k HAL cipher, copies key
 * material, and dispatches TKIP-with-hardware-MIC keys to
 * arn_keyset_tkip(); everything else goes straight to the HAL.
 *
 * NOTE(review): extraction dropped the else keyword before the
 * clear-key assignment and the closing braces; the surviving token
 * stream is preserved verbatim.
 */
arn_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
    const uint8_t mac[IEEE80211_ADDR_LEN])
	struct arn_softc *sc = (struct arn_softc *)ic;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	struct ath9k_keyval hk;

	/* net80211 cipher id -> ath9k HAL cipher id */
	static const uint8_t ciphermap[] = {
		ATH9K_CIPHER_WEP,	/* IEEE80211_CIPHER_WEP */
		ATH9K_CIPHER_TKIP,	/* IEEE80211_CIPHER_TKIP */
		ATH9K_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
		ATH9K_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
		ATH9K_CIPHER_CKIP,	/* IEEE80211_CIPHER_CKIP */
		ATH9K_CIPHER_CLR,	/* IEEE80211_CIPHER_NONE */

	bzero(&hk, sizeof (hk));

	/*
	 * Software crypto uses a "clear key" so non-crypto
	 * state kept in the key cache is maintained so that
	 * rx frames have an entry to match.
	 */
	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
		ASSERT(cip->ic_cipher < 6);
		hk.kv_type = ciphermap[cip->ic_cipher];
		hk.kv_len = k->wk_keylen;
		bcopy(k->wk_key, hk.kv_val, k->wk_keylen);
	/* else branch dropped in extraction */
		hk.kv_type = ATH9K_CIPHER_CLR;

	if (hk.kv_type == ATH9K_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
		return (arn_keyset_tkip(sc, k, &hk, mac));
	/* else branch dropped in extraction */
		return (ath9k_hw_set_keycache_entry(sc->sc_ah,
		    k->wk_keyix, &hk, mac, B_FALSE));
2064 * Enable/Disable short slot timing
2067 arn_set_shortslot(ieee80211com_t
*ic
, int onoff
)
2069 struct ath_hal
*ah
= ((struct arn_softc
*)ic
)->sc_ah
;
2072 (void) ath9k_hw_setslottime(ah
, ATH9K_SLOT_TIME_9
);
2074 (void) ath9k_hw_setslottime(ah
, ATH9K_SLOT_TIME_20
);
/*
 * Bring the device up: select/validate the current channel, reset the
 * hardware, start the receive engine, build the interrupt mask from
 * the hardware capabilities, sync the current PHY mode, then enable
 * interrupts.  Caller must hold the softc lock (ARN_LOCK_ASSERT).
 *
 * NOTE(review): extraction dropped lines (error-path returns, else
 * branches, #else/#endif markers, closing braces); the surviving
 * token stream is preserved verbatim.
 */
arn_open(struct arn_softc *sc)
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ieee80211_channel *curchan = ic->ic_curchan;
	struct ath9k_channel *init_channel;
	int error = 0, pos, status;

	ARN_LOCK_ASSERT(sc);

	pos = arn_get_channel(sc, curchan);
		ARN_DBG((ARN_DBG_FATAL, "arn: "
		    "%s: Invalid channel\n", __func__));

	sc->tx_chan_width = ATH9K_HT_MACMODE_20;

	if (sc->sc_curmode == ATH9K_MODE_11A) {
		sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_A;
	/* else branch dropped in extraction */
		sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_G;

	init_channel = &sc->sc_ah->ah_channels[pos];

	/* Reset SERDES registers */
	ath9k_hw_configpcipowersave(sc->sc_ah, 0);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	if (!ath9k_hw_reset(sc->sc_ah, init_channel,
	    sc->tx_chan_width, sc->sc_tx_chainmask,
	    sc->sc_rx_chainmask, sc->sc_ht_extprotspacing,
	    B_FALSE, &status)) {
		ARN_DBG((ARN_DBG_FATAL, "arn: "
		    "%s: unable to reset hardware; hal status %u "
		    "(freq %u flags 0x%x)\n", __func__, status,
		    init_channel->channel, init_channel->channelFlags));

	/*
	 * This is needed only to setup initial state
	 * but it's best done after a reset.
	 */
	arn_update_txpow(sc);

	/*
	 * Setup the hardware after reset:
	 * The receive engine is set going.
	 * Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (arn_startrecv(sc) != 0) {
		ARN_DBG((ARN_DBG_INIT, "arn: "
		    "%s: unable to start recv logic\n", __func__));

	/* Setup our intr mask. */
	sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX |
	    ATH9K_INT_RXEOL | ATH9K_INT_RXORN |
	    ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
#ifdef ARN_ATH9K_HW_CAP_GTT
	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
		sc->sc_imask |= ATH9K_INT_GTT;

	/*
	 * NOTE(review): the guard macro below is ARN_ATH9K_HW_CAP_GTT but
	 * the condition tests ATH9K_HW_CAP_HT — looks like it should be
	 * an HT-specific guard; verify against the original source.
	 */
#ifdef ARN_ATH9K_HW_CAP_GTT
	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
		sc->sc_imask |= ATH9K_INT_CST;

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
#ifdef ARN_ATH9K_INT_MIB
	if (ath9k_hw_phycounters(sc->sc_ah) &&
	    ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
	    (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
		sc->sc_imask |= ATH9K_INT_MIB;

	/*
	 * Some hardware processes the TIM IE and fires an
	 * interrupt when the TIM bit is set. For hardware
	 * that does, if not overridden by configuration,
	 * enable the TIM interrupt when operating as station.
	 */
#ifdef ARN_ATH9K_INT_TIM
	if ((sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
	    (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
	    !sc->sc_config.swBeaconProcess)
		sc->sc_imask |= ATH9K_INT_TIM;

	if (arn_chan2mode(init_channel) != sc->sc_curmode)
		arn_setcurmode(sc, arn_chan2mode(init_channel));
	ARN_DBG((ARN_DBG_INIT, "arn: "
	    "%s: current mode after arn_setcurmode is %d\n",
	    __func__, sc->sc_curmode));

	sc->sc_isrunning = 1;

	/* Disable BMISS interrupt when we're not associated */
	sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
	(void) ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
/*
 * Bring the device down: push net80211 back to INIT, stop the
 * watchdog, mask all interrupts, then (if the hardware is still
 * present) drain tx, stop rx and disable the PHY.  Caller must hold
 * the softc lock (ARN_LOCK_ASSERT).
 *
 * NOTE(review): extraction dropped lines (early return when not
 * running, closing braces); the surviving token stream is preserved
 * verbatim.
 */
arn_close(struct arn_softc *sc)
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ath_hal *ah = sc->sc_ah;

	ARN_LOCK_ASSERT(sc);

	/* already stopped: nothing to do (body dropped in extraction) */
	if (!sc->sc_isrunning)

	/*
	 * Shutdown the hardware and driver
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
	ieee80211_stop_watchdog(ic);

	/*
	 * make sure h/w will not generate any interrupt
	 * before setting the invalid flag.
	 */
	(void) ath9k_hw_set_interrupts(ah, 0);

	if (!(sc->sc_flags & SC_OP_INVALID)) {
		arn_draintxq(sc, 0);
		(void) arn_stoprecv(sc);
		(void) ath9k_hw_phy_disable(ah);
	/* else branch dropped in extraction */
		sc->sc_rxlink = NULL;

	sc->sc_isrunning = 0;
/*
 * MAC callback functions
 */

/*
 * mc_getstat(9E) entry point: report driver/ieee80211 statistics to
 * the MAC layer.  Unhandled WiFi stats fall through to
 * ieee80211_stat().
 *
 * NOTE(review): extraction dropped lines (locking, the switch header,
 * break statements, the IFSPEED multiplier tail, default case); the
 * surviving token stream is preserved verbatim.
 */
arn_m_stat(void *arg, uint_t stat, uint64_t *val)
	struct arn_softc *sc = arg;
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ieee80211_node *in;
	struct ieee80211_rateset *rs;

	case MAC_STAT_IFSPEED:
		/* current tx rate in bits/s (continuation dropped) */
		*val = (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL) / 2 *
	case MAC_STAT_NOXMTBUF:
		*val = sc->sc_stats.ast_tx_nobuf +
		    sc->sc_stats.ast_tx_nobufmgt;
	case MAC_STAT_IERRORS:
		*val = sc->sc_stats.ast_rx_tooshort;
	case MAC_STAT_RBYTES:
		*val = ic->ic_stats.is_rx_bytes;
	case MAC_STAT_IPACKETS:
		*val = ic->ic_stats.is_rx_frags;
	case MAC_STAT_OBYTES:
		*val = ic->ic_stats.is_tx_bytes;
	case MAC_STAT_OPACKETS:
		*val = ic->ic_stats.is_tx_frags;
	case MAC_STAT_OERRORS:
	case WIFI_STAT_TX_FAILED:
		*val = sc->sc_stats.ast_tx_fifoerr +
		    sc->sc_stats.ast_tx_xretries +
		    sc->sc_stats.ast_tx_discard;
	case WIFI_STAT_TX_RETRANS:
		*val = sc->sc_stats.ast_tx_xretries;
	case WIFI_STAT_FCS_ERRORS:
		*val = sc->sc_stats.ast_rx_crcerr;
	case WIFI_STAT_WEP_ERRORS:
		*val = sc->sc_stats.ast_rx_badcrypt;
	case WIFI_STAT_TX_FRAGS:
	case WIFI_STAT_MCAST_TX:
	case WIFI_STAT_RTS_SUCCESS:
	case WIFI_STAT_RTS_FAILURE:
	case WIFI_STAT_ACK_FAILURE:
	case WIFI_STAT_RX_FRAGS:
	case WIFI_STAT_MCAST_RX:
	case WIFI_STAT_RX_DUPS:
		/* delegate remaining WiFi stats to net80211 */
		return (ieee80211_stat(ic, stat, val));
/*
 * mc_start(9E) entry point: bring the interface up via arn_open() and
 * clear the invalid flag on success.
 *
 * NOTE(review): extraction dropped lines (locking, arn_close() call,
 * error-path return); the surviving token stream is preserved
 * verbatim.
 */
arn_m_start(void *arg)
	struct arn_softc *sc = arg;

	/*
	 * Stop anything previously setup. This is safe
	 * whether this is the first time through or not.
	 */
	if ((err = arn_open(sc)) != 0) {

	/* H/W is ready now */
	sc->sc_flags &= ~SC_OP_INVALID;
/*
 * mc_stop(9E) entry point: shut the interface down, disable the HAL
 * and put the hardware into PCI power save; mark the softc invalid so
 * no further hardware access is attempted.
 *
 * NOTE(review): extraction dropped lines (locking, arn_close() call);
 * the surviving token stream is preserved verbatim.
 */
arn_m_stop(void *arg)
	struct arn_softc *sc = arg;

	/* disable HAL and put h/w to sleep */
	(void) ath9k_hw_disable(sc->sc_ah);

	ath9k_hw_configpcipowersave(sc->sc_ah, 1);

	/* XXX: hardware will not be ready in suspend state */
	sc->sc_flags |= SC_OP_INVALID;
2358 arn_m_promisc(void *arg
, boolean_t on
)
2360 struct arn_softc
*sc
= arg
;
2361 struct ath_hal
*ah
= sc
->sc_ah
;
2366 rfilt
= ath9k_hw_getrxfilter(ah
);
2368 rfilt
|= ATH9K_RX_FILTER_PROM
;
2370 rfilt
&= ~ATH9K_RX_FILTER_PROM
;
2371 sc
->sc_promisc
= on
;
2372 ath9k_hw_setrxfilter(ah
, rfilt
);
2380 arn_m_multicst(void *arg
, boolean_t add
, const uint8_t *mca
)
2382 struct arn_softc
*sc
= arg
;
2383 struct ath_hal
*ah
= sc
->sc_ah
;
2384 uint32_t val
, index
, bit
;
2386 uint32_t *mfilt
= sc
->sc_mcast_hash
;
2390 /* calculate XOR of eight 6bit values */
2391 val
= ARN_LE_READ_32(mca
+ 0);
2392 pos
= (val
>> 18) ^ (val
>> 12) ^ (val
>> 6) ^ val
;
2393 val
= ARN_LE_READ_32(mca
+ 3);
2394 pos
^= (val
>> 18) ^ (val
>> 12) ^ (val
>> 6) ^ val
;
2397 bit
= 1 << (pos
% 32);
2399 if (add
) { /* enable multicast */
2400 sc
->sc_mcast_refs
[pos
]++;
2401 mfilt
[index
] |= bit
;
2402 } else { /* disable multicast */
2403 if (--sc
->sc_mcast_refs
[pos
] == 0)
2404 mfilt
[index
] &= ~bit
;
2406 ath9k_hw_setmcastfilter(ah
, mfilt
[0], mfilt
[1]);
2413 arn_m_unicst(void *arg
, const uint8_t *macaddr
)
2415 struct arn_softc
*sc
= arg
;
2416 struct ath_hal
*ah
= sc
->sc_ah
;
2417 ieee80211com_t
*ic
= (ieee80211com_t
*)sc
;
2419 ARN_DBG((ARN_DBG_XMIT
, "ath: ath_gld_saddr(): "
2420 "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
2421 macaddr
[0], macaddr
[1], macaddr
[2],
2422 macaddr
[3], macaddr
[4], macaddr
[5]));
2425 IEEE80211_ADDR_COPY(sc
->sc_isc
.ic_macaddr
, macaddr
);
2426 (void) ath9k_hw_setmac(ah
, sc
->sc_isc
.ic_macaddr
);
2427 (void) arn_reset(ic
);
/*
 * mc_tx(9E) entry point: transmit a chain of mblks.  Frames are
 * dropped (and counted) unless net80211 is in the RUN state; on
 * ENOMEM the remaining chain is kept for retransmission when
 * resources free up.
 *
 * NOTE(review): extraction dropped lines (mblk chain unlinking via
 * b_next, the ENOMEM requeue body, freemsg on drop, the return); the
 * surviving token stream is preserved verbatim.
 */
arn_m_tx(void *arg, mblk_t *mp)
	struct arn_softc *sc = arg;
	ieee80211com_t *ic = (ieee80211com_t *)sc;

	/*
	 * No data frames go out unless we're associated; this
	 * should not happen as the 802.11 layer does not enable
	 * the xmit queue until we enter the RUN state.
	 */
	if (ic->ic_state != IEEE80211_S_RUN) {
		ARN_DBG((ARN_DBG_XMIT, "arn: arn_m_tx(): "
		    "discard, state %u\n", ic->ic_state));
		sc->sc_stats.ast_tx_discard++;

	while (mp != NULL) {
		error = arn_tx(ic, mp, IEEE80211_FC0_TYPE_DATA);
		/* out of tx buffers: stop and requeue (body dropped) */
		if (error == ENOMEM) {
/*
 * mc_ioctl(9E) entry point: delegate to ieee80211_ioctl(); if that
 * reports ENETRESET and the hardware is usable, restart the interface
 * and kick off a new scan.
 *
 * NOTE(review): extraction dropped lines (err declaration, locking,
 * closing braces); the surviving token stream is preserved verbatim.
 */
arn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
	struct arn_softc *sc = arg;

	err = ieee80211_ioctl(&sc->sc_isc, wq, mp);

	if (err == ENETRESET) {
		if (!(sc->sc_flags & SC_OP_INVALID)) {
			(void) arn_m_start(sc);
			(void) ieee80211_new_state(&sc->sc_isc,
			    IEEE80211_S_SCAN, -1);
/*
 * mc_setprop(9E) entry point: delegate to ieee80211_setprop(); on
 * ENETRESET restart the interface and rescan, mirroring arn_m_ioctl().
 *
 * NOTE(review): extraction dropped lines (err declaration, locking,
 * the final return, closing braces); the surviving token stream is
 * preserved verbatim.
 */
arn_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
    uint_t wldp_length, const void *wldp_buf)
	struct arn_softc *sc = arg;

	err = ieee80211_setprop(&sc->sc_isc, pr_name, wldp_pr_num,
	    wldp_length, wldp_buf);

	if (err == ENETRESET) {
		if (!(sc->sc_flags & SC_OP_INVALID)) {
			(void) arn_m_start(sc);
			(void) ieee80211_new_state(&sc->sc_isc,
			    IEEE80211_S_SCAN, -1);
2525 arn_m_getprop(void *arg
, const char *pr_name
, mac_prop_id_t wldp_pr_num
,
2526 uint_t wldp_length
, void *wldp_buf
)
2528 struct arn_softc
*sc
= arg
;
2531 err
= ieee80211_getprop(&sc
->sc_isc
, pr_name
, wldp_pr_num
,
2532 wldp_length
, wldp_buf
);
2538 arn_m_propinfo(void *arg
, const char *pr_name
, mac_prop_id_t wldp_pr_num
,
2539 mac_prop_info_handle_t prh
)
2541 struct arn_softc
*sc
= arg
;
2543 ieee80211_propinfo(&sc
->sc_isc
, pr_name
, wldp_pr_num
, prh
);
/* return bus cachesize in 4B word units */

/*
 * Read the PCI cache line size (units of 4-byte words) and, if the
 * firmware left it unset, force a sane default; the byte value is
 * cached in sc->sc_cachelsz for rx DMA buffer alignment.
 *
 * NOTE(review): extraction dropped lines (csz declaration, the
 * zero-check condition, the put8 value argument); the surviving token
 * stream is preserved verbatim.
 */
arn_pci_config_cachesize(struct arn_softc *sc)
	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	csz = pci_config_get8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ);
	/*
	 * We must have this setup properly for rx buffer
	 * DMA to work so force a reasonable value here if it
	 * comes up zero.
	 */
		csz = ATH_DEF_CACHE_BYTES / sizeof (uint32_t);
		pci_config_put8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ,
	/* remember the cache line size in bytes (words << 2) */
	sc->sc_cachelsz = csz << 2;
2571 arn_pci_setup(struct arn_softc
*sc
)
2576 * Enable memory mapping and bus mastering
2579 command
= pci_config_get16(sc
->sc_cfg_handle
, PCI_CONF_COMM
);
2580 command
|= PCI_COMM_MAE
| PCI_COMM_ME
;
2581 pci_config_put16(sc
->sc_cfg_handle
, PCI_CONF_COMM
, command
);
2582 command
= pci_config_get16(sc
->sc_cfg_handle
, PCI_CONF_COMM
);
2583 if ((command
& PCI_COMM_MAE
) == 0) {
2584 arn_problem("arn: arn_pci_setup(): "
2585 "failed to enable memory mapping\n");
2588 if ((command
& PCI_COMM_ME
) == 0) {
2589 arn_problem("arn: arn_pci_setup(): "
2590 "failed to enable bus mastering\n");
2593 ARN_DBG((ARN_DBG_INIT
, "arn: arn_pci_setup(): "
2594 "set command reg to 0x%x \n", command
));
2600 arn_get_hw_encap(struct arn_softc
*sc
)
2605 ic
= (ieee80211com_t
*)sc
;
2608 if (ath9k_hw_getcapability(ah
, ATH9K_CAP_CIPHER
,
2609 ATH9K_CIPHER_AES_CCM
, NULL
))
2610 ic
->ic_caps
|= IEEE80211_C_AES_CCM
;
2611 if (ath9k_hw_getcapability(ah
, ATH9K_CAP_CIPHER
,
2612 ATH9K_CIPHER_AES_OCB
, NULL
))
2613 ic
->ic_caps
|= IEEE80211_C_AES
;
2614 if (ath9k_hw_getcapability(ah
, ATH9K_CAP_CIPHER
,
2615 ATH9K_CIPHER_TKIP
, NULL
))
2616 ic
->ic_caps
|= IEEE80211_C_TKIP
;
2617 if (ath9k_hw_getcapability(ah
, ATH9K_CAP_CIPHER
,
2618 ATH9K_CIPHER_WEP
, NULL
))
2619 ic
->ic_caps
|= IEEE80211_C_WEP
;
2620 if (ath9k_hw_getcapability(ah
, ATH9K_CAP_CIPHER
,
2621 ATH9K_CIPHER_MIC
, NULL
))
2622 ic
->ic_caps
|= IEEE80211_C_TKIPMIC
;
/*
 * Populate the driver's HT (802.11n) capability block: advertised
 * HT caps, A-MPDU parameters, and the supported RX MCS bitmap derived
 * from the configured chainmasks (power-of-two mask => single stream).
 *
 * NOTE(review): extraction dropped lines (tx/rx_streams declarations,
 * closing braces, #undef lines); the surviving token stream is
 * preserved verbatim.
 */
arn_setup_ht_cap(struct arn_softc *sc)
#define	ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3	/* 2 ^ 16 */
#define	ATH9K_HT_CAP_MPDUDENSITY_8 0x6		/* 8 usec */

	/* LINTED E_FUNC_SET_NOT_USED */
	arn_ht_conf *ht_info = &sc->sc_ht_conf;

	ht_info->ht_supported = B_TRUE;

	/* Todo: IEEE80211_HTCAP_SMPS */
	ht_info->cap = IEEE80211_HTCAP_CHWIDTH40 |
	    IEEE80211_HTCAP_SHORTGI40 |
	    IEEE80211_HTCAP_DSSSCCK40;

	ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536;
	ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8;

	/* set up supported mcs set */
	(void) memset(&ht_info->rx_mcs_mask, 0, sizeof (ht_info->rx_mcs_mask));
	/* ISP2: a power-of-two chainmask means only one chain is enabled */
	tx_streams = ISP2(sc->sc_ah->ah_caps.tx_chainmask) ? 1 : 2;
	rx_streams = ISP2(sc->sc_ah->ah_caps.rx_chainmask) ? 1 : 2;

	/* MCS 0-7 always; MCS 8-15 only with two rx streams */
	ht_info->rx_mcs_mask[0] = 0xff;
	if (rx_streams >= 2)
		ht_info->rx_mcs_mask[1] = 0xff;
/* xxx should be used for ht rate set negotiating ? */

/*
 * Rebuild the global ieee80211_rateset_11n table from this station's
 * supported RX MCS bitmap (sc_ht_conf.rx_mcs_mask): every set bit
 * contributes one MCS index, clamped to IEEE80211_HTRATE_MAXSIZE.
 *
 * NOTE(review): extraction dropped lines (i/j declarations, the rate
 * value assigned per index, mcs_count increment, closing braces); the
 * surviving token stream is preserved verbatim.
 */
arn_overwrite_11n_rateset(struct arn_softc *sc)
	uint8_t *ht_rs = sc->sc_ht_conf.rx_mcs_mask;
	int mcs_idx, mcs_count = 0;

	(void) memset(&ieee80211_rateset_11n, 0,
	    sizeof (ieee80211_rateset_11n));
	/* walk all 80 bits of the MCS mask */
	for (i = 0; i < 10; i++) {
		for (j = 0; j < 8; j++) {
			if (ht_rs[i] & (1 << j)) {
				mcs_idx = i * 8 + j;
				/* clamp to the net80211 table size */
				if (mcs_idx >= IEEE80211_HTRATE_MAXSIZE) {
				ieee80211_rateset_11n.rs_rates[mcs_idx] =

	ieee80211_rateset_11n.rs_nrates = (uint8_t)mcs_count;

	ARN_DBG((ARN_DBG_RATE, "arn_overwrite_11n_rateset(): "
	    "MCS rate set supported by this station is as follows:\n"));

	for (i = 0; i < ieee80211_rateset_11n.rs_nrates; i++) {
		ARN_DBG((ARN_DBG_RATE, "MCS rate %d is %d\n",
		    i, ieee80211_rateset_11n.rs_rates[i]));
/*
 * Update WME parameters for a transmit queue.
 *
 * Translates the net80211 per-AC WME channel parameters (AIFS,
 * CWmin/CWmax exponents, TXOP limit) into ath9k HAL tx queue
 * properties and pushes them to the hardware, then resets the queue
 * so the new parameters take effect.
 *
 * NOTE(review): extraction dropped lines (the ARN_DBG argument list,
 * the error return, closing braces); the surviving token stream is
 * preserved verbatim.
 */
arn_tx_queue_update(struct arn_softc *sc, int ac)
#define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
#define	ATH_TXOP_TO_US(v)		(v<<5)
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ath_txq *txq;
	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
	struct ath_hal *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;

	txq = &sc->sc_txq[arn_get_hal_qnum(ac, sc)];
	(void) ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi);

	/*
	 * TXQ_FLAG_TXOKINT_ENABLE = 0x0001
	 * TXQ_FLAG_TXERRINT_ENABLE = 0x0001
	 * TXQ_FLAG_TXDESCINT_ENABLE = 0x0002
	 * TXQ_FLAG_TXEOLINT_ENABLE = 0x0004
	 * TXQ_FLAG_TXURNINT_ENABLE = 0x0008
	 * TXQ_FLAG_BACKOFF_DISABLE = 0x0010
	 * TXQ_FLAG_COMPRESSION_ENABLE = 0x0020
	 * TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE = 0x0040
	 * TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE = 0x0080
	 */

	/* xxx should update these flags here? */
	qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
	    TXQ_FLAG_TXERRINT_ENABLE |
	    TXQ_FLAG_TXDESCINT_ENABLE |
	    TXQ_FLAG_TXURNINT_ENABLE;

	qi.tqi_aifs = wmep->wmep_aifsn;
	qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
	qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
	qi.tqi_readyTime = 0;
	qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);

	ARN_DBG((ARN_DBG_INIT,

	if (!ath9k_hw_set_txq_props(ah, txq->axq_qnum, &qi)) {
		arn_problem("unable to update hardware queue "
		    "parameters for %s traffic!\n",
		    ieee80211_wme_acnames[ac]);
	/* else: apply the new parameters (dropped lines) */
		(void) ath9k_hw_resettxqueue(ah, txq->axq_qnum);

#undef ATH_TXOP_TO_US
#undef ATH_EXPONENT_TO_VALUE
2768 /* Update WME parameters */
2770 arn_wme_update(ieee80211com_t
*ic
)
2772 struct arn_softc
*sc
= (struct arn_softc
*)ic
;
2775 return (!arn_tx_queue_update(sc
, WME_AC_BE
) ||
2776 !arn_tx_queue_update(sc
, WME_AC_BK
) ||
2777 !arn_tx_queue_update(sc
, WME_AC_VI
) ||
2778 !arn_tx_queue_update(sc
, WME_AC_VO
) ? EIO
: 0);
/*
 * Update tx/rx chainmask. For legacy association,
 * hard code chainmask to 1x1, for 11n association, use
 * the chainmask configuration.
 *
 * NOTE(review): extraction dropped the if/else lines selecting
 * between the HT and legacy branches; the surviving token stream is
 * preserved verbatim.
 */
arn_update_chainmask(struct arn_softc *sc)
	boolean_t is_ht = B_FALSE;
	sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;

	is_ht = sc->sc_ht_conf.ht_supported;
	/* HT: use the full configured chainmasks (condition dropped) */
		sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
		sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
	/* legacy: force 1x1 (condition dropped) */
		sc->sc_tx_chainmask = 1;
		sc->sc_rx_chainmask = 1;

	ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
	    "tx_chainmask = %d, rx_chainmask = %d\n",
	    sc->sc_tx_chainmask, sc->sc_rx_chainmask));
2807 arn_resume(dev_info_t
*devinfo
)
2809 struct arn_softc
*sc
;
2810 int ret
= DDI_SUCCESS
;
2812 sc
= ddi_get_soft_state(arn_soft_state_p
, ddi_get_instance(devinfo
));
2814 ARN_DBG((ARN_DBG_INIT
, "ath: ath_resume(): "
2815 "failed to get soft state\n"));
2816 return (DDI_FAILURE
);
2821 * Set up config space command register(s). Refuse
2822 * to resume on failure.
2824 if (arn_pci_setup(sc
) != 0) {
2825 ARN_DBG((ARN_DBG_INIT
, "ath: ath_resume(): "
2826 "ath_pci_setup() failed\n"));
2828 return (DDI_FAILURE
);
2831 if (!(sc
->sc_flags
& SC_OP_INVALID
))
2839 arn_attach(dev_info_t
*devinfo
, ddi_attach_cmd_t cmd
)
2841 struct arn_softc
*sc
;
2852 wifi_data_t wd
= { 0 };
2853 mac_register_t
*macp
;
2859 return (arn_resume(devinfo
));
2861 return (DDI_FAILURE
);
2864 instance
= ddi_get_instance(devinfo
);
2865 if (ddi_soft_state_zalloc(arn_soft_state_p
, instance
) != DDI_SUCCESS
) {
2866 ARN_DBG((ARN_DBG_ATTACH
, "arn: "
2867 "%s: Unable to alloc softstate\n", __func__
));
2868 return (DDI_FAILURE
);
2871 sc
= ddi_get_soft_state(arn_soft_state_p
, ddi_get_instance(devinfo
));
2872 ic
= (ieee80211com_t
*)sc
;
2873 sc
->sc_dev
= devinfo
;
2875 mutex_init(&sc
->sc_genlock
, NULL
, MUTEX_DRIVER
, NULL
);
2876 mutex_init(&sc
->sc_serial_rw
, NULL
, MUTEX_DRIVER
, NULL
);
2877 mutex_init(&sc
->sc_txbuflock
, NULL
, MUTEX_DRIVER
, NULL
);
2878 mutex_init(&sc
->sc_rxbuflock
, NULL
, MUTEX_DRIVER
, NULL
);
2879 mutex_init(&sc
->sc_resched_lock
, NULL
, MUTEX_DRIVER
, NULL
);
2881 mutex_init(&sc
->sc_bcbuflock
, NULL
, MUTEX_DRIVER
, NULL
);
2884 sc
->sc_flags
|= SC_OP_INVALID
;
2886 err
= pci_config_setup(devinfo
, &sc
->sc_cfg_handle
);
2887 if (err
!= DDI_SUCCESS
) {
2888 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
2889 "pci_config_setup() failed"));
2893 if (arn_pci_setup(sc
) != 0)
2896 /* Cache line size set up */
2897 arn_pci_config_cachesize(sc
);
2899 vendor_id
= pci_config_get16(sc
->sc_cfg_handle
, PCI_CONF_VENID
);
2900 device_id
= pci_config_get16(sc
->sc_cfg_handle
, PCI_CONF_DEVID
);
2901 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): vendor 0x%x, "
2902 "device id 0x%x, cache size %d\n",
2903 vendor_id
, device_id
,
2904 pci_config_get8(sc
->sc_cfg_handle
, PCI_CONF_CACHE_LINESZ
)));
2906 pci_config_put8(sc
->sc_cfg_handle
, PCI_CONF_LATENCY_TIMER
, 0xa8);
2907 val
= pci_config_get32(sc
->sc_cfg_handle
, 0x40);
2908 if ((val
& 0x0000ff00) != 0)
2909 pci_config_put32(sc
->sc_cfg_handle
, 0x40, val
& 0xffff00ff);
2911 err
= ddi_regs_map_setup(devinfo
, 1,
2912 &sc
->mem
, 0, 0, &arn_reg_accattr
, &sc
->sc_io_handle
);
2913 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
2914 "regs map1 = %x err=%d\n", sc
->mem
, err
));
2915 if (err
!= DDI_SUCCESS
) {
2916 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
2917 "ddi_regs_map_setup() failed"));
2921 ah
= ath9k_hw_attach(device_id
, sc
, sc
->mem
, &status
);
2923 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
2924 "unable to attach hw: H/W status %u\n",
2930 ath9k_hw_getmac(ah
, ic
->ic_macaddr
);
2932 /* Get the hardware key cache size. */
2933 sc
->sc_keymax
= ah
->ah_caps
.keycache_size
;
2934 if (sc
->sc_keymax
> ATH_KEYMAX
) {
2935 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
2936 "Warning, using only %u entries in %u key cache\n",
2937 ATH_KEYMAX
, sc
->sc_keymax
));
2938 sc
->sc_keymax
= ATH_KEYMAX
;
2942 * Reset the key cache since some parts do not
2943 * reset the contents on initial power up.
2945 for (i
= 0; i
< sc
->sc_keymax
; i
++)
2946 (void) ath9k_hw_keyreset(ah
, (uint16_t)i
);
2948 * Mark key cache slots associated with global keys
2949 * as in use. If we knew TKIP was not to be used we
2950 * could leave the +32, +64, and +32+64 slots free.
2951 * XXX only for splitmic.
2953 for (i
= 0; i
< IEEE80211_WEP_NKID
; i
++) {
2954 set_bit(i
, sc
->sc_keymap
);
2955 set_bit(i
+ 32, sc
->sc_keymap
);
2956 set_bit(i
+ 64, sc
->sc_keymap
);
2957 set_bit(i
+ 32 + 64, sc
->sc_keymap
);
2960 /* Collect the channel list using the default country code */
2961 err
= arn_setup_channels(sc
);
2962 if (err
== EINVAL
) {
2963 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
2964 "ERR:arn_setup_channels\n"));
2968 /* default to STA mode */
2969 sc
->sc_ah
->ah_opmode
= ATH9K_M_STA
;
2971 /* Setup rate tables */
2972 arn_rate_attach(sc
);
2973 arn_setup_rates(sc
, IEEE80211_MODE_11A
);
2974 arn_setup_rates(sc
, IEEE80211_MODE_11B
);
2975 arn_setup_rates(sc
, IEEE80211_MODE_11G
);
2977 /* Setup current mode here */
2978 arn_setcurmode(sc
, ATH9K_MODE_11G
);
2980 /* 802.11g features */
2982 ic
->ic_caps
|= IEEE80211_C_SHPREAMBLE
|
2983 IEEE80211_C_SHSLOT
; /* short slot time */
2985 /* Temp workaround */
2987 sc
->sc_config
.ath_aggr_prot
= 0;
2989 /* Setup tx/rx descriptors */
2990 err
= arn_desc_alloc(devinfo
, sc
);
2991 if (err
!= DDI_SUCCESS
) {
2992 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
2993 "failed to allocate descriptors: %d\n", err
));
2997 if ((sc
->sc_tq
= ddi_taskq_create(devinfo
, "ath_taskq", 1,
2998 TASKQ_DEFAULTPRI
, 0)) == NULL
) {
2999 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
3000 "ERR:ddi_taskq_create\n"));
3005 * Allocate hardware transmit queues: one queue for
3006 * beacon frames and one data queue for each QoS
3007 * priority. Note that the hal handles reseting
3008 * these queues at the needed time.
3011 sc
->sc_beaconq
= arn_beaconq_setup(ah
);
3012 if (sc
->sc_beaconq
== (-1)) {
3013 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
3014 "unable to setup a beacon xmit queue\n"));
3019 sc
->sc_cabq
= arn_txq_setup(sc
, ATH9K_TX_QUEUE_CAB
, 0);
3020 if (sc
->sc_cabq
== NULL
) {
3021 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
3022 "unable to setup CAB xmit queue\n"));
3026 sc
->sc_config
.cabqReadytime
= ATH_CABQ_READY_TIME
;
3027 ath_cabq_update(sc
);
3030 for (i
= 0; i
< ARRAY_SIZE(sc
->sc_haltype2q
); i
++)
3031 sc
->sc_haltype2q
[i
] = -1;
3033 /* Setup data queues */
3034 /* NB: ensure BK queue is the lowest priority h/w queue */
3035 if (!arn_tx_setup(sc
, ATH9K_WME_AC_BK
)) {
3036 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
3037 "unable to setup xmit queue for BK traffic\n"));
3040 if (!arn_tx_setup(sc
, ATH9K_WME_AC_BE
)) {
3041 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
3042 "unable to setup xmit queue for BE traffic\n"));
3045 if (!arn_tx_setup(sc
, ATH9K_WME_AC_VI
)) {
3046 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
3047 "unable to setup xmit queue for VI traffic\n"));
3050 if (!arn_tx_setup(sc
, ATH9K_WME_AC_VO
)) {
3051 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
3052 "unable to setup xmit queue for VO traffic\n"));
3057 * Initializes the noise floor to a reasonable default value.
3058 * Later on this will be updated during ANI processing.
3061 sc
->sc_ani
.sc_noise_floor
= ATH_DEFAULT_NOISE_FLOOR
;
3064 if (ath9k_hw_getcapability(ah
, ATH9K_CAP_CIPHER
,
3065 ATH9K_CIPHER_TKIP
, NULL
)) {
3067 * Whether we should enable h/w TKIP MIC.
3068 * XXX: if we don't support WME TKIP MIC, then we wouldn't
3069 * report WMM capable, so it's always safe to turn on
3070 * TKIP MIC in this case.
3072 (void) ath9k_hw_setcapability(sc
->sc_ah
, ATH9K_CAP_TKIP_MIC
,
3076 /* Get cipher releated capability information */
3077 arn_get_hw_encap(sc
);
3080 * Check whether the separate key cache entries
3081 * are required to handle both tx+rx MIC keys.
3082 * With split mic keys the number of stations is limited
3083 * to 27 otherwise 59.
3085 if (ath9k_hw_getcapability(ah
, ATH9K_CAP_CIPHER
,
3086 ATH9K_CIPHER_TKIP
, NULL
) &&
3087 ath9k_hw_getcapability(ah
, ATH9K_CAP_CIPHER
,
3088 ATH9K_CIPHER_MIC
, NULL
) &&
3089 ath9k_hw_getcapability(ah
, ATH9K_CAP_TKIP_SPLIT
,
3091 sc
->sc_splitmic
= 1;
3093 /* turn on mcast key search if possible */
3094 if (!ath9k_hw_getcapability(ah
, ATH9K_CAP_MCAST_KEYSRCH
, 0, NULL
))
3095 (void) ath9k_hw_setcapability(ah
, ATH9K_CAP_MCAST_KEYSRCH
, 1,
3098 sc
->sc_config
.txpowlimit
= ATH_TXPOWER_MAX
;
3099 sc
->sc_config
.txpowlimit_override
= 0;
3101 /* 11n Capabilities */
3102 if (ah
->ah_caps
.hw_caps
& ATH9K_HW_CAP_HT
) {
3103 sc
->sc_flags
|= SC_OP_TXAGGR
;
3104 sc
->sc_flags
|= SC_OP_RXAGGR
;
3105 arn_setup_ht_cap(sc
);
3106 arn_overwrite_11n_rateset(sc
);
3109 sc
->sc_tx_chainmask
= 1;
3110 sc
->sc_rx_chainmask
= 1;
3111 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
3112 "tx_chainmask = %d, rx_chainmask = %d\n",
3113 sc
->sc_tx_chainmask
, sc
->sc_rx_chainmask
));
3115 /* arn_update_chainmask(sc); */
3117 (void) ath9k_hw_setcapability(ah
, ATH9K_CAP_DIVERSITY
, 1, B_TRUE
, NULL
);
3118 sc
->sc_defant
= ath9k_hw_getdefantenna(ah
);
3120 ath9k_hw_getmac(ah
, sc
->sc_myaddr
);
3121 if (ah
->ah_caps
.hw_caps
& ATH9K_HW_CAP_BSSIDMASK
) {
3122 ath9k_hw_getbssidmask(ah
, sc
->sc_bssidmask
);
3123 ATH_SET_VAP_BSSID_MASK(sc
->sc_bssidmask
);
3124 (void) ath9k_hw_setbssidmask(ah
, sc
->sc_bssidmask
);
3127 /* set default value to short slot time */
3128 sc
->sc_slottime
= ATH9K_SLOT_TIME_9
;
3129 (void) ath9k_hw_setslottime(ah
, ATH9K_SLOT_TIME_9
);
3131 /* initialize beacon slots */
3132 for (i
= 0; i
< ARRAY_SIZE(sc
->sc_bslot
); i
++)
3133 sc
->sc_bslot
[i
] = ATH_IF_ID_ANY
;
3135 /* Save MISC configurations */
3136 sc
->sc_config
.swBeaconProcess
= 1;
3138 /* Support QoS/WME */
3139 ic
->ic_caps
|= IEEE80211_C_WME
;
3140 ic
->ic_wme
.wme_update
= arn_wme_update
;
3142 /* Support 802.11n/HT */
3143 if (sc
->sc_ht_conf
.ht_supported
) {
3145 IEEE80211_HTCAP_CHWIDTH40
|
3146 IEEE80211_HTCAP_SHORTGI40
|
3147 IEEE80211_HTCAP_DSSSCCK40
|
3148 IEEE80211_HTCAP_MAXAMSDU_7935
|
3150 IEEE80211_HTC_AMSDU
|
3151 IEEE80211_HTCAP_RXSTBC_2STREAM
;
3153 #ifdef ARN_TX_AGGREGATION
3154 ic
->ic_htcaps
|= IEEE80211_HTC_AMPDU
;
3158 /* Header padding requested by driver */
3159 ic
->ic_flags
|= IEEE80211_F_DATAPAD
;
3160 /* Support WPA/WPA2 */
3161 ic
->ic_caps
|= IEEE80211_C_WPA
;
3163 ic
->ic_caps
|= IEEE80211_C_TXFRAG
; /* handle tx frags */
3164 ic
->ic_caps
|= IEEE80211_C_BGSCAN
; /* capable of bg scanning */
3166 ic
->ic_phytype
= IEEE80211_T_HT
;
3167 ic
->ic_opmode
= IEEE80211_M_STA
;
3168 ic
->ic_state
= IEEE80211_S_INIT
;
3169 ic
->ic_maxrssi
= ARN_MAX_RSSI
;
3170 ic
->ic_set_shortslot
= arn_set_shortslot
;
3171 ic
->ic_xmit
= arn_tx
;
3172 ieee80211_attach(ic
);
3174 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
3175 "ic->ic_curchan->ich_freq: %d\n", ic
->ic_curchan
->ich_freq
));
3177 /* different instance has different WPA door */
3178 (void) snprintf(ic
->ic_wpadoor
, MAX_IEEE80211STR
, "%s_%s%d", WPA_DOOR
,
3179 ddi_driver_name(devinfo
),
3180 ddi_get_instance(devinfo
));
3182 if (sc
->sc_ht_conf
.ht_supported
) {
3183 sc
->sc_recv_action
= ic
->ic_recv_action
;
3184 ic
->ic_recv_action
= arn_ampdu_recv_action
;
3185 // sc->sc_send_action = ic->ic_send_action;
3186 // ic->ic_send_action = arn_ampdu_send_action;
3188 ic
->ic_ampdu_rxmax
= sc
->sc_ht_conf
.ampdu_factor
;
3189 ic
->ic_ampdu_density
= sc
->sc_ht_conf
.ampdu_density
;
3190 ic
->ic_ampdu_limit
= ic
->ic_ampdu_rxmax
;
3193 /* Override 80211 default routines */
3194 sc
->sc_newstate
= ic
->ic_newstate
;
3195 ic
->ic_newstate
= arn_newstate
;
3197 sc
->sc_recv_mgmt
= ic
->ic_recv_mgmt
;
3198 ic
->ic_recv_mgmt
= arn_recv_mgmt
;
3200 ic
->ic_watchdog
= arn_watchdog
;
3201 ic
->ic_node_alloc
= arn_node_alloc
;
3202 ic
->ic_node_free
= arn_node_free
;
3203 ic
->ic_crypto
.cs_key_alloc
= arn_key_alloc
;
3204 ic
->ic_crypto
.cs_key_delete
= arn_key_delete
;
3205 ic
->ic_crypto
.cs_key_set
= arn_key_set
;
3207 ieee80211_media_init(ic
);
3210 * initialize default tx key
3212 ic
->ic_def_txkey
= 0;
3215 (void) ath9k_hw_set_interrupts(sc
->sc_ah
, 0);
3216 err
= ddi_add_softintr(devinfo
, DDI_SOFTINT_LOW
,
3217 &sc
->sc_softint_id
, NULL
, 0, arn_softint_handler
, (caddr_t
)sc
);
3218 if (err
!= DDI_SUCCESS
) {
3219 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
3220 "ddi_add_softintr() failed....\n"));
3224 if (ddi_get_iblock_cookie(devinfo
, 0, &sc
->sc_iblock
)
3226 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
3227 "Can not get iblock cookie for INT\n"));
3231 if (ddi_add_intr(devinfo
, 0, NULL
, NULL
, arn_isr
,
3232 (caddr_t
)sc
) != DDI_SUCCESS
) {
3233 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
3234 "Can not set intr for ARN driver\n"));
3239 * Provide initial settings for the WiFi plugin; whenever this
3240 * information changes, we need to call mac_plugindata_update()
3242 wd
.wd_opmode
= ic
->ic_opmode
;
3243 wd
.wd_secalloc
= WIFI_SEC_NONE
;
3244 IEEE80211_ADDR_COPY(wd
.wd_bssid
, ic
->ic_bss
->in_bssid
);
3246 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
3247 "IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid)"
3248 "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
3249 wd
.wd_bssid
[0], wd
.wd_bssid
[1], wd
.wd_bssid
[2],
3250 wd
.wd_bssid
[3], wd
.wd_bssid
[4], wd
.wd_bssid
[5]));
3252 if ((macp
= mac_alloc(MAC_VERSION
)) == NULL
) {
3253 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
3254 "MAC version mismatch\n"));
3258 macp
->m_type_ident
= MAC_PLUGIN_IDENT_WIFI
;
3259 macp
->m_driver
= sc
;
3260 macp
->m_dip
= devinfo
;
3261 macp
->m_src_addr
= ic
->ic_macaddr
;
3262 macp
->m_callbacks
= &arn_m_callbacks
;
3263 macp
->m_min_sdu
= 0;
3264 macp
->m_max_sdu
= IEEE80211_MTU
;
3265 macp
->m_pdata
= &wd
;
3266 macp
->m_pdata_size
= sizeof (wd
);
3268 err
= mac_register(macp
, &ic
->ic_mach
);
3271 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
3272 "mac_register err %x\n", err
));
3276 /* Create minor node of type DDI_NT_NET_WIFI */
3277 (void) snprintf(strbuf
, sizeof (strbuf
), "%s%d",
3278 ARN_NODENAME
, instance
);
3279 err
= ddi_create_minor_node(devinfo
, strbuf
, S_IFCHR
,
3280 instance
+ 1, DDI_NT_NET_WIFI
, 0);
3281 if (err
!= DDI_SUCCESS
)
3282 ARN_DBG((ARN_DBG_ATTACH
, "WARN: arn: arn_attach(): "
3283 "Create minor node failed - %d\n", err
));
3285 /* Notify link is down now */
3286 mac_link_update(ic
->ic_mach
, LINK_STATE_DOWN
);
3288 sc
->sc_promisc
= B_FALSE
;
3289 bzero(sc
->sc_mcast_refs
, sizeof (sc
->sc_mcast_refs
));
3290 bzero(sc
->sc_mcast_hash
, sizeof (sc
->sc_mcast_hash
));
3292 ARN_DBG((ARN_DBG_ATTACH
, "arn: arn_attach(): "
3293 "Atheros AR%s MAC/BB Rev:%x "
3294 "AR%s RF Rev:%x: mem=0x%lx\n",
3295 arn_mac_bb_name(ah
->ah_macVersion
),
3297 arn_rf_name((ah
->ah_analog5GhzRev
& AR_RADIO_SREV_MAJOR
)),
3299 (unsigned long)sc
->mem
));
3301 /* XXX: hardware will not be ready until arn_open() being called */
3302 sc
->sc_flags
|= SC_OP_INVALID
;
3303 sc
->sc_isrunning
= 0;
3305 return (DDI_SUCCESS
);
3308 ddi_remove_intr(devinfo
, 0, sc
->sc_iblock
);
3310 ddi_remove_softintr(sc
->sc_softint_id
);
3312 (void) ieee80211_detach(ic
);
3316 ddi_taskq_destroy(sc
->sc_tq
);
3318 ath9k_hw_detach(ah
);
3320 ddi_regs_map_free(&sc
->sc_io_handle
);
3322 pci_config_teardown(&sc
->sc_cfg_handle
);
3324 sc
->sc_flags
|= SC_OP_INVALID
;
3325 /* cleanup tx queues */
3326 mutex_destroy(&sc
->sc_txbuflock
);
3327 for (i
= 0; i
< ATH9K_NUM_TX_QUEUES
; i
++) {
3328 if (ARN_TXQ_SETUP(sc
, i
)) {
3329 /* arn_tx_cleanupq(asc, &asc->sc_txq[i]); */
3330 mutex_destroy(&((&sc
->sc_txq
[i
])->axq_lock
));
3333 mutex_destroy(&sc
->sc_rxbuflock
);
3334 mutex_destroy(&sc
->sc_serial_rw
);
3335 mutex_destroy(&sc
->sc_genlock
);
3336 mutex_destroy(&sc
->sc_resched_lock
);
3338 mutex_destroy(&sc
->sc_bcbuflock
);
3341 ddi_soft_state_free(arn_soft_state_p
, instance
);
3343 return (DDI_FAILURE
);
/*
 * NOTE(review): extraction-mangled fragment of arn_suspend() (the
 * DDI_SUSPEND helper).  The function interior (original lines
 * 3352-3356) was lost in extraction, leaving only the signature and
 * the DDI_SUCCESS return below.  Do not infer the suspend behavior
 * from what remains — recover the body from the original arn_main.c.
 */
3348 * Suspend transmit/receive for powerdown
3351 arn_suspend(struct arn_softc
*sc
)
3357 return (DDI_SUCCESS
);
3361 arn_detach(dev_info_t
*devinfo
, ddi_detach_cmd_t cmd
)
3363 struct arn_softc
*sc
;
3366 sc
= ddi_get_soft_state(arn_soft_state_p
, ddi_get_instance(devinfo
));
3374 return (arn_suspend(sc
));
3377 return (DDI_FAILURE
);
3380 if (mac_disable(sc
->sc_isc
.ic_mach
) != 0)
3381 return (DDI_FAILURE
);
3383 arn_stop_scantimer(sc
);
3384 arn_stop_caltimer(sc
);
3386 /* disable interrupts */
3387 (void) ath9k_hw_set_interrupts(sc
->sc_ah
, 0);
3390 * Unregister from the MAC layer subsystem
3392 (void) mac_unregister(sc
->sc_isc
.ic_mach
);
3394 /* free intterrupt resources */
3395 ddi_remove_intr(devinfo
, 0, sc
->sc_iblock
);
3396 ddi_remove_softintr(sc
->sc_softint_id
);
3399 * NB: the order of these is important:
3400 * o call the 802.11 layer before detaching the hal to
3401 * insure callbacks into the driver to delete global
3402 * key cache entries can be handled
3403 * o reclaim the tx queue data structures after calling
3404 * the 802.11 layer as we'll get called back to reclaim
3405 * node state and potentially want to use them
3406 * o to cleanup the tx queues the hal is called, so detach
3409 ieee80211_detach(&sc
->sc_isc
);
3413 ddi_taskq_destroy(sc
->sc_tq
);
3415 if (!(sc
->sc_flags
& SC_OP_INVALID
))
3416 (void) ath9k_hw_setpower(sc
->sc_ah
, ATH9K_PM_AWAKE
);
3418 /* cleanup tx queues */
3419 mutex_destroy(&sc
->sc_txbuflock
);
3420 for (i
= 0; i
< ATH9K_NUM_TX_QUEUES
; i
++) {
3421 if (ARN_TXQ_SETUP(sc
, i
)) {
3422 arn_tx_cleanupq(sc
, &sc
->sc_txq
[i
]);
3423 mutex_destroy(&((&sc
->sc_txq
[i
])->axq_lock
));
3427 ath9k_hw_detach(sc
->sc_ah
);
3429 /* free io handle */
3430 ddi_regs_map_free(&sc
->sc_io_handle
);
3431 pci_config_teardown(&sc
->sc_cfg_handle
);
3434 mutex_destroy(&sc
->sc_genlock
);
3435 mutex_destroy(&sc
->sc_serial_rw
);
3436 mutex_destroy(&sc
->sc_rxbuflock
);
3437 mutex_destroy(&sc
->sc_resched_lock
);
3439 mutex_destroy(&sc
->sc_bcbuflock
);
3442 ddi_remove_minor_node(devinfo
, NULL
);
3443 ddi_soft_state_free(arn_soft_state_p
, ddi_get_instance(devinfo
));
3445 return (DDI_SUCCESS
);
3449 * quiesce(9E) entry point.
3451 * This function is called when the system is single-threaded at high
3452 * PIL with preemption disabled. Therefore, this function must not be
3455 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
3456 * DDI_FAILURE indicates an error condition and should almost never happen.
3459 arn_quiesce(dev_info_t
*devinfo
)
3461 struct arn_softc
*sc
;
3465 sc
= ddi_get_soft_state(arn_soft_state_p
, ddi_get_instance(devinfo
));
3467 if (sc
== NULL
|| (ah
= sc
->sc_ah
) == NULL
)
3468 return (DDI_FAILURE
);
3471 * Disable interrupts
3473 (void) ath9k_hw_set_interrupts(ah
, 0);
3478 for (i
= 0; i
< ATH9K_NUM_TX_QUEUES
; i
++) {
3479 if (ARN_TXQ_SETUP(sc
, i
))
3480 (void) ath9k_hw_stoptxdma(ah
, sc
->sc_txq
[i
].axq_qnum
);
3486 ath9k_hw_stoppcurecv(ah
);
3487 ath9k_hw_setrxfilter(ah
, 0);
3488 (void) ath9k_hw_stopdmarecv(ah
);
3494 (void) ath9k_hw_phy_disable(ah
);
3496 return (DDI_SUCCESS
);
/*
 * Character/block (nulldev) and device-ops table: attach/detach plus a
 * quiesce handler for fast reboot; D_MP marks the driver MT-safe.
 */
DDI_DEFINE_STREAM_OPS(arn_dev_ops, nulldev, nulldev, arn_attach, arn_detach,
    nodev, NULL, D_MP, NULL, arn_quiesce);
3502 static struct modldrv arn_modldrv
= {
3503 &mod_driverops
, /* Type of module. This one is a driver */
3504 "arn-Atheros 9000 series driver:2.0", /* short description */
3505 &arn_dev_ops
/* driver specific ops */
3508 static struct modlinkage modlinkage
= {
3509 MODREV_1
, (void *)&arn_modldrv
, NULL
3513 _info(struct modinfo
*modinfop
)
3515 return (mod_info(&modlinkage
, modinfop
));
3523 status
= ddi_soft_state_init
3524 (&arn_soft_state_p
, sizeof (struct arn_softc
), 1);
3528 mutex_init(&arn_loglock
, NULL
, MUTEX_DRIVER
, NULL
);
3529 mac_init_ops(&arn_dev_ops
, "arn");
3530 status
= mod_install(&modlinkage
);
3532 mac_fini_ops(&arn_dev_ops
);
3533 mutex_destroy(&arn_loglock
);
3534 ddi_soft_state_fini(&arn_soft_state_p
);
3545 status
= mod_remove(&modlinkage
);
3547 mac_fini_ops(&arn_dev_ops
);
3548 mutex_destroy(&arn_loglock
);
3549 ddi_soft_state_fini(&arn_soft_state_p
);