ath9k: Simplify node attach/detach routines
[linux-2.6/kvm.git] / drivers / net / wireless / ath9k / core.c
blob 689a280370960dca738bad219b7507ffd5001577
/*
 * Copyright (c) 2008, Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Implementation of the main "ATH" layer. */

#include "core.h"
#include "regd.h"

static int ath_outdoor;		/* enable outdoor use */

static u32 ath_chainmask_sel_up_rssi_thres =
	ATH_CHAINMASK_SEL_UP_RSSI_THRES;
static u32 ath_chainmask_sel_down_rssi_thres =
	ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
static u32 ath_chainmask_sel_period =
	ATH_CHAINMASK_SEL_TIMEOUT;
/* return bus cachesize in 4B word units */

static void bus_read_cachesize(struct ath_softc *sc, int *csz)
{
	u8 u8tmp;

	pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
	*csz = (int)u8tmp;

	/*
	 * This check was put in to avoid "unpleasant" consequences if
	 * the bootrom has not fully initialized all PCI devices.
	 * Sometimes the cache line size register is not set.
	 */
	if (*csz == 0)
		*csz = DEFAULT_CACHELINE >> 2;	/* Use the default size */
}
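
/*
 * NB: per the PCI spec, the cache line size register is already in
 * 32-bit (4-byte) word units (e.g. a value of 16 means a 64-byte
 * line), which is why DEFAULT_CACHELINE, a byte count, is shifted
 * right by two above.
 */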

static u8 parse_mpdudensity(u8 mpdudensity)
{
	/*
	 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
	 *   0 for no restriction
	 *   1 for 1/4 us
	 *   2 for 1/2 us
	 *   3 for 1 us
	 *   4 for 2 us
	 *   5 for 4 us
	 *   6 for 8 us
	 *   7 for 16 us
	 */
	switch (mpdudensity) {
	case 0:
		return 0;
	case 1:
	case 2:
	case 3:
		/* Our lower layer calculations limit our precision to
		   1 microsecond */
		return 1;
	case 4:
		return 2;
	case 5:
		return 4;
	case 6:
		return 8;
	case 7:
		return 16;
	default:
		return 0;
	}
}

/*
 * Set current operating mode
 *
 * This function initializes and fills the rate table in the ATH object based
 * on the operating mode.
 */
static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
{
	const struct ath9k_rate_table *rt;
	int i;

	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = ath9k_hw_getratetable(sc->sc_ah, mode);
	BUG_ON(!rt);

	for (i = 0; i < rt->rateCount; i++)
		sc->sc_rixmap[rt->info[i].rateCode] = (u8) i;

	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
	for (i = 0; i < 256; i++) {
		u8 ix = rt->rateCodeToIndex[i];

		if (ix == 0xff)
			continue;

		sc->sc_hwmap[i].ieeerate =
			rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
		sc->sc_hwmap[i].rateKbps = rt->info[ix].rateKbps;

		if (rt->info[ix].shortPreamble ||
		    rt->info[ix].phy == PHY_OFDM) {
			/* XXX: Handle this */
		}

		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
	}
	sc->sc_currates = rt;
	sc->sc_curmode = mode;

	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 * XXX select protection rate index from rate table.
	 */
	sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
}
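
/*
 * NB: this assumes index 1 of the 11g rate table is the 2 Mb/s CCK
 * rate and index 0 is 1 Mb/s, matching the comment above; see the
 * XXX about selecting the protection rate index from the rate table
 * instead.
 */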

/*
 * Set up rate table (legacy rates)
 */
static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
{
	struct ath_hal *ah = sc->sc_ah;
	const struct ath9k_rate_table *rt = NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rate *rate;
	int i, maxrates;

	switch (band) {
	case IEEE80211_BAND_2GHZ:
		rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11G);
		break;
	case IEEE80211_BAND_5GHZ:
		rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11A);
		break;
	default:
		break;
	}

	if (rt == NULL)
		return;

	sband = &sc->sbands[band];
	rate = sc->rates[band];

	if (rt->rateCount > ATH_RATE_MAX)
		maxrates = ATH_RATE_MAX;
	else
		maxrates = rt->rateCount;

	for (i = 0; i < maxrates; i++) {
		rate[i].bitrate = rt->info[i].rateKbps / 100;
		rate[i].hw_value = rt->info[i].rateCode;
		sband->n_bitrates++;

		DPRINTF(sc, ATH_DBG_CONFIG,
			"%s: Rate: %2dMbps, ratecode: %2d\n",
			__func__,
			rate[i].bitrate / 10,
			rate[i].hw_value);
	}
}
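
/*
 * NB: mac80211 expects ieee80211_rate.bitrate in 100 kb/s units,
 * hence rateKbps / 100 above: e.g. 54000 kb/s becomes 540, which the
 * debug print divides by 10 again to show as 54 Mbps.
 */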

/*
 * Set up channel list
 */
static int ath_setup_channels(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int nchan, i, a = 0, b = 0;
	u8 regclassids[ATH_REGCLASSIDS_MAX];
	u32 nregclass = 0;
	struct ieee80211_supported_band *band_2ghz;
	struct ieee80211_supported_band *band_5ghz;
	struct ieee80211_channel *chan_2ghz;
	struct ieee80211_channel *chan_5ghz;
	struct ath9k_channel *c;

	/* Fill in ah->ah_channels */
	if (!ath9k_regd_init_channels(ah,
				      ATH_CHAN_MAX,
				      (u32 *)&nchan,
				      regclassids,
				      ATH_REGCLASSIDS_MAX,
				      &nregclass,
				      CTRY_DEFAULT,
				      false,
				      1)) {
		u32 rd = ah->ah_currentRD;

		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to collect channel list; "
			"regdomain likely %u country code %u\n",
			__func__, rd, CTRY_DEFAULT);
		return -EINVAL;
	}

	band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
	band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
	chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
	chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];

	for (i = 0; i < nchan; i++) {
		c = &ah->ah_channels[i];
		if (IS_CHAN_2GHZ(c)) {
			chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
			chan_2ghz[a].center_freq = c->channel;
			chan_2ghz[a].max_power = c->maxTxPower;

			if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
				chan_2ghz[a].flags |=
					IEEE80211_CHAN_NO_IBSS;
			if (c->channelFlags & CHANNEL_PASSIVE)
				chan_2ghz[a].flags |=
					IEEE80211_CHAN_PASSIVE_SCAN;

			band_2ghz->n_channels = ++a;

			DPRINTF(sc, ATH_DBG_CONFIG,
				"%s: 2GHz channel: %d, "
				"channelFlags: 0x%x\n",
				__func__,
				c->channel,
				c->channelFlags);
		} else if (IS_CHAN_5GHZ(c)) {
			chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
			chan_5ghz[b].center_freq = c->channel;
			chan_5ghz[b].max_power = c->maxTxPower;

			if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
				chan_5ghz[b].flags |=
					IEEE80211_CHAN_NO_IBSS;
			if (c->channelFlags & CHANNEL_PASSIVE)
				chan_5ghz[b].flags |=
					IEEE80211_CHAN_PASSIVE_SCAN;

			band_5ghz->n_channels = ++b;

			DPRINTF(sc, ATH_DBG_CONFIG,
				"%s: 5GHz channel: %d, "
				"channelFlags: 0x%x\n",
				__func__,
				c->channel,
				c->channelFlags);
		}
	}

	return 0;
}
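
/*
 * NB: ath9k_channel.channel holds the center frequency in MHz, so it
 * maps directly onto mac80211's center_freq above; the IEEE channel
 * number used elsewhere comes from ath9k_hw_mhz2ieee().
 */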

/*
 * Determine mode from channel flags
 *
 * This routine will provide the enumerated wireless_mode value based
 * on the settings of the channel flags. If no valid set of flags
 * exist, the lowest mode (11b) is selected.
 */
static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
{
	if (chan->chanmode == CHANNEL_A)
		return ATH9K_MODE_11A;
	else if (chan->chanmode == CHANNEL_G)
		return ATH9K_MODE_11G;
	else if (chan->chanmode == CHANNEL_B)
		return ATH9K_MODE_11B;
	else if (chan->chanmode == CHANNEL_A_HT20)
		return ATH9K_MODE_11NA_HT20;
	else if (chan->chanmode == CHANNEL_G_HT20)
		return ATH9K_MODE_11NG_HT20;
	else if (chan->chanmode == CHANNEL_A_HT40PLUS)
		return ATH9K_MODE_11NA_HT40PLUS;
	else if (chan->chanmode == CHANNEL_A_HT40MINUS)
		return ATH9K_MODE_11NA_HT40MINUS;
	else if (chan->chanmode == CHANNEL_G_HT40PLUS)
		return ATH9K_MODE_11NG_HT40PLUS;
	else if (chan->chanmode == CHANNEL_G_HT40MINUS)
		return ATH9K_MODE_11NG_HT40MINUS;

	WARN_ON(1); /* should not get here */

	return ATH9K_MODE_11B;
}

/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 */
static int ath_stop(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %ld\n",
		__func__, sc->sc_flags & SC_OP_INVALID);

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    turn off timers
	 *    disable interrupts
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    turn off the radio
	 *    reclaim beacon resources
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */

	ath_draintxq(sc, false);
	if (!(sc->sc_flags & SC_OP_INVALID)) {
		ath_stoprecv(sc);
		ath9k_hw_phy_disable(ah);
	} else
		sc->sc_rxlink = NULL;

	return 0;
}

/*
 * Set the current channel
 *
 * Set/change channels. If the channel is really being changed, it's done
 * by resetting the chip. To accomplish this we must first cleanup any pending
 * DMA, then restart stuff, a la ath_init.
 */
int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
{
	struct ath_hal *ah = sc->sc_ah;
	bool fastcc = true, stopped;

	if (sc->sc_flags & SC_OP_INVALID) /* the device is invalid or removed */
		return -EIO;

	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
		__func__,
		ath9k_hw_mhz2ieee(ah, sc->sc_ah->ah_curchan->channel,
				  sc->sc_ah->ah_curchan->channelFlags),
		sc->sc_ah->ah_curchan->channel,
		ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
		hchan->channel, hchan->channelFlags);

	if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
	    hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
	    (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
	    (sc->sc_flags & SC_OP_FULL_RESET)) {
		int status;
		/*
		 * This is only performed if the channel settings have
		 * actually changed.
		 *
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
		ath_draintxq(sc, false);	/* clear pending tx frames */
		stopped = ath_stoprecv(sc);	/* turn off frame recv */

		/* XXX: do not flush receive queue here. We don't want
		 * to flush data frames already in queue because of
		 * changing channel. */

		if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
			fastcc = false;

		spin_lock_bh(&sc->sc_resetlock);
		if (!ath9k_hw_reset(ah, hchan,
				    sc->sc_ht_info.tx_chan_width,
				    sc->sc_tx_chainmask,
				    sc->sc_rx_chainmask,
				    sc->sc_ht_extprotspacing,
				    fastcc, &status)) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to reset channel %u (%uMhz) "
				"flags 0x%x hal status %u\n", __func__,
				ath9k_hw_mhz2ieee(ah, hchan->channel,
						  hchan->channelFlags),
				hchan->channel, hchan->channelFlags, status);
			spin_unlock_bh(&sc->sc_resetlock);
			return -EIO;
		}
		spin_unlock_bh(&sc->sc_resetlock);

		sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
		sc->sc_flags &= ~SC_OP_FULL_RESET;

		/* Re-enable rx framework */
		if (ath_startrecv(sc) != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to restart recv logic\n",
				__func__);
			return -EIO;
		}

		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		ath_setcurmode(sc, ath_chan2mode(hchan));

		ath_update_txpow(sc);	/* update tx power state */

		/*
		 * Re-enable interrupts.
		 */
		ath9k_hw_set_interrupts(ah, sc->sc_imask);
	}

	return 0;
}
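
/*
 * NB: "fastcc" above requests a fast channel change, a lighter reset
 * that skips full re-initialization; it is abandoned whenever RX
 * could not be cleanly stopped or a full reset was requested via
 * SC_OP_FULL_RESET.
 */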

/**********************/
/* Chainmask Handling */
/**********************/

static void ath_chainmask_sel_timertimeout(unsigned long data)
{
	struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
	cm->switch_allowed = 1;
}

/* Start chainmask select timer */
static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
{
	cm->switch_allowed = 0;
	/* expire one select period from now */
	mod_timer(&cm->timer, jiffies + msecs_to_jiffies(ath_chainmask_sel_period));
}

/* Stop chainmask select timer */
static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
{
	cm->switch_allowed = 0;
	del_timer_sync(&cm->timer);
}

static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

	memset(cm, 0, sizeof(struct ath_chainmask_sel));

	cm->cur_tx_mask = sc->sc_tx_chainmask;
	cm->cur_rx_mask = sc->sc_rx_chainmask;
	cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
	setup_timer(&cm->timer,
		    ath_chainmask_sel_timertimeout, (unsigned long) cm);
}

int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

	/*
	 * Disable auto-switching in one of the following if conditions.
	 * sc_chainmask_auto_sel is used for internal global auto-switching
	 * enabled/disabled setting
	 */
	if (sc->sc_ah->ah_caps.tx_chainmask != ATH_CHAINMASK_SEL_3X3) {
		cm->cur_tx_mask = sc->sc_tx_chainmask;
		return cm->cur_tx_mask;
	}

	if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
		return cm->cur_tx_mask;

	if (cm->switch_allowed) {
		/* Switch down from tx 3 to tx 2. */
		if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
		    ATH_RSSI_OUT(cm->tx_avgrssi) >=
		    ath_chainmask_sel_down_rssi_thres) {
			cm->cur_tx_mask = sc->sc_tx_chainmask;

			/* Don't let another switch happen until
			 * this timer expires */
			ath_chainmask_sel_timerstart(cm);
		}
		/* Switch up from tx 2 to 3. */
		else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
			 ATH_RSSI_OUT(cm->tx_avgrssi) <=
			 ath_chainmask_sel_up_rssi_thres) {
			cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;

			/* Don't let another switch happen
			 * until this timer expires */
			ath_chainmask_sel_timerstart(cm);
		}
	}

	return cm->cur_tx_mask;
}
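
/*
 * NB: the logic above is a hysteresis loop: while the averaged TX
 * RSSI is strong (>= the "down" threshold) two chains suffice, and
 * once it sags (<= the "up" threshold) the third chain is restored;
 * the select timer enforces a hold-off so the mask cannot flap on
 * every sample.
 */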

/*
 * Update tx/rx chainmask. For legacy association,
 * hard code chainmask to 1x1, for 11n association, use
 * the chainmask configuration.
 */
void ath_update_chainmask(struct ath_softc *sc, int is_ht)
{
	sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
	if (is_ht) {
		sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
		sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
	} else {
		sc->sc_tx_chainmask = 1;
		sc->sc_rx_chainmask = 1;
	}

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
		__func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
}

/*******/
/* ANI */
/*******/

/*
 * This routine performs the periodic noise floor calibration function
 * that is used to adjust and optimize the chip performance. This
 * takes environmental changes (location, temperature) into account.
 * When the task is complete, it reschedules itself depending on the
 * appropriate interval that was calculated.
 */
static void ath_ani_calibrate(unsigned long data)
{
	struct ath_softc *sc;
	struct ath_hal *ah;
	bool longcal = false;
	bool shortcal = false;
	bool aniflag = false;
	unsigned int timestamp = jiffies_to_msecs(jiffies);
	u32 cal_interval;

	sc = (struct ath_softc *)data;
	ah = sc->sc_ah;

	/*
	 * don't calibrate when we're scanning.
	 * we are most likely not on our home channel.
	 */
	if (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)
		return;

	/* Long calibration runs independently of short calibration. */
	if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) {
		longcal = true;
		DPRINTF(sc, ATH_DBG_ANI, "%s: longcal @%lu\n",
			__func__, jiffies);
		sc->sc_ani.sc_longcal_timer = timestamp;
	}

	/* Short calibration applies only while sc_caldone is false */
	if (!sc->sc_ani.sc_caldone) {
		if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
		    ATH_SHORT_CALINTERVAL) {
			shortcal = true;
			DPRINTF(sc, ATH_DBG_ANI, "%s: shortcal @%lu\n",
				__func__, jiffies);
			sc->sc_ani.sc_shortcal_timer = timestamp;
			sc->sc_ani.sc_resetcal_timer = timestamp;
		}
	} else {
		if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
		    ATH_RESTART_CALINTERVAL) {
			ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
						&sc->sc_ani.sc_caldone);
			if (sc->sc_ani.sc_caldone)
				sc->sc_ani.sc_resetcal_timer = timestamp;
		}
	}

	/* Verify whether we must check ANI */
	if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
	    ATH_ANI_POLLINTERVAL) {
		aniflag = true;
		sc->sc_ani.sc_checkani_timer = timestamp;
	}

	/* Skip all processing if there's nothing to do. */
	if (longcal || shortcal || aniflag) {
		/* Call ANI routine if necessary */
		if (aniflag)
			ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
					     ah->ah_curchan);

		/* Perform calibration if necessary */
		if (longcal || shortcal) {
			bool iscaldone = false;

			if (ath9k_hw_calibrate(ah, ah->ah_curchan,
					       sc->sc_rx_chainmask, longcal,
					       &iscaldone)) {
				if (longcal)
					sc->sc_ani.sc_noise_floor =
						ath9k_hw_getchan_noise(ah,
							ah->ah_curchan);

				DPRINTF(sc, ATH_DBG_ANI,
					"%s: calibrate chan %u/%x nf: %d\n",
					__func__,
					ah->ah_curchan->channel,
					ah->ah_curchan->channelFlags,
					sc->sc_ani.sc_noise_floor);
			} else {
				DPRINTF(sc, ATH_DBG_ANY,
					"%s: calibrate chan %u/%x failed\n",
					__func__,
					ah->ah_curchan->channel,
					ah->ah_curchan->channelFlags);
			}
			sc->sc_ani.sc_caldone = iscaldone;
		}
	}

	/*
	 * Set timer interval based on previous results.
	 * The interval must be the shortest necessary to satisfy ANI,
	 * short calibration and long calibration.
	 */
	cal_interval = ATH_ANI_POLLINTERVAL;
	if (!sc->sc_ani.sc_caldone)
		cal_interval = min(cal_interval, (u32)ATH_SHORT_CALINTERVAL);

	mod_timer(&sc->sc_ani.timer, jiffies + msecs_to_jiffies(cal_interval));
}
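
/*
 * NB: the timer is re-armed with the shorter of the ANI poll and
 * short-cal intervals, so while initial calibration is incomplete
 * this routine fires often enough for short cal, then drops back to
 * the plain ANI poll rate once sc_caldone is set.
 */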

/******************/
/* VAP management */
/******************/

int ath_vap_attach(struct ath_softc *sc,
		   int if_id,
		   struct ieee80211_vif *if_data,
		   enum ath9k_opmode opmode)
{
	struct ath_vap *avp;

	if (if_id >= ATH_BCBUF || sc->sc_vaps[if_id] != NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: Invalid interface id = %u\n", __func__, if_id);
		return -EINVAL;
	}

	switch (opmode) {
	case ATH9K_M_STA:
	case ATH9K_M_IBSS:
	case ATH9K_M_MONITOR:
		break;
	case ATH9K_M_HOSTAP:
		/* XXX not right, beacon buffer is allocated on RUN trans */
		if (list_empty(&sc->sc_bbuf))
			return -ENOMEM;
		break;
	default:
		return -EINVAL;
	}

	/* create ath_vap */
	avp = kmalloc(sizeof(struct ath_vap), GFP_KERNEL);
	if (avp == NULL)
		return -ENOMEM;

	memset(avp, 0, sizeof(struct ath_vap));
	avp->av_if_data = if_data;
	/* Set the VAP opmode */
	avp->av_opmode = opmode;
	avp->av_bslot = -1;

	if (opmode == ATH9K_M_HOSTAP)
		ath9k_hw_set_tsfadjust(sc->sc_ah, 1);

	sc->sc_vaps[if_id] = avp;
	sc->sc_nvaps++;
	/* Set the device opmode */
	sc->sc_ah->ah_opmode = opmode;

	/* default VAP configuration */
	avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE;
	avp->av_config.av_fixed_retryset = 0x03030303;

	return 0;
}
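
/*
 * NB: the retryset presumably packs one retry count per byte, i.e.
 * four multi-rate-retry series of 3 attempts each.
 */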

int ath_vap_detach(struct ath_softc *sc, int if_id)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp;

	avp = sc->sc_vaps[if_id];
	if (avp == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
			__func__, if_id);
		return -EINVAL;
	}

	/*
	 * Quiesce the hardware while we remove the vap. In
	 * particular we need to reclaim all references to the
	 * vap state by any frames pending on the tx queues.
	 *
	 * XXX can we do this w/o affecting other vaps?
	 */
	ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
	ath_draintxq(sc, false);	/* stop xmit side */
	ath_stoprecv(sc);		/* stop recv side */
	ath_flushrecv(sc);		/* flush recv queue */

	kfree(avp);
	sc->sc_vaps[if_id] = NULL;
	sc->sc_nvaps--;

	return 0;
}

int ath_vap_config(struct ath_softc *sc,
		   int if_id, struct ath_vap_config *if_config)
{
	struct ath_vap *avp;

	if (if_id >= ATH_BCBUF) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: Invalid interface id = %u\n", __func__, if_id);
		return -EINVAL;
	}

	avp = sc->sc_vaps[if_id];
	ASSERT(avp != NULL);

	if (avp)
		memcpy(&avp->av_config, if_config, sizeof(avp->av_config));

	return 0;
}

/********/
/* Core */
/********/

int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n",
		__func__, sc->sc_ah->ah_opmode);

	/*
	 * Stop anything previously set up. This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop(sc);

	/* Initialize chainmask selection */
	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

	/* Reset SERDES registers */
	ath9k_hw_configpcipowersave(ah, 0);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, initial_chan,
			    sc->sc_ht_info.tx_chan_width,
			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing, false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset hardware; hal status %u "
			"(freq %u flags 0x%x)\n", __func__, status,
			initial_chan->channel, initial_chan->channelFlags);
		error = -EIO;
		spin_unlock_bh(&sc->sc_resetlock);
		goto done;
	}
	spin_unlock_bh(&sc->sc_resetlock);

	/*
	 * This is needed only to setup initial state
	 * but it's best done after a reset.
	 */
	ath_update_txpow(sc);

	/*
	 * Setup the hardware after reset:
	 * The receive engine is set going.
	 * Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);
		error = -EIO;
		goto done;
	}

	/* Setup our intr mask. */
	sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
		| ATH9K_INT_RXEOL | ATH9K_INT_RXORN
		| ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;

	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
		sc->sc_imask |= ATH9K_INT_GTT;

	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
		sc->sc_imask |= ATH9K_INT_CST;

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (ath9k_hw_phycounters(ah) &&
	    ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
	     (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
		sc->sc_imask |= ATH9K_INT_MIB;
	/*
	 * Some hardware processes the TIM IE and fires an
	 * interrupt when the TIM bit is set. For hardware
	 * that does, if not overridden by configuration,
	 * enable the TIM interrupt when operating as station.
	 */
	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
	    (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
	    !sc->sc_config.swBeaconProcess)
		sc->sc_imask |= ATH9K_INT_TIM;

	/*
	 * Don't enable interrupts here as we've not yet built our
	 * vap and node data structures, which will be needed as soon
	 * as we start receiving.
	 */
	ath_setcurmode(sc, ath_chan2mode(initial_chan));

	/* XXX: we must make sure h/w is ready and clear invalid flag
	 * before turning on interrupt. */
	sc->sc_flags &= ~SC_OP_INVALID;
done:
	return error;
}

int ath_reset(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;

	ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
	ath_draintxq(sc, retry_tx);	/* stop xmit */
	ath_stoprecv(sc);		/* stop recv */
	ath_flushrecv(sc);		/* flush recv queue */

	/* Reset chip */
	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan,
			    sc->sc_ht_info.tx_chan_width,
			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing, false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset hardware; hal status %u\n",
			__func__, status);
		error = -EIO;
	}
	spin_unlock_bh(&sc->sc_resetlock);

	if (ath_startrecv(sc) != 0)	/* restart recv */
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);

	/*
	 * We may be doing a reset in response to a request
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_setcurmode(sc, ath_chan2mode(sc->sc_ah->ah_curchan));

	ath_update_txpow(sc);

	if (sc->sc_flags & SC_OP_BEACONS)
		ath_beacon_config(sc, ATH_IF_ID_ANY);	/* restart beacons */

	ath9k_hw_set_interrupts(ah, sc->sc_imask);

	/* Restart the txq */
	if (retry_tx) {
		int i;
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				spin_lock_bh(&sc->sc_txq[i].axq_lock);
				ath_txq_schedule(sc, &sc->sc_txq[i]);
				spin_unlock_bh(&sc->sc_txq[i].axq_lock);
			}
		}
	}

	return error;
}

int ath_suspend(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	/* No I/O if device has been surprise removed */
	if (sc->sc_flags & SC_OP_INVALID)
		return -EIO;

	/* Shut off the interrupt before setting SC_OP_INVALID */
	ath9k_hw_set_interrupts(ah, 0);

	/* XXX: we must make sure h/w will not generate any interrupt
	 * before setting the invalid flag. */
	sc->sc_flags |= SC_OP_INVALID;

	/* disable HAL and put h/w to sleep */
	ath9k_hw_disable(sc->sc_ah);

	ath9k_hw_configpcipowersave(sc->sc_ah, 1);

	return 0;
}

/* Interrupt handler. Most of the actual processing is deferred.
 * It's the caller's responsibility to ensure the chip is awake. */

irqreturn_t ath_isr(int irq, void *dev)
{
	struct ath_softc *sc = dev;
	struct ath_hal *ah = sc->sc_ah;
	enum ath9k_int status;
	bool sched = false;

	do {
		if (sc->sc_flags & SC_OP_INVALID) {
			/*
			 * The hardware is not ready/present, don't
			 * touch anything. Note this can happen early
			 * on if the IRQ is shared.
			 */
			return IRQ_NONE;
		}
		if (!ath9k_hw_intrpend(ah)) {	/* shared irq, not for us */
			return IRQ_NONE;
		}

		/*
		 * Figure out the reason(s) for the interrupt. Note
		 * that the hal returns a pseudo-ISR that may include
		 * bits we haven't explicitly enabled so we mask the
		 * value to ensure we only process bits we requested.
		 */
		ath9k_hw_getisr(ah, &status);	/* NB: clears ISR too */

		status &= sc->sc_imask;	/* discard unasked-for bits */

		/*
		 * If there are no status bits set, then this interrupt was not
		 * for me (should have been caught above).
		 */
		if (!status)
			return IRQ_NONE;

		sc->sc_intrstatus = status;

		if (status & ATH9K_INT_FATAL) {
			/* need a chip reset */
			sched = true;
		} else if (status & ATH9K_INT_RXORN) {
			/* need a chip reset */
			sched = true;
		} else {
			if (status & ATH9K_INT_SWBA) {
				/* schedule a tasklet for beacon handling */
				tasklet_schedule(&sc->bcon_tasklet);
			}
			if (status & ATH9K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link when
				 *     RXE bit is written, but it doesn't work
				 *     at least on older hardware revs.
				 */
				sched = true;
			}

			if (status & ATH9K_INT_TXURN)
				/* bump tx trigger level */
				ath9k_hw_updatetxtriglevel(ah, true);
			/* XXX: optimize this */
			if (status & ATH9K_INT_RX)
				sched = true;
			if (status & ATH9K_INT_TX)
				sched = true;
			if (status & ATH9K_INT_BMISS)
				sched = true;
			/* carrier sense timeout */
			if (status & ATH9K_INT_CST)
				sched = true;
			if (status & ATH9K_INT_MIB) {
				/*
				 * Disable interrupts until we service the MIB
				 * interrupt; otherwise it will continue to
				 * fire.
				 */
				ath9k_hw_set_interrupts(ah, 0);
				/*
				 * Let the hal handle the event. We assume
				 * it will clear whatever condition caused
				 * the interrupt.
				 */
				ath9k_hw_procmibevent(ah, &sc->sc_halstats);
				ath9k_hw_set_interrupts(ah, sc->sc_imask);
			}
			if (status & ATH9K_INT_TIM_TIMER) {
				if (!(ah->ah_caps.hw_caps &
				      ATH9K_HW_CAP_AUTOSLEEP)) {
					/* Clear RxAbort bit so that we can
					 * receive frames */
					ath9k_hw_setrxabort(ah, 0);
					sched = true;
				}
			}
		}
	} while (0);

	if (sched) {
		/* turn off every interrupt except SWBA */
		ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
		tasklet_schedule(&sc->intr_tq);
	}

	return IRQ_HANDLED;
}
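
/*
 * NB: everything except SWBA is masked off above so the deferred
 * tasklet can run without the same condition re-firing;
 * ath9k_tasklet() restores the full sc_imask once it has serviced
 * the cause.
 */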

/* Deferred interrupt processing */

static void ath9k_tasklet(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	u32 status = sc->sc_intrstatus;

	if (status & ATH9K_INT_FATAL) {
		/* need a chip reset */
		ath_reset(sc, false);
		return;
	} else {

		if (status &
		    (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
			/* XXX: fill me in */
			/*
			if (status & ATH9K_INT_RXORN) {
			}
			if (status & ATH9K_INT_RXEOL) {
			}
			*/
			spin_lock_bh(&sc->sc_rxflushlock);
			ath_rx_tasklet(sc, 0);
			spin_unlock_bh(&sc->sc_rxflushlock);
		}
		/* XXX: optimize this */
		if (status & ATH9K_INT_TX)
			ath_tx_tasklet(sc);
		/* XXX: fill me in */
		/*
		if (status & ATH9K_INT_BMISS) {
		}
		if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) {
			if (status & ATH9K_INT_TIM) {
			}
			if (status & ATH9K_INT_DTIMSYNC) {
			}
		}
		*/
	}

	/* re-enable hardware interrupt */
	ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
}

int ath_init(u16 devid, struct ath_softc *sc)
{
	struct ath_hal *ah = NULL;
	int status;
	int error = 0, i;
	int csz = 0;

	/* XXX: hardware will not be ready until ath_open() is called */
	sc->sc_flags |= SC_OP_INVALID;

	sc->sc_debug = DBG_DEFAULT;
	DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid);

	/* Initialize tasklet */
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	bus_read_cachesize(sc, &csz);
	/* XXX assert csz is non-zero */
	sc->sc_cachelsz = csz << 2;	/* convert to bytes */

	spin_lock_init(&sc->sc_resetlock);

	ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
	if (ah == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to attach hardware; HAL status %u\n",
			__func__, status);
		error = -ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;

	/* Initializes the noise floor to a reasonable default value.
	 * Later on this will be updated during ANI processing. */
	sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;

	/* Get the hardware key cache size. */
	sc->sc_keymax = ah->ah_caps.keycache_size;
	if (sc->sc_keymax > ATH_KEYMAX) {
		DPRINTF(sc, ATH_DBG_KEYCACHE,
			"%s: Warning, using only %u entries in %u key cache\n",
			__func__, ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath9k_hw_keyreset(ah, (u16) i);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use. If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 * XXX only for splitmic.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		set_bit(i, sc->sc_keymap);
		set_bit(i + 32, sc->sc_keymap);
		set_bit(i + 64, sc->sc_keymap);
		set_bit(i + 32 + 64, sc->sc_keymap);
	}
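
	/*
	 * NB: with split TKIP MIC keys, a key in slot i presumably
	 * also occupies the companion slots i+32, i+64 and i+32+64,
	 * which is why all four are reserved above for each of the
	 * IEEE80211_WEP_NKID default key indices.
	 */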

	/*
	 * Collect the channel list using the default country
	 * code and including outdoor channels. The 802.11 layer
	 * is responsible for filtering this list based on settings
	 * like the phy mode.
	 */
	error = ath_setup_channels(sc);
	if (error)
		goto bad;

	/* default to MONITOR mode */
	sc->sc_ah->ah_opmode = ATH9K_M_MONITOR;

	/* Setup rate tables */

	ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
	ath_setup_rates(sc, IEEE80211_BAND_5GHZ);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, ATH9K_MODE_11A);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority. Note that the hal handles resetting
	 * these queues at the needed time.
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == -1) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup a beacon xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup CAB xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}

	sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
		sc->sc_haltype2q[i] = -1;

	/* Setup data queues */
	/* NB: ensure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BK traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}

	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BE traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VI traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VO traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}

	setup_timer(&sc->sc_ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->sc_rc = ath_rate_attach(ah);
	if (sc->sc_rc == NULL) {
		error = -EIO;
		goto bad2;
	}

	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)) {
		/*
		 * Whether we should enable h/w TKIP MIC.
		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
		 * report WMM capable, so it's always safe to turn on
		 * TKIP MIC in this case.
		 */
		ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
				       0, 1, NULL);
	}

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				      ATH9K_CIPHER_MIC, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
				      0, NULL))
		sc->sc_splitmic = 1;

	/* turn on mcast key search if possible */
	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
		(void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
					     1, NULL);

	sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
	sc->sc_config.txpowlimit_override = 0;

	/* 11n Capabilities */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

	ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
	sc->sc_defant = ath9k_hw_getdefantenna(ah);

	ath9k_hw_getmac(ah, sc->sc_myaddr);
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
		ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
		ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
	}

	sc->sc_slottime = ATH9K_SLOT_TIME_9;	/* default to short slot time */

	/* initialize beacon slots */
	for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
		sc->sc_bslot[i] = ATH_IF_ID_ANY;

	/* save MISC configurations */
	sc->sc_config.swBeaconProcess = 1;

#ifdef CONFIG_SLOW_ANT_DIV
	/* range is 40 - 255, we use something in the middle */
	ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
#endif

	return 0;
bad2:
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
bad:
	if (ah)
		ath9k_hw_detach(ah);
	return error;
}

void ath_deinit(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int i;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);

	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);
	ath_stop(sc);
	if (!(sc->sc_flags & SC_OP_INVALID))
		ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
	ath_rate_detach(sc->sc_rc);
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
	ath9k_hw_detach(ah);
}

/*******************/
/* Node Management */
/*******************/

void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta, int if_id)
{
	struct ath_vap *avp;
	struct ath_node *an;

	avp = sc->sc_vaps[if_id];
	ASSERT(avp != NULL);

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR)
		ath_tx_node_init(sc, an);
	if (sc->sc_flags & SC_OP_RXAGGR)
		ath_rx_node_init(sc, an);

	an->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
			     sta->ht_cap.ampdu_factor);
	an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);

	ath_chainmask_sel_init(sc, an);
	ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
}
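
/*
 * NB: per 802.11n the maximum A-MPDU length is 2^(13 + exponent)
 * octets, so with IEEE80211_HTCAP_MAXRXAMPDU_FACTOR at 13, an
 * ampdu_factor of 3 yields a 65536-byte maxampdu above.
 */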

void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;

	ath_chainmask_sel_timerstop(&an->an_chainmask_sel);

	if (sc->sc_flags & SC_OP_TXAGGR)
		ath_tx_node_cleanup(sc, an);
	if (sc->sc_flags & SC_OP_RXAGGR)
		ath_rx_node_cleanup(sc, an);
}

/*
 * Set up New Node
 *
 * Setup driver-specific state for a newly associated node. This routine
 * really only applies if compression or XR are enabled; there is no code
 * covering any other cases.
 */
void ath_newassoc(struct ath_softc *sc,
		  struct ath_node *an, int isnew, int isuapsd)
{
	int tidno;

	/* if station reassociates, tear down the aggregation state. */
	if (!isnew) {
		for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_tx_aggr_teardown(sc, an, tidno);
			if (sc->sc_flags & SC_OP_RXAGGR)
				ath_rx_aggr_teardown(sc, an, tidno);
		}
	}
}

/**************/
/* Encryption */
/**************/

void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
{
	ath9k_hw_keyreset(sc->sc_ah, keyix);
	if (freeslot)
		clear_bit(keyix, sc->sc_keymap);
}

int ath_keyset(struct ath_softc *sc,
	       u16 keyix,
	       struct ath9k_keyval *hk,
	       const u8 mac[ETH_ALEN])
{
	bool status;

	status = ath9k_hw_set_keycache_entry(sc->sc_ah,
					     keyix, hk, mac, false);

	return status != false;
}

/***********************/
/* TX Power/Regulatory */
/***********************/

/*
 * Set Transmit power in HAL
 *
 * This routine makes the actual HAL calls to set the new transmit power
 * limit.
 */
void ath_update_txpow(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 txpow;

	if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
		ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
		/* read back in case value is clamped */
		ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
		sc->sc_curtxpow = txpow;
	}
}

/* Return the current country and domain information */
void ath_get_currentCountry(struct ath_softc *sc,
			    struct ath9k_country_entry *ctry)
{
	ath9k_regd_get_current_country(sc->sc_ah, ctry);

	/* If the HAL has no specific country yet (it is band
	 * dependent), use the one we passed in. */
	if (ctry->countryCode == CTRY_DEFAULT) {
		ctry->iso[0] = 0;
		ctry->iso[1] = 0;
	} else if (ctry->iso[0] && ctry->iso[1]) {
		if (!ctry->iso[2]) {
			if (ath_outdoor)
				ctry->iso[2] = 'O';
			else
				ctry->iso[2] = 'I';
		}
	}
}

/**************************/
/* Slow Antenna Diversity */
/**************************/

void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
			   struct ath_softc *sc,
			   int32_t rssitrig)
{
	int trig;

	/* antdivf_rssitrig can range from 40 - 0xff */
	trig = (rssitrig > 0xff) ? 0xff : rssitrig;
	trig = (trig < 40) ? 40 : trig;

	antdiv->antdiv_sc = sc;
	antdiv->antdivf_rssitrig = trig;
}

void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
			    u8 num_antcfg,
			    const u8 *bssid)
{
	antdiv->antdiv_num_antcfg =
		num_antcfg < ATH_ANT_DIV_MAX_CFG ?
		num_antcfg : ATH_ANT_DIV_MAX_CFG;
	antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
	antdiv->antdiv_curcfg = 0;
	antdiv->antdiv_bestcfg = 0;
	antdiv->antdiv_laststatetsf = 0;

	memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));

	antdiv->antdiv_start = 1;
}

void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
{
	antdiv->antdiv_start = 0;
}

static int32_t ath_find_max_val(int32_t *val,
				u8 num_val, u8 *max_index)
{
	int32_t MaxVal = *val++;
	u32 cur_index = 0;

	*max_index = 0;
	while (++cur_index < num_val) {
		if (*val > MaxVal) {
			MaxVal = *val;
			*max_index = cur_index;
		}

		val++;
	}

	return MaxVal;
}

void ath_slow_ant_div(struct ath_antdiv *antdiv,
		      struct ieee80211_hdr *hdr,
		      struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = antdiv->antdiv_sc;
	struct ath_hal *ah = sc->sc_ah;
	u64 curtsf = 0;
	u8 bestcfg, curcfg = antdiv->antdiv_curcfg;
	__le16 fc = hdr->frame_control;

	if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
	    && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
		antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
		antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
		curtsf = antdiv->antdiv_lastbtsf[curcfg];
	} else {
		return;
	}

	switch (antdiv->antdiv_state) {
	case ATH_ANT_DIV_IDLE:
		if ((antdiv->antdiv_lastbrssi[curcfg] <
		     antdiv->antdivf_rssitrig)
		    && ((curtsf - antdiv->antdiv_laststatetsf) >
			ATH_ANT_DIV_MIN_IDLE_US)) {

			curcfg++;
			if (curcfg == antdiv->antdiv_num_antcfg)
				curcfg = 0;

			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}
		break;

	case ATH_ANT_DIV_SCAN:
		if ((curtsf - antdiv->antdiv_laststatetsf) <
		    ATH_ANT_DIV_MIN_SCAN_US)
			break;

		curcfg++;
		if (curcfg == antdiv->antdiv_num_antcfg)
			curcfg = 0;

		if (curcfg == antdiv->antdiv_bestcfg) {
			ath_find_max_val(antdiv->antdiv_lastbrssi,
					 antdiv->antdiv_num_antcfg, &bestcfg);
			if (!ath9k_hw_select_antconfig(ah, bestcfg)) {
				antdiv->antdiv_bestcfg = bestcfg;
				antdiv->antdiv_curcfg = bestcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
			}
		} else {
			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}

		break;
	}
}
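
/*
 * NB: the diversity state machine above samples beacon RSSI from the
 * associated BSSID only. In IDLE, a weak beacon (below the trigger)
 * that has persisted for ATH_ANT_DIV_MIN_IDLE_US starts a SCAN, which
 * dwells ATH_ANT_DIV_MIN_SCAN_US on each antenna config in turn; once
 * every config has been sampled, the one with the best recorded
 * beacon RSSI is selected and the machine returns to IDLE.
 */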

/***********************/
/* Descriptor Handling */
/***********************/

/*
 * Set up DMA descriptors
 *
 * This function will allocate both the DMA descriptor structure and the
 * buffers it contains. These are used to contain the descriptors used
 * by the system.
 */
int ath_descdma_setup(struct ath_softc *sc,
		      struct ath_descdma *dd,
		      struct list_head *head,
		      const char *name,
		      int nbuf,
		      int ndesc)
{
#define	DS2PHYS(_dd, _ds)						\
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
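	/*
	 * NB: low-12-bit offsets above 0xF7F lie in the final 128
	 * bytes of a 4 KB page, where a 32-dword (128-byte)
	 * descriptor fetch could reach past the page boundary; such
	 * addresses are skipped in the loop below.
	 */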
	struct ath_desc *ds;
	struct ath_buf *bf;
	int i, bsize, error;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
		__func__, name, nbuf, ndesc);

	/* ath_desc must be a multiple of DWORDs */
	if ((sizeof(struct ath_desc) % 4) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
			__func__);
		ASSERT((sizeof(struct ath_desc) % 4) == 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_name = name;
	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * sizeof(struct ath_desc);
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = pci_alloc_consistent(sc->pdev,
					   dd->dd_desc_len,
					   &dd->dd_desc_paddr);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = dd->dd_desc;
	DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
		__func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kmalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	memset(bf, 0, bsize);
	dd->dd_bufptr = bf;

	INIT_LIST_HEAD(head);
	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->ah_caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				ASSERT((caddr_t) bf->bf_desc <
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += ndesc;
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	pci_free_consistent(sc->pdev,
			    dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}

/*
 * Cleanup DMA descriptors
 *
 * This function will free the DMA block that was allocated for the descriptor
 * pool. Since this was allocated as one "chunk", it is freed in the same
 * manner.
 */
void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	/* Free memory associated with descriptors */
	pci_free_consistent(sc->pdev,
			    dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}

/*************/
/* Utilities */
/*************/

int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
{
	int qnum;

	switch (queue) {
	case 0:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
		break;
	case 1:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
		break;
	case 2:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
		break;
	case 3:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
		break;
	default:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
		break;
	}

	return qnum;
}

int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
{
	int qnum;

	switch (queue) {
	case ATH9K_WME_AC_VO:
		qnum = 0;
		break;
	case ATH9K_WME_AC_VI:
		qnum = 1;
		break;
	case ATH9K_WME_AC_BE:
		qnum = 2;
		break;
	case ATH9K_WME_AC_BK:
		qnum = 3;
		break;
	default:
		qnum = -1;
		break;
	}

	return qnum;
}
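
/*
 * NB: the two helpers above are inverses: mac80211 numbers its
 * queues from 0 (highest priority, voice) to 3 (lowest, background),
 * while the hardware queue for each access category is whatever
 * ath_tx_setup() recorded in sc_haltype2q[].
 */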

/*
 * Expand time stamp to TSF
 *
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the current h/w TSF.
 */
u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
{
	u64 tsf;

	tsf = ath9k_hw_gettsf64(sc->sc_ah);
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;
	return (tsf & ~0x7fff) | rstamp;
}
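
/*
 * NB: if the low 15 TSF bits are already below rstamp, the TSF has
 * wrapped its 15-bit window since the frame was stamped, so back up
 * one window first. E.g. tsf = 0x12348001, rstamp = 0x7fff:
 * 0x0001 < 0x7fff, so tsf becomes 0x12340001 and the result is
 * (0x12340001 & ~0x7fff) | 0x7fff = 0x12347fff.
 */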

/*
 * Set Default Antenna
 *
 * Call into the HAL to set the default antenna to use. Not really valid for
 * MIMO technology.
 */
void ath_setdefantenna(void *context, u32 antenna)
{
	struct ath_softc *sc = (struct ath_softc *)context;
	struct ath_hal *ah = sc->sc_ah;

	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(ah, antenna);
	sc->sc_defant = antenna;
	sc->sc_rxotherant = 0;
}

/*
 * Set Slot Time
 *
 * This will wake up the chip if required, and set the slot time for the
 * frame (maximum transmit time). Slot time is assumed to be already set
 * in the ATH object member sc_slottime.
 */
void ath_setslottime(struct ath_softc *sc)
{
	ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
	sc->sc_updateslot = OK;
}