ath9k: Build RFKILL feature even when RFKILL subsystem is a MODULE
drivers/net/wireless/ath9k/core.c
blob 3e94dab6c9d99b4cf532457ead7a1ecd3bab1161
/*
 * Copyright (c) 2008, Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "core.h"
#include "regd.h"

static u32 ath_chainmask_sel_up_rssi_thres =
        ATH_CHAINMASK_SEL_UP_RSSI_THRES;
static u32 ath_chainmask_sel_down_rssi_thres =
        ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
static u32 ath_chainmask_sel_period =
        ATH_CHAINMASK_SEL_TIMEOUT;
/* return bus cachesize in 4B word units */

static void bus_read_cachesize(struct ath_softc *sc, int *csz)
{
        u8 u8tmp;

        pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
        *csz = (int)u8tmp;

        /*
         * This check was put in to avoid "unpleasant" consequences if
         * the bootrom has not fully initialized all PCI devices.
         * Sometimes the cache line size register is not set.
         */
        if (*csz == 0)
                *csz = DEFAULT_CACHELINE >> 2;  /* Use the default size */
}
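
/*
 * NB: the PCI cache line size register holds the size in units of
 * 4-byte words, so no conversion is needed above; e.g. a 64-byte cache
 * line reads back as 16. ath_init() later converts back to bytes with
 * "sc->sc_cachelsz = csz << 2", and DEFAULT_CACHELINE (assumed here to
 * be defined in bytes in core.h) is shifted down for the same reason.
 */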
static u8 parse_mpdudensity(u8 mpdudensity)
{
        /*
         * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
         *   0 for no restriction
         *   1 for 1/4 us
         *   2 for 1/2 us
         *   3 for 1 us
         *   4 for 2 us
         *   5 for 4 us
         *   6 for 8 us
         *   7 for 16 us
         */
        switch (mpdudensity) {
        case 0:
                return 0;
        case 1:
        case 2:
        case 3:
                /* Our lower layer calculations limit our precision to
                   1 microsecond */
                return 1;
        case 4:
                return 2;
        case 5:
                return 4;
        case 6:
                return 8;
        case 7:
                return 16;
        default:
                return 0;
        }
}
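
/*
 * Worked example: a peer advertising an MPDU density field of 5
 * (4 us minimum spacing) yields parse_mpdudensity(5) == 4, i.e. the
 * value is flattened to whole microseconds; densities of 1 us or less
 * (fields 1-3) all collapse to 1. The result is stored as
 * an->mpdudensity in ath_node_attach() below.
 */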
/*
 * Set current operating mode
 *
 * This function initializes and fills the rate table in the ATH object based
 * on the operating mode.
 */
static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
{
        const struct ath9k_rate_table *rt;
        int i;

        memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
        rt = ath9k_hw_getratetable(sc->sc_ah, mode);
        BUG_ON(!rt);

        for (i = 0; i < rt->rateCount; i++)
                sc->sc_rixmap[rt->info[i].rateCode] = (u8) i;

        memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
        for (i = 0; i < 256; i++) {
                u8 ix = rt->rateCodeToIndex[i];

                if (ix == 0xff)
                        continue;

                sc->sc_hwmap[i].ieeerate =
                        rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
                sc->sc_hwmap[i].rateKbps = rt->info[ix].rateKbps;

                if (rt->info[ix].shortPreamble ||
                    rt->info[ix].phy == PHY_OFDM) {
                        /* XXX: Handle this */
                }

                /* NB: this uses the last entry if the rate isn't found */
                /* XXX beware of overflow */
        }

        sc->sc_currates = rt;
        sc->sc_curmode = mode;
        /*
         * All protection frames are transmitted at 2Mb/s for
         * 11g, otherwise at 1Mb/s.
         * XXX select protection rate index from rate table.
         */
        sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
}
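
/*
 * NB: sc_rixmap inverts the rate table (hardware rate code -> table
 * index) so completion paths can look entries up by rate code. The
 * hard-coded sc_protrix assumes index 1 of the 11g table is the
 * 2Mb/s entry and index 0 of the other tables is 1Mb/s, which is
 * exactly the caveat flagged by the "XXX select protection rate index
 * from rate table" note above.
 */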
/*
 * Set up rate table (legacy rates)
 */
static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
{
        struct ath_hal *ah = sc->sc_ah;
        const struct ath9k_rate_table *rt = NULL;
        struct ieee80211_supported_band *sband;
        struct ieee80211_rate *rate;
        int i, maxrates;

        switch (band) {
        case IEEE80211_BAND_2GHZ:
                rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11G);
                break;
        case IEEE80211_BAND_5GHZ:
                rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11A);
                break;
        default:
                break;
        }

        if (rt == NULL)
                return;

        sband = &sc->sbands[band];
        rate = sc->rates[band];

        if (rt->rateCount > ATH_RATE_MAX)
                maxrates = ATH_RATE_MAX;
        else
                maxrates = rt->rateCount;

        for (i = 0; i < maxrates; i++) {
                rate[i].bitrate = rt->info[i].rateKbps / 100;
                rate[i].hw_value = rt->info[i].rateCode;
                sband->n_bitrates++;
                DPRINTF(sc, ATH_DBG_CONFIG,
                        "%s: Rate: %2dMbps, ratecode: %2d\n",
                        __func__,
                        rate[i].bitrate / 10,
                        rate[i].hw_value);
        }
}
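
/*
 * Unit check: rt->info[i].rateKbps is in kb/s while mac80211 expects
 * ieee80211_rate.bitrate in units of 100 kb/s, hence the "/ 100";
 * e.g. 54000 kb/s becomes 540. The debug print above divides by 10
 * again to show whole Mbps (540 -> 54 Mbps).
 */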
/*
 * Set up channel list
 */
static int ath_setup_channels(struct ath_softc *sc)
{
        struct ath_hal *ah = sc->sc_ah;
        int nchan, i, a = 0, b = 0;
        u8 regclassids[ATH_REGCLASSIDS_MAX];
        u32 nregclass = 0;
        struct ieee80211_supported_band *band_2ghz;
        struct ieee80211_supported_band *band_5ghz;
        struct ieee80211_channel *chan_2ghz;
        struct ieee80211_channel *chan_5ghz;
        struct ath9k_channel *c;

        /* Fill in ah->ah_channels */
        if (!ath9k_regd_init_channels(ah, ATH_CHAN_MAX, (u32 *)&nchan,
                                      regclassids, ATH_REGCLASSIDS_MAX,
                                      &nregclass, CTRY_DEFAULT, false, 1)) {
                u32 rd = ah->ah_currentRD;

                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to collect channel list; "
                        "regdomain likely %u country code %u\n",
                        __func__, rd, CTRY_DEFAULT);
                return -EINVAL;
        }

        band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
        band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
        chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
        chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];

        for (i = 0; i < nchan; i++) {
                c = &ah->ah_channels[i];
                if (IS_CHAN_2GHZ(c)) {
                        chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
                        chan_2ghz[a].center_freq = c->channel;
                        chan_2ghz[a].max_power = c->maxTxPower;

                        if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
                                chan_2ghz[a].flags |= IEEE80211_CHAN_NO_IBSS;
                        if (c->channelFlags & CHANNEL_PASSIVE)
                                chan_2ghz[a].flags |= IEEE80211_CHAN_PASSIVE_SCAN;

                        band_2ghz->n_channels = ++a;

                        DPRINTF(sc, ATH_DBG_CONFIG,
                                "%s: 2GHz channel: %d, "
                                "channelFlags: 0x%x\n",
                                __func__, c->channel, c->channelFlags);
                } else if (IS_CHAN_5GHZ(c)) {
                        chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
                        chan_5ghz[b].center_freq = c->channel;
                        chan_5ghz[b].max_power = c->maxTxPower;

                        if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
                                chan_5ghz[b].flags |= IEEE80211_CHAN_NO_IBSS;
                        if (c->channelFlags & CHANNEL_PASSIVE)
                                chan_5ghz[b].flags |= IEEE80211_CHAN_PASSIVE_SCAN;

                        band_5ghz->n_channels = ++b;

                        DPRINTF(sc, ATH_DBG_CONFIG,
                                "%s: 5GHz channel: %d, "
                                "channelFlags: 0x%x\n",
                                __func__, c->channel, c->channelFlags);
                }
        }

        return 0;
}
/*
 * Determine mode from channel flags
 *
 * This routine will provide the enumerated wireless mode value based
 * on the settings of the channel flags. If no valid set of flags
 * exists, the lowest mode (11b) is selected.
 */
static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
{
        if (chan->chanmode == CHANNEL_A)
                return ATH9K_MODE_11A;
        else if (chan->chanmode == CHANNEL_G)
                return ATH9K_MODE_11G;
        else if (chan->chanmode == CHANNEL_B)
                return ATH9K_MODE_11B;
        else if (chan->chanmode == CHANNEL_A_HT20)
                return ATH9K_MODE_11NA_HT20;
        else if (chan->chanmode == CHANNEL_G_HT20)
                return ATH9K_MODE_11NG_HT20;
        else if (chan->chanmode == CHANNEL_A_HT40PLUS)
                return ATH9K_MODE_11NA_HT40PLUS;
        else if (chan->chanmode == CHANNEL_A_HT40MINUS)
                return ATH9K_MODE_11NA_HT40MINUS;
        else if (chan->chanmode == CHANNEL_G_HT40PLUS)
                return ATH9K_MODE_11NG_HT40PLUS;
        else if (chan->chanmode == CHANNEL_G_HT40MINUS)
                return ATH9K_MODE_11NG_HT40MINUS;

        WARN_ON(1); /* should not get here */

        return ATH9K_MODE_11B;
}
/*
 * Set the current channel
 *
 * Set/change channels. If the channel is really being changed, it's done
 * by resetting the chip. To accomplish this we must first cleanup any pending
 * DMA, then restart things after the reset, a la ath_init.
 */
int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
{
        struct ath_hal *ah = sc->sc_ah;
        bool fastcc = true, stopped;

        if (sc->sc_flags & SC_OP_INVALID) /* the device is invalid or removed */
                return -EIO;

        DPRINTF(sc, ATH_DBG_CONFIG,
                "%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
                __func__,
                ath9k_hw_mhz2ieee(ah, sc->sc_ah->ah_curchan->channel,
                                  sc->sc_ah->ah_curchan->channelFlags),
                sc->sc_ah->ah_curchan->channel,
                ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
                hchan->channel, hchan->channelFlags);

        if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
            hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
            (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
            (sc->sc_flags & SC_OP_FULL_RESET)) {
                int status;
                /*
                 * This is only performed if the channel settings have
                 * actually changed.
                 *
                 * To switch channels clear any pending DMA operations;
                 * wait long enough for the RX fifo to drain, reset the
                 * hardware at the new frequency, and then re-enable
                 * the relevant bits of the h/w.
                 */
                ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
                ath_draintxq(sc, false);        /* clear pending tx frames */
                stopped = ath_stoprecv(sc);     /* turn off frame recv */

                /* XXX: do not flush receive queue here. We don't want
                 * to flush data frames already in queue because of
                 * changing channel. */

                if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
                        fastcc = false;

                spin_lock_bh(&sc->sc_resetlock);
                if (!ath9k_hw_reset(ah, hchan,
                                    sc->sc_ht_info.tx_chan_width,
                                    sc->sc_tx_chainmask,
                                    sc->sc_rx_chainmask,
                                    sc->sc_ht_extprotspacing,
                                    fastcc, &status)) {
                        DPRINTF(sc, ATH_DBG_FATAL,
                                "%s: unable to reset channel %u (%uMhz) "
                                "flags 0x%x hal status %u\n", __func__,
                                ath9k_hw_mhz2ieee(ah, hchan->channel,
                                                  hchan->channelFlags),
                                hchan->channel, hchan->channelFlags, status);
                        spin_unlock_bh(&sc->sc_resetlock);
                        return -EIO;
                }
                spin_unlock_bh(&sc->sc_resetlock);

                sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
                sc->sc_flags &= ~SC_OP_FULL_RESET;

                /* Re-enable rx framework */
                if (ath_startrecv(sc) != 0) {
                        DPRINTF(sc, ATH_DBG_FATAL,
                                "%s: unable to restart recv logic\n",
                                __func__);
                        return -EIO;
                }

                /*
                 * Change channels and update the h/w rate map
                 * if we're switching; e.g. 11a to 11b/g.
                 */
                ath_setcurmode(sc, ath_chan2mode(hchan));

                ath_update_txpow(sc); /* update tx power state */

                /*
                 * Re-enable interrupts.
                 */
                ath9k_hw_set_interrupts(ah, sc->sc_imask);
        }
        return 0;
}
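
/*
 * NB: "fastcc" requests the HAL's fast channel change path, which
 * skips part of the full reset sequence; it is only attempted when RX
 * was stopped cleanly and no full reset was pending, per the fallback
 * above. Exactly what work is skipped is a HAL implementation detail
 * of ath9k_hw_reset().
 */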
/**********************/
/* Chainmask Handling */
/**********************/

static void ath_chainmask_sel_timertimeout(unsigned long data)
{
        struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
        cm->switch_allowed = 1;
}

/* Start chainmask select timer */
static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
{
        cm->switch_allowed = 0;
        /* mod_timer() takes an absolute expiry time in jiffies, so the
         * period (assumed, like the other intervals in this file, to be
         * in ms) must be added to the current time. */
        mod_timer(&cm->timer,
                  jiffies + msecs_to_jiffies(ath_chainmask_sel_period));
}

/* Stop chainmask select timer */
static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
{
        cm->switch_allowed = 0;
        del_timer_sync(&cm->timer);
}

static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

        memset(cm, 0, sizeof(struct ath_chainmask_sel));

        cm->cur_tx_mask = sc->sc_tx_chainmask;
        cm->cur_rx_mask = sc->sc_rx_chainmask;
        cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
        setup_timer(&cm->timer,
                    ath_chainmask_sel_timertimeout, (unsigned long) cm);
}
int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

        /*
         * Disable auto-switching in one of the following if conditions.
         * sc_chainmask_auto_sel is used for internal global auto-switching
         * enabled/disabled setting
         */
        if (sc->sc_ah->ah_caps.tx_chainmask != ATH_CHAINMASK_SEL_3X3) {
                cm->cur_tx_mask = sc->sc_tx_chainmask;
                return cm->cur_tx_mask;
        }

        if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
                return cm->cur_tx_mask;

        if (cm->switch_allowed) {
                /* Switch down from tx 3 to tx 2. */
                if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
                    ATH_RSSI_OUT(cm->tx_avgrssi) >=
                    ath_chainmask_sel_down_rssi_thres) {
                        cm->cur_tx_mask = sc->sc_tx_chainmask;

                        /* Don't let another switch happen until
                         * this timer expires */
                        ath_chainmask_sel_timerstart(cm);
                }
                /* Switch up from tx 2 to 3. */
                else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
                         ATH_RSSI_OUT(cm->tx_avgrssi) <=
                         ath_chainmask_sel_up_rssi_thres) {
                        cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;

                        /* Don't let another switch happen
                         * until this timer expires */
                        ath_chainmask_sel_timerstart(cm);
                }
        }

        return cm->cur_tx_mask;
}
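
/*
 * The two thresholds form a hysteresis band: with a strong link
 * (average RSSI at or above the "down" threshold) transmit falls back
 * from the 3x3 mask to the configured mask, and only climbs back to
 * 3x3 once the link weakens to the "up" threshold. The timer started
 * on each transition keeps switch_allowed clear, so back-to-back flips
 * cannot occur within one ATH_CHAINMASK_SEL_TIMEOUT period.
 */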
/*
 * Update tx/rx chainmask. For legacy association,
 * hard code chainmask to 1x1, for 11n association, use
 * the chainmask configuration.
 */
void ath_update_chainmask(struct ath_softc *sc, int is_ht)
{
        sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
        if (is_ht) {
                sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
                sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
        } else {
                sc->sc_tx_chainmask = 1;
                sc->sc_rx_chainmask = 1;
        }

        DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
                __func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
}
/*******/
/* ANI */
/*******/

/*
 * This routine performs the periodic noise floor calibration function
 * that is used to adjust and optimize the chip performance. This
 * takes environmental changes (location, temperature) into account.
 * When the task is complete, it reschedules itself depending on the
 * appropriate interval that was calculated.
 */
static void ath_ani_calibrate(unsigned long data)
{
        struct ath_softc *sc;
        struct ath_hal *ah;
        bool longcal = false;
        bool shortcal = false;
        bool aniflag = false;
        unsigned int timestamp = jiffies_to_msecs(jiffies);
        u32 cal_interval;

        sc = (struct ath_softc *)data;
        ah = sc->sc_ah;

        /*
         * Don't calibrate when we're scanning.
         * We are most likely not on our home channel.
         */
        if (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)
                return;

        /* Long calibration runs independently of short calibration. */
        if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) {
                longcal = true;
                DPRINTF(sc, ATH_DBG_ANI, "%s: longcal @%lu\n",
                        __func__, jiffies);
                sc->sc_ani.sc_longcal_timer = timestamp;
        }

        /* Short calibration applies only while sc_caldone is false */
        if (!sc->sc_ani.sc_caldone) {
                if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
                    ATH_SHORT_CALINTERVAL) {
                        shortcal = true;
                        DPRINTF(sc, ATH_DBG_ANI, "%s: shortcal @%lu\n",
                                __func__, jiffies);
                        sc->sc_ani.sc_shortcal_timer = timestamp;
                        sc->sc_ani.sc_resetcal_timer = timestamp;
                }
        } else {
                if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
                    ATH_RESTART_CALINTERVAL) {
                        ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
                                                &sc->sc_ani.sc_caldone);
                        if (sc->sc_ani.sc_caldone)
                                sc->sc_ani.sc_resetcal_timer = timestamp;
                }
        }

        /* Verify whether we must check ANI */
        if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
            ATH_ANI_POLLINTERVAL) {
                aniflag = true;
                sc->sc_ani.sc_checkani_timer = timestamp;
        }

        /* Skip all processing if there's nothing to do. */
        if (longcal || shortcal || aniflag) {
                /* Call ANI routine if necessary */
                if (aniflag)
                        ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
                                             ah->ah_curchan);

                /* Perform calibration if necessary */
                if (longcal || shortcal) {
                        bool iscaldone = false;

                        if (ath9k_hw_calibrate(ah, ah->ah_curchan,
                                               sc->sc_rx_chainmask, longcal,
                                               &iscaldone)) {
                                if (longcal)
                                        sc->sc_ani.sc_noise_floor =
                                                ath9k_hw_getchan_noise(ah,
                                                               ah->ah_curchan);

                                DPRINTF(sc, ATH_DBG_ANI,
                                        "%s: calibrate chan %u/%x nf: %d\n",
                                        __func__,
                                        ah->ah_curchan->channel,
                                        ah->ah_curchan->channelFlags,
                                        sc->sc_ani.sc_noise_floor);
                        } else {
                                DPRINTF(sc, ATH_DBG_ANY,
                                        "%s: calibrate chan %u/%x failed\n",
                                        __func__,
                                        ah->ah_curchan->channel,
                                        ah->ah_curchan->channelFlags);
                        }
                        sc->sc_ani.sc_caldone = iscaldone;
                }
        }

        /*
         * Set timer interval based on previous results.
         * The interval must be the shortest necessary to satisfy ANI,
         * short calibration and long calibration.
         */
        cal_interval = ATH_ANI_POLLINTERVAL;
        if (!sc->sc_ani.sc_caldone)
                cal_interval = min(cal_interval, (u32)ATH_SHORT_CALINTERVAL);

        mod_timer(&sc->sc_ani.timer, jiffies + msecs_to_jiffies(cal_interval));
}
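
/*
 * Rearm policy: the timer always fires at least every
 * ATH_ANI_POLLINTERVAL ms; while calibration is still incomplete
 * (sc_caldone false) it is tightened to the short-calibration
 * interval if that is smaller. Long calibration needs no term of its
 * own since it is re-checked on every poll against its own timestamp.
 */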
/********/
/* Core */
/********/

int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
{
        struct ath_hal *ah = sc->sc_ah;
        int status;
        int error = 0;

        DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n",
                __func__, sc->sc_ah->ah_opmode);

        /* Reset SERDES registers */
        ath9k_hw_configpcipowersave(ah, 0);

        /*
         * The basic interface to setting the hardware in a good
         * state is ``reset''. On return the hardware is known to
         * be powered up and with interrupts disabled. This must
         * be followed by initialization of the appropriate bits
         * and then setup of the interrupt mask.
         */

        spin_lock_bh(&sc->sc_resetlock);
        if (!ath9k_hw_reset(ah, initial_chan,
                            sc->sc_ht_info.tx_chan_width,
                            sc->sc_tx_chainmask, sc->sc_rx_chainmask,
                            sc->sc_ht_extprotspacing, false, &status)) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to reset hardware; hal status %u "
                        "(freq %u flags 0x%x)\n", __func__, status,
                        initial_chan->channel, initial_chan->channelFlags);
                error = -EIO;
                spin_unlock_bh(&sc->sc_resetlock);
                goto done;
        }
        spin_unlock_bh(&sc->sc_resetlock);

        /*
         * This is needed only to setup initial state
         * but it's best done after a reset.
         */
        ath_update_txpow(sc);

        /*
         * Setup the hardware after reset:
         * The receive engine is set going.
         * Frame transmit is handled entirely
         * in the frame output path; there's nothing to do
         * here except setup the interrupt mask.
         */
        if (ath_startrecv(sc) != 0) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to start recv logic\n", __func__);
                error = -EIO;
                goto done;
        }

        /* Setup our intr mask. */
        sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
                | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
                | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;

        if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
                sc->sc_imask |= ATH9K_INT_GTT;

        if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
                sc->sc_imask |= ATH9K_INT_CST;

        /*
         * Enable MIB interrupts when there are hardware phy counters.
         * Note we only do this (at the moment) for station mode.
         */
        if (ath9k_hw_phycounters(ah) &&
            ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
             (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
                sc->sc_imask |= ATH9K_INT_MIB;
        /*
         * Some hardware processes the TIM IE and fires an
         * interrupt when the TIM bit is set. For hardware
         * that does, if not overridden by configuration,
         * enable the TIM interrupt when operating as station.
         */
        if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
            (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
            !sc->sc_config.swBeaconProcess)
                sc->sc_imask |= ATH9K_INT_TIM;

        ath_setcurmode(sc, ath_chan2mode(initial_chan));

        sc->sc_flags &= ~SC_OP_INVALID;

        /* Disable BMISS interrupt when we're not associated */
        sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
        ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);

        ieee80211_wake_queues(sc->hw);
done:
        return error;
}
void ath_stop(struct ath_softc *sc)
{
        struct ath_hal *ah = sc->sc_ah;

        DPRINTF(sc, ATH_DBG_CONFIG, "%s: Cleaning up\n", __func__);

        ieee80211_stop_queues(sc->hw);

        /* make sure h/w will not generate any interrupt
         * before setting the invalid flag. */
        ath9k_hw_set_interrupts(ah, 0);

        if (!(sc->sc_flags & SC_OP_INVALID)) {
                ath_draintxq(sc, false);
                ath_stoprecv(sc);
                ath9k_hw_phy_disable(ah);
        } else
                sc->sc_rxlink = NULL;

#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
        if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
                cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
#endif
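
        /*
         * NB: the dual guard above is the point of this patch: testing
         * CONFIG_RFKILL_MODULE in addition to CONFIG_RFKILL keeps the
         * rfkill polling code compiled in when the rfkill subsystem is
         * built as a module (=m), whereas a bare #ifdef CONFIG_RFKILL
         * would only cover the built-in (=y) case.
         */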
        /* disable HAL and put h/w to sleep */
        ath9k_hw_disable(sc->sc_ah);

        ath9k_hw_configpcipowersave(sc->sc_ah, 1);

        sc->sc_flags |= SC_OP_INVALID;
}
int ath_reset(struct ath_softc *sc, bool retry_tx)
{
        struct ath_hal *ah = sc->sc_ah;
        int status;
        int error = 0;

        ath9k_hw_set_interrupts(ah, 0);
        ath_draintxq(sc, retry_tx);
        ath_stoprecv(sc);
        ath_flushrecv(sc);

        /* Reset chip */
        spin_lock_bh(&sc->sc_resetlock);
        if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan,
                            sc->sc_ht_info.tx_chan_width,
                            sc->sc_tx_chainmask, sc->sc_rx_chainmask,
                            sc->sc_ht_extprotspacing, false, &status)) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to reset hardware; hal status %u\n",
                        __func__, status);
                error = -EIO;
        }
        spin_unlock_bh(&sc->sc_resetlock);

        if (ath_startrecv(sc) != 0)
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to start recv logic\n", __func__);

        /*
         * We may be doing a reset in response to a request
         * that changes the channel so update any state that
         * might change as a result.
         */
        ath_setcurmode(sc, ath_chan2mode(sc->sc_ah->ah_curchan));

        ath_update_txpow(sc);

        if (sc->sc_flags & SC_OP_BEACONS)
                ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */

        ath9k_hw_set_interrupts(ah, sc->sc_imask);

        /* Restart the txq */
        if (retry_tx) {
                int i;
                for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                        if (ATH_TXQ_SETUP(sc, i)) {
                                spin_lock_bh(&sc->sc_txq[i].axq_lock);
                                ath_txq_schedule(sc, &sc->sc_txq[i]);
                                spin_unlock_bh(&sc->sc_txq[i].axq_lock);
                        }
                }
        }

        return error;
}
/* Interrupt handler. Most of the actual processing is deferred.
 * It's the caller's responsibility to ensure the chip is awake. */

irqreturn_t ath_isr(int irq, void *dev)
{
        struct ath_softc *sc = dev;
        struct ath_hal *ah = sc->sc_ah;
        enum ath9k_int status;
        bool sched = false;

        do {
                if (sc->sc_flags & SC_OP_INVALID) {
                        /*
                         * The hardware is not ready/present, don't
                         * touch anything. Note this can happen early
                         * on if the IRQ is shared.
                         */
                        return IRQ_NONE;
                }
                if (!ath9k_hw_intrpend(ah)) {   /* shared irq, not for us */
                        return IRQ_NONE;
                }

                /*
                 * Figure out the reason(s) for the interrupt. Note
                 * that the hal returns a pseudo-ISR that may include
                 * bits we haven't explicitly enabled so we mask the
                 * value to ensure we only process bits we requested.
                 */
                ath9k_hw_getisr(ah, &status);   /* NB: clears ISR too */

                status &= sc->sc_imask; /* discard unasked-for bits */

                /*
                 * If there are no status bits set, then this interrupt was not
                 * for me (should have been caught above).
                 */
                if (!status)
                        return IRQ_NONE;

                sc->sc_intrstatus = status;

                if (status & ATH9K_INT_FATAL) {
                        /* need a chip reset */
                        sched = true;
                } else if (status & ATH9K_INT_RXORN) {
                        /* need a chip reset */
                        sched = true;
                } else {
                        if (status & ATH9K_INT_SWBA) {
                                /* schedule a tasklet for beacon handling */
                                tasklet_schedule(&sc->bcon_tasklet);
                        }
                        if (status & ATH9K_INT_RXEOL) {
                                /*
                                 * NB: the hardware should re-read the link when
                                 * RXE bit is written, but it doesn't work
                                 * at least on older hardware revs.
                                 */
                                sched = true;
                        }

                        if (status & ATH9K_INT_TXURN)
                                /* bump tx trigger level */
                                ath9k_hw_updatetxtriglevel(ah, true);
                        /* XXX: optimize this */
                        if (status & ATH9K_INT_RX)
                                sched = true;
                        if (status & ATH9K_INT_TX)
                                sched = true;
                        if (status & ATH9K_INT_BMISS)
                                sched = true;
                        /* carrier sense timeout */
                        if (status & ATH9K_INT_CST)
                                sched = true;
                        if (status & ATH9K_INT_MIB) {
                                /*
                                 * Disable interrupts until we service the MIB
                                 * interrupt; otherwise it will continue to
                                 * fire.
                                 */
                                ath9k_hw_set_interrupts(ah, 0);
                                /*
                                 * Let the hal handle the event. We assume
                                 * it will clear whatever condition caused
                                 * the interrupt.
                                 */
                                ath9k_hw_procmibevent(ah, &sc->sc_halstats);
                                ath9k_hw_set_interrupts(ah, sc->sc_imask);
                        }
                        if (status & ATH9K_INT_TIM_TIMER) {
                                if (!(ah->ah_caps.hw_caps &
                                      ATH9K_HW_CAP_AUTOSLEEP)) {
                                        /* Clear RxAbort bit so that we can
                                         * receive frames */
                                        ath9k_hw_setrxabort(ah, 0);
                                        sched = true;
                                }
                        }
                }
        } while (0);

        if (sched) {
                /* turn off every interrupt except SWBA */
                ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
                tasklet_schedule(&sc->intr_tq);
        }

        return IRQ_HANDLED;
}
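
/*
 * Usage sketch (the actual probe code lives outside this file): the
 * handler is installed from the bus glue on a shared line, along the
 * lines of
 *
 *     ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath", sc);
 *
 * which is why both the SC_OP_INVALID and ath9k_hw_intrpend() checks
 * above return IRQ_NONE for interrupts that belong to another device
 * sharing the line.
 */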
/* Deferred interrupt processing */

static void ath9k_tasklet(unsigned long data)
{
        struct ath_softc *sc = (struct ath_softc *)data;
        u32 status = sc->sc_intrstatus;

        if (status & ATH9K_INT_FATAL) {
                /* need a chip reset */
                ath_reset(sc, false);
                return;
        } else {
                if (status &
                    (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
                        /* XXX: fill me in */
                        /*
                        if (status & ATH9K_INT_RXORN) {
                        }
                        if (status & ATH9K_INT_RXEOL) {
                        }
                        */
                        spin_lock_bh(&sc->sc_rxflushlock);
                        ath_rx_tasklet(sc, 0);
                        spin_unlock_bh(&sc->sc_rxflushlock);
                }
                /* XXX: optimize this */
                if (status & ATH9K_INT_TX)
                        ath_tx_tasklet(sc);
                /* XXX: fill me in */
                /*
                if (status & ATH9K_INT_BMISS) {
                }
                if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) {
                        if (status & ATH9K_INT_TIM) {
                        }
                        if (status & ATH9K_INT_DTIMSYNC) {
                        }
                }
                */
        }

        /* re-enable hardware interrupt */
        ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
}
int ath_init(u16 devid, struct ath_softc *sc)
{
        struct ath_hal *ah = NULL;
        int status;
        int error = 0, i;
        int csz = 0;

        /* XXX: hardware will not be ready until ath_open() is called */
        sc->sc_flags |= SC_OP_INVALID;
        sc->sc_debug = DBG_DEFAULT;

        spin_lock_init(&sc->sc_resetlock);
        tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
        tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
                     (unsigned long)sc);

        /*
         * Cache line size is used to size and align various
         * structures used to communicate with the hardware.
         */
        bus_read_cachesize(sc, &csz);
        /* XXX assert csz is non-zero */
        sc->sc_cachelsz = csz << 2;     /* convert to bytes */

        ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
        if (ah == NULL) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to attach hardware; HAL status %u\n",
                        __func__, status);
                error = -ENXIO;
                goto bad;
        }
        sc->sc_ah = ah;

        /* Get the hardware key cache size. */
        sc->sc_keymax = ah->ah_caps.keycache_size;
        if (sc->sc_keymax > ATH_KEYMAX) {
                DPRINTF(sc, ATH_DBG_KEYCACHE,
                        "%s: Warning, using only %u entries in %u key cache\n",
                        __func__, ATH_KEYMAX, sc->sc_keymax);
                sc->sc_keymax = ATH_KEYMAX;
        }

        /*
         * Reset the key cache since some parts do not
         * reset the contents on initial power up.
         */
        for (i = 0; i < sc->sc_keymax; i++)
                ath9k_hw_keyreset(ah, (u16) i);
        /*
         * Mark key cache slots associated with global keys
         * as in use. If we knew TKIP was not to be used we
         * could leave the +32, +64, and +32+64 slots free.
         * XXX only for splitmic.
         */
        for (i = 0; i < IEEE80211_WEP_NKID; i++) {
                set_bit(i, sc->sc_keymap);
                set_bit(i + 32, sc->sc_keymap);
                set_bit(i + 64, sc->sc_keymap);
                set_bit(i + 32 + 64, sc->sc_keymap);
        }
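
        /*
         * Worked example of the reservation above: for default key 0,
         * slots 0, 32, 64 and 32+64 = 96 are all marked in use, since
         * with split-MIC TKIP the MIC halves of a key in slot i sit at
         * the fixed +32/+64 offsets (cf. the sc_splitmic setup further
         * down and its 27-vs-59 station limit).
         */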
        /* Collect the channel list using the default country code */

        error = ath_setup_channels(sc);
        if (error)
                goto bad;

        /* default to MONITOR mode */
        sc->sc_ah->ah_opmode = ATH9K_M_MONITOR;

        /* Setup rate tables */

        ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
        ath_setup_rates(sc, IEEE80211_BAND_5GHZ);

        /* NB: setup here so ath_rate_update is happy */
        ath_setcurmode(sc, ATH9K_MODE_11A);

        /*
         * Allocate hardware transmit queues: one queue for
         * beacon frames and one data queue for each QoS
         * priority. Note that the hal handles resetting
         * these queues at the needed time.
         */
        sc->sc_bhalq = ath_beaconq_setup(ah);
        if (sc->sc_bhalq == -1) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to setup a beacon xmit queue\n", __func__);
                error = -EIO;
                goto bad2;
        }
        sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
        if (sc->sc_cabq == NULL) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to setup CAB xmit queue\n", __func__);
                error = -EIO;
                goto bad2;
        }

        sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
        ath_cabq_update(sc);

        for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
                sc->sc_haltype2q[i] = -1;

        /* Setup data queues */
        /* NB: ensure BK queue is the lowest priority h/w queue */
        if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to setup xmit queue for BK traffic\n",
                        __func__);
                error = -EIO;
                goto bad2;
        }

        if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to setup xmit queue for BE traffic\n",
                        __func__);
                error = -EIO;
                goto bad2;
        }
        if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to setup xmit queue for VI traffic\n",
                        __func__);
                error = -EIO;
                goto bad2;
        }
        if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to setup xmit queue for VO traffic\n",
                        __func__);
                error = -EIO;
                goto bad2;
        }
        /* Initializes the noise floor to a reasonable default value.
         * Later on this will be updated during ANI processing. */

        sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;
        setup_timer(&sc->sc_ani.timer, ath_ani_calibrate, (unsigned long)sc);

        sc->sc_rc = ath_rate_attach(ah);
        if (sc->sc_rc == NULL) {
                error = -EIO;
                goto bad2;
        }

        if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
                                   ATH9K_CIPHER_TKIP, NULL)) {
                /*
                 * Whether we should enable h/w TKIP MIC.
                 * XXX: if we don't support WME TKIP MIC, then we wouldn't
                 * report WMM capable, so it's always safe to turn on
                 * TKIP MIC in this case.
                 */
                ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
                                       0, 1, NULL);
        }

        /*
         * Check whether the separate key cache entries
         * are required to handle both tx+rx MIC keys.
         * With split mic keys the number of stations is limited
         * to 27 otherwise 59.
         */
        if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
                                   ATH9K_CIPHER_TKIP, NULL)
            && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
                                      ATH9K_CIPHER_MIC, NULL)
            && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
                                      0, NULL))
                sc->sc_splitmic = 1;

        /* turn on mcast key search if possible */
        if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
                (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
                                             1, NULL);
        sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
        sc->sc_config.txpowlimit_override = 0;

        /* 11n Capabilities */
        if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
                sc->sc_flags |= SC_OP_TXAGGR;
                sc->sc_flags |= SC_OP_RXAGGR;
        }

        sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
        sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

        ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
        sc->sc_defant = ath9k_hw_getdefantenna(ah);

        ath9k_hw_getmac(ah, sc->sc_myaddr);
        if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
                ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
                ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
                ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
        }

        sc->sc_slottime = ATH9K_SLOT_TIME_9;    /* default to short slot time */

        /* initialize beacon slots */
        for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
                sc->sc_bslot[i] = ATH_IF_ID_ANY;

        /* save MISC configurations */
        sc->sc_config.swBeaconProcess = 1;

#ifdef CONFIG_SLOW_ANT_DIV
        /* range is 40 - 255, we use something in the middle
         * (NB: 0x127 exceeds the stated range and is clamped to 0xff
         * by ath_slow_ant_div_init) */
        ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
#endif

        /* setup channels and rates */

        sc->sbands[IEEE80211_BAND_2GHZ].channels =
                sc->channels[IEEE80211_BAND_2GHZ];
        sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
                sc->rates[IEEE80211_BAND_2GHZ];
        sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;

        if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes)) {
                sc->sbands[IEEE80211_BAND_5GHZ].channels =
                        sc->channels[IEEE80211_BAND_5GHZ];
                sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
                        sc->rates[IEEE80211_BAND_5GHZ];
                sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
        }

        return 0;
bad2:
        /* cleanup tx queues */
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->sc_txq[i]);
bad:
        if (ah)
                ath9k_hw_detach(ah);

        return error;
}
/*******************/
/* Node Management */
/*******************/

void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;

        if (sc->sc_flags & SC_OP_TXAGGR)
                ath_tx_node_init(sc, an);

        an->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
                             sta->ht_cap.ampdu_factor);
        an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);

        ath_chainmask_sel_init(sc, an);
        ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
}

void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
        struct ath_node *an = (struct ath_node *)sta->drv_priv;

        ath_chainmask_sel_timerstop(&an->an_chainmask_sel);

        if (sc->sc_flags & SC_OP_TXAGGR)
                ath_tx_node_cleanup(sc, an);
}
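
/*
 * A-MPDU sizing example: per 802.11n the maximum RX A-MPDU length is
 * 2^(13 + factor) - 1 bytes, with IEEE80211_HTCAP_MAXRXAMPDU_FACTOR
 * defined as 13 by mac80211. A peer advertising ampdu_factor == 3
 * therefore gives an->maxampdu = 1 << 16 = 65536 above (the "- 1" is
 * ignored by this driver's bookkeeping).
 */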
/*
 * Set up New Node
 *
 * Setup driver-specific state for a newly associated node. This routine
 * really only applies if compression or XR are enabled; there is no code
 * covering any other cases.
 */

void ath_newassoc(struct ath_softc *sc,
                  struct ath_node *an, int isnew, int isuapsd)
{
        int tidno;

        /* if station reassociates, tear down the aggregation state. */
        if (!isnew) {
                for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
                        if (sc->sc_flags & SC_OP_TXAGGR)
                                ath_tx_aggr_teardown(sc, an, tidno);
                }
        }
}
/**************/
/* Encryption */
/**************/

void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
{
        ath9k_hw_keyreset(sc->sc_ah, keyix);
        if (freeslot)
                clear_bit(keyix, sc->sc_keymap);
}

int ath_keyset(struct ath_softc *sc,
               u16 keyix,
               struct ath9k_keyval *hk,
               const u8 mac[ETH_ALEN])
{
        bool status;

        status = ath9k_hw_set_keycache_entry(sc->sc_ah,
                                             keyix, hk, mac, false);

        return status != false;
}
/***********************/
/* TX Power/Regulatory */
/***********************/

/*
 * Set Transmit power in HAL
 *
 * This routine makes the actual HAL calls to set the new transmit power
 * limit.
 */

void ath_update_txpow(struct ath_softc *sc)
{
        struct ath_hal *ah = sc->sc_ah;
        u32 txpow;

        if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
                ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
                /* read back in case value is clamped */
                ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
                sc->sc_curtxpow = txpow;
        }
}
/**************************/
/* Slow Antenna Diversity */
/**************************/

void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
                           struct ath_softc *sc,
                           int32_t rssitrig)
{
        int trig;

        /* antdivf_rssitrig can range from 40 - 0xff */
        trig = (rssitrig > 0xff) ? 0xff : rssitrig;
        trig = (trig < 40) ? 40 : trig; /* clamp the already-clamped value,
                                         * not the raw argument */

        antdiv->antdiv_sc = sc;
        antdiv->antdivf_rssitrig = trig;
}

void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
                            u8 num_antcfg,
                            const u8 *bssid)
{
        antdiv->antdiv_num_antcfg =
                num_antcfg < ATH_ANT_DIV_MAX_CFG ?
                num_antcfg : ATH_ANT_DIV_MAX_CFG;
        antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
        antdiv->antdiv_curcfg = 0;
        antdiv->antdiv_bestcfg = 0;
        antdiv->antdiv_laststatetsf = 0;

        memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));

        antdiv->antdiv_start = 1;
}
void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
{
        antdiv->antdiv_start = 0;
}

static int32_t ath_find_max_val(int32_t *val,
                                u8 num_val, u8 *max_index)
{
        u32 MaxVal = *val++;
        u32 cur_index = 0;

        *max_index = 0;
        while (++cur_index < num_val) {
                if (*val > MaxVal) {
                        MaxVal = *val;
                        *max_index = cur_index;
                }

                val++;
        }

        return MaxVal;
}
void ath_slow_ant_div(struct ath_antdiv *antdiv,
                      struct ieee80211_hdr *hdr,
                      struct ath_rx_status *rx_stats)
{
        struct ath_softc *sc = antdiv->antdiv_sc;
        struct ath_hal *ah = sc->sc_ah;
        u64 curtsf = 0;
        u8 bestcfg, curcfg = antdiv->antdiv_curcfg;
        __le16 fc = hdr->frame_control;

        if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
            && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
                antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
                antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
                curtsf = antdiv->antdiv_lastbtsf[curcfg];
        } else {
                return;
        }

        switch (antdiv->antdiv_state) {
        case ATH_ANT_DIV_IDLE:
                if ((antdiv->antdiv_lastbrssi[curcfg] <
                     antdiv->antdivf_rssitrig)
                    && ((curtsf - antdiv->antdiv_laststatetsf) >
                        ATH_ANT_DIV_MIN_IDLE_US)) {
                        curcfg++;
                        if (curcfg == antdiv->antdiv_num_antcfg)
                                curcfg = 0;

                        if (!ath9k_hw_select_antconfig(ah, curcfg)) {
                                antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
                                antdiv->antdiv_curcfg = curcfg;
                                antdiv->antdiv_laststatetsf = curtsf;
                                antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
                        }
                }
                break;

        case ATH_ANT_DIV_SCAN:
                if ((curtsf - antdiv->antdiv_laststatetsf) <
                    ATH_ANT_DIV_MIN_SCAN_US)
                        break;

                curcfg++;
                if (curcfg == antdiv->antdiv_num_antcfg)
                        curcfg = 0;

                if (curcfg == antdiv->antdiv_bestcfg) {
                        ath_find_max_val(antdiv->antdiv_lastbrssi,
                                         antdiv->antdiv_num_antcfg, &bestcfg);
                        if (!ath9k_hw_select_antconfig(ah, bestcfg)) {
                                antdiv->antdiv_bestcfg = bestcfg;
                                antdiv->antdiv_curcfg = bestcfg;
                                antdiv->antdiv_laststatetsf = curtsf;
                                antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
                        }
                } else {
                        if (!ath9k_hw_select_antconfig(ah, curcfg)) {
                                antdiv->antdiv_curcfg = curcfg;
                                antdiv->antdiv_laststatetsf = curtsf;
                                antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
                        }
                }

                break;
        }
}
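
/*
 * State machine summary: only beacons from the associated BSSID feed
 * the algorithm. In IDLE, once the current config's beacon RSSI drops
 * below the trigger and ATH_ANT_DIV_MIN_IDLE_US has elapsed, the next
 * antenna config is selected and we enter SCAN. In SCAN, each config
 * is dwelt on for at least ATH_ANT_DIV_MIN_SCAN_US; when the rotation
 * wraps back around to the starting config, the config with the best
 * recorded beacon RSSI wins and we return to IDLE.
 */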
/***********************/
/* Descriptor Handling */
/***********************/

/*
 * Set up DMA descriptors
 *
 * This function will allocate both the DMA descriptor structure, and the
 * buffers it contains. These are used to contain the descriptors used
 * by the system.
 */

int ath_descdma_setup(struct ath_softc *sc,
                      struct ath_descdma *dd,
                      struct list_head *head,
                      const char *name,
                      int nbuf,
                      int ndesc)
{
#define DS2PHYS(_dd, _ds)                                               \
        ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)

        struct ath_desc *ds;
        struct ath_buf *bf;
        int i, bsize, error;

        DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
                __func__, name, nbuf, ndesc);

        /* ath_desc must be a multiple of DWORDs */
        if ((sizeof(struct ath_desc) % 4) != 0) {
                DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
                        __func__);
                ASSERT((sizeof(struct ath_desc) % 4) == 0);
                error = -ENOMEM;
                goto fail;
        }

        dd->dd_name = name;
        dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;

        /*
         * Need additional DMA memory because we can't use
         * descriptors that cross the 4K page boundary. Assume
         * one skipped descriptor per 4K page.
         */
        if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                u32 ndesc_skipped =
                        ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
                u32 dma_len;

                while (ndesc_skipped) {
                        dma_len = ndesc_skipped * sizeof(struct ath_desc);
                        dd->dd_desc_len += dma_len;

                        ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
                }
        }

        /* allocate descriptors */
        dd->dd_desc = pci_alloc_consistent(sc->pdev,
                                           dd->dd_desc_len,
                                           &dd->dd_desc_paddr);
        if (dd->dd_desc == NULL) {
                error = -ENOMEM;
                goto fail;
        }
        ds = dd->dd_desc;
        DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
                __func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
                ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

        /* allocate buffers */
        bsize = sizeof(struct ath_buf) * nbuf;
        bf = kmalloc(bsize, GFP_KERNEL);
        if (bf == NULL) {
                error = -ENOMEM;
                goto fail2;
        }
        memset(bf, 0, bsize);
        dd->dd_bufptr = bf;

        INIT_LIST_HEAD(head);
        for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
                bf->bf_desc = ds;
                bf->bf_daddr = DS2PHYS(dd, ds);

                if (!(sc->sc_ah->ah_caps.hw_caps &
                      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                        /*
                         * Skip descriptor addresses which can cause 4KB
                         * boundary crossing (addr + length) with a 32 dword
                         * descriptor fetch.
                         */
                        while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
                                ASSERT((caddr_t) bf->bf_desc <
                                       ((caddr_t) dd->dd_desc +
                                        dd->dd_desc_len));

                                ds += ndesc;
                                bf->bf_desc = ds;
                                bf->bf_daddr = DS2PHYS(dd, ds);
                        }
                }
                list_add_tail(&bf->list, head);
        }
        return 0;
fail2:
        pci_free_consistent(sc->pdev,
                            dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
fail:
        memset(dd, 0, sizeof(*dd));
        return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}
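
/*
 * Boundary-check arithmetic: a 32-DWORD descriptor fetch is 128
 * (0x80) bytes, so ATH_DESC_4KB_BOUND_CHECK rejects any start offset
 * above 0xF7F within a 4KB page. A fetch starting at page offset
 * 0xF7F ends at 0xFFE and stays inside the page; starting at 0xF80,
 * addr + length reaches the 0x1000 boundary, which the macro
 * conservatively treats as a crossing and skips.
 */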
/*
 * Cleanup DMA descriptors
 *
 * This function will free the DMA block that was allocated for the descriptor
 * pool. Since this was allocated as one "chunk", it is freed in the same
 * manner.
 */

void ath_descdma_cleanup(struct ath_softc *sc,
                         struct ath_descdma *dd,
                         struct list_head *head)
{
        /* Free memory associated with descriptors */
        pci_free_consistent(sc->pdev,
                            dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);

        INIT_LIST_HEAD(head);
        kfree(dd->dd_bufptr);
        memset(dd, 0, sizeof(*dd));
}
/*************/
/* Utilities */
/*************/

int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
{
        int qnum;

        switch (queue) {
        case 0:
                qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
                break;
        case 1:
                qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
                break;
        case 2:
                qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
                break;
        case 3:
                qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
                break;
        default:
                qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
                break;
        }

        return qnum;
}
int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
{
        int qnum;

        switch (queue) {
        case ATH9K_WME_AC_VO:
                qnum = 0;
                break;
        case ATH9K_WME_AC_VI:
                qnum = 1;
                break;
        case ATH9K_WME_AC_BE:
                qnum = 2;
                break;
        case ATH9K_WME_AC_BK:
                qnum = 3;
                break;
        default:
                qnum = -1;
                break;
        }

        return qnum;
}
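
/*
 * These two helpers are inverses over the four WMM access categories:
 * mac80211 numbers its queues from highest to lowest priority
 * (0 = VO, 1 = VI, 2 = BE, 3 = BK), while the HAL queue number comes
 * from the sc_haltype2q[] table filled in by ath_tx_setup() during
 * ath_init(). Unknown mac80211 queues fall back to BE above; unknown
 * access categories map to -1.
 */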
/*
 * Expand time stamp to TSF
 *
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the current h/w TSF.
 */

u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
{
        u64 tsf;

        tsf = ath9k_hw_gettsf64(sc->sc_ah);
        if ((tsf & 0x7fff) < rstamp)
                tsf -= 0x8000;

        return (tsf & ~0x7fff) | rstamp;
}
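
/*
 * Worked example: if the current TSF is 0x12345 (low 15 bits 0x2345)
 * and the descriptor stamp is 0x7000, then 0x2345 < 0x7000 means the
 * stamp was taken before the last 15-bit rollover, so the TSF is
 * first stepped back by 0x8000 to 0xA345; splicing in the stamp gives
 * (0xA345 & ~0x7fff) | 0x7000 = 0xF000, which correctly precedes
 * 0x12345 by less than one rollover period.
 */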
/*
 * Set Default Antenna
 *
 * Call into the HAL to set the default antenna to use. Not really valid for
 * MIMO technology.
 */

void ath_setdefantenna(void *context, u32 antenna)
{
        struct ath_softc *sc = (struct ath_softc *)context;
        struct ath_hal *ah = sc->sc_ah;

        /* XXX block beacon interrupts */
        ath9k_hw_setantenna(ah, antenna);
        sc->sc_defant = antenna;
        sc->sc_rxotherant = 0;
}
/*
 * Set Slot Time
 *
 * This will wake up the chip if required, and set the slot time for the
 * frame (maximum transmit time). Slot time is assumed to be already set
 * in the ATH object member sc_slottime.
 */

void ath_setslottime(struct ath_softc *sc)
{
        ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
        sc->sc_updateslot = OK;
}