ath9k: Remove ath9k_rate_table
drivers/net/wireless/ath9k/core.c
/*
 * Copyright (c) 2008, Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "core.h"
#include "regd.h"

static u32 ath_chainmask_sel_up_rssi_thres =
	ATH_CHAINMASK_SEL_UP_RSSI_THRES;
static u32 ath_chainmask_sel_down_rssi_thres =
	ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
static u32 ath_chainmask_sel_period =
	ATH_CHAINMASK_SEL_TIMEOUT;
/* return bus cachesize in 4B word units */

static void bus_read_cachesize(struct ath_softc *sc, int *csz)
{
	u8 u8tmp;

	pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
	*csz = (int)u8tmp;

	/*
	 * This check was put in to avoid "unpleasant" consequences if
	 * the bootrom has not fully initialized all PCI devices.
	 * Sometimes the cache line size register is not set.
	 */

	if (*csz == 0)
		*csz = DEFAULT_CACHELINE >> 2;	/* Use the default size */
}
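/*
 * Unit note: the PCI cache line size register already reports the line
 * size in 4-byte words, so *csz needs no conversion, while
 * DEFAULT_CACHELINE is in bytes and is shifted right by 2 to match.
 * ath_init() converts back to bytes with "csz << 2" when setting
 * sc->sc_cachelsz.
 */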
static u8 parse_mpdudensity(u8 mpdudensity)
{
	/*
	 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
	 *   0 for no restriction
	 *   1 for 1/4 us
	 *   2 for 1/2 us
	 *   3 for 1 us
	 *   4 for 2 us
	 *   5 for 4 us
	 *   6 for 8 us
	 *   7 for 16 us
	 */
	switch (mpdudensity) {
	case 0:
		return 0;
	case 1:
	case 2:
	case 3:
		/* Our lower layer calculations limit our precision to
		   1 microsecond */
		return 1;
	case 4:
		return 2;
	case 5:
		return 4;
	case 6:
		return 8;
	case 7:
		return 16;
	default:
		return 0;
	}
}
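/*
 * Worked example: a peer advertising an ampdu_density of 5 requires at
 * least 4 us between MPDU starts, so this returns 4; densities 1-3 are
 * all rounded up to 1 us, the finest granularity the lower layers
 * track. ath_node_attach() stores the result in an->mpdudensity.
 */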
/*
 * Set current operating mode
 */
static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
{
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 * XXX select protection rate index from rate table.
	 */
	sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
}
/*
 * Set up rate table (legacy rates)
 */
static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
{
	struct ath_rate_table *rate_table = NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rate *rate;
	int i, maxrates;

	switch (band) {
	case IEEE80211_BAND_2GHZ:
		rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
		break;
	case IEEE80211_BAND_5GHZ:
		rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
		break;
	default:
		break;
	}

	if (rate_table == NULL)
		return;

	sband = &sc->sbands[band];
	rate = sc->rates[band];

	if (rate_table->rate_cnt > ATH_RATE_MAX)
		maxrates = ATH_RATE_MAX;
	else
		maxrates = rate_table->rate_cnt;

	for (i = 0; i < maxrates; i++) {
		rate[i].bitrate = rate_table->info[i].ratekbps / 100;
		rate[i].hw_value = rate_table->info[i].ratecode;
		sband->n_bitrates++;
		DPRINTF(sc, ATH_DBG_CONFIG,
			"%s: Rate: %2dMbps, ratecode: %2d\n",
			__func__,
			rate[i].bitrate / 10,
			rate[i].hw_value);
	}
}
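/*
 * Unit note: mac80211 expects ieee80211_rate.bitrate in units of
 * 100 kbps, hence the ratekbps / 100 conversion above; the debug print
 * divides by 10 once more to show whole Mbps.
 */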
/*
 * Set up channel list
 */
static int ath_setup_channels(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int nchan, i, a = 0, b = 0;
	u8 regclassids[ATH_REGCLASSIDS_MAX];
	u32 nregclass = 0;
	struct ieee80211_supported_band *band_2ghz;
	struct ieee80211_supported_band *band_5ghz;
	struct ieee80211_channel *chan_2ghz;
	struct ieee80211_channel *chan_5ghz;
	struct ath9k_channel *c;

	/* Fill in ah->ah_channels */
	if (!ath9k_regd_init_channels(ah, ATH_CHAN_MAX, (u32 *)&nchan,
				      regclassids, ATH_REGCLASSIDS_MAX,
				      &nregclass, CTRY_DEFAULT, false, 1)) {
		u32 rd = ah->ah_currentRD;
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to collect channel list; "
			"regdomain likely %u country code %u\n",
			__func__, rd, CTRY_DEFAULT);
		return -EINVAL;
	}

	band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
	band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
	chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
	chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];

	for (i = 0; i < nchan; i++) {
		c = &ah->ah_channels[i];
		if (IS_CHAN_2GHZ(c)) {
			chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
			chan_2ghz[a].center_freq = c->channel;
			chan_2ghz[a].max_power = c->maxTxPower;

			if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
				chan_2ghz[a].flags |= IEEE80211_CHAN_NO_IBSS;
			if (c->channelFlags & CHANNEL_PASSIVE)
				chan_2ghz[a].flags |= IEEE80211_CHAN_PASSIVE_SCAN;

			band_2ghz->n_channels = ++a;

			DPRINTF(sc, ATH_DBG_CONFIG,
				"%s: 2GHz channel: %d, "
				"channelFlags: 0x%x\n",
				__func__, c->channel, c->channelFlags);
		} else if (IS_CHAN_5GHZ(c)) {
			chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
			chan_5ghz[b].center_freq = c->channel;
			chan_5ghz[b].max_power = c->maxTxPower;

			if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
				chan_5ghz[b].flags |= IEEE80211_CHAN_NO_IBSS;
			if (c->channelFlags & CHANNEL_PASSIVE)
				chan_5ghz[b].flags |= IEEE80211_CHAN_PASSIVE_SCAN;

			band_5ghz->n_channels = ++b;

			DPRINTF(sc, ATH_DBG_CONFIG,
				"%s: 5GHz channel: %d, "
				"channelFlags: 0x%x\n",
				__func__, c->channel, c->channelFlags);
		}
	}

	return 0;
}
/*
 * Determine mode from channel flags
 *
 * This routine will provide the enumerated wireless_mode value based
 * on the settings of the channel flags. If no valid set of flags
 * exists, the lowest mode (11b) is selected.
 */

static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
{
	if (chan->chanmode == CHANNEL_A)
		return ATH9K_MODE_11A;
	else if (chan->chanmode == CHANNEL_G)
		return ATH9K_MODE_11G;
	else if (chan->chanmode == CHANNEL_B)
		return ATH9K_MODE_11B;
	else if (chan->chanmode == CHANNEL_A_HT20)
		return ATH9K_MODE_11NA_HT20;
	else if (chan->chanmode == CHANNEL_G_HT20)
		return ATH9K_MODE_11NG_HT20;
	else if (chan->chanmode == CHANNEL_A_HT40PLUS)
		return ATH9K_MODE_11NA_HT40PLUS;
	else if (chan->chanmode == CHANNEL_A_HT40MINUS)
		return ATH9K_MODE_11NA_HT40MINUS;
	else if (chan->chanmode == CHANNEL_G_HT40PLUS)
		return ATH9K_MODE_11NG_HT40PLUS;
	else if (chan->chanmode == CHANNEL_G_HT40MINUS)
		return ATH9K_MODE_11NG_HT40MINUS;

	WARN_ON(1); /* should not get here */

	return ATH9K_MODE_11B;
}
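/*
 * Naming note: the HT40PLUS/HT40MINUS suffixes indicate where the
 * 20 MHz extension channel sits relative to the control channel
 * (PLUS = above, MINUS = below).
 */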
/*
 * Set the current channel
 *
 * Set/change channels. If the channel is really being changed, it's done
 * by resetting the chip. To accomplish this we must first cleanup any pending
 * DMA, then restart things, a la ath_init.
 */
int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
{
	struct ath_hal *ah = sc->sc_ah;
	bool fastcc = true, stopped;

	if (sc->sc_flags & SC_OP_INVALID) /* the device is invalid or removed */
		return -EIO;

	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
		__func__,
		ath9k_hw_mhz2ieee(ah, sc->sc_ah->ah_curchan->channel,
				  sc->sc_ah->ah_curchan->channelFlags),
		sc->sc_ah->ah_curchan->channel,
		ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
		hchan->channel, hchan->channelFlags);

	if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
	    hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
	    (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
	    (sc->sc_flags & SC_OP_FULL_RESET)) {
		int status;
		/*
		 * This is only performed if the channel settings have
		 * actually changed.
		 *
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
		ath_draintxq(sc, false);	/* clear pending tx frames */
		stopped = ath_stoprecv(sc);	/* turn off frame recv */

		/* XXX: do not flush receive queue here. We don't want
		 * to flush data frames already in queue because of
		 * changing channel. */

		if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
			fastcc = false;

		spin_lock_bh(&sc->sc_resetlock);
		if (!ath9k_hw_reset(ah, hchan,
				    sc->sc_ht_info.tx_chan_width,
				    sc->sc_tx_chainmask,
				    sc->sc_rx_chainmask,
				    sc->sc_ht_extprotspacing,
				    fastcc, &status)) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to reset channel %u (%uMhz) "
				"flags 0x%x hal status %u\n", __func__,
				ath9k_hw_mhz2ieee(ah, hchan->channel,
						  hchan->channelFlags),
				hchan->channel, hchan->channelFlags, status);
			spin_unlock_bh(&sc->sc_resetlock);
			return -EIO;
		}
		spin_unlock_bh(&sc->sc_resetlock);

		sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
		sc->sc_flags &= ~SC_OP_FULL_RESET;

		/* Re-enable rx framework */
		if (ath_startrecv(sc) != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to restart recv logic\n",
				__func__);
			return -EIO;
		}

		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		ath_setcurmode(sc, ath_chan2mode(hchan));

		ath_update_txpow(sc);	/* update tx power state */

		/*
		 * Re-enable interrupts.
		 */
		ath9k_hw_set_interrupts(ah, sc->sc_imask);
	}
	return 0;
}
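/*
 * The fastcc flag asks the HAL for its lighter "fast channel change"
 * path instead of a full chip reset. It is only safe when rx was
 * cleanly stopped and no full reset was explicitly requested, which is
 * why it is cleared in those two cases above.
 */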
/**********************/
/* Chainmask Handling */
/**********************/

static void ath_chainmask_sel_timertimeout(unsigned long data)
{
	struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
	cm->switch_allowed = 1;
}

/* Start chainmask select timer */
static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
{
	cm->switch_allowed = 0;
	mod_timer(&cm->timer,
		  jiffies + msecs_to_jiffies(ath_chainmask_sel_period));
}

/* Stop chainmask select timer */
static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
{
	cm->switch_allowed = 0;
	del_timer_sync(&cm->timer);
}

static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

	memset(cm, 0, sizeof(struct ath_chainmask_sel));

	cm->cur_tx_mask = sc->sc_tx_chainmask;
	cm->cur_rx_mask = sc->sc_rx_chainmask;
	cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
	setup_timer(&cm->timer,
		    ath_chainmask_sel_timertimeout, (unsigned long) cm);
}
int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

	/*
	 * Disable auto-switching under any of the following conditions.
	 * sc_chainmask_auto_sel is the internal global enable/disable
	 * setting for auto-switching.
	 */
	if (sc->sc_ah->ah_caps.tx_chainmask != ATH_CHAINMASK_SEL_3X3) {
		cm->cur_tx_mask = sc->sc_tx_chainmask;
		return cm->cur_tx_mask;
	}

	if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
		return cm->cur_tx_mask;

	if (cm->switch_allowed) {
		/* Switch down from tx 3 to tx 2. */
		if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
		    ATH_RSSI_OUT(cm->tx_avgrssi) >=
		    ath_chainmask_sel_down_rssi_thres) {
			cm->cur_tx_mask = sc->sc_tx_chainmask;

			/* Don't let another switch happen until
			 * this timer expires */
			ath_chainmask_sel_timerstart(cm);
		}
		/* Switch up from tx 2 to 3. */
		else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
			 ATH_RSSI_OUT(cm->tx_avgrssi) <=
			 ath_chainmask_sel_up_rssi_thres) {
			cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;

			/* Don't let another switch happen
			 * until this timer expires */
			ath_chainmask_sel_timerstart(cm);
		}
	}

	return cm->cur_tx_mask;
}
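/*
 * The two RSSI thresholds above form a hysteresis band: with a strong
 * signal (rssi >= down threshold) two tx chains are enough, while a
 * weak signal (rssi <= up threshold) brings the third chain back for
 * extra transmit diversity. The timer keeps the mask from flapping
 * between the two states on every sample.
 */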
/*
 * Update tx/rx chainmask. For legacy association,
 * hard code chainmask to 1x1, for 11n association, use
 * the chainmask configuration.
 */

void ath_update_chainmask(struct ath_softc *sc, int is_ht)
{
	sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
	if (is_ht) {
		sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
		sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
	} else {
		sc->sc_tx_chainmask = 1;
		sc->sc_rx_chainmask = 1;
	}

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
		__func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
}
/*******/
/* ANI */
/*******/

/*
 * This routine performs the periodic noise floor calibration function
 * that is used to adjust and optimize the chip performance. This
 * takes environmental changes (location, temperature) into account.
 * When the task is complete, it reschedules itself depending on the
 * appropriate interval that was calculated.
 */

static void ath_ani_calibrate(unsigned long data)
{
	struct ath_softc *sc;
	struct ath_hal *ah;
	bool longcal = false;
	bool shortcal = false;
	bool aniflag = false;
	unsigned int timestamp = jiffies_to_msecs(jiffies);
	u32 cal_interval;

	sc = (struct ath_softc *)data;
	ah = sc->sc_ah;

	/*
	 * Don't calibrate when we're scanning.
	 * We are most likely not on our home channel.
	 */
	if (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)
		return;

	/* Long calibration runs independently of short calibration. */
	if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) {
		longcal = true;
		DPRINTF(sc, ATH_DBG_ANI, "%s: longcal @%lu\n",
			__func__, jiffies);
		sc->sc_ani.sc_longcal_timer = timestamp;
	}

	/* Short calibration applies only while sc_caldone is false */
	if (!sc->sc_ani.sc_caldone) {
		if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
		    ATH_SHORT_CALINTERVAL) {
			shortcal = true;
			DPRINTF(sc, ATH_DBG_ANI, "%s: shortcal @%lu\n",
				__func__, jiffies);
			sc->sc_ani.sc_shortcal_timer = timestamp;
			sc->sc_ani.sc_resetcal_timer = timestamp;
		}
	} else {
		if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
		    ATH_RESTART_CALINTERVAL) {
			ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
						&sc->sc_ani.sc_caldone);
			if (sc->sc_ani.sc_caldone)
				sc->sc_ani.sc_resetcal_timer = timestamp;
		}
	}

	/* Verify whether we must check ANI */
	if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
	    ATH_ANI_POLLINTERVAL) {
		aniflag = true;
		sc->sc_ani.sc_checkani_timer = timestamp;
	}

	/* Skip all processing if there's nothing to do. */
	if (longcal || shortcal || aniflag) {
		/* Call ANI routine if necessary */
		if (aniflag)
			ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
					     ah->ah_curchan);

		/* Perform calibration if necessary */
		if (longcal || shortcal) {
			bool iscaldone = false;

			if (ath9k_hw_calibrate(ah, ah->ah_curchan,
					       sc->sc_rx_chainmask, longcal,
					       &iscaldone)) {
				if (longcal)
					sc->sc_ani.sc_noise_floor =
						ath9k_hw_getchan_noise(ah,
							       ah->ah_curchan);

				DPRINTF(sc, ATH_DBG_ANI,
					"%s: calibrate chan %u/%x nf: %d\n",
					__func__,
					ah->ah_curchan->channel,
					ah->ah_curchan->channelFlags,
					sc->sc_ani.sc_noise_floor);
			} else {
				DPRINTF(sc, ATH_DBG_ANY,
					"%s: calibrate chan %u/%x failed\n",
					__func__,
					ah->ah_curchan->channel,
					ah->ah_curchan->channelFlags);
			}
			sc->sc_ani.sc_caldone = iscaldone;
		}
	}

	/*
	 * Set timer interval based on previous results.
	 * The interval must be the shortest necessary to satisfy ANI,
	 * short calibration and long calibration.
	 */

	cal_interval = ATH_ANI_POLLINTERVAL;
	if (!sc->sc_ani.sc_caldone)
		cal_interval = min(cal_interval, (u32)ATH_SHORT_CALINTERVAL);

	mod_timer(&sc->sc_ani.timer, jiffies + msecs_to_jiffies(cal_interval));
}
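/*
 * Rescheduling example: the timestamps above are kept in milliseconds,
 * so while initial calibration is still pending (sc_caldone == false)
 * the timer re-arms every min(ATH_ANI_POLLINTERVAL,
 * ATH_SHORT_CALINTERVAL) ms, and once calibration settles it drops
 * back to the slower ANI poll rate.
 */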
/********/
/* Core */
/********/

int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n",
		__func__, sc->sc_ah->ah_opmode);

	/* Reset SERDES registers */
	ath9k_hw_configpcipowersave(ah, 0);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */

	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, initial_chan,
			    sc->sc_ht_info.tx_chan_width,
			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing, false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset hardware; hal status %u "
			"(freq %u flags 0x%x)\n", __func__, status,
			initial_chan->channel, initial_chan->channelFlags);
		error = -EIO;
		spin_unlock_bh(&sc->sc_resetlock);
		goto done;
	}
	spin_unlock_bh(&sc->sc_resetlock);

	/*
	 * This is needed only to setup initial state
	 * but it's best done after a reset.
	 */
	ath_update_txpow(sc);

	/*
	 * Setup the hardware after reset:
	 * The receive engine is set going.
	 * Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);
		error = -EIO;
		goto done;
	}

	/* Setup our intr mask. */
	sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
		| ATH9K_INT_RXEOL | ATH9K_INT_RXORN
		| ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;

	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
		sc->sc_imask |= ATH9K_INT_GTT;

	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
		sc->sc_imask |= ATH9K_INT_CST;

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (ath9k_hw_phycounters(ah) &&
	    ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
	     (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
		sc->sc_imask |= ATH9K_INT_MIB;
	/*
	 * Some hardware processes the TIM IE and fires an
	 * interrupt when the TIM bit is set. For hardware
	 * that does, if not overridden by configuration,
	 * enable the TIM interrupt when operating as station.
	 */
	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
	    (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
	    !sc->sc_config.swBeaconProcess)
		sc->sc_imask |= ATH9K_INT_TIM;

	ath_setcurmode(sc, ath_chan2mode(initial_chan));

	sc->sc_flags &= ~SC_OP_INVALID;

	/* Disable BMISS interrupt when we're not associated */
	sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
	ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);

	ieee80211_wake_queues(sc->hw);
done:
	return error;
}
void ath_stop(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: Cleaning up\n", __func__);

	ieee80211_stop_queues(sc->hw);

	/* make sure h/w will not generate any interrupt
	 * before setting the invalid flag. */
	ath9k_hw_set_interrupts(ah, 0);

	if (!(sc->sc_flags & SC_OP_INVALID)) {
		ath_draintxq(sc, false);
		ath_stoprecv(sc);
		ath9k_hw_phy_disable(ah);
	} else
		sc->sc_rxlink = NULL;

#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
		cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
#endif
	/* disable HAL and put h/w to sleep */
	ath9k_hw_disable(sc->sc_ah);
	ath9k_hw_configpcipowersave(sc->sc_ah, 1);

	sc->sc_flags |= SC_OP_INVALID;
}
int ath_reset(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;

	ath9k_hw_set_interrupts(ah, 0);
	ath_draintxq(sc, retry_tx);
	ath_stoprecv(sc);
	ath_flushrecv(sc);

	/* Reset chip */
	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan,
			    sc->sc_ht_info.tx_chan_width,
			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing, false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset hardware; hal status %u\n",
			__func__, status);
		error = -EIO;
	}
	spin_unlock_bh(&sc->sc_resetlock);

	if (ath_startrecv(sc) != 0)
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);

	/*
	 * We may be doing a reset in response to a request
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_setcurmode(sc, ath_chan2mode(sc->sc_ah->ah_curchan));

	ath_update_txpow(sc);

	if (sc->sc_flags & SC_OP_BEACONS)
		ath_beacon_config(sc, ATH_IF_ID_ANY);	/* restart beacons */

	ath9k_hw_set_interrupts(ah, sc->sc_imask);

	/* Restart the txq */
	if (retry_tx) {
		int i;
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				spin_lock_bh(&sc->sc_txq[i].axq_lock);
				ath_txq_schedule(sc, &sc->sc_txq[i]);
				spin_unlock_bh(&sc->sc_txq[i].axq_lock);
			}
		}
	}

	return error;
}
/* Interrupt handler. Most of the actual processing is deferred.
 * It's the caller's responsibility to ensure the chip is awake. */

irqreturn_t ath_isr(int irq, void *dev)
{
	struct ath_softc *sc = dev;
	struct ath_hal *ah = sc->sc_ah;
	enum ath9k_int status;
	bool sched = false;

	do {
		if (sc->sc_flags & SC_OP_INVALID) {
			/*
			 * The hardware is not ready/present, don't
			 * touch anything. Note this can happen early
			 * on if the IRQ is shared.
			 */
			return IRQ_NONE;
		}
		if (!ath9k_hw_intrpend(ah)) {	/* shared irq, not for us */
			return IRQ_NONE;
		}

		/*
		 * Figure out the reason(s) for the interrupt. Note
		 * that the hal returns a pseudo-ISR that may include
		 * bits we haven't explicitly enabled so we mask the
		 * value to ensure we only process bits we requested.
		 */
		ath9k_hw_getisr(ah, &status);	/* NB: clears ISR too */

		status &= sc->sc_imask;	/* discard unasked-for bits */

		/*
		 * If there are no status bits set, then this interrupt was not
		 * for me (should have been caught above).
		 */

		if (!status)
			return IRQ_NONE;

		sc->sc_intrstatus = status;

		if (status & ATH9K_INT_FATAL) {
			/* need a chip reset */
			sched = true;
		} else if (status & ATH9K_INT_RXORN) {
			/* need a chip reset */
			sched = true;
		} else {
			if (status & ATH9K_INT_SWBA) {
				/* schedule a tasklet for beacon handling */
				tasklet_schedule(&sc->bcon_tasklet);
			}
			if (status & ATH9K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link
				 * when RXE bit is written, but it doesn't
				 * work at least on older hardware revs.
				 */
				sched = true;
			}

			if (status & ATH9K_INT_TXURN)
				/* bump tx trigger level */
				ath9k_hw_updatetxtriglevel(ah, true);
			/* XXX: optimize this */
			if (status & ATH9K_INT_RX)
				sched = true;
			if (status & ATH9K_INT_TX)
				sched = true;
			if (status & ATH9K_INT_BMISS)
				sched = true;
			/* carrier sense timeout */
			if (status & ATH9K_INT_CST)
				sched = true;
			if (status & ATH9K_INT_MIB) {
				/*
				 * Disable interrupts until we service the MIB
				 * interrupt; otherwise it will continue to
				 * fire.
				 */
				ath9k_hw_set_interrupts(ah, 0);
				/*
				 * Let the hal handle the event. We assume
				 * it will clear whatever condition caused
				 * the interrupt.
				 */
				ath9k_hw_procmibevent(ah, &sc->sc_halstats);
				ath9k_hw_set_interrupts(ah, sc->sc_imask);
			}
			if (status & ATH9K_INT_TIM_TIMER) {
				if (!(ah->ah_caps.hw_caps &
				      ATH9K_HW_CAP_AUTOSLEEP)) {
					/* Clear RxAbort bit so that we can
					 * receive frames */
					ath9k_hw_setrxabort(ah, 0);
					sched = true;
				}
			}
		}
	} while (0);

	if (sched) {
		/* turn off every interrupt except SWBA */
		ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
		tasklet_schedule(&sc->intr_tq);
	}

	return IRQ_HANDLED;
}
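/*
 * Deferral scheme: the hard irq above only classifies the interrupt;
 * when real work is pending it masks everything but SWBA and hands off
 * to ath9k_tasklet(), which restores sc_imask once it has drained the
 * rx/tx work. Beacon (SWBA) handling gets its own tasklet so it is
 * never delayed behind data-path processing.
 */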
/* Deferred interrupt processing */

static void ath9k_tasklet(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	u32 status = sc->sc_intrstatus;

	if (status & ATH9K_INT_FATAL) {
		/* need a chip reset */
		ath_reset(sc, false);
		return;
	} else {

		if (status &
		    (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
			/* XXX: fill me in */
			/*
			if (status & ATH9K_INT_RXORN) {
			}
			if (status & ATH9K_INT_RXEOL) {
			}
			*/
			spin_lock_bh(&sc->sc_rxflushlock);
			ath_rx_tasklet(sc, 0);
			spin_unlock_bh(&sc->sc_rxflushlock);
		}
		/* XXX: optimize this */
		if (status & ATH9K_INT_TX)
			ath_tx_tasklet(sc);
		/* XXX: fill me in */
		/*
		if (status & ATH9K_INT_BMISS) {
		}
		if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) {
			if (status & ATH9K_INT_TIM) {
			}
			if (status & ATH9K_INT_DTIMSYNC) {
			}
		}
		*/
	}

	/* re-enable hardware interrupt */
	ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
}
int ath_init(u16 devid, struct ath_softc *sc)
{
	struct ath_hal *ah = NULL;
	int status;
	int error = 0, i;
	int csz = 0;

	/* XXX: hardware will not be ready until ath_open() is called */
	sc->sc_flags |= SC_OP_INVALID;
	sc->sc_debug = DBG_DEFAULT;

	spin_lock_init(&sc->sc_resetlock);
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	bus_read_cachesize(sc, &csz);
	/* XXX assert csz is non-zero */
	sc->sc_cachelsz = csz << 2;	/* convert to bytes */

	ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
	if (ah == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to attach hardware; HAL status %u\n",
			__func__, status);
		error = -ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;

	/* Get the hardware key cache size. */
	sc->sc_keymax = ah->ah_caps.keycache_size;
	if (sc->sc_keymax > ATH_KEYMAX) {
		DPRINTF(sc, ATH_DBG_KEYCACHE,
			"%s: Warning, using only %u entries in %u key cache\n",
			__func__, ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath9k_hw_keyreset(ah, (u16) i);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use. If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 * XXX only for splitmic.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		set_bit(i, sc->sc_keymap);
		set_bit(i + 32, sc->sc_keymap);
		set_bit(i + 64, sc->sc_keymap);
		set_bit(i + 32 + 64, sc->sc_keymap);
	}

	/* Collect the channel list using the default country code */

	error = ath_setup_channels(sc);
	if (error)
		goto bad;

	/* default to MONITOR mode */
	sc->sc_ah->ah_opmode = ATH9K_M_MONITOR;

	/* Setup rate tables */

	ath_rate_attach(sc);
	ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
	ath_setup_rates(sc, IEEE80211_BAND_5GHZ);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority. Note that the hal handles resetting
	 * these queues at the needed time.
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == -1) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup a beacon xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup CAB xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}

	sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
		sc->sc_haltype2q[i] = -1;

	/* Setup data queues */
	/* NB: ensure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BK traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}

	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BE traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VI traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VO traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}

	/* Initializes the noise floor to a reasonable default value.
	 * Later on this will be updated during ANI processing. */

	sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;
	setup_timer(&sc->sc_ani.timer, ath_ani_calibrate, (unsigned long)sc);

	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)) {
		/*
		 * Whether we should enable h/w TKIP MIC.
		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
		 * report WMM capable, so it's always safe to turn on
		 * TKIP MIC in this case.
		 */
		ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
				       0, 1, NULL);
	}

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				      ATH9K_CIPHER_MIC, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
				      0, NULL))
		sc->sc_splitmic = 1;

	/* turn on mcast key search if possible */
	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
		(void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
					     1, NULL);

	sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
	sc->sc_config.txpowlimit_override = 0;

	/* 11n Capabilities */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

	ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
	sc->sc_defant = ath9k_hw_getdefantenna(ah);

	ath9k_hw_getmac(ah, sc->sc_myaddr);
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
		ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
		ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
	}

	sc->sc_slottime = ATH9K_SLOT_TIME_9;	/* default to short slot time */

	/* initialize beacon slots */
	for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
		sc->sc_bslot[i] = ATH_IF_ID_ANY;

	/* save MISC configurations */
	sc->sc_config.swBeaconProcess = 1;

#ifdef CONFIG_SLOW_ANT_DIV
	/* range is 40 - 255, we use something in the middle */
	ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
#endif

	/* setup channels and rates */

	sc->sbands[IEEE80211_BAND_2GHZ].channels =
		sc->channels[IEEE80211_BAND_2GHZ];
	sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
		sc->rates[IEEE80211_BAND_2GHZ];
	sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;

	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes)) {
		sc->sbands[IEEE80211_BAND_5GHZ].channels =
			sc->channels[IEEE80211_BAND_5GHZ];
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			sc->rates[IEEE80211_BAND_5GHZ];
		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
	}

	return 0;
bad2:
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
bad:
	if (ah)
		ath9k_hw_detach(ah);

	return error;
}
/*******************/
/* Node Management */
/*******************/

void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR)
		ath_tx_node_init(sc, an);

	an->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
			     sta->ht_cap.ampdu_factor);
	an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);

	ath_chainmask_sel_init(sc, an);
	ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
}
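/*
 * Sizing example (assuming the usual IEEE80211_HTCAP_MAXRXAMPDU_FACTOR
 * value of 13): a peer advertising ampdu_factor 3 gets
 * maxampdu = 1 << (13 + 3) = 64 KB, the largest A-MPDU the aggregation
 * code will build for it.
 */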
void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;

	ath_chainmask_sel_timerstop(&an->an_chainmask_sel);

	if (sc->sc_flags & SC_OP_TXAGGR)
		ath_tx_node_cleanup(sc, an);
}
/*
 * Set up New Node
 *
 * Setup driver-specific state for a newly associated node. This routine
 * really only applies if compression or XR are enabled; there is no code
 * covering any other cases.
 */

void ath_newassoc(struct ath_softc *sc,
		  struct ath_node *an, int isnew, int isuapsd)
{
	int tidno;

	/* if station reassociates, tear down the aggregation state. */
	if (!isnew) {
		for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_tx_aggr_teardown(sc, an, tidno);
		}
	}
}
/**************/
/* Encryption */
/**************/

void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
{
	ath9k_hw_keyreset(sc->sc_ah, keyix);
	if (freeslot)
		clear_bit(keyix, sc->sc_keymap);
}

int ath_keyset(struct ath_softc *sc,
	       u16 keyix,
	       struct ath9k_keyval *hk,
	       const u8 mac[ETH_ALEN])
{
	bool status;

	status = ath9k_hw_set_keycache_entry(sc->sc_ah,
					     keyix, hk, mac, false);

	return status != false;
}
/***********************/
/* TX Power/Regulatory */
/***********************/

/*
 * Set Transmit power in HAL
 *
 * This routine makes the actual HAL calls to set the new transmit power
 * limit.
 */

void ath_update_txpow(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 txpow;

	if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
		ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
		/* read back in case value is clamped */
		ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
		sc->sc_curtxpow = txpow;
	}
}
/**************************/
/* Slow Antenna Diversity */
/**************************/

void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
			   struct ath_softc *sc,
			   int32_t rssitrig)
{
	int trig;

	/* antdivf_rssitrig can range from 40 - 0xff */
	trig = (rssitrig > 0xff) ? 0xff : rssitrig;
	trig = (trig < 40) ? 40 : trig;

	antdiv->antdiv_sc = sc;
	antdiv->antdivf_rssitrig = trig;
}

void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
			    u8 num_antcfg,
			    const u8 *bssid)
{
	antdiv->antdiv_num_antcfg =
		num_antcfg < ATH_ANT_DIV_MAX_CFG ?
		num_antcfg : ATH_ANT_DIV_MAX_CFG;
	antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
	antdiv->antdiv_curcfg = 0;
	antdiv->antdiv_bestcfg = 0;
	antdiv->antdiv_laststatetsf = 0;

	memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));

	antdiv->antdiv_start = 1;
}

void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
{
	antdiv->antdiv_start = 0;
}
static int32_t ath_find_max_val(int32_t *val,
				u8 num_val, u8 *max_index)
{
	int32_t max_val = *val++;
	u32 cur_index = 0;

	*max_index = 0;
	while (++cur_index < num_val) {
		if (*val > max_val) {
			max_val = *val;
			*max_index = cur_index;
		}
		val++;
	}

	return max_val;
}
void ath_slow_ant_div(struct ath_antdiv *antdiv,
		      struct ieee80211_hdr *hdr,
		      struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = antdiv->antdiv_sc;
	struct ath_hal *ah = sc->sc_ah;
	u64 curtsf = 0;
	u8 bestcfg, curcfg = antdiv->antdiv_curcfg;
	__le16 fc = hdr->frame_control;

	if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
	    && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
		antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
		antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
		curtsf = antdiv->antdiv_lastbtsf[curcfg];
	} else {
		return;
	}

	switch (antdiv->antdiv_state) {
	case ATH_ANT_DIV_IDLE:
		if ((antdiv->antdiv_lastbrssi[curcfg] <
		     antdiv->antdivf_rssitrig)
		    && ((curtsf - antdiv->antdiv_laststatetsf) >
			ATH_ANT_DIV_MIN_IDLE_US)) {

			curcfg++;
			if (curcfg == antdiv->antdiv_num_antcfg)
				curcfg = 0;

			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}
		break;

	case ATH_ANT_DIV_SCAN:
		if ((curtsf - antdiv->antdiv_laststatetsf) <
		    ATH_ANT_DIV_MIN_SCAN_US)
			break;

		curcfg++;
		if (curcfg == antdiv->antdiv_num_antcfg)
			curcfg = 0;

		if (curcfg == antdiv->antdiv_bestcfg) {
			ath_find_max_val(antdiv->antdiv_lastbrssi,
					 antdiv->antdiv_num_antcfg, &bestcfg);
			if (!ath9k_hw_select_antconfig(ah, bestcfg)) {
				antdiv->antdiv_bestcfg = bestcfg;
				antdiv->antdiv_curcfg = bestcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
			}
		} else {
			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}

		break;
	}
}
/***********************/
/* Descriptor Handling */
/***********************/

/*
 * Set up DMA descriptors
 *
 * This function will allocate both the DMA descriptor structure and the
 * buffers it contains. These are used to hold the descriptors used
 * by the system.
 */

int ath_descdma_setup(struct ath_softc *sc,
		      struct ath_descdma *dd,
		      struct list_head *head,
		      const char *name,
		      int nbuf,
		      int ndesc)
{
#define	DS2PHYS(_dd, _ds)						\
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)

	struct ath_desc *ds;
	struct ath_buf *bf;
	int i, bsize, error;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
		__func__, name, nbuf, ndesc);

	/* ath_desc must be a multiple of DWORDs */
	if ((sizeof(struct ath_desc) % 4) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
			__func__);
		ASSERT((sizeof(struct ath_desc) % 4) == 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_name = name;
	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * sizeof(struct ath_desc);
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = pci_alloc_consistent(sc->pdev,
					   dd->dd_desc_len,
					   &dd->dd_desc_paddr);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = dd->dd_desc;
	DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
		__func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kmalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	memset(bf, 0, bsize);
	dd->dd_bufptr = bf;

	INIT_LIST_HEAD(head);
	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->ah_caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				ASSERT((caddr_t) bf->bf_desc <
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += ndesc;
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	pci_free_consistent(sc->pdev,
			    dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}
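/*
 * Boundary-skip example: a descriptor whose bus address ends in
 * 0xF80-0xFFF would cross a 4KB page boundary during a 32-dword
 * (128-byte) fetch, hence the "> 0xF7F" test in
 * ATH_DESC_4KB_BOUND_CHECK; such slots are simply skipped, and the
 * over-allocation loop above pre-pays for one skipped descriptor per
 * 4KB page.
 */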
/*
 * Cleanup DMA descriptors
 *
 * This function will free the DMA block that was allocated for the descriptor
 * pool. Since this was allocated as one "chunk", it is freed in the same
 * manner.
 */

void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	/* Free memory associated with descriptors */
	pci_free_consistent(sc->pdev,
			    dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}
/*************/
/* Utilities */
/*************/

int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
{
	int qnum;

	switch (queue) {
	case 0:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
		break;
	case 1:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
		break;
	case 2:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
		break;
	case 3:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
		break;
	default:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
		break;
	}

	return qnum;
}

int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
{
	int qnum;

	switch (queue) {
	case ATH9K_WME_AC_VO:
		qnum = 0;
		break;
	case ATH9K_WME_AC_VI:
		qnum = 1;
		break;
	case ATH9K_WME_AC_BE:
		qnum = 2;
		break;
	case ATH9K_WME_AC_BK:
		qnum = 3;
		break;
	default:
		qnum = -1;
		break;
	}

	return qnum;
}
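/*
 * Mapping note: mac80211 numbers its queues from highest to lowest
 * priority, so the two helpers above translate 0/1/2/3 <-> VO/VI/BE/BK
 * in both directions, with unknown mac80211 access categories
 * defaulting to the best-effort h/w queue.
 */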
/*
 * Expand time stamp to TSF
 *
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the current h/w TSF.
 */

u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
{
	u64 tsf;

	tsf = ath9k_hw_gettsf64(sc->sc_ah);
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;
	return (tsf & ~0x7fff) | rstamp;
}
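/*
 * Worked example: if the current TSF is 0x1_2340_0100 and the rx
 * descriptor stamp is 0x7ff0, then (tsf & 0x7fff) = 0x0100 < 0x7ff0,
 * meaning the low 15 bits wrapped after the frame arrived; backing the
 * TSF up by 0x8000 before splicing in the stamp yields the correct,
 * slightly earlier value 0x1_233F_FFF0 instead of a time in the
 * future.
 */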
/*
 * Set Default Antenna
 *
 * Call into the HAL to set the default antenna to use. Not really valid for
 * MIMO technology.
 */

void ath_setdefantenna(void *context, u32 antenna)
{
	struct ath_softc *sc = (struct ath_softc *)context;
	struct ath_hal *ah = sc->sc_ah;

	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(ah, antenna);
	sc->sc_defant = antenna;
	sc->sc_rxotherant = 0;
}
/*
 * Set Slot Time
 *
 * This will wake up the chip if required, and set the slot time for the
 * frame (maximum transmit time). Slot time is assumed to be already set
 * in the ATH object member sc_slottime.
 */

void ath_setslottime(struct ath_softc *sc)
{
	ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
	sc->sc_updateslot = OK;
}