ath9k: Fix TX queue draining
[linux-2.6/libata-dev.git] / drivers/net/wireless/ath/ath9k/mac.c
blob efc420cd42bf91322f23cf711e638b3f7e8906c2
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "hw.h"

static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
					struct ath9k_tx_queue_info *qi)
{
	ath_print(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
		  "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		  ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
		  ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
		  ah->txurn_interrupt_mask);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
		  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
		  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));
	REG_RMW_FIELD(ah, AR_IMR_S2,
		      AR_IMR_S2_QCU_TXURN, ah->txurn_interrupt_mask);
}

u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
	ath_print(ath9k_hw_common(ah), ATH_DBG_QUEUE,
		  "Enable TXE on queue: %u\n", q);
	REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
	u32 npend;

	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {
		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;
	}

	return npend;
}
EXPORT_SYMBOL(ath9k_hw_numtxpending);
/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether or not the frame trigger level should be updated
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first).
 *
 * Care must be taken to set the frame trigger level based on the DMA
 * request size. For example, if the DMA request size is set to 128 bytes
 * the trigger level cannot exceed 6 * 64 = 384 bytes: there needs to be
 * enough space in the TX FIFO for the requested transfer size, so the
 * TX FIFO stalls once 512 - 128 = 384 bytes are queued. If we set the
 * threshold to a value beyond 6, the transmit will hang.
 *
 * Current dual stream devices have a PCU TX FIFO size of 8 KB.
 * Current single stream devices have a PCU TX FIFO size of 4 KB; however,
 * a hardware issue forces us to use 2 KB instead, so the frame trigger
 * level must not exceed 2 KB for these chipsets.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
	u32 txcfg, curLevel, newLevel;
	enum ath9k_int omask;

	if (ah->tx_trig_level >= ah->config.max_txtrig_level)
		return false;

	omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < ah->config.max_txtrig_level)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_set_interrupts(ah, omask);

	ah->tx_trig_level = newLevel;

	return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
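
/*
 * Worked example of the trigger-level constraint described above
 * (editorial sketch; "fifo_window" and "dma_req_size" are illustrative
 * names, not fields of struct ath_hw):
 *
 *	max_trig_level = (fifo_window - dma_req_size) / 64;
 *
 * With a 512-byte FIFO window and a 128-byte DMA request size this
 * gives (512 - 128) / 64 = 6, matching the 6 * 64 = 384 byte limit in
 * the comment above; programming a larger AR_FTRIG value risks a TX
 * hang.
 */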
bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT	4000	/* usec */
#define ATH9K_TIME_QUANTUM		100	/* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;
	u32 tsfLow, j, wait;
	u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
			  "inactive queue: %u\n", q);
		return false;
	}

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	for (wait = wait_time; wait != 0; wait--) {
		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
		udelay(ATH9K_TIME_QUANTUM);
	}

	if (ath9k_hw_numtxpending(ah, q)) {
		ath_print(common, ATH_DBG_QUEUE,
			  "%s: Num of pending TX Frames %d on Q %d\n",
			  __func__, ath9k_hw_numtxpending(ah, q), q);

		for (j = 0; j < 2; j++) {
			tsfLow = REG_READ(ah, AR_TSF_L32);
			REG_WRITE(ah, AR_QUIET2,
				  SM(10, AR_QUIET2_QUIET_DUR));
			REG_WRITE(ah, AR_QUIET_PERIOD, 100);
			REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
			REG_SET_BIT(ah, AR_TIMER_MODE,
				    AR_QUIET_TIMER_EN);

			if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
				break;

			ath_print(common, ATH_DBG_QUEUE,
				  "TSF has moved while trying to set "
				  "quiet time TSF: 0x%08x\n", tsfLow);
		}

		REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);

		udelay(200);
		REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);

		wait = wait_time;
		while (ath9k_hw_numtxpending(ah, q)) {
			if ((--wait) == 0) {
				ath_print(common, ATH_DBG_FATAL,
					  "Failed to stop TX DMA in 4 msec "
					  "after killing last frame\n");
				break;
			}
			udelay(ATH9K_TIME_QUANTUM);
		}

		REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	}

	REG_WRITE(ah, AR_Q_TXD, 0);
	return wait != 0;
#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stoptxdma);
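
/*
 * Minimal drain sketch using the helpers above (editorial example,
 * not part of the original file; drain_txq_example() is a
 * hypothetical wrapper):
 *
 *	static bool drain_txq_example(struct ath_hw *ah, u32 q)
 *	{
 *		if (!ath9k_hw_stoptxdma(ah, q))
 *			return false;
 *		return ath9k_hw_numtxpending(ah, q) == 0;
 *	}
 *
 * A caller would typically loop this over every queue that is not
 * ATH9K_TX_QUEUE_INACTIVE before a chip reset, then re-arm each queue
 * with ath9k_hw_txstart() afterwards.
 */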
void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
			 u32 segLen, bool firstSeg,
			 bool lastSeg, const struct ath_desc *ds0)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if (firstSeg) {
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
	} else if (lastSeg) {
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen;
		ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
	} else {
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen | AR_TxMore;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}
EXPORT_SYMBOL(ath9k_hw_filltxdesc);
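
/*
 * Chaining sketch for a two-segment frame (editorial example; ds0/ds1
 * stand for descriptors already initialized via
 * ath9k_hw_set11n_txdesc()):
 *
 *	ath9k_hw_filltxdesc(ah, ds0, len0, true, false, ds0);
 *	ath9k_hw_filltxdesc(ah, ds1, len1, false, true, ds0);
 *
 * The first segment keeps AR_TxMore set so the hardware fetches the
 * next descriptor; the last segment clears AR_TxMore and copies
 * ds_ctl2/ds_ctl3 (the rate series) from the first descriptor.
 */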
void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}
EXPORT_SYMBOL(ath9k_hw_cleartxdesc);

int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if ((ads->ds_txstatus9 & AR_TxDone) == 0)
		return -EINPROGRESS;

	ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
	ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
	ds->ds_txstat.ts_status = 0;
	ds->ds_txstat.ts_flags = 0;

	if (ads->ds_txstatus1 & AR_FrmXmitOK)
		ds->ds_txstat.ts_status |= ATH9K_TX_ACKED;
	if (ads->ds_txstatus1 & AR_ExcessiveRetries)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
	if (ads->ds_txstatus1 & AR_Filtered)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
	if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
		ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus9 & AR_TxOpExceeded)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
	if (ads->ds_txstatus1 & AR_TxTimerExpired)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;

	if (ads->ds_txstatus1 & AR_DescCfgErr)
		ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
	if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus0 & AR_TxBaStatus) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
		ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
		ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
	}

	ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
	switch (ds->ds_txstat.ts_rateindex) {
	case 0:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
		break;
	case 2:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
		break;
	case 3:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
		break;
	}

	ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
	ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
	ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
	ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
	ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
	ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
	ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
	ds->ds_txstat.evm0 = ads->AR_TxEVM0;
	ds->ds_txstat.evm1 = ads->AR_TxEVM1;
	ds->ds_txstat.evm2 = ads->AR_TxEVM2;
	ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
	ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
	ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
	ds->ds_txstat.ts_antenna = 0;

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_txprocdesc);

void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
			    u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
			    u32 keyIx, enum ath9k_key_type keyType, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	txPower += ah->txpower_indexoffset;
	if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		| (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
		| SM(txPower, AR_XmitPower)
		| (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
		| (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		| (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
		| (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);

	ads->ds_ctl1 =
		(keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
		| SM(type, AR_FrameType)
		| (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
		| (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
		| (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);

	ads->ds_ctl6 = SM(keyType, AR_EncrType);

	if (AR_SREV_9285(ah)) {
		ads->ds_ctl8 = 0;
		ads->ds_ctl9 = 0;
		ads->ds_ctl10 = 0;
		ads->ds_ctl11 = 0;
	}
}
EXPORT_SYMBOL(ath9k_hw_set11n_txdesc);

void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
				  struct ath_desc *lastds,
				  u32 durUpdateEn, u32 rtsctsRate,
				  u32 rtsctsDuration,
				  struct ath9k_11n_rate_series series[],
				  u32 nseries, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ar5416_desc *last_ads = AR5416DESC(lastds);
	u32 ds_ctl0;

	if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
		ds_ctl0 = ads->ds_ctl0;

		if (flags & ATH9K_TXDESC_RTSENA) {
			ds_ctl0 &= ~AR_CTSEnable;
			ds_ctl0 |= AR_RTSEnable;
		} else {
			ds_ctl0 &= ~AR_RTSEnable;
			ds_ctl0 |= AR_CTSEnable;
		}

		ads->ds_ctl0 = ds_ctl0;
	} else {
		ads->ds_ctl0 =
			(ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
	}

	ads->ds_ctl2 = set11nTries(series, 0)
		| set11nTries(series, 1)
		| set11nTries(series, 2)
		| set11nTries(series, 3)
		| (durUpdateEn ? AR_DurUpdateEna : 0)
		| SM(0, AR_BurstDur);

	ads->ds_ctl3 = set11nRate(series, 0)
		| set11nRate(series, 1)
		| set11nRate(series, 2)
		| set11nRate(series, 3);

	ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
		| set11nPktDurRTSCTS(series, 1);

	ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
		| set11nPktDurRTSCTS(series, 3);

	ads->ds_ctl7 = set11nRateFlags(series, 0)
		| set11nRateFlags(series, 1)
		| set11nRateFlags(series, 2)
		| set11nRateFlags(series, 3)
		| SM(rtsctsRate, AR_RTSCTSRate);
	last_ads->ds_ctl2 = ads->ds_ctl2;
	last_ads->ds_ctl3 = ads->ds_ctl3;
}
EXPORT_SYMBOL(ath9k_hw_set11n_ratescenario);

void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
				u32 aggrLen)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
	ads->ds_ctl6 &= ~AR_AggrLen;
	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
}
EXPORT_SYMBOL(ath9k_hw_set11n_aggr_first);

void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
				 u32 numDelims)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	unsigned int ctl6;

	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);

	ctl6 = ads->ds_ctl6;
	ctl6 &= ~AR_PadDelim;
	ctl6 |= SM(numDelims, AR_PadDelim);
	ads->ds_ctl6 = ctl6;
}
EXPORT_SYMBOL(ath9k_hw_set11n_aggr_middle);

void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 |= AR_IsAggr;
	ads->ds_ctl1 &= ~AR_MoreAggr;
	ads->ds_ctl6 &= ~AR_PadDelim;
}
EXPORT_SYMBOL(ath9k_hw_set11n_aggr_last);

void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
}
EXPORT_SYMBOL(ath9k_hw_clr11n_aggr);

void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
				   u32 burstDuration)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl2 &= ~AR_BurstDur;
	ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
}
EXPORT_SYMBOL(ath9k_hw_set11n_burstduration);

void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
				     u32 vmf)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if (vmf)
		ads->ds_ctl0 |= AR_VirtMoreFrag;
	else
		ads->ds_ctl0 &= ~AR_VirtMoreFrag;
}

void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
	*txqs &= ah->intr_txqs;
	ah->intr_txqs &= ~(*txqs);
}
EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);

bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
			  "inactive queue: %u\n", q);
		return false;
	}

	ath_print(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);
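
/*
 * Note on the contention-window rounding above (editorial example):
 * the while loops round a requested CW up to the next (2^n - 1)
 * value, which is the form the hardware expects. For instance, a
 * requested tqi_cwmin of 12 is programmed as 15 (1 -> 3 -> 7 -> 15),
 * and a requested value of 256 becomes 511.
 */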
bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
			    struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
			  "inactive queue: %u\n", q);
		return false;
	}

	qinfo->tqi_ver = qi->tqi_ver;
	qinfo->tqi_subtype = qi->tqi_subtype;
	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_priority = qi->tqi_priority;
	qinfo->tqi_aifs = qi->tqi_aifs;
	qinfo->tqi_cwmin = qi->tqi_cwmin;
	qinfo->tqi_cwmax = qi->tqi_cwmax;
	qinfo->tqi_shretry = qi->tqi_shretry;
	qinfo->tqi_lgretry = qi->tqi_lgretry;
	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
	qinfo->tqi_burstTime = qi->tqi_burstTime;
	qinfo->tqi_readyTime = qi->tqi_readyTime;

	return true;
}
EXPORT_SYMBOL(ath9k_hw_get_txq_props);
int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = pCap->total_queues - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = pCap->total_queues - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = pCap->total_queues - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		for (q = 0; q < pCap->total_queues; q++)
			if (ah->txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->total_queues) {
			ath_print(common, ATH_DBG_FATAL,
				  "No available TX queue\n");
			return -1;
		}
		break;
	default:
		ath_print(common, ATH_DBG_FATAL,
			  "Invalid TX queue type: %u\n", type);
		return -1;
	}

	ath_print(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);

	qi = &ah->txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_FATAL,
			  "TX queue: %u already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	if (qinfo == NULL) {
		qi->tqi_qflags =
			TXQ_FLAG_TXOKINT_ENABLE
			| TXQ_FLAG_TXERRINT_ENABLE
			| TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
		(void) ath9k_hw_set_txq_props(ah, q, qinfo);
	}

	return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
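
/*
 * Typical call sequence (editorial sketch; error handling elided):
 *
 *	int q = ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_DATA, NULL);
 *
 *	if (q != -1)
 *		ath9k_hw_resettxqueue(ah, q);
 *
 * Passing qinfo == NULL selects the defaults (INIT_AIFS, INIT_CWMAX,
 * INIT_SH_RETRY, ...); a non-NULL qinfo is applied through
 * ath9k_hw_set_txq_props(). ath9k_hw_resettxqueue() then commits the
 * software state to the QCU/DCU registers.
 */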
bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
			  "invalid queue: %u\n", q);
		return false;
	}
	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
			  "inactive queue: %u\n", q);
		return false;
	}

	ath_print(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ah->txok_interrupt_mask &= ~(1 << q);
	ah->txerr_interrupt_mask &= ~(1 << q);
	ah->txdesc_interrupt_mask &= ~(1 << q);
	ah->txeol_interrupt_mask &= ~(1 << q);
	ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);

bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_channel *chan = ah->curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
			  "inactive queue: %u\n", q);
		return true;
	}

	ath_print(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
	REG_WRITE(ah, AR_DMISC(q),
		  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
			  (qi->tqi_cbrOverflowLimit ?
			   AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) |
			  AR_Q_MISC_RDYTIME_EXP_POLICY);
	}

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}
	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_FRAG_BKOFF_EN);
	}
	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_BEACON_USE
			  | AR_Q_MISC_CBR_INCR_DIS1);

		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			  | AR_D_MISC_BEACON_USE
			  | AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	case ATH9K_TX_QUEUE_CAB:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_CBR_INCR_DIS1
			  | AR_Q_MISC_CBR_INCR_DIS0);
		value = (qi->tqi_readyTime -
			 (ah->config.sw_beacon_response_time -
			  ah->config.dma_beacon_response_time) -
			 ah->config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			     AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ah->txok_interrupt_mask |= 1 << q;
	else
		ah->txok_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ah->txerr_interrupt_mask |= 1 << q;
	else
		ah->txerr_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	else
		ah->txdesc_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	else
		ah->txeol_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	else
		ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);

int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
			u32 pa, struct ath_desc *nds, u64 tsf)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	ads.u.rx = adsp->u.rx;

	ds->ds_rxstat.rs_status = 0;
	ds->ds_rxstat.rs_flags = 0;

	ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;

	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
		ds->ds_rxstat.rs_rssi = ATH9K_RSSI_BAD;
		ds->ds_rxstat.rs_rssi_ctl0 = ATH9K_RSSI_BAD;
		ds->ds_rxstat.rs_rssi_ctl1 = ATH9K_RSSI_BAD;
		ds->ds_rxstat.rs_rssi_ctl2 = ATH9K_RSSI_BAD;
		ds->ds_rxstat.rs_rssi_ext0 = ATH9K_RSSI_BAD;
		ds->ds_rxstat.rs_rssi_ext1 = ATH9K_RSSI_BAD;
		ds->ds_rxstat.rs_rssi_ext2 = ATH9K_RSSI_BAD;
	} else {
		ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
		ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt00);
		ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt01);
		ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt02);
		ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt10);
		ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt11);
		ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt12);
	}
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;

	ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
	ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	ds->ds_rxstat.rs_moreaggr =
		(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	ds->ds_rxstat.rs_flags =
		(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	ds->ds_rxstat.rs_flags |=
		(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		if (ads.ds_rxstatus8 & AR_CRCErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			ds->ds_rxstat.rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
	}

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);

void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
			  u32 size, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath9k_hw_capabilities *pCap = &ah->caps;

	ads->ds_ctl1 = size & AR_BufLen;
	if (flags & ATH9K_RXDESC_INTREQ)
		ads->ds_ctl1 |= AR_RxIntrReq;

	ads->ds_rxstatus8 &= ~AR_RxDone;
	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
		memset(&(ads->u), 0, sizeof(ads->u));
}
EXPORT_SYMBOL(ath9k_hw_setuprxdesc);
/*
 * This can stop or re-enable RX.
 *
 * If bool is set this will kill any frame which is currently being
 * transferred between the MAC and baseband and also prevent any new
 * frames from getting started.
 */
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
				   0, AH_WAIT_TIMEOUT)) {
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS |
				     AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
				  "RX failed to go idle in 10 ms RXSM=0x%x\n",
				  reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_setrxabort);
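
/*
 * Receive start/stop sketch using the helpers in this file (editorial
 * example; one plausible ordering, not necessarily the exact sequence
 * the driver uses):
 *
 *	stop:	ath9k_hw_stoppcurecv(ah);	- disable PCU receive
 *		ath9k_hw_setrxabort(ah, true);	- abort frames in flight
 *		ath9k_hw_stopdmarecv(ah);	- then halt RX DMA
 *
 *	start:	ath9k_hw_putrxbuf(ah, rxdp);	- rxdp is the DMA address
 *						  of the first RX descriptor
 *		ath9k_hw_rxena(ah);
 *		ath9k_hw_startpcureceive(ah);
 */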
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);

void ath9k_hw_rxena(struct ath_hw *ah)
{
	REG_WRITE(ah, AR_CR, AR_CR_RXE);
}
EXPORT_SYMBOL(ath9k_hw_rxena);

void ath9k_hw_startpcureceive(struct ath_hw *ah)
{
	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);

void ath9k_hw_stoppcurecv(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_stoppcurecv);
bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
{
#define AH_RX_STOP_DMA_TIMEOUT	10000	/* usec */
#define AH_RX_TIME_QUANTUM	100	/* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	int i;

	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	/* Wait for rx enable bit to go low */
	for (i = AH_RX_STOP_DMA_TIMEOUT / AH_RX_TIME_QUANTUM; i != 0; i--) {
		if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
			break;
		udelay(AH_RX_TIME_QUANTUM);
	}

	if (i == 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "DMA failed to stop in %d ms "
			  "AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
			  AH_RX_STOP_DMA_TIMEOUT / 1000,
			  REG_READ(ah, AR_CR),
			  REG_READ(ah, AR_DIAG_SW));
		return false;
	} else {
		return true;
	}
#undef AH_RX_TIME_QUANTUM
#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);
int ath9k_hw_beaconq_setup(struct ath_hw *ah)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = 1;
	qi.tqi_cwmin = 0;
	qi.tqi_cwmax = 0;
	/* NB: don't enable any interrupts */
	return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);