ath9k: fix rx descriptor related race condition
drivers/net/wireless/ath/ath9k/recv.c
blob 2dd851a72a50162fba443872d1fe352dbb2a16b6
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/relay.h>
#include "ath9k.h"
#include "ar9003_mac.h"
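
/*
 * The rx path stores a back-pointer to the owning ath_buf in the skb's
 * control buffer, so the EDMA code below can recover the buffer from the
 * skb it dequeues from the rx FIFO.
 */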
#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}
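
/*
 * Instead of handing a completed buffer straight back to the hardware,
 * park it in sc->rx.buf_hold and only link back the buffer parked on the
 * previous pass. Keeping one completed descriptor out of the chain stops
 * the hardware from re-reading a descriptor that software is still
 * processing -- the rx descriptor related race this patch addresses.
 */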
static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
{
	if (sc->rx.buf_hold)
		ath_rx_buf_link(sc, sc->rx.buf_hold);

	sc->rx.buf_hold = bf;
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
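
/*
 * Hand the first buffer on sc->rx.rxbuf back to the hardware's rx FIFO
 * for the given queue. The status area at the head of the buffer is
 * cleared and synced back to the device first, so that a stale status
 * word is not later mistaken for a completed reception. Returns false
 * once the FIFO already holds rx_fifo_hwsize entries.
 */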
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	__skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf, *tbf;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, QUEUE, "No free rx buf available\n");
		return;
	}

	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = __skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}
static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	rx_edma->rx_fifo_hwsize = size;
}
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}
static void ath_edma_start_recv(struct ath_softc *sc)
{
	ath9k_hw_rxena(sc->sc_ah);
	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags &
					       IEEE80211_CONF_OFFCHANNEL));
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		return ath_rx_edma_init(sc, nbufs);

	ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
		common->cachelsz, common->rx_bufsize);

	/* Initialize rx descriptors */

	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				  "rx", nbufs, 1, 0);
	if (error != 0) {
		ath_err(common,
			"failed to allocate rx descriptors: %d\n",
			error);
		goto err;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize,
				      GFP_KERNEL);
		if (skb == NULL) {
			error = -ENOMEM;
			goto err;
		}

		bf->bf_mpdu = skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto err;
		}
	}
	sc->rx.rxlink = NULL;
err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}
void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */
u32 ath_calcrxfilter(struct ath_softc *sc)
{
	u32 rfilt;

	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* if operating on a DFS channel, enable radar pulse detection */
	if (sc->hw->conf.radar_enabled)
		rfilt |= ATH9K_RX_FILTER_PHYRADAR | ATH9K_RX_FILTER_PHYERR;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* This is needed for older chips */
		if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	if (AR_SREV_9550(sc->sc_ah))
		rfilt |= ATH9K_RX_FILTER_4ADDRESS;

	return rfilt;
}
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.buf_hold = NULL;
	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags &
					IEEE80211_CONF_OFFCHANNEL));

	return 0;
}
static void ath_flushrecv(struct ath_softc *sc)
{
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
}
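
/*
 * Returns true only if rx DMA was stopped cleanly and no chip reset is
 * pending; see the warning below for the failure case, where restarting
 * rx could confuse the DMA engine.
 */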
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	ath_flushrecv(sc);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, PS,
			"Reconfigure beacon timers based on synchronized timestamp\n");
		ath9k_set_beacon(sc);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
	}
}
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon) {
		ath_rx_ps_beacon(sc, skb);
	} else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		   (ieee80211_is_data(hdr->frame_control) ||
		    ieee80211_is_action(hdr->frame_control)) &&
		   is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}
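
/*
 * Pop one completed buffer from the head of the EDMA rx FIFO. Returns
 * false when the FIFO is empty or the head is still owned by the
 * hardware (-EINPROGRESS); returns true with *dest == NULL when a
 * corrupt descriptor (and its follower) had to be dropped and requeued.
 */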
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype,
				 struct ath_rx_status *rs,
				 struct ath_buf **dest)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
	if (ret == -EINPROGRESS) {
		/* let the device gain the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize,
					   DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);
			BUG_ON(!bf);

			__skb_unlink(skb, &rx_edma->rx_fifo);
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		}

		bf = NULL;
	}

	*dest = bf;
	return true;
}
static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf = NULL;

	while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
		if (!bf)
			continue;

		return bf;
	}
	return NULL;
}
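
/*
 * Legacy (non-EDMA) counterpart: pick the first buffer off the rx list,
 * but never hand out the buffer parked in sc->rx.buf_hold -- it has not
 * been linked back into the hardware's descriptor chain yet (see
 * ath_rx_buf_relink), so it cannot contain a new frame.
 */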
static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	if (bf == sc->rx.buf_hold)
		return NULL;

	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
		if (ret == -EINPROGRESS)
			return NULL;

		/*
		 * mark descriptor as zero-length and set the 'more'
		 * flag to ensure that both buffers get discarded
		 */
		rs->rs_datalen = 0;
		rs->rs_more = true;
	}

	list_del(&bf->list);
	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}
/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	bool is_mc, is_valid_tkip, strip_mic, mic_error;
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	is_mc = !!is_multicast_ether_addr(hdr->addr1);
	is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
		test_bit(rx_stats->rs_keyix, common->tkip_keymap);
	strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
		ieee80211_has_protected(fc) &&
		!(rx_stats->rs_status &
		(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
		 ATH9K_RXERR_KEYMISS));

	/*
	 * Key miss events are only relevant for pairwise keys where the
	 * descriptor does contain a valid key index. This has been observed
	 * mostly with CCMP encryption.
	 */
	if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
	    !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
		rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;

	if (!rx_stats->rs_datalen) {
		RX_STAT_INC(rx_len_err);
		return false;
	}

	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) {
		RX_STAT_INC(rx_len_err);
		return false;
	}

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return true;

	mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
		!ieee80211_has_morefrags(fc) &&
		!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
		(rx_stats->rs_status & ATH9K_RXERR_MIC);

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		u8 status_mask;

		if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
			mic_error = false;
		}

		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
		    (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
			*decrypt_error = true;
			mic_error = false;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_KEYMISS;

		if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
			status_mask |= ATH9K_RXERR_CRC;

		if (rx_stats->rs_status & ~status_mask)
			return false;
	}

	/*
	 * For unicast frames the MIC error bit can have false positives,
	 * so all MIC error reports need to be validated in software.
	 * False negatives are not common, so skip software verification
	 * if the hardware considers the MIC valid.
	 */
	if (strip_mic)
		rxs->flag |= RX_FLAG_MMIC_STRIPPED;
	else if (is_mc && mic_error)
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	return true;
}
static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;
	struct ath_softc __maybe_unused *sc = common->priv;

	band = hw->conf.chandef.chan->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		rxs->flag |= rx_stats->flag;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_dbg(common, ANY,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);
	RX_STAT_INC(rx_rate_err);
	return -EINVAL;
}
static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	int rssi = rx_stats->rs_rssi;

	if (!rx_stats->is_mybeacon ||
	    ((ah->opmode != NL80211_IFTYPE_STATION) &&
	     (ah->opmode != NL80211_IFTYPE_ADHOC)))
		return;

	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);

	last_rssi = sc->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
	if (rssi < 0)
		rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rssi;
}
/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or a real decryption error. This lets us keep statistics
 * there.
 */
static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	bool discard_current = sc->rx.discard_next;

	sc->rx.discard_next = rx_stats->rs_more;
	if (discard_current)
		return -EINVAL;

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	rx_status->band = hw->conf.chandef.chan->band;
	rx_status->freq = hw->conf.chandef.chan->center_freq;
	rx_status->signal = ah->noise + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_END;
	if (rx_stats->rs_moreaggr)
		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	sc->rx.discard_next = false;
	return 0;
}
static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ieee80211_hdrlen(fc);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}
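
/*
 * Spectral PHY error frames report rssi as an unsigned byte; 128 (0x80)
 * appears to be the hardware's marker for an invalid measurement, so map
 * it to 0 before reinterpreting the value as signed.
 */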
#ifdef CONFIG_ATH9K_DEBUGFS
static s8 fix_rssi_inv_only(u8 rssi_val)
{
	if (rssi_val == 128)
		rssi_val = 0;
	return (s8) rssi_val;
}
#endif
/* returns 1 if this was a spectral frame, even if not handled. */
static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
			   struct ath_rx_status *rs, u64 tsf)
{
#ifdef CONFIG_ATH9K_DEBUGFS
	struct ath_hw *ah = sc->sc_ah;
	u8 bins[SPECTRAL_HT20_NUM_BINS];
	u8 *vdata = (u8 *)hdr;
	struct fft_sample_ht20 fft_sample;
	struct ath_radar_info *radar_info;
	struct ath_ht20_mag_info *mag_info;
	int len = rs->rs_datalen;
	int dc_pos;
	u16 length, max_magnitude;

	/* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
	 * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
	 * yet, but this is supposed to be possible as well.
	 */
	if (rs->rs_phyerr != ATH9K_PHYERR_RADAR &&
	    rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT &&
	    rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL)
		return 0;

	/* check if spectral scan bit is set. This does not have to be checked
	 * if received through a SPECTRAL phy error, but shouldn't hurt.
	 */
	radar_info = ((struct ath_radar_info *)&vdata[len]) - 1;
	if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
		return 0;

	/* Variation in the data length is possible and will be fixed later.
	 * Note that we only support HT20 for now.
	 *
	 * TODO: add HT20_40 support as well.
	 */
	if ((len > SPECTRAL_HT20_TOTAL_DATA_LEN + 2) ||
	    (len < SPECTRAL_HT20_TOTAL_DATA_LEN - 1))
		return 1;

	fft_sample.tlv.type = ATH_FFT_SAMPLE_HT20;
	length = sizeof(fft_sample) - sizeof(fft_sample.tlv);
	fft_sample.tlv.length = __cpu_to_be16(length);

	fft_sample.freq = __cpu_to_be16(ah->curchan->chan->center_freq);
	fft_sample.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
	fft_sample.noise = ah->noise;

	switch (len - SPECTRAL_HT20_TOTAL_DATA_LEN) {
	case 0:
		/* length correct, nothing to do. */
		memcpy(bins, vdata, SPECTRAL_HT20_NUM_BINS);
		break;
	case -1:
		/* first byte missing, duplicate it. */
		memcpy(&bins[1], vdata, SPECTRAL_HT20_NUM_BINS - 1);
		bins[0] = vdata[0];
		break;
	case 2:
		/* MAC added 2 extra bytes at bin 30 and 32, remove them. */
		memcpy(bins, vdata, 30);
		bins[30] = vdata[31];
		memcpy(&bins[31], &vdata[33], SPECTRAL_HT20_NUM_BINS - 31);
		break;
	case 1:
		/* MAC added 2 extra bytes AND first byte is missing. */
		bins[0] = vdata[0];
		memcpy(&bins[0], vdata, 30);
		bins[31] = vdata[31];
		memcpy(&bins[32], &vdata[33], SPECTRAL_HT20_NUM_BINS - 32);
		break;
	default:
		return 1;
	}

	/* DC value (value in the middle) is the blind spot of the spectral
	 * sample and invalid, interpolate it.
	 */
	dc_pos = SPECTRAL_HT20_NUM_BINS / 2;
	bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;

	/* mag data is at the end of the frame, in front of radar_info */
	mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;

	/* copy raw bins without scaling them */
	memcpy(fft_sample.data, bins, SPECTRAL_HT20_NUM_BINS);
	fft_sample.max_exp = mag_info->max_exp & 0xf;

	max_magnitude = spectral_max_magnitude(mag_info->all_bins);
	fft_sample.max_magnitude = __cpu_to_be16(max_magnitude);
	fft_sample.max_index = spectral_max_index(mag_info->all_bins);
	fft_sample.bitmap_weight = spectral_bitmap_weight(mag_info->all_bins);
	fft_sample.tsf = __cpu_to_be64(tsf);

	ath_debug_send_fft_sample(sc, &fft_sample.tlv);
	return 1;
#else
	return 0;
#endif
}
static void ath9k_apply_ampdu_details(struct ath_softc *sc,
	struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
{
	if (rs->rs_isaggr) {
		rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;

		rxs->ampdu_reference = sc->rx.ampdu_ref;

		if (!rs->rs_moreaggr) {
			rxs->flag |= RX_FLAG_AMPDU_IS_LAST;
			sc->rx.ampdu_ref++;
		}

		if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE)
			rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR;
	}
}
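
/*
 * Main rx processing loop, run from the rx tasklet. Drains completed
 * buffers from either the EDMA FIFO selected by 'hp' (high vs. low
 * priority) or the legacy descriptor list, hands frames to mac80211 and
 * requeues the buffers. With 'flush' set, buffers are only drained and
 * not re-armed for DMA (see the 'if (flush) continue;' near the bottom).
 */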
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	int retval;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;
	dma_addr_t new_buf_addr;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		bool decrypt_error = false;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		if (ieee80211_is_beacon(hdr->frame_control)) {
			RX_STAT_INC(rx_beacons);
			if (!is_zero_ether_addr(common->curbssid) &&
			    ether_addr_equal(hdr->addr3, common->curbssid))
				rs.is_mybeacon = true;
			else
				rs.is_mybeacon = false;
		} else {
			rs.is_mybeacon = false;
		}

		if (ieee80211_is_data_present(hdr->frame_control) &&
		    !ieee80211_is_qos_nullfunc(hdr->frame_control))
			sc->rx.num_pkts++;

		ath_debug_stat_rx(sc, &rs);

		memset(rxs, 0, sizeof(struct ieee80211_rx_status));

		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		if (rs.rs_phyerr == ATH9K_PHYERR_RADAR)
			ath9k_dfs_process_phyerr(sc, hdr, &rs, rxs->mactime);

		if (rs.rs_status & ATH9K_RXERR_PHY) {
			if (ath_process_fft(sc, hdr, &rs, rxs->mactime)) {
				RX_STAT_INC(rx_spectral);
				goto requeue_drop_frag;
			}
		}

		retval = ath9k_rx_skb_preprocess(sc, hdr, &rs, rxs,
						 &decrypt_error);
		if (retval)
			goto requeue_drop_frag;

		if (rs.is_mybeacon) {
			sc->hw_busy_count = 0;
			ath_start_rx_poll(sc, 3);
		}

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb) {
			RX_STAT_INC(rx_oom_err);
			goto requeue_drop_frag;
		}

		/* We will now give hardware our shiny new allocated skb */
		new_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
					      common->rx_bufsize, dma_type);
		if (unlikely(dma_mapping_error(sc->dev, new_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			goto requeue_drop_frag;
		}

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize, dma_type);

		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = new_buf_addr;

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		if (rs.rs_more) {
			RX_STAT_INC(rx_frags);
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				RX_STAT_INC(rx_too_many_frags_err);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}
		if (rs.rs_status & ATH9K_RXERR_CORRUPT_DESC)
			goto requeue_drop_frag;

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				RX_STAT_INC(rx_oom_err);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		/*
		 * Run the LNA combining algorithm only in these cases:
		 *
		 * Standalone WLAN cards with both LNA/Antenna diversity
		 * enabled in the EEPROM.
		 *
		 * WLAN+BT cards which are in the supported card list
		 * in ath_pci_id_table and the user has loaded the
		 * driver with "bt_ant_diversity" set to true.
		 */
		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
			/*
			 * Change the default rx antenna if rx diversity
			 * chooses the other antenna 3 times in a row.
			 */
			if (sc->rx.defant != rs.rs_antenna) {
				if (++sc->rx.rxotherant >= 3)
					ath_setdefantenna(sc, rs.rs_antenna);
			} else {
				sc->rx.rxotherant = 0;
			}

			if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) {
				if (common->bt_ant_diversity)
					ath_ant_comb_scan(sc, &rs);
			} else {
				ath_ant_comb_scan(sc, &rs);
			}
		}

		ath9k_apply_ampdu_details(sc, &rs, rxs);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		if (flush)
			continue;

		if (edma) {
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			ath_rx_buf_relink(sc, bf);
			ath9k_hw_rxena(ah);
		}
	} while (1);

	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	return 0;
}