/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "core.h"

/*
 * Implementation of receive path.
 */
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 *
 * NOTE: Caller should hold the rxbuf lock.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds = bf->bf_desc;
	struct sk_buff *skb = bf->bf_mpdu;

	ds->ds_link = 0;		/* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* Always maintain a valid
	 * virtual addr of the beginning of the buffer. */
	ds->ds_vdata = skb->data;

	/* setup rx descriptors */
	ath9k_hw_setuprxdesc(ah, ds,
			     skb_tailroom(skb),	/* buffer size */
			     0);

	if (sc->sc_rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->sc_rxlink = bf->bf_daddr;

	sc->sc_rxlink = &ds->ds_link;
}
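/*
 * For reference: the block-ack window arithmetic used below relies on
 * ATH_BA_INDEX() and INCR(), which are defined elsewhere in the driver.
 * A sketch of the assumed definitions:
 *
 *   #define ATH_BA_INDEX(_st, _seq) \
 *           (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
 *   #define INCR(_l, _sz) \
 *           do { (_l)++; (_l) &= ((_sz) - 1); } while (0)
 *
 * i.e. ATH_BA_INDEX() gives the offset of a sequence number from the
 * window start, modulo the 4096-entry 802.11 sequence space, and INCR()
 * advances a circular index with power-of-two wrap-around.
 */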
/* Process received BAR frame */

static int ath_bar_rx(struct ath_softc *sc,
		      struct ath_node *an,
		      struct sk_buff *skb)
{
	struct ieee80211_bar *bar;
	struct ath_arx_tid *rxtid;
	struct sk_buff *tskb;
	struct ath_recv_status *rx_status;
	int tidno, index, cindex;
	u16 seqno;

	/* look at BAR contents */
	bar = (struct ieee80211_bar *)skb->data;
	tidno = (le16_to_cpu(bar->control) & IEEE80211_BAR_CTL_TID_M)
		>> IEEE80211_BAR_CTL_TID_S;
	seqno = le16_to_cpu(bar->start_seq_num) >> IEEE80211_SEQ_SEQ_SHIFT;

	/* process BAR - indicate all pending RX frames till the BAR seqno */
	rxtid = &an->an_aggr.rx.tid[tidno];

	spin_lock_bh(&rxtid->tidlock);

	/* get relative index */
	index = ATH_BA_INDEX(rxtid->seq_next, seqno);

	/* drop BAR if old sequence (index is too large) */
	if ((index > rxtid->baw_size) &&
	    (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))))
		/* discard frame, ieee layer may not treat frame as a dup */
		goto unlock_and_free;

	/* complete receive processing for all pending frames upto BAR seqno */
	cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	while ((rxtid->baw_head != rxtid->baw_tail) &&
	       (rxtid->baw_head != cindex)) {
		tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
		rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
		rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;

		if (tskb != NULL)
			ath_rx_subframe(an, tskb, rx_status);

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}

	/* ... and indicate rest of the frames in-order */
	while (rxtid->baw_head != rxtid->baw_tail &&
	       rxtid->rxbuf[rxtid->baw_head].rx_wbuf != NULL) {
		tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
		rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
		rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;

		ath_rx_subframe(an, tskb, rx_status);

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}

unlock_and_free:
	spin_unlock_bh(&rxtid->tidlock);
	/* free bar itself */
	dev_kfree_skb(skb);
	return IEEE80211_FTYPE_CTL;
}
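/*
 * A worked example of the reorder-window indexing used above (the
 * buffer count ATH_TID_MAX_BUFS is assumed to be a power of two,
 * e.g. 64): with baw_head = 60 and a relative index of 10, the frame
 * slot is cindex = (60 + 10) & 63 = 6, i.e. the window wraps around
 * the circular rxbuf[] array rather than overrunning it.
 */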
/* Function to handle a subframe of aggregation when HT is enabled */

static int ath_ampdu_input(struct ath_softc *sc,
			   struct ath_node *an,
			   struct sk_buff *skb,
			   struct ath_recv_status *rx_status)
{
	struct ieee80211_hdr *hdr;
	struct ath_arx_tid *rxtid;
	struct ath_rxbuf *rxbuf;
	u16 type, subtype;
	u16 rxseq;
	int tid = 0, index, cindex, rxdiff;
	__le16 fc;
	u8 *qc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	/* collect stats of frames with non-zero version */
	if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_VERS) != 0) {
		dev_kfree_skb(skb);
		return -1;
	}

	type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
	subtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE;

	if (ieee80211_is_back_req(fc))
		return ath_bar_rx(sc, an, skb);

	/* special aggregate processing only for qos unicast data frames */
	if (!ieee80211_is_data(fc) ||
	    !ieee80211_is_data_qos(fc) ||
	    is_multicast_ether_addr(hdr->addr1))
		return ath_rx_subframe(an, skb, rx_status);

	/* lookup rx tid state */
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	}

	if (sc->sc_opmode == ATH9K_M_STA) {
		/* Drop the frame not belonging to me. */
		if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) {
			dev_kfree_skb(skb);
			return -1;
		}
	}

	rxtid = &an->an_aggr.rx.tid[tid];

	spin_lock(&rxtid->tidlock);

	rxdiff = (rxtid->baw_tail - rxtid->baw_head) &
		 (ATH_TID_MAX_BUFS - 1);

	/*
	 * If the ADDBA exchange has not been completed by the source,
	 * process via legacy path (i.e. no reordering buffer is needed)
	 */
	if (!rxtid->addba_exchangecomplete) {
		spin_unlock(&rxtid->tidlock);
		return ath_rx_subframe(an, skb, rx_status);
	}

	/* extract sequence number from recvd frame */
	rxseq = le16_to_cpu(hdr->seq_ctrl) >> IEEE80211_SEQ_SEQ_SHIFT;

	if (rxtid->seq_reset) {
		rxtid->seq_reset = 0;
		rxtid->seq_next = rxseq;
	}

	index = ATH_BA_INDEX(rxtid->seq_next, rxseq);

	/* drop frame if old sequence (index is too large) */
	if (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))) {
		/* discard frame, ieee layer may not treat frame as a dup */
		spin_unlock(&rxtid->tidlock);
		dev_kfree_skb(skb);
		return IEEE80211_FTYPE_DATA;
	}

	/* sequence number is beyond block-ack window */
	if (index >= rxtid->baw_size) {
		/* complete receive processing for all pending frames */
		while (index >= rxtid->baw_size) {
			rxbuf = rxtid->rxbuf + rxtid->baw_head;

			if (rxbuf->rx_wbuf != NULL) {
				ath_rx_subframe(an, rxbuf->rx_wbuf,
						&rxbuf->rx_status);
				rxbuf->rx_wbuf = NULL;
			}

			INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
			INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);

			index--;
		}
	}

	/* add buffer to the recv ba window */
	cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	rxbuf = rxtid->rxbuf + cindex;

	if (rxbuf->rx_wbuf != NULL) {
		spin_unlock(&rxtid->tidlock);
		/* duplicate frame */
		dev_kfree_skb(skb);
		return IEEE80211_FTYPE_DATA;
	}

	rxbuf->rx_wbuf = skb;
	rxbuf->rx_time = get_timestamp();
	rxbuf->rx_status = *rx_status;

	/* advance tail if sequence received is newer
	 * than any received so far */
	if (index >= rxdiff) {
		rxtid->baw_tail = cindex;
		INCR(rxtid->baw_tail, ATH_TID_MAX_BUFS);
	}

	/* indicate all in-order received frames */
	while (rxtid->baw_head != rxtid->baw_tail) {
		rxbuf = rxtid->rxbuf + rxtid->baw_head;
		if (!rxbuf->rx_wbuf)
			break;

		ath_rx_subframe(an, rxbuf->rx_wbuf, &rxbuf->rx_status);
		rxbuf->rx_wbuf = NULL;

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}

	/*
	 * start a timer to flush all received frames if there are pending
	 * receive frames
	 */
	if (rxtid->baw_head != rxtid->baw_tail)
		mod_timer(&rxtid->timer,
			  jiffies + (ATH_RX_TIMEOUT * HZ / 1000));
	else
		del_timer_sync(&rxtid->timer);

	spin_unlock(&rxtid->tidlock);
	return IEEE80211_FTYPE_DATA;
}
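/*
 * Note on the flush timer: mod_timer() takes an absolute expiry in
 * jiffies, so the re-arm above converts the millisecond constant with
 * jiffies + (ATH_RX_TIMEOUT * HZ / 1000), matching the frame-aging
 * test performed in ath_rx_timer() below.
 */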
/* Timer to flush all received sub-frames */

static void ath_rx_timer(unsigned long data)
{
	struct ath_arx_tid *rxtid = (struct ath_arx_tid *)data;
	struct ath_node *an = rxtid->an;
	struct ath_rxbuf *rxbuf;
	int nosched = 0;

	spin_lock_bh(&rxtid->tidlock);
	while (rxtid->baw_head != rxtid->baw_tail) {
		rxbuf = rxtid->rxbuf + rxtid->baw_head;
		if (!rxbuf->rx_wbuf) {
			INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
			INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
			continue;
		}

		/*
		 * Stop if the next one is a very recent frame.
		 *
		 * Call get_timestamp in every iteration to protect against the
		 * case in which a new frame is received while we are executing
		 * this function. Using a timestamp obtained before entering
		 * the loop could lead to a very large time interval
		 * (a negative value typecast to unsigned), breaking the
		 * function's logic.
		 */
		if ((get_timestamp() - rxbuf->rx_time) <
		    (ATH_RX_TIMEOUT * HZ / 1000))
			break;

		ath_rx_subframe(an, rxbuf->rx_wbuf, &rxbuf->rx_status);
		rxbuf->rx_wbuf = NULL;

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}

	/*
	 * start a timer to flush all received frames if there are pending
	 * receive frames
	 */
	if (rxtid->baw_head != rxtid->baw_tail)
		mod_timer(&rxtid->timer,
			  jiffies + (ATH_RX_TIMEOUT * HZ / 1000));
	else
		nosched = 1;	/* no need to re-arm the timer again */

	spin_unlock_bh(&rxtid->tidlock);
}
/* Free all pending sub-frames in the re-ordering buffer */

static void ath_rx_flush_tid(struct ath_softc *sc,
			     struct ath_arx_tid *rxtid, int drop)
{
	struct ath_rxbuf *rxbuf;

	spin_lock_bh(&rxtid->tidlock);
	while (rxtid->baw_head != rxtid->baw_tail) {
		rxbuf = rxtid->rxbuf + rxtid->baw_head;
		if (!rxbuf->rx_wbuf) {
			INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
			INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
			continue;
		}

		if (drop)
			dev_kfree_skb(rxbuf->rx_wbuf);
		else
			ath_rx_subframe(rxtid->an,
					rxbuf->rx_wbuf,
					&rxbuf->rx_status);

		rxbuf->rx_wbuf = NULL;

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}
	spin_unlock_bh(&rxtid->tidlock);
}
static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, u32 len)
{
	struct sk_buff *skb;
	u32 off;

	/*
	 * Cache-line-align. This is important (for the
	 * 5210 at least) as not doing so causes bogus data
	 * in rx'd frames.
	 */
	skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
	if (skb != NULL) {
		off = ((unsigned long) skb->data) % sc->sc_cachelsz;
		if (off != 0)
			skb_reserve(skb, sc->sc_cachelsz - off);
	} else {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: skbuff alloc of size %u failed\n",
			__func__, len);
		return NULL;
	}

	return skb;
}
static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;

	ASSERT(bf != NULL);

	spin_lock_bh(&sc->sc_rxbuflock);
	if (bf->bf_status & ATH_BUFSTATUS_STALE) {
		/*
		 * This buffer is still held for hw access.
		 * Mark it as free to be re-queued later.
		 */
		bf->bf_status |= ATH_BUFSTATUS_FREE;
	} else {
		/* XXX: we probably never enter here, remove after
		 * verification */
		list_add_tail(&bf->list, &sc->sc_rxbuf);
		ath_rx_buf_link(sc, bf);
	}
	spin_unlock_bh(&sc->sc_rxbuflock);
}
/*
 * The skb indicated to upper stack won't be returned to us.
 * So we have to allocate a new one and queue it by ourselves.
 */
static int ath_rx_indicate(struct ath_softc *sc,
			   struct sk_buff *skb,
			   struct ath_recv_status *status,
			   u16 keyix)
{
	struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;
	struct sk_buff *nskb;
	int type;

	/* indicate frame to the stack, which will free the old skb. */
	type = ath__rx_indicate(sc, skb, status, keyix);

	/* allocate a new skb and queue it for H/W processing */
	nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
	if (nskb != NULL) {
		bf->bf_mpdu = nskb;
		bf->bf_buf_addr = ath_skb_map_single(sc,
			nskb,
			PCI_DMA_FROMDEVICE,
			/* XXX: Remove get_dma_mem_context() */
			get_dma_mem_context(bf, bf_dmacontext));
		ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;

		/* queue the new wbuf to H/W */
		ath_rx_requeue(sc, nskb);
	}

	return type;
}
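/*
 * Buffer ownership cycle: the indicated skb now belongs to the stack
 * and is never handed back, so the ath_buf is re-armed with a freshly
 * allocated replacement and returned to the hardware queue via
 * ath_rx_requeue().  If the allocation fails, the descriptor is simply
 * left unarmed and stays off the hardware list.
 */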
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, sc->sc_myaddr);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;

	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: RX filter 0x%x, MC filter %08x:%08x\n",
		__func__, rfilt, mfilt[0], mfilt[1]);
}
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	do {
		spin_lock_init(&sc->sc_rxflushlock);
		sc->sc_rxflush = 0;
		spin_lock_init(&sc->sc_rxbuflock);

		/*
		 * Cisco's VPN software requires that drivers be able to
		 * receive encapsulated frames that are larger than the MTU.
		 * Since we can't be sure how large a frame we'll get, setup
		 * to handle the largest frame possible.
		 */
		sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
					   min(sc->sc_cachelsz,
					       (u16)64));

		DPRINTF(sc, ATH_DBG_CONFIG, "%s: cachelsz %u rxbufsize %u\n",
			__func__, sc->sc_cachelsz, sc->sc_rxbufsize);

		/* Initialize rx descriptors */
		error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
					  "rx", nbufs, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: failed to allocate rx descriptors: %d\n",
				__func__, error);
			break;
		}

		/* Pre-allocate a wbuf for each rx buffer */
		list_for_each_entry(bf, &sc->sc_rxbuf, list) {
			skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
			if (skb == NULL) {
				error = -ENOMEM;
				break;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr =
				ath_skb_map_single(sc, skb, PCI_DMA_FROMDEVICE,
					get_dma_mem_context(bf, bf_dmacontext));
			ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
		}
		sc->sc_rxlink = NULL;
	} while (0);

	if (error)
		ath_rx_cleanup(sc);

	return error;
}
/* Reclaim all rx queue resources */

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct sk_buff *skb;
	struct ath_buf *bf;

	list_for_each_entry(bf, &sc->sc_rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb)
			dev_kfree_skb(skb);
	}

	/* cleanup rx descriptors */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (sc->sc_opmode != ATH9K_M_STA)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/* Can't set HOSTAP into promiscuous mode */
	if (((sc->sc_opmode != ATH9K_M_HOSTAP) &&
	     (sc->rx_filter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_opmode == ATH9K_M_MONITOR)) {
		rfilt |= ATH9K_RX_FILTER_PROM;
		/* ??? To prevent from sending ACK */
		rfilt &= ~ATH9K_RX_FILTER_UCAST;
	}

	if (((sc->sc_opmode == ATH9K_M_STA) &&
	     (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)) ||
	    (sc->sc_opmode == ATH9K_M_IBSS))
		rfilt |= ATH9K_RX_FILTER_BEACON;

	/* If in HOSTAP mode, want to enable reception of PSPOLL frames
	   & beacon frames */
	if (sc->sc_opmode == ATH9K_M_HOSTAP)
		rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);

	return rfilt;
#undef RX_FILTER_PRESERVE
}
/* Enable the receive h/w following a reset. */

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	spin_lock_bh(&sc->sc_rxbuflock);
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	sc->sc_rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			/* restarting h/w, no need for holding descriptors */
			bf->bf_status &= ~ATH_BUFSTATUS_STALE;
			/*
			 * Upper layer may not be done with the frame yet so
			 * we can't just re-queue it to hardware. Remove it
			 * from h/w queue. It'll be re-queued when upper layer
			 * returns the frame and ath_rx_requeue_mpdu is called.
			 */
			if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) {
				list_del(&bf->list);
				continue;
			}
		}
		/* chain descriptors */
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);		/* enable recv descriptors */

start_recv:
	spin_unlock_bh(&sc->sc_rxbuflock);
	ath_opmode_init(sc);		/* set filters, etc. */
	ath9k_hw_startpcureceive(ah);	/* re-enable PCU/DMA engine */
	return 0;
}
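/*
 * The bring-up order above matters: descriptors are chained and the
 * head of the list handed to the DMA engine (ath9k_hw_putrxbuf /
 * ath9k_hw_rxena) before the PCU is told to start receiving, so the
 * hardware never sees an enabled receive path without a valid
 * descriptor chain behind it.
 */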
/* Disable the receive h/w in preparation for a reset. */

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u64 tsf;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);	/* disable PCU */
	ath9k_hw_setrxfilter(ah, 0);	/* clear recv filter */
	stopped = ath9k_hw_stopdmarecv(ah);	/* disable DMA engine */
	mdelay(3);			/* 3ms is long enough for 1 frame */
	tsf = ath9k_hw_gettsf64(ah);
	sc->sc_rxlink = NULL;		/* just in case */
	return stopped;
}
/* Flush receive queue */

void ath_flushrecv(struct ath_softc *sc)
{
	/*
	 * ath_rx_tasklet may be used to handle rx interrupt and flush receive
	 * queue at the same time. Use a lock to serialize the access of rx
	 * queue.
	 * ath_rx_tasklet cannot hold the spinlock while indicating packets.
	 * Instead, do not claim the spinlock but check for a flush in
	 * progress (see references to sc_rxflush)
	 */
	spin_lock_bh(&sc->sc_rxflushlock);
	sc->sc_rxflush = 1;

	ath_rx_tasklet(sc, 1);

	sc->sc_rxflush = 0;
	spin_unlock_bh(&sc->sc_rxflushlock);
}
/* Process an individual frame */

int ath_rx_input(struct ath_softc *sc,
		 struct ath_node *an,
		 int is_ampdu,
		 struct sk_buff *skb,
		 struct ath_recv_status *rx_status,
		 enum ATH_RX_TYPE *status)
{
	if (is_ampdu && sc->sc_rxaggr) {
		*status = ATH_RX_CONSUMED;
		return ath_ampdu_input(sc, an, skb, rx_status);
	} else {
		*status = ATH_RX_NON_CONSUMED;
		return -1;
	}
}
/* Process receive queue, as well as LED, etc. */

int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
#define PA2DESC(_sc, _pa)						\
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc +	\
			     ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
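	/*
	 * PA2DESC() maps a descriptor's physical (DMA) address back to
	 * its kernel virtual address: all rx descriptors live in one
	 * contiguous block, so the offset of _pa from the block's
	 * physical base (dd_desc_paddr) can be added to the virtual
	 * base (dd_desc).  E.g. if the block starts at physical 0x1000
	 * and virtual 0xffff0000, _pa = 0x1040 yields the descriptor
	 * at 0xffff0040.
	 */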
	struct ath_buf *bf, *bf_held = NULL;
	struct ath_desc *ds;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb = NULL;
	struct ath_recv_status rx_status;
	struct ath_hal *ah = sc->sc_ah;
	int type, rx_processed = 0;
	u32 phyerr;
	u8 chainreset = 0;
	int retval;
	__le16 fc;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if (sc->sc_rxflush && (flush == 0))
			break;

		spin_lock_bh(&sc->sc_rxbuflock);
		if (list_empty(&sc->sc_rxbuf)) {
			sc->sc_rxlink = NULL;
			spin_unlock_bh(&sc->sc_rxbuflock);
			break;
		}

		bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);

		/*
		 * There is a race condition that BH gets scheduled after sw
		 * writes RxE and before hw re-load the last descriptor to get
		 * the newly chained one. Software must keep the last DONE
		 * descriptor as a holding descriptor - software does so by
		 * marking it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) {
				/*
				 * The holding descriptor is the last
				 * descriptor in queue. It's safe to
				 * remove the last holding descriptor
				 * in BH context.
				 */
				list_del(&bf_held->list);
				bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
				sc->sc_rxlink = NULL;

				if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
					list_add_tail(&bf_held->list,
						&sc->sc_rxbuf);
					ath_rx_buf_link(sc, bf_held);
				}
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}
			bf = list_entry(bf->list.next, struct ath_buf, list);
		}

		ds = bf->bf_desc;
		++rx_processed;

		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on. All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		retval = ath9k_hw_rxprocdesc(ah,
					     ds,
					     bf->bf_daddr,
					     PA2DESC(sc, ds->ds_link),
					     0);
		if (retval == -EINPROGRESS) {
			struct ath_buf *tbf;
			struct ath_desc *tds;

			if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}

			tbf = list_entry(bf->list.next, struct ath_buf, list);

			/*
			 * On some hardware the descriptor status words could
			 * get corrupted, including the done bit. Because of
			 * this, check if the next descriptor's done bit is
			 * set or not.
			 *
			 * If the next descriptor's done bit is set, the current
			 * descriptor has been corrupted. Force s/w to discard
			 * this descriptor and continue...
			 */
			tds = tbf->bf_desc;
			retval = ath9k_hw_rxprocdesc(ah,
						     tds, tbf->bf_daddr,
						     PA2DESC(sc, tds->ds_link),
						     0);
			if (retval == -EINPROGRESS) {
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}
		}

		/* XXX: we do not support frames spanning
		 * multiple descriptors */
		bf->bf_status |= ATH_BUFSTATUS_DONE;

		skb = bf->bf_mpdu;
		if (skb == NULL) {	/* XXX ??? can this happen */
			spin_unlock_bh(&sc->sc_rxbuflock);
			continue;
		}

		/*
		 * Now we know it's a completed frame, we can indicate the
		 * frame. Remove the previous holding descriptor and leave
		 * this one in the queue as the new holding descriptor.
		 */
		if (bf_held) {
			list_del(&bf_held->list);
			bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
			if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
				list_add_tail(&bf_held->list, &sc->sc_rxbuf);
				/* try to requeue this descriptor */
				ath_rx_buf_link(sc, bf_held);
			}
		}

		bf->bf_status |= ATH_BUFSTATUS_STALE;
		bf_held = bf;
		/*
		 * Release the lock here in case ieee80211_input() return
		 * the frame immediately by calling ath_rx_mpdu_requeue().
		 */
		spin_unlock_bh(&sc->sc_rxbuflock);

		if (flush) {
			/*
			 * If we're asked to flush receive queue, directly
			 * chain it back at the queue without processing it.
			 */
			goto rx_next;
		}

		hdr = (struct ieee80211_hdr *)skb->data;
		fc = hdr->frame_control;
		memzero(&rx_status, sizeof(struct ath_recv_status));

		if (ds->ds_rxstat.rs_more) {
			/*
			 * Frame spans multiple descriptors; this
			 * cannot happen yet as we don't support
			 * jumbograms. If not in monitor mode,
			 * discard the frame.
			 */
#ifndef ERROR_FRAMES
			/*
			 * Enable this if you want to see
			 * error frames in Monitor mode.
			 */
			if (sc->sc_opmode != ATH9K_M_MONITOR)
				goto rx_next;
#endif
			/* fall thru for monitor mode handling... */
		} else if (ds->ds_rxstat.rs_status != 0) {
			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
				rx_status.flags |= ATH_RX_FCS_ERROR;
			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
				phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
				goto rx_next;
			}

			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
				/*
				 * Decrypt error. We only mark packet status
				 * here and always push up the frame up to let
				 * mac80211 handle the actual error case, be
				 * it no decryption key or real decryption
				 * error. This let us keep statistics there.
				 */
				rx_status.flags |= ATH_RX_DECRYPT_ERROR;
			} else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
				/*
				 * Demic error. We only mark frame status here
				 * and always push up the frame up to let
				 * mac80211 handle the actual error case. This
				 * let us keep statistics there. Hardware may
				 * post a false-positive MIC error.
				 */
				if (ieee80211_is_ctl(fc))
					/*
					 * Sometimes, we get invalid
					 * MIC failures on valid control frames.
					 * Remove these mic errors.
					 */
					ds->ds_rxstat.rs_status &=
						~ATH9K_RXERR_MIC;
				else
					rx_status.flags |= ATH_RX_MIC_ERROR;
			}
			/*
			 * Reject error frames with the exception of
			 * decryption and MIC failures. For monitor mode,
			 * we also ignore the CRC error.
			 */
			if (sc->sc_opmode == ATH9K_M_MONITOR) {
				if (ds->ds_rxstat.rs_status &
				    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
				      ATH9K_RXERR_CRC))
					goto rx_next;
			} else {
				if (ds->ds_rxstat.rs_status &
				    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
					goto rx_next;
				}
			}
		}

		/*
		 * The status portion of the descriptor could get corrupted.
		 */
		if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
			goto rx_next;

		/*
		 * Sync and unmap the frame. At this point we're
		 * committed to passing the sk_buff somewhere so
		 * clear buf_skb; this means a new sk_buff must be
		 * allocated when the rx descriptor is setup again
		 * to receive another frame.
		 */
		skb_put(skb, ds->ds_rxstat.rs_datalen);
		skb->protocol = cpu_to_be16(ETH_P_CONTROL);
		rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
		rx_status.rateieee =
			sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate;
		rx_status.rateKbps =
			sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps;
		rx_status.ratecode = ds->ds_rxstat.rs_rate;

		/* HT rate */
		if (rx_status.ratecode & 0x80) {
			/* TODO - add table to avoid division */
			if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
				rx_status.flags |= ATH_RX_40MHZ;
				rx_status.rateKbps =
					(rx_status.rateKbps * 27) / 13;
			}
			if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
				rx_status.rateKbps =
					(rx_status.rateKbps * 10) / 9;
			else
				rx_status.flags |= ATH_RX_SHORT_GI;
		}
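		/*
		 * Rate-scaling example: an MCS 7 frame reported as
		 * 65000 kbps on 20 MHz becomes 65000 * 27 / 13 =
		 * 135000 kbps when received on a 40 MHz channel, and
		 * 135000 * 10 / 9 = 150000 kbps with the guard-interval
		 * adjustment, matching the 802.11n rate tables.
		 */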
		/* sc->sc_noise_floor is only available when the station
		   attaches to an AP, so we use a default value
		   if we are not yet attached. */

		/* XXX we should use either sc->sc_noise_floor or
		 * ath_hal_getChanNoise(ah, &sc->sc_curchan)
		 * to calculate the noise floor.
		 * However, the value returned by ath_hal_getChanNoise
		 * seems to be incorrect (-31dBm on the last test),
		 * so we will use a hard-coded value until we
		 * figure out what is going on.
		 */
		rx_status.abs_rssi =
			ds->ds_rxstat.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;

		pci_dma_sync_single_for_cpu(sc->pdev,
					    bf->bf_buf_addr,
					    sc->sc_rxbufsize,
					    PCI_DMA_FROMDEVICE);
		pci_unmap_single(sc->pdev,
				 bf->bf_buf_addr,
				 sc->sc_rxbufsize,
				 PCI_DMA_FROMDEVICE);

		/* XXX: Ah! make me more readable, use a helper */
		if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
			if (ds->ds_rxstat.rs_moreaggr == 0) {
				rx_status.rssictl[0] =
					ds->ds_rxstat.rs_rssi_ctl0;
				rx_status.rssictl[1] =
					ds->ds_rxstat.rs_rssi_ctl1;
				rx_status.rssictl[2] =
					ds->ds_rxstat.rs_rssi_ctl2;
				rx_status.rssi = ds->ds_rxstat.rs_rssi;
				if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
					rx_status.rssiextn[0] =
						ds->ds_rxstat.rs_rssi_ext0;
					rx_status.rssiextn[1] =
						ds->ds_rxstat.rs_rssi_ext1;
					rx_status.rssiextn[2] =
						ds->ds_rxstat.rs_rssi_ext2;
					rx_status.flags |=
						ATH_RX_RSSI_EXTN_VALID;
				}
				rx_status.flags |= ATH_RX_RSSI_VALID |
					ATH_RX_CHAIN_RSSI_VALID;
			}
		} else {
			/*
			 * Need to insert the "combined" rssi into the
			 * status structure for upper layer processing
			 */
			rx_status.rssi = ds->ds_rxstat.rs_rssi;
			rx_status.flags |= ATH_RX_RSSI_VALID;
		}

		/* Pass frames up to the stack. */
		type = ath_rx_indicate(sc, skb,
				       &rx_status, ds->ds_rxstat.rs_keyix);

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
			if (++sc->sc_rxotherant >= 3)
				ath_setdefantenna(sc,
						  ds->ds_rxstat.rs_antenna);
		} else {
			sc->sc_rxotherant = 0;
		}

#ifdef CONFIG_SLOW_ANT_DIV
		if ((rx_status.flags & ATH_RX_RSSI_VALID) &&
		    ieee80211_is_beacon(fc)) {
			ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat);
		}
#endif
		/*
		 * For frames successfully indicated, the buffer will be
		 * returned to us by upper layers by calling
		 * ath_rx_mpdu_requeue, either synchronously or asynchronously.
		 * So we don't want to do it here in this loop.
		 */
		continue;

rx_next:
		bf->bf_status |= ATH_BUFSTATUS_FREE;
	} while (1);

	if (chainreset) {
		DPRINTF(sc, ATH_DBG_CONFIG,
			"%s: Reset rx chain mask. "
			"Do internal reset\n", __func__);
		ASSERT(flush == 0);
		ath_reset(sc, false);
	}

	return 0;
#undef PA2DESC
}
/* Process ADDBA request in per-TID data structure */

int ath_rx_aggr_start(struct ath_softc *sc,
		      const u8 *addr,
		      u16 tid,
		      u16 *ssn)
{
	struct ath_arx_tid *rxtid;
	struct ath_node *an;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_supported_band *sband;
	u16 buffersize = 0;

	spin_lock_bh(&sc->node_lock);
	an = ath_node_find(sc, (u8 *) addr);
	spin_unlock_bh(&sc->node_lock);

	if (!an) {
		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: Node not found to initialize RX aggregation\n",
			__func__);
		return -1;
	}

	sband = hw->wiphy->bands[hw->conf.channel->band];
	buffersize = IEEE80211_MIN_AMPDU_BUF <<
		sband->ht_info.ampdu_factor; /* FIXME */

	rxtid = &an->an_aggr.rx.tid[tid];

	spin_lock_bh(&rxtid->tidlock);
	if (sc->sc_rxaggr) {
		/* Allow aggregation reception
		 * Adjust rx BA window size. Peer might indicate a
		 * zero buffer size for a _dont_care_ condition.
		 */
		if (buffersize)
			rxtid->baw_size = min(buffersize, rxtid->baw_size);

		/* set rx sequence number */
		rxtid->seq_next = *ssn;

		/* Allocate the receive buffers for this TID */
		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: Allocating rxbuffer for TID %d\n", __func__, tid);

		if (rxtid->rxbuf == NULL) {
			/*
			 * If the rxbuf is not NULL at this point, we *probably*
			 * already allocated the buffer on a previous ADDBA,
			 * and this is a subsequent ADDBA that got through.
			 * Don't allocate, but use the value in the pointer,
			 * we zero it out when we de-allocate.
			 */
			rxtid->rxbuf = kmalloc(ATH_TID_MAX_BUFS *
				sizeof(struct ath_rxbuf), GFP_ATOMIC);
		}
		if (rxtid->rxbuf == NULL) {
			DPRINTF(sc, ATH_DBG_AGGR,
				"%s: Unable to allocate RX buffer, "
				"refusing ADDBA\n", __func__);
		} else {
			/* Ensure the memory is zeroed out (all internal
			 * pointers are null) */
			memzero(rxtid->rxbuf, ATH_TID_MAX_BUFS *
				sizeof(struct ath_rxbuf));
			DPRINTF(sc, ATH_DBG_AGGR,
				"%s: Allocated @%p\n", __func__, rxtid->rxbuf);

			/* Allow aggregation reception */
			rxtid->addba_exchangecomplete = 1;
		}
	}
	spin_unlock_bh(&rxtid->tidlock);

	return 0;
}
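/*
 * Sizing example: mac80211 defines IEEE80211_MIN_AMPDU_BUF as 0x8, so
 * a peer advertising an ampdu_factor of 3 yields a buffersize of
 * 8 << 3 = 64 frames; the BA window is then clamped to the smaller of
 * this and the locally configured rxtid->baw_size.
 */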
int ath_rx_aggr_stop(struct ath_softc *sc,
		     const u8 *addr,
		     u16 tid)
{
	struct ath_node *an;

	spin_lock_bh(&sc->node_lock);
	an = ath_node_find(sc, (u8 *) addr);
	spin_unlock_bh(&sc->node_lock);

	if (!an) {
		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: RX aggr stop for non-existent node\n", __func__);
		return -1;
	}

	ath_rx_aggr_teardown(sc, an, tid);
	return 0;
}
/* Rx aggregation tear down */

void ath_rx_aggr_teardown(struct ath_softc *sc,
			  struct ath_node *an, u8 tid)
{
	struct ath_arx_tid *rxtid = &an->an_aggr.rx.tid[tid];

	if (!rxtid->addba_exchangecomplete)
		return;

	del_timer_sync(&rxtid->timer);
	ath_rx_flush_tid(sc, rxtid, 0);
	rxtid->addba_exchangecomplete = 0;

	/* De-allocate the receive buffer array allocated when addba started */
	if (rxtid->rxbuf) {
		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: Deallocating TID %d rxbuff @%p\n",
			__func__, tid, rxtid->rxbuf);
		kfree(rxtid->rxbuf);

		/* Set pointer to null to avoid reuse */
		rxtid->rxbuf = NULL;
	}
}
/* Initialize per-node receive state */

void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	if (sc->sc_rxaggr) {
		struct ath_arx_tid *rxtid;
		int tidno;

		/* Init per tid rx state */
		for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
		     tidno < WME_NUM_TID;
		     tidno++, rxtid++) {
			rxtid->an = an;
			rxtid->seq_reset = 1;
			rxtid->seq_next = 0;
			rxtid->baw_size = WME_MAX_BA;
			rxtid->baw_head = rxtid->baw_tail = 0;

			/*
			 * Ensure the buffer pointer is null at this point
			 * (needs to be allocated when addba is received)
			 */
			rxtid->rxbuf = NULL;
			setup_timer(&rxtid->timer, ath_rx_timer,
				    (unsigned long)rxtid);
			spin_lock_init(&rxtid->tidlock);

			/* ADDBA state */
			rxtid->addba_exchangecomplete = 0;
		}
	}
}
void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	if (sc->sc_rxaggr) {
		struct ath_arx_tid *rxtid;
		int tidno, i;

		/* Cleanup per tid rx state */
		for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
		     tidno < WME_NUM_TID;
		     tidno++, rxtid++) {
			if (!rxtid->addba_exchangecomplete)
				continue;

			/* must cancel timer first */
			del_timer_sync(&rxtid->timer);

			/* drop any pending sub-frames */
			ath_rx_flush_tid(sc, rxtid, 1);

			for (i = 0; i < ATH_TID_MAX_BUFS; i++)
				ASSERT(rxtid->rxbuf[i].rx_wbuf == NULL);

			rxtid->addba_exchangecomplete = 0;
		}
	}
}
/* Cleanup per-node receive state */

void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an)
{
	ath_rx_node_cleanup(sc, an);
}
dma_addr_t ath_skb_map_single(struct ath_softc *sc,
			      struct sk_buff *skb,
			      int direction,
			      dma_addr_t *pa)
{
	/*
	 * NB: do NOT use skb->len, which is 0 on initialization.
	 * Use skb's entire data area instead.
	 */
	*pa = pci_map_single(sc->pdev, skb->data,
			     skb_end_pointer(skb) - skb->head, direction);
	return *pa;
}
void ath_skb_unmap_single(struct ath_softc *sc,
			  struct sk_buff *skb,
			  int direction,
			  dma_addr_t *pa)
{
	/* Unmap skb's entire data area */
	pci_unmap_single(sc->pdev, *pa,
			 skb_end_pointer(skb) - skb->head, direction);
}