/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))
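/*
 * Helper for the antenna diversity code below: the "alt" antenna is treated
 * as better than the current main antenna when its receive ratio and
 * averaged RSSI clear the given thresholds, and only once more than 50
 * packets have been sampled.
 */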
static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
					       int mindelta, int main_rssi_avg,
					       int alt_rssi_avg, int pkt_count)
{
	return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
		 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
		(alt_rssi_avg > main_rssi_avg + mindelta)) &&
	       (pkt_count > 50);
}
static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}
static struct ieee80211_hw *ath_get_virt_hw(struct ath_softc *sc,
					    struct ieee80211_hdr *hdr)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	int i;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];

		if (aphy == NULL)
			continue;
		if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
		    == 0) {
			hw = aphy->hw;
			break;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return hw;
}
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process.
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}
static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 nbuf = 0;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, ATH_DBG_QUEUE, "No free rx buf available\n");
		return;
	}

	while (!list_empty(&sc->rx.rxbuf)) {
		nbuf++;

		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

		if (nbuf >= size)
			break;
	}
}
static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}
static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu)
			dev_kfree_skb_any(bf->bf_mpdu);
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	skb_queue_head_init(&rx_edma->rx_buffers);
	rx_edma->rx_fifo_hwsize = size;
}
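/*
 * In ath_rx_edma_init() below, rx_bufsize is rounded up to a multiple of
 * the cache line size (capped at 64 bytes). On EDMA chips each DMA'd frame
 * is preceded by ah->caps.rx_status_len bytes of RX status, which is why
 * that length is added to the buffer size here and subtracted again when
 * programming the hardware buffer size.
 */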
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
				     ah->caps.rx_status_len,
				     min(common->cachelsz, (u16)64));

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				    ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}
static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}
static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}
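/*
 * ath_rx_init() dispatches between the two receive paths: EDMA chips get
 * their FIFO-based setup via ath_rx_edma_init(), everything else uses the
 * legacy linked-descriptor setup below.
 */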
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
					     min(common->cachelsz, (u16)64));

		ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
			common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1, 0);
		if (error != 0) {
			ath_err(common,
				"failed to allocate rx descriptors: %d\n",
				error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 common->rx_bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				bf->bf_buf_addr = 0;
				ath_err(common,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}
void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
				bf->bf_buf_addr = 0;
				bf->bf_mpdu = NULL;
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 */
u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
	     (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->is_monitoring))
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((AR_SREV_9280_20_OR_LATER(sc->sc_ah) ||
	     AR_SREV_9285_12_OR_LATER(sc->sc_ah)) &&
	    (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->sec_wiphy || (sc->nvifs > 1) ||
	    (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

#undef RX_FILTER_PRESERVE
}
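/*
 * Callers install the computed filter with ath9k_hw_setrxfilter(); see
 * ath_opmode_init() above for the usual rfilt = ath_calcrxfilter(sc)
 * followed by ath9k_hw_setrxfilter(ah, rfilt) sequence.
 */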
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped;

	spin_lock_bh(&sc->rx.rxbuflock);
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);

	sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	if (unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}

	return stopped;
}
void ath_flushrecv(struct ath_softc *sc)
{
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
}
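/*
 * Powersave helpers: while the station is dozing, received beacons are
 * parsed for the TIM element so the driver knows whether to stay awake for
 * buffered broadcast/multicast (CAB) traffic before dropping back to
 * network sleep.
 */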
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
		return; /* not from our current AP */

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, ATH_DBG_PS,
			"Reconfigure Beacon timers based on timestamp from the AP\n");
		ath_beacon_config(sc, NULL);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, ATH_DBG_PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, ATH_DBG_PS,
			"PS wait for CAB frames timed out\n");
	}
}
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, ATH_DBG_PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}
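/*
 * Frames are handed to mac80211 either on the hw that owns the receiver
 * address or, for broadcast/multicast, copied to every registered virtual
 * wiphy as well (see ath_rx_send_to_mac80211() below).
 */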
static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
				    struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Send the frame to mac80211 */
	if (is_multicast_ether_addr(hdr->addr1)) {
		int i;
		/*
		 * Deliver broadcast/multicast frames to all suitable
		 * virtual wiphys.
		 */
		/* TODO: filter based on channel configuration */
		for (i = 0; i < sc->num_sec_wiphy; i++) {
			struct ath_wiphy *aphy = sc->sec_wiphy[i];
			struct sk_buff *nskb;

			if (aphy == NULL)
				continue;

			nskb = skb_copy(skb, GFP_ATOMIC);
			if (nskb)
				ieee80211_rx(aphy->hw, nskb);
		}
		ieee80211_rx(sc->hw, skb);
	} else
		/* Deliver unicast frames based on receiver address */
		ieee80211_rx(hw, skb);
}
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
	if (ret == -EINPROGRESS) {
		/* let the device own the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize,
					   DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		skb = skb_peek(&rx_edma->rx_fifo);
		if (!skb)
			return true;

		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);

		__skb_unlink(skb, &rx_edma->rx_fifo);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		return true;
	}
	skb_queue_tail(&rx_edma->rx_buffers, skb);

	return true;
}
static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct sk_buff *skb;
	struct ath_buf *bf;

	while (ath_edma_get_buffers(sc, qtype));
	skb = __skb_dequeue(&rx_edma->rx_buffers);
	if (!skb)
		return NULL;

	bf = SKB_CB_ATHBUF(skb);
	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
	return bf;
}
static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */
		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}
/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
#define is_mc_or_valid_tkip_keyix ((is_mc ||			\
		(rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && \
		test_bit(rx_stats->rs_keyix, common->tkip_keymap))))

	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	if (!rx_stats->rs_datalen)
		return false;
	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return false;

	/*
	 * rs_more indicates chained descriptors which can be used
	 * to link buffers together for a sort of scatter-gather
	 * operation.
	 * reject the frame, we don't support scatter-gather yet and
	 * the frame is probably corrupt anyway
	 */
	if (rx_stats->rs_more)
		return false;

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		if (rx_stats->rs_status & ATH9K_RXERR_CRC)
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
			bool is_mc;
			/*
			 * The MIC error bit is only valid if the frame
			 * is not a control frame or fragment, and it was
			 * decrypted using a valid TKIP key.
			 */
			is_mc = !!is_multicast_ether_addr(hdr->addr1);

			if (!ieee80211_is_ctl(fc) &&
			    !ieee80211_has_morefrags(fc) &&
			    !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
			    is_mc_or_valid_tkip_keyix)
				rxs->flag |= RX_FLAG_MMIC_ERROR;
			else
				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
		}
		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (ah->is_monitoring) {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				return false;
		} else {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				return false;
			}
		}
	}
	return true;
}
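/*
 * Rate mapping: HT rates are flagged by bit 0x80 of rs_rate with the MCS
 * index in the low 7 bits; legacy rates are looked up in the current band's
 * bitrate table by hardware rate code (long or short preamble variant).
 */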
static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_dbg(common, ATH_DBG_XMIT,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);

	return -EINVAL;
}
static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	__le16 fc;

	if (ah->opmode != NL80211_IFTYPE_STATION)
		return;

	fc = hdr->frame_control;
	if (!ieee80211_is_beacon(fc) ||
	    compare_ether_addr(hdr->addr3, common->curbssid))
		return;

	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(aphy->last_rssi, rx_stats->rs_rssi);

	last_rssi = aphy->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
					       ATH_RSSI_EP_MULTIPLIER);
	if (rx_stats->rs_rssi < 0)
		rx_stats->rs_rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rx_stats->rs_rssi;
}
/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or a real decryption error. This lets us keep statistics
 * there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_TSFT;

	return 0;
}
static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
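	/*
	 * For example, a QoS data frame has padpos = 26 (24-byte header plus
	 * the 2-byte QoS control field), so padsize = 26 & 3 = 2 and the
	 * header is shifted down by two bytes; a plain 24-byte header gives
	 * padsize = 0 and nothing is removed.
	 */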
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}
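/*
 * Antenna diversity and combining (single-chain chips with two LNAs): the
 * code below periodically scans alternative LNA configurations, compares
 * averaged RSSI and receive ratios of the main and alt antennas, and
 * programs the better configuration back into the hardware.
 */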
static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf ant_conf,
				      int main_rssi_avg)
{
	antcomb->quick_scan_cnt = 0;

	if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
		antcomb->rssi_lna2 = main_rssi_avg;
	else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
		antcomb->rssi_lna1 = main_rssi_avg;

	switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
	case (0x10): /* LNA2 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case (0x20): /* LNA1 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	case (0x21): /* LNA1 LNA2 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case (0x12): /* LNA2 LNA1 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case (0x13): /* LNA2 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case (0x23): /* LNA1 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	default:
		break;
	}
}
static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
				struct ath_hw_antcomb_conf *div_ant_conf,
				int main_rssi_avg, int alt_rssi_avg,
				int alt_ratio)
{
	switch (antcomb->quick_scan_cnt) {
	case 0:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
		break;
	case 1:
		/* set alt to main, and alt to second conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_second = alt_rssi_avg;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			      (alt_rssi_avg > main_rssi_avg +
			       ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			     (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
	case 2:
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			      (alt_rssi_avg > main_rssi_avg +
			       ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			     (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with maximum ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt */
				if ((antcomb->first_quick_scan_conf ==
				     ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				     ATH_ANT_DIV_COMB_LNA2)) {
					/* Set alt LNA1 or LNA2 */
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				} else {
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
				}
			} else if ((antcomb->second_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->first_quick_scan_conf;
			}
		} else if (antcomb->second_ratio) {
			/* second alt */
			if ((antcomb->second_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->second_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else {
			/* main is largest */
			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf = antcomb->main_conf;
			}
		}
		break;
	default:
		break;
	}
}
static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf)
{
	/* Adjust the fast_div_bias based on main and alt lna conf */
	switch ((ant_conf->main_lna_conf << 4) | ant_conf->alt_lna_conf) {
	case (0x01): /* A-B LNA2 */
		ant_conf->fast_div_bias = 0x3b;
		break;
	case (0x02): /* A-B LNA1 */
		ant_conf->fast_div_bias = 0x3d;
		break;
	case (0x03): /* A-B A+B */
		ant_conf->fast_div_bias = 0x1;
		break;
	case (0x10): /* LNA2 A-B */
		ant_conf->fast_div_bias = 0x7;
		break;
	case (0x12): /* LNA2 LNA1 */
		ant_conf->fast_div_bias = 0x2;
		break;
	case (0x13): /* LNA2 A+B */
		ant_conf->fast_div_bias = 0x7;
		break;
	case (0x20): /* LNA1 A-B */
		ant_conf->fast_div_bias = 0x6;
		break;
	case (0x21): /* LNA1 LNA2 */
		ant_conf->fast_div_bias = 0x0;
		break;
	case (0x23): /* LNA1 A+B */
		ant_conf->fast_div_bias = 0x6;
		break;
	case (0x30): /* A+B A-B */
		ant_conf->fast_div_bias = 0x1;
		break;
	case (0x31): /* A+B LNA2 */
		ant_conf->fast_div_bias = 0x3b;
		break;
	case (0x32): /* A+B LNA1 */
		ant_conf->fast_div_bias = 0x3d;
		break;
	default:
		break;
	}
}
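/*
 * In ath_lnaconf_alt_good_scan() and ath_ant_div_conf_fast_divbias() above
 * the switch key is (main_lna_conf << 4) | alt_lna_conf, with the LNA
 * configuration values assumed to be A-B = 0, LNA2 = 1, LNA1 = 2 and
 * A+B = 3 (matching the per-case comments), e.g. 0x21 means main = LNA1,
 * alt = LNA2.
 */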
/* Antenna diversity and combining */
static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
{
	struct ath_hw_antcomb_conf div_ant_conf;
	struct ath_ant_comb *antcomb = &sc->ant_comb;
	int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
	int curr_main_set, curr_bias;
	int main_rssi = rs->rs_rssi_ctl0;
	int alt_rssi = rs->rs_rssi_ctl1;
	int rx_ant_conf, main_ant_conf;
	bool short_scan = false;

	rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
		      ATH_ANT_RX_MASK;
	main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
			ATH_ANT_RX_MASK;

	/* Record packet only when alt_rssi is positive */
	if (alt_rssi > 0) {
		antcomb->total_pkt_count++;
		antcomb->main_total_rssi += main_rssi;
		antcomb->alt_total_rssi += alt_rssi;
		if (main_ant_conf == rx_ant_conf)
			antcomb->main_recv_cnt++;
		else
			antcomb->alt_recv_cnt++;
	}

	/* Short scan check */
	if (antcomb->scan && antcomb->alt_good) {
		if (time_after(jiffies, antcomb->scan_start_time +
		    msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
			short_scan = true;
		else if (antcomb->total_pkt_count ==
			 ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
			alt_ratio = ((antcomb->alt_recv_cnt * 100) /
				     antcomb->total_pkt_count);
			if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
				short_scan = true;
		}
	}

	if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
	     rs->rs_moreaggr) && !short_scan)
		return;

	if (antcomb->total_pkt_count) {
		alt_ratio = ((antcomb->alt_recv_cnt * 100) /
			     antcomb->total_pkt_count);
		main_rssi_avg = (antcomb->main_total_rssi /
				 antcomb->total_pkt_count);
		alt_rssi_avg = (antcomb->alt_total_rssi /
				antcomb->total_pkt_count);
	}

	ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
	curr_alt_set = div_ant_conf.alt_lna_conf;
	curr_main_set = div_ant_conf.main_lna_conf;
	curr_bias = div_ant_conf.fast_div_bias;

	antcomb->count++;

	if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
						  main_rssi_avg);
			antcomb->alt_good = true;
		} else {
			antcomb->alt_good = false;
		}

		antcomb->count = 0;
		antcomb->scan = true;
		antcomb->scan_not_start = true;
	}

	if (!antcomb->scan) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
				/* Switch main and alt LNA */
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}

			goto div_comb_done;
		} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
			   (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;

			goto div_comb_done;
		}

		if ((alt_rssi_avg < (main_rssi_avg +
		     ATH_ANT_DIV_COMB_LNA1_LNA2_DELTA)))
			goto div_comb_done;
	}

	if (!antcomb->scan_not_start) {
		switch (curr_alt_set) {
		case ATH_ANT_DIV_COMB_LNA2:
			antcomb->rssi_lna2 = alt_rssi_avg;
			antcomb->rssi_lna1 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf =
				ATH_ANT_DIV_COMB_LNA1;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1:
			antcomb->rssi_lna1 = alt_rssi_avg;
			antcomb->rssi_lna2 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
			antcomb->rssi_add = alt_rssi_avg;
			antcomb->scan = true;
			/* set to A-B */
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
			antcomb->rssi_sub = alt_rssi_avg;
			antcomb->scan = false;
			if (antcomb->rssi_lna2 >
			    (antcomb->rssi_lna1 +
			     ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
				/* use LNA2 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA1 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				}
			} else {
				/* use LNA1 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA2 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				}
			}
			break;
		default:
			break;
		}
	} else {
		if (!antcomb->alt_good) {
			antcomb->scan_not_start = false;
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}
			goto div_comb_done;
		}
	}

	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
					   main_rssi_avg, alt_rssi_avg,
					   alt_ratio);

	antcomb->quick_scan_cnt++;

div_comb_done:
	ath_ant_div_conf_fast_divbias(&div_ant_conf);

	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);

	antcomb->scan_start_time = jiffies;
	antcomb->total_pkt_count = 0;
	antcomb->main_total_rssi = 0;
	antcomb->alt_total_rssi = 0;
	antcomb->main_recv_cnt = 0;
	antcomb->alt_recv_cnt = 0;
}
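/*
 * Main receive tasklet: drains completed buffers from either the EDMA FIFOs
 * or the legacy descriptor list, validates and post-processes each frame,
 * hands it to mac80211 and immediately re-links a fresh buffer so the
 * hardware never runs dry.
 */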
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	/*
	 * The hw can technically differ from common->hw when using ath9k
	 * virtual wiphy so to account for that we iterate over the active
	 * wiphys and find the appropriate wiphy and therefore hw.
	 */
	struct ieee80211_hw *hw = NULL;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(skb);

		hw = ath_get_virt_hw(sc, hdr);

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue;

		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize, dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		ath9k_rx_skb_postprocess(common, skb, &rs,
					 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize, dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common, "dma_mapping_error() on RX\n");
			ath_rx_send_to_mac80211(hw, sc, skb);
			break;
		}

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != rs.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, rs.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		spin_lock_irqsave(&sc->sc_pm_lock, flags);

		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    unlikely(ath9k_check_auto_sleep(sc)))
			ath_rx_ps(sc, skb);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
			ath_ant_comb_scan(sc, &rs);

		ath_rx_send_to_mac80211(hw, sc, skb);

requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}