/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "ath9k.h"
#include "ar9003_mac.h"
#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))
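/*
 * SKB_CB_ATHBUF stashes the owning ath_buf pointer in the skb's control
 * block so it can be recovered when the skb comes back from an EDMA rx
 * FIFO: SKB_CB_ATHBUF(skb) = bf on queueing, bf = SKB_CB_ATHBUF(skb) on
 * completion (see the EDMA helpers below).
 */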
static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
                                               int mindelta, int main_rssi_avg,
                                               int alt_rssi_avg, int pkt_count)
{
        return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
                 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
                (alt_rssi_avg > main_rssi_avg + mindelta)) &&
               (pkt_count > 50);
}
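/*
 * Reading the predicate above: once more than 50 packets have been
 * sampled, the alternate antenna is judged better either when its RSSI
 * average clears main_rssi_avg by mindelta, or, when the alternate
 * receive ratio is at least ATH_ANT_DIV_COMB_ALT_ANT_RATIO2, by maxdelta.
 * Callers below pass the LNA1 DELTA_HI/DELTA_MID constants as maxdelta
 * and DELTA_LOW as mindelta.
 */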
static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
        return sc->ps_enabled &&
               (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}
static struct ieee80211_hw *ath_get_virt_hw(struct ath_softc *sc,
                                            struct ieee80211_hdr *hdr)
{
        struct ieee80211_hw *hw = sc->pri_wiphy->hw;
        int i;

        spin_lock_bh(&sc->wiphy_lock);
        for (i = 0; i < sc->num_sec_wiphy; i++) {
                struct ath_wiphy *aphy = sc->sec_wiphy[i];
                if (aphy == NULL)
                        continue;
                /* compare_ether_addr() returns 0 when the addresses match */
                if (compare_ether_addr(hdr->addr1,
                                       aphy->hw->wiphy->perm_addr) == 0) {
                        hw = aphy->hw;
                        break;
                }
        }
        spin_unlock_bh(&sc->wiphy_lock);
        return hw;
}
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_desc *ds;
        struct sk_buff *skb;

        ATH_RXBUF_RESET(bf);

        ds = bf->bf_desc;
        ds->ds_link = 0; /* link to null */
        ds->ds_data = bf->bf_buf_addr;

        /* virtual addr of the beginning of the buffer. */
        skb = bf->bf_mpdu;
        BUG_ON(skb == NULL);
        ds->ds_vdata = skb->data;

        /*
         * setup rx descriptors. The rx_bufsize here tells the hardware
         * how much data it can DMA to us and that we are prepared
         * to process
         */
        ath9k_hw_setuprxdesc(ah, ds,
                             common->rx_bufsize,
                             0);

        if (sc->rx.rxlink == NULL)
                ath9k_hw_putrxbuf(ah, bf->bf_daddr);
        else
                *sc->rx.rxlink = bf->bf_daddr;

        sc->rx.rxlink = &ds->ds_link;
}
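/*
 * Illustration of the legacy rx chain built by repeated calls to
 * ath_rx_buf_link() (see ath_startrecv() below): each new descriptor is
 * hooked into the previous tail's ds_link and rxlink is advanced, ending
 * in a null link rather than a self-linked tail:
 *
 *	putrxbuf -> desc0.ds_link -> desc1.ds_link -> ... -> 0
 */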
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
        /* XXX block beacon interrupts */
        ath9k_hw_setantenna(sc->sc_ah, antenna);
        sc->rx.defant = antenna;
        sc->rx.rxotherant = 0;
}
static void ath_opmode_init(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        u32 rfilt, mfilt[2];

        /* configure rx filter */
        rfilt = ath_calcrxfilter(sc);
        ath9k_hw_setrxfilter(ah, rfilt);

        /* configure bssid mask */
        ath_hw_setbssidmask(common);

        /* configure operational mode */
        ath9k_hw_setopmode(ah);

        /* calculate and install multicast filter */
        mfilt[0] = mfilt[1] = ~0;
        ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
                                 enum ath9k_rx_qtype qtype)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_rx_edma *rx_edma;
        struct sk_buff *skb;
        struct ath_buf *bf;

        rx_edma = &sc->rx.rx_edma[qtype];
        if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
                return false;

        bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
        list_del_init(&bf->list);

        skb = bf->bf_mpdu;

        ATH_RXBUF_RESET(bf);
        memset(skb->data, 0, ah->caps.rx_status_len);
        dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
                                   ah->caps.rx_status_len, DMA_TO_DEVICE);

        SKB_CB_ATHBUF(skb) = bf;
        ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
        skb_queue_tail(&rx_edma->rx_fifo, skb);

        return true;
}
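/*
 * Typical fill pattern for the function above (this is how
 * ath_rx_addbuffer_edma() below uses it): keep linking free buffers into
 * the rx FIFO until the hardware depth is reached or the free list is
 * empty, i.e. loop while ath_rx_edma_buf_link() keeps returning true.
 */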
static void ath_rx_addbuffer_edma(struct ath_softc *sc,
                                  enum ath9k_rx_qtype qtype, int size)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        u32 nbuf = 0;

        if (list_empty(&sc->rx.rxbuf)) {
                ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
                return;
        }

        while (!list_empty(&sc->rx.rxbuf)) {
                nbuf++;
                if (!ath_rx_edma_buf_link(sc, qtype))
                        break;

                if (nbuf >= size)
                        break;
        }
}
static void ath_rx_remove_buffer(struct ath_softc *sc,
                                 enum ath9k_rx_qtype qtype)
{
        struct ath_buf *bf;
        struct ath_rx_edma *rx_edma;
        struct sk_buff *skb;

        rx_edma = &sc->rx.rx_edma[qtype];

        while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
                bf = SKB_CB_ATHBUF(skb);
                BUG_ON(!bf);
                list_add_tail(&bf->list, &sc->rx.rxbuf);
        }
}
static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
        struct ath_buf *bf;

        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

        list_for_each_entry(bf, &sc->rx.rxbuf, list) {
                if (bf->bf_mpdu)
                        dev_kfree_skb_any(bf->bf_mpdu);
        }

        INIT_LIST_HEAD(&sc->rx.rxbuf);

        kfree(sc->rx.rx_bufptr);
        sc->rx.rx_bufptr = NULL;
}
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
        skb_queue_head_init(&rx_edma->rx_fifo);
        skb_queue_head_init(&rx_edma->rx_buffers);
        rx_edma->rx_fifo_hwsize = size;
}
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_hw *ah = sc->sc_ah;
        struct sk_buff *skb;
        struct ath_buf *bf;
        int error = 0, i;
        u32 size;

        common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
                                     ah->caps.rx_status_len,
                                     min(common->cachelsz, (u16)64));

        ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
                                    ah->caps.rx_status_len);

        ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
                               ah->caps.rx_lp_qdepth);
        ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
                               ah->caps.rx_hp_qdepth);

        size = sizeof(struct ath_buf) * nbufs;
        bf = kzalloc(size, GFP_KERNEL);
        if (!bf)
                return -ENOMEM;

        INIT_LIST_HEAD(&sc->rx.rxbuf);
        sc->rx.rx_bufptr = bf;

        for (i = 0; i < nbufs; i++, bf++) {
                skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
                if (!skb) {
                        error = -ENOMEM;
                        goto rx_init_fail;
                }

                memset(skb->data, 0, common->rx_bufsize);
                bf->bf_mpdu = skb;

                bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
                                                 common->rx_bufsize,
                                                 DMA_BIDIRECTIONAL);
                if (unlikely(dma_mapping_error(sc->dev,
                                               bf->bf_buf_addr))) {
                        dev_kfree_skb_any(skb);
                        bf->bf_mpdu = NULL;
                        ath_print(common, ATH_DBG_FATAL,
                                  "dma_mapping_error() on RX init\n");
                        error = -ENOMEM;
                        goto rx_init_fail;
                }

                list_add_tail(&bf->list, &sc->rx.rxbuf);
        }

        return 0;

rx_init_fail:
        ath_rx_edma_cleanup(sc);
        return error;
}
static void ath_edma_start_recv(struct ath_softc *sc)
{
        spin_lock_bh(&sc->rx.rxbuflock);

        ath9k_hw_rxena(sc->sc_ah);

        ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
                              sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

        ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
                              sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

        spin_unlock_bh(&sc->rx.rxbuflock);

        ath_opmode_init(sc);

        ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
}
static void ath_edma_stop_recv(struct ath_softc *sc)
{
        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct sk_buff *skb;
        struct ath_buf *bf;
        int error = 0;

        spin_lock_init(&sc->rx.rxflushlock);
        sc->sc_flags &= ~SC_OP_RXFLUSH;
        spin_lock_init(&sc->rx.rxbuflock);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                return ath_rx_edma_init(sc, nbufs);
        } else {
                common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
                                             min(common->cachelsz, (u16)64));

                ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
                          common->cachelsz, common->rx_bufsize);

                /* Initialize rx descriptors */

                error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
                                          "rx", nbufs, 1, 0);
                if (error != 0) {
                        ath_print(common, ATH_DBG_FATAL,
                                  "failed to allocate rx descriptors: %d\n",
                                  error);
                        goto err;
                }

                list_for_each_entry(bf, &sc->rx.rxbuf, list) {
                        skb = ath_rxbuf_alloc(common, common->rx_bufsize,
                                              GFP_KERNEL);
                        if (skb == NULL) {
                                error = -ENOMEM;
                                goto err;
                        }

                        bf->bf_mpdu = skb;
                        bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
                                                         common->rx_bufsize,
                                                         DMA_FROM_DEVICE);
                        if (unlikely(dma_mapping_error(sc->dev,
                                                       bf->bf_buf_addr))) {
                                dev_kfree_skb_any(skb);
                                bf->bf_mpdu = NULL;
                                ath_print(common, ATH_DBG_FATAL,
                                          "dma_mapping_error() on RX init\n");
                                error = -ENOMEM;
                                goto err;
                        }
                }
                sc->rx.rxlink = NULL;
        }

err:
        if (error)
                ath_rx_cleanup(sc);

        return error;
}
void ath_rx_cleanup(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct sk_buff *skb;
        struct ath_buf *bf;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                ath_rx_edma_cleanup(sc);
                return;
        } else {
                list_for_each_entry(bf, &sc->rx.rxbuf, list) {
                        skb = bf->bf_mpdu;
                        if (skb) {
                                dma_unmap_single(sc->dev, bf->bf_buf_addr,
                                                 common->rx_bufsize,
                                                 DMA_FROM_DEVICE);
                                dev_kfree_skb(skb);
                        }
                }

                if (sc->rx.rxdma.dd_desc_len != 0)
                        ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
        }
}
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 */
u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

        u32 rfilt;

        rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
                | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
                | ATH9K_RX_FILTER_MCAST;

        if (sc->rx.rxfilter & FIF_PROBE_REQ)
                rfilt |= ATH9K_RX_FILTER_PROBEREQ;

        /*
         * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
         * mode interface or when in monitor mode. AP mode does not need this
         * since it receives all in-BSS frames anyway.
         */
        if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
             (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
            (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
                rfilt |= ATH9K_RX_FILTER_PROM;

        if (sc->rx.rxfilter & FIF_CONTROL)
                rfilt |= ATH9K_RX_FILTER_CONTROL;

        if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
            !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
                rfilt |= ATH9K_RX_FILTER_MYBEACON;
        else
                rfilt |= ATH9K_RX_FILTER_BEACON;

        if ((AR_SREV_9280_20_OR_LATER(sc->sc_ah) ||
             AR_SREV_9285_12_OR_LATER(sc->sc_ah)) &&
            (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
            (sc->rx.rxfilter & FIF_PSPOLL))
                rfilt |= ATH9K_RX_FILTER_PSPOLL;

        if (conf_is_ht(&sc->hw->conf))
                rfilt |= ATH9K_RX_FILTER_COMP_BAR;

        if (sc->sec_wiphy || (sc->nvifs > 1) ||
            (sc->rx.rxfilter & FIF_OTHER_BSS)) {
                /* The following may also be needed for other older chips */
                if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
                        rfilt |= ATH9K_RX_FILTER_PROM;
                rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
        }

        return rfilt;

#undef RX_FILTER_PRESERVE
}
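/*
 * Illustrative result: a plain station-mode interface with no promiscuous
 * or scan-related FIF_* flags set ends up with the preserved PHY error
 * bits plus ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST |
 * ATH9K_RX_FILTER_MCAST | ATH9K_RX_FILTER_MYBEACON, i.e. normal traffic
 * and only beacons from the current BSS.
 */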
int ath_startrecv(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_buf *bf, *tbf;

        if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                ath_edma_start_recv(sc);
                return 0;
        }

        spin_lock_bh(&sc->rx.rxbuflock);
        if (list_empty(&sc->rx.rxbuf))
                goto start_recv;

        sc->rx.rxlink = NULL;
        list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
                ath_rx_buf_link(sc, bf);
        }

        /* We could have deleted elements so the list may be empty now */
        if (list_empty(&sc->rx.rxbuf))
                goto start_recv;

        bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
        ath9k_hw_putrxbuf(ah, bf->bf_daddr);
        ath9k_hw_rxena(ah);

start_recv:
        spin_unlock_bh(&sc->rx.rxbuflock);
        ath_opmode_init(sc);
        ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

        return 0;
}
bool ath_stoprecv(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        bool stopped;

        spin_lock_bh(&sc->rx.rxbuflock);
        ath9k_hw_stoppcurecv(ah);
        ath9k_hw_setrxfilter(ah, 0);
        stopped = ath9k_hw_stopdmarecv(ah);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                ath_edma_stop_recv(sc);
        else
                sc->rx.rxlink = NULL;
        spin_unlock_bh(&sc->rx.rxbuflock);

        return stopped;
}
void ath_flushrecv(struct ath_softc *sc)
{
        spin_lock_bh(&sc->rx.rxflushlock);
        sc->sc_flags |= SC_OP_RXFLUSH;
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                ath_rx_tasklet(sc, 1, true);
        ath_rx_tasklet(sc, 1, false);
        sc->sc_flags &= ~SC_OP_RXFLUSH;
        spin_unlock_bh(&sc->rx.rxflushlock);
}
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
        /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
        struct ieee80211_mgmt *mgmt;
        u8 *pos, *end, id, elen;
        struct ieee80211_tim_ie *tim;

        mgmt = (struct ieee80211_mgmt *)skb->data;
        pos = mgmt->u.beacon.variable;
        end = skb->data + skb->len;

        while (pos + 2 < end) {
                id = *pos++;
                elen = *pos++;
                if (pos + elen > end)
                        break;

                if (id == WLAN_EID_TIM) {
                        if (elen < sizeof(*tim))
                                break;
                        tim = (struct ieee80211_tim_ie *) pos;
                        if (tim->dtim_count != 0)
                                break;
                        return tim->bitmap_ctrl & 0x01;
                }

                pos += elen;
        }

        return false;
}
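/*
 * For reference, struct ieee80211_tim_ie parsed above follows the 802.11
 * TIM element layout: dtim_count, dtim_period, bitmap_ctrl, then the
 * partial virtual bitmap. Bit 0 of bitmap_ctrl is the multicast/broadcast
 * traffic indicator, so on a DTIM beacon (dtim_count == 0) the function
 * returns true exactly when the AP has buffered bc/mc frames.
 */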
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
        struct ieee80211_mgmt *mgmt;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        if (skb->len < 24 + 8 + 2 + 2)
                return;

        mgmt = (struct ieee80211_mgmt *)skb->data;
        if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
                return; /* not from our current AP */

        sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

        if (sc->ps_flags & PS_BEACON_SYNC) {
                sc->ps_flags &= ~PS_BEACON_SYNC;
                ath_print(common, ATH_DBG_PS,
                          "Reconfigure Beacon timers based on "
                          "timestamp from the AP\n");
                ath_beacon_config(sc, NULL);
        }

        if (ath_beacon_dtim_pending_cab(skb)) {
                /*
                 * Remain awake waiting for buffered broadcast/multicast
                 * frames. If the last broadcast/multicast frame is not
                 * received properly, the next beacon frame will work as
                 * a backup trigger for returning into NETWORK SLEEP state,
                 * so we are waiting for it as well.
                 */
                ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
                          "buffered broadcast/multicast frame(s)\n");
                sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
                return;
        }

        if (sc->ps_flags & PS_WAIT_FOR_CAB) {
                /*
                 * This can happen if a broadcast frame is dropped or the AP
                 * fails to send a frame indicating that all CAB frames have
                 * been delivered.
                 */
                sc->ps_flags &= ~PS_WAIT_FOR_CAB;
                ath_print(common, ATH_DBG_PS,
                          "PS wait for CAB frames timed out\n");
        }
}
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        hdr = (struct ieee80211_hdr *)skb->data;

        /* Process Beacon and CAB receive in PS state */
        if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
            && ieee80211_is_beacon(hdr->frame_control))
                ath_rx_ps_beacon(sc, skb);
        else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
                 (ieee80211_is_data(hdr->frame_control) ||
                  ieee80211_is_action(hdr->frame_control)) &&
                 is_multicast_ether_addr(hdr->addr1) &&
                 !ieee80211_has_moredata(hdr->frame_control)) {
                /*
                 * No more broadcast/multicast frames to be received at this
                 * point.
                 */
                sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
                ath_print(common, ATH_DBG_PS,
                          "All PS CAB frames received, back to sleep\n");
        } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
                   !is_multicast_ether_addr(hdr->addr1) &&
                   !ieee80211_has_morefrags(hdr->frame_control)) {
                sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
                ath_print(common, ATH_DBG_PS,
                          "Going back to sleep after having received "
                          "PS-Poll data (0x%lx)\n",
                          sc->ps_flags & (PS_WAIT_FOR_BEACON |
                                          PS_WAIT_FOR_CAB |
                                          PS_WAIT_FOR_PSPOLL_DATA |
                                          PS_WAIT_FOR_TX_ACK));
        }
}
static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
                                    struct ath_softc *sc, struct sk_buff *skb,
                                    struct ieee80211_rx_status *rxs)
{
        struct ieee80211_hdr *hdr;

        hdr = (struct ieee80211_hdr *)skb->data;

        /* Send the frame to mac80211 */
        if (is_multicast_ether_addr(hdr->addr1)) {
                int i;
                /*
                 * Deliver broadcast/multicast frames to all suitable
                 * virtual wiphys.
                 */
                /* TODO: filter based on channel configuration */
                for (i = 0; i < sc->num_sec_wiphy; i++) {
                        struct ath_wiphy *aphy = sc->sec_wiphy[i];
                        struct sk_buff *nskb;
                        if (aphy == NULL)
                                continue;
                        nskb = skb_copy(skb, GFP_ATOMIC);
                        if (!nskb)
                                continue;
                        ieee80211_rx(aphy->hw, nskb);
                }
                ieee80211_rx(sc->hw, skb);
        } else {
                /* Deliver unicast frames based on receiver address */
                ieee80211_rx(hw, skb);
        }
}
static bool ath_edma_get_buffers(struct ath_softc *sc,
                                 enum ath9k_rx_qtype qtype)
{
        struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct sk_buff *skb;
        struct ath_buf *bf;
        int ret;

        skb = skb_peek(&rx_edma->rx_fifo);
        if (!skb)
                return false;

        bf = SKB_CB_ATHBUF(skb);
        BUG_ON(!bf);

        dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
                                common->rx_bufsize, DMA_FROM_DEVICE);

        ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
        if (ret == -EINPROGRESS) {
                /* let device gain the buffer again */
                dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
                                           common->rx_bufsize,
                                           DMA_FROM_DEVICE);
                return false;
        }

        __skb_unlink(skb, &rx_edma->rx_fifo);
        if (ret == -EINVAL) {
                /* corrupt descriptor, skip this one and the following one */
                list_add_tail(&bf->list, &sc->rx.rxbuf);
                ath_rx_edma_buf_link(sc, qtype);
                skb = skb_peek(&rx_edma->rx_fifo);
                if (!skb)
                        return true;

                bf = SKB_CB_ATHBUF(skb);
                BUG_ON(!bf);

                __skb_unlink(skb, &rx_edma->rx_fifo);
                list_add_tail(&bf->list, &sc->rx.rxbuf);
                ath_rx_edma_buf_link(sc, qtype);
                return true;
        }
        skb_queue_tail(&rx_edma->rx_buffers, skb);

        return true;
}
static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
                                                struct ath_rx_status *rs,
                                                enum ath9k_rx_qtype qtype)
{
        struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
        struct sk_buff *skb;
        struct ath_buf *bf;

        while (ath_edma_get_buffers(sc, qtype));
        skb = __skb_dequeue(&rx_edma->rx_buffers);
        if (!skb)
                return NULL;

        bf = SKB_CB_ATHBUF(skb);
        ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
        return bf;
}
static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
                                           struct ath_rx_status *rs)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_desc *ds;
        struct ath_buf *bf;
        int ret;

        if (list_empty(&sc->rx.rxbuf)) {
                sc->rx.rxlink = NULL;
                return NULL;
        }

        bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
        ds = bf->bf_desc;

        /*
         * Must provide the virtual address of the current
         * descriptor, the physical address, and the virtual
         * address of the next descriptor in the h/w chain.
         * This allows the HAL to look ahead to see if the
         * hardware is done with a descriptor by checking the
         * done bit in the following descriptor and the address
         * of the current descriptor the DMA engine is working
         * on. All this is necessary because of our use of
         * a self-linked list to avoid rx overruns.
         */
        ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
        if (ret == -EINPROGRESS) {
                struct ath_rx_status trs;
                struct ath_buf *tbf;
                struct ath_desc *tds;

                memset(&trs, 0, sizeof(trs));
                if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
                        sc->rx.rxlink = NULL;
                        return NULL;
                }

                tbf = list_entry(bf->list.next, struct ath_buf, list);

                /*
                 * On some hardware the descriptor status words could
                 * get corrupted, including the done bit. Because of
                 * this, check if the next descriptor's done bit is
                 * set or not.
                 *
                 * If the next descriptor's done bit is set, the current
                 * descriptor has been corrupted. Force s/w to discard
                 * this descriptor and continue...
                 */

                tds = tbf->bf_desc;
                ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
                if (ret == -EINPROGRESS)
                        return NULL;
        }

        if (!bf->bf_mpdu)
                return bf;

        /*
         * Synchronize the DMA transfer with CPU before
         * 1. accessing the frame
         * 2. requeueing the same buffer to h/w
         */
        dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
                                common->rx_bufsize,
                                DMA_FROM_DEVICE);

        return bf;
}
/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
                            struct ieee80211_hdr *hdr,
                            struct ieee80211_rx_status *rxs,
                            struct ath_rx_status *rx_stats,
                            bool *decrypt_error)
{
        struct ath_hw *ah = common->ah;
        __le16 fc;
        u8 rx_status_len = ah->caps.rx_status_len;

        fc = hdr->frame_control;

        if (!rx_stats->rs_datalen)
                return false;
        /*
         * rs_status follows rs_datalen so if rs_datalen is too large
         * we can take a hint that hardware corrupted it, so ignore
         * those frames.
         */
        if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
                return false;

        /*
         * rs_more indicates chained descriptors which can be used
         * to link buffers together for a sort of scatter-gather
         * operation.
         *
         * reject the frame, we don't support scatter-gather yet and
         * the frame is probably corrupt anyway
         */
        if (rx_stats->rs_more)
                return false;

        /*
         * The rx_stats->rs_status will not be set until the end of the
         * chained descriptors so it can be ignored if rs_more is set. The
         * rs_more will be false at the last element of the chained
         * descriptors.
         */
        if (rx_stats->rs_status != 0) {
                if (rx_stats->rs_status & ATH9K_RXERR_CRC)
                        rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
                if (rx_stats->rs_status & ATH9K_RXERR_PHY)
                        return false;

                if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
                        *decrypt_error = true;
                } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
                        /*
                         * The MIC error bit is only valid if the frame
                         * is not a control frame or fragment, and it was
                         * decrypted using a valid TKIP key.
                         */
                        if (!ieee80211_is_ctl(fc) &&
                            !ieee80211_has_morefrags(fc) &&
                            !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
                            test_bit(rx_stats->rs_keyix, common->tkip_keymap))
                                rxs->flag |= RX_FLAG_MMIC_ERROR;
                        else
                                rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
                }
                /*
                 * Reject error frames with the exception of
                 * decryption and MIC failures. For monitor mode,
                 * we also ignore the CRC error.
                 */
                if (ah->opmode == NL80211_IFTYPE_MONITOR) {
                        if (rx_stats->rs_status &
                            ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
                              ATH9K_RXERR_CRC))
                                return false;
                } else {
                        if (rx_stats->rs_status &
                            ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
                                return false;
                        }
                }
        }

        return true;
}
static int ath9k_process_rate(struct ath_common *common,
                              struct ieee80211_hw *hw,
                              struct ath_rx_status *rx_stats,
                              struct ieee80211_rx_status *rxs)
{
        struct ieee80211_supported_band *sband;
        enum ieee80211_band band;
        unsigned int i = 0;

        band = hw->conf.channel->band;
        sband = hw->wiphy->bands[band];

        if (rx_stats->rs_rate & 0x80) {
                /* HT rate */
                rxs->flag |= RX_FLAG_HT;
                if (rx_stats->rs_flags & ATH9K_RX_2040)
                        rxs->flag |= RX_FLAG_40MHZ;
                if (rx_stats->rs_flags & ATH9K_RX_GI)
                        rxs->flag |= RX_FLAG_SHORT_GI;
                rxs->rate_idx = rx_stats->rs_rate & 0x7f;
                return 0;
        }

        for (i = 0; i < sband->n_bitrates; i++) {
                if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
                        rxs->rate_idx = i;
                        return 0;
                }
                if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
                        rxs->flag |= RX_FLAG_SHORTPRE;
                        rxs->rate_idx = i;
                        return 0;
                }
        }

        /*
         * No valid hardware bitrate found -- we should not get here
         * because hardware has already validated this frame as OK.
         */
        ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
                  "0x%02x using 1 Mbit\n", rx_stats->rs_rate);

        return -EINVAL;
}
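/*
 * Rate decoding note: the hardware sets bit 7 of rs_rate for HT (MCS)
 * rates, so e.g. a hypothetical rs_rate of 0x87 decodes as MCS 7
 * (0x87 & 0x7f) with RX_FLAG_HT set, while legacy rates are matched
 * against the band's bitrate table via hw_value/hw_value_short.
 */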
static void ath9k_process_rssi(struct ath_common *common,
                               struct ieee80211_hw *hw,
                               struct ieee80211_hdr *hdr,
                               struct ath_rx_status *rx_stats)
{
        struct ath_hw *ah = common->ah;
        struct ieee80211_sta *sta;
        struct ath_node *an;
        int last_rssi = ATH_RSSI_DUMMY_MARKER;
        __le16 fc;

        fc = hdr->frame_control;

        rcu_read_lock();
        /*
         * XXX: use ieee80211_find_sta! This requires quite a bit of work
         * under the current ath9k virtual wiphy implementation as we have
         * no way of tying a vif to wiphy. Typically vifs are attached to
         * at least one sdata of a wiphy on mac80211 but with ath9k virtual
         * wiphy you'd have to iterate over every wiphy and each sdata.
         */
        if (is_multicast_ether_addr(hdr->addr1))
                sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
        else
                sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, hdr->addr1);

        if (sta) {
                an = (struct ath_node *) sta->drv_priv;
                if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
                    !rx_stats->rs_moreaggr)
                        ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi);
                last_rssi = an->last_rssi;
        }
        rcu_read_unlock();

        if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
                rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
                                               ATH_RSSI_EP_MULTIPLIER);
        if (rx_stats->rs_rssi < 0)
                rx_stats->rs_rssi = 0;

        /* Update Beacon RSSI, this is used by ANI. */
        if (ieee80211_is_beacon(fc))
                ah->stats.avgbrssi = rx_stats->rs_rssi;
}
/*
 * For Decrypt or Demic errors, we only mark packet status here and always push
 * up the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or real decryption error. This let us keep statistics there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
                                   struct ieee80211_hw *hw,
                                   struct ieee80211_hdr *hdr,
                                   struct ath_rx_status *rx_stats,
                                   struct ieee80211_rx_status *rx_status,
                                   bool *decrypt_error)
{
        memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

        /*
         * everything but the rate is checked here, the rate check is done
         * separately to avoid doing two lookups for a rate for each frame.
         */
        if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
                return -EINVAL;

        ath9k_process_rssi(common, hw, hdr, rx_stats);

        if (ath9k_process_rate(common, hw, rx_stats, rx_status))
                return -EINVAL;

        rx_status->band = hw->conf.channel->band;
        rx_status->freq = hw->conf.channel->center_freq;
        rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
        rx_status->antenna = rx_stats->rs_antenna;
        rx_status->flag |= RX_FLAG_TSFT;

        return 0;
}
static void ath9k_rx_skb_postprocess(struct ath_common *common,
                                     struct sk_buff *skb,
                                     struct ath_rx_status *rx_stats,
                                     struct ieee80211_rx_status *rxs,
                                     bool decrypt_error)
{
        struct ath_hw *ah = common->ah;
        struct ieee80211_hdr *hdr;
        int hdrlen, padpos, padsize;
        u8 keyix;
        __le16 fc;

        /* see if any padding is done by the hw and remove it */
        hdr = (struct ieee80211_hdr *) skb->data;
        hdrlen = ieee80211_get_hdrlen_from_skb(skb);
        fc = hdr->frame_control;
        padpos = ath9k_cmn_padpos(hdr->frame_control);

        /* The MAC header is padded to have 32-bit boundary if the
         * packet payload is non-zero. The general calculation for
         * padsize would take into account odd header lengths:
         * padsize = (4 - padpos % 4) % 4; However, since only
         * even-length headers are used, padding can only be 0 or 2
         * bytes and we can optimize this a bit. In addition, we must
         * not try to remove padding from short control frames that do
         * not have payload. */
        padsize = padpos & 3;
        if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
                memmove(skb->data + padsize, skb->data, padpos);
                skb_pull(skb, padsize);
        }

        keyix = rx_stats->rs_keyix;

        if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
            ieee80211_has_protected(fc)) {
                rxs->flag |= RX_FLAG_DECRYPTED;
        } else if (ieee80211_has_protected(fc)
                   && !decrypt_error && skb->len >= hdrlen + 4) {
                keyix = skb->data[hdrlen + 3] >> 6;

                if (test_bit(keyix, common->keymap))
                        rxs->flag |= RX_FLAG_DECRYPTED;
        }
        if (ah->sw_mgmt_crypto &&
            (rxs->flag & RX_FLAG_DECRYPTED) &&
            ieee80211_is_mgmt(fc))
                /* Use software decrypt for management frames. */
                rxs->flag &= ~RX_FLAG_DECRYPTED;
}
static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
                                      struct ath_hw_antcomb_conf ant_conf,
                                      int main_rssi_avg)
{
        antcomb->quick_scan_cnt = 0;

        if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
                antcomb->rssi_lna2 = main_rssi_avg;
        else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
                antcomb->rssi_lna1 = main_rssi_avg;

        switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
        case (0x10): /* LNA2 A-B */
                antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
                antcomb->first_quick_scan_conf =
                        ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
                antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
                break;
        case (0x20): /* LNA1 A-B */
                antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
                antcomb->first_quick_scan_conf =
                        ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
                antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
                break;
        case (0x21): /* LNA1 LNA2 */
                antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
                antcomb->first_quick_scan_conf =
                        ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
                antcomb->second_quick_scan_conf =
                        ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
                break;
        case (0x12): /* LNA2 LNA1 */
                antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
                antcomb->first_quick_scan_conf =
                        ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
                antcomb->second_quick_scan_conf =
                        ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
                break;
        case (0x13): /* LNA2 A+B */
                antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
                antcomb->first_quick_scan_conf =
                        ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
                antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
                break;
        case (0x23): /* LNA1 A+B */
                antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
                antcomb->first_quick_scan_conf =
                        ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
                antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
                break;
        default:
                break;
        }
}
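/*
 * The switch above keys on (main_lna_conf << 4) | alt_lna_conf. Assuming
 * the enum encoding from ath9k.h (A-B = 0, LNA2 = 1, LNA1 = 2, A+B = 3),
 * a key of 0x21 means main = LNA1 and alt = LNA2, matching the case
 * comments.
 */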
static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
                                       struct ath_hw_antcomb_conf *div_ant_conf,
                                       int main_rssi_avg, int alt_rssi_avg,
                                       int alt_ratio)
{
        switch (antcomb->quick_scan_cnt) {
        case 0:
                /* set alt to main, and alt to first conf */
                div_ant_conf->main_lna_conf = antcomb->main_conf;
                div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
                break;
        case 1:
                /* set alt to main, and alt to second conf */
                div_ant_conf->main_lna_conf = antcomb->main_conf;
                div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
                antcomb->rssi_first = main_rssi_avg;
                antcomb->rssi_second = alt_rssi_avg;

                if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
                        /* main is LNA1 */
                        if (ath_is_alt_ant_ratio_better(alt_ratio,
                                        ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
                                        ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
                                        main_rssi_avg, alt_rssi_avg,
                                        antcomb->total_pkt_count))
                                antcomb->first_ratio = true;
                        else
                                antcomb->first_ratio = false;
                } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
                        if (ath_is_alt_ant_ratio_better(alt_ratio,
                                        ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
                                        ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
                                        main_rssi_avg, alt_rssi_avg,
                                        antcomb->total_pkt_count))
                                antcomb->first_ratio = true;
                        else
                                antcomb->first_ratio = false;
                } else {
                        if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
                              (alt_rssi_avg > main_rssi_avg +
                               ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
                             (alt_rssi_avg > main_rssi_avg)) &&
                            (antcomb->total_pkt_count > 50))
                                antcomb->first_ratio = true;
                        else
                                antcomb->first_ratio = false;
                }
                break;
        case 2:
                antcomb->alt_good = false;
                antcomb->scan_not_start = false;
                antcomb->scan = false;
                antcomb->rssi_first = main_rssi_avg;
                antcomb->rssi_third = alt_rssi_avg;

                if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
                        antcomb->rssi_lna1 = alt_rssi_avg;
                else if (antcomb->second_quick_scan_conf ==
                         ATH_ANT_DIV_COMB_LNA2)
                        antcomb->rssi_lna2 = alt_rssi_avg;
                else if (antcomb->second_quick_scan_conf ==
                         ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
                        if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
                                antcomb->rssi_lna2 = main_rssi_avg;
                        else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
                                antcomb->rssi_lna1 = main_rssi_avg;
                }

                if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
                    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
                        div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
                else
                        div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

                if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
                        if (ath_is_alt_ant_ratio_better(alt_ratio,
                                        ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
                                        ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
                                        main_rssi_avg, alt_rssi_avg,
                                        antcomb->total_pkt_count))
                                antcomb->second_ratio = true;
                        else
                                antcomb->second_ratio = false;
                } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
                        if (ath_is_alt_ant_ratio_better(alt_ratio,
                                        ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
                                        ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
                                        main_rssi_avg, alt_rssi_avg,
                                        antcomb->total_pkt_count))
                                antcomb->second_ratio = true;
                        else
                                antcomb->second_ratio = false;
                } else {
                        if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
                              (alt_rssi_avg > main_rssi_avg +
                               ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
                             (alt_rssi_avg > main_rssi_avg)) &&
                            (antcomb->total_pkt_count > 50))
                                antcomb->second_ratio = true;
                        else
                                antcomb->second_ratio = false;
                }

                /* set alt to the conf with maximum ratio */
                if (antcomb->first_ratio && antcomb->second_ratio) {
                        if (antcomb->rssi_second > antcomb->rssi_third) {
                                /* first alt */
                                if ((antcomb->first_quick_scan_conf ==
                                     ATH_ANT_DIV_COMB_LNA1) ||
                                    (antcomb->first_quick_scan_conf ==
                                     ATH_ANT_DIV_COMB_LNA2)) {
                                        /* Set alt LNA1 or LNA2 */
                                        if (div_ant_conf->main_lna_conf ==
                                            ATH_ANT_DIV_COMB_LNA2)
                                                div_ant_conf->alt_lna_conf =
                                                        ATH_ANT_DIV_COMB_LNA1;
                                        else
                                                div_ant_conf->alt_lna_conf =
                                                        ATH_ANT_DIV_COMB_LNA2;
                                } else {
                                        /* Set alt to A+B or A-B */
                                        div_ant_conf->alt_lna_conf =
                                                antcomb->first_quick_scan_conf;
                                }
                        } else if ((antcomb->second_quick_scan_conf ==
                                    ATH_ANT_DIV_COMB_LNA1) ||
                                   (antcomb->second_quick_scan_conf ==
                                    ATH_ANT_DIV_COMB_LNA2)) {
                                /* Set alt LNA1 or LNA2 */
                                if (div_ant_conf->main_lna_conf ==
                                    ATH_ANT_DIV_COMB_LNA2)
                                        div_ant_conf->alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA1;
                                else
                                        div_ant_conf->alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA2;
                        } else {
                                /* Set alt to A+B or A-B */
                                div_ant_conf->alt_lna_conf =
                                        antcomb->second_quick_scan_conf;
                        }
                } else if (antcomb->first_ratio) {
                        /* first alt */
                        if ((antcomb->first_quick_scan_conf ==
                             ATH_ANT_DIV_COMB_LNA1) ||
                            (antcomb->first_quick_scan_conf ==
                             ATH_ANT_DIV_COMB_LNA2)) {
                                /* Set alt LNA1 or LNA2 */
                                if (div_ant_conf->main_lna_conf ==
                                    ATH_ANT_DIV_COMB_LNA2)
                                        div_ant_conf->alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA1;
                                else
                                        div_ant_conf->alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA2;
                        } else {
                                /* Set alt to A+B or A-B */
                                div_ant_conf->alt_lna_conf =
                                        antcomb->first_quick_scan_conf;
                        }
                } else if (antcomb->second_ratio) {
                        /* second alt */
                        if ((antcomb->second_quick_scan_conf ==
                             ATH_ANT_DIV_COMB_LNA1) ||
                            (antcomb->second_quick_scan_conf ==
                             ATH_ANT_DIV_COMB_LNA2)) {
                                /* Set alt LNA1 or LNA2 */
                                if (div_ant_conf->main_lna_conf ==
                                    ATH_ANT_DIV_COMB_LNA2)
                                        div_ant_conf->alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA1;
                                else
                                        div_ant_conf->alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA2;
                        } else {
                                /* Set alt to A+B or A-B */
                                div_ant_conf->alt_lna_conf =
                                        antcomb->second_quick_scan_conf;
                        }
                } else {
                        /* main is largest */
                        if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
                            (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)) {
                                /* Set alt LNA1 or LNA2 */
                                if (div_ant_conf->main_lna_conf ==
                                    ATH_ANT_DIV_COMB_LNA2)
                                        div_ant_conf->alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA1;
                                else
                                        div_ant_conf->alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA2;
                        } else {
                                /* Set alt to A+B or A-B */
                                div_ant_conf->alt_lna_conf =
                                        antcomb->main_conf;
                        }
                }
                break;
        default:
                break;
        }
}
static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf)
{
        /* Adjust the fast_div_bias based on main and alt lna conf */
        switch ((ant_conf->main_lna_conf << 4) | ant_conf->alt_lna_conf) {
        case (0x01): /* A-B LNA2 */
                ant_conf->fast_div_bias = 0x3b;
                break;
        case (0x02): /* A-B LNA1 */
                ant_conf->fast_div_bias = 0x3d;
                break;
        case (0x03): /* A-B A+B */
                ant_conf->fast_div_bias = 0x1;
                break;
        case (0x10): /* LNA2 A-B */
                ant_conf->fast_div_bias = 0x7;
                break;
        case (0x12): /* LNA2 LNA1 */
                ant_conf->fast_div_bias = 0x2;
                break;
        case (0x13): /* LNA2 A+B */
                ant_conf->fast_div_bias = 0x7;
                break;
        case (0x20): /* LNA1 A-B */
                ant_conf->fast_div_bias = 0x6;
                break;
        case (0x21): /* LNA1 LNA2 */
                ant_conf->fast_div_bias = 0x0;
                break;
        case (0x23): /* LNA1 A+B */
                ant_conf->fast_div_bias = 0x6;
                break;
        case (0x30): /* A+B A-B */
                ant_conf->fast_div_bias = 0x1;
                break;
        case (0x31): /* A+B LNA2 */
                ant_conf->fast_div_bias = 0x3b;
                break;
        case (0x32): /* A+B LNA1 */
                ant_conf->fast_div_bias = 0x3d;
                break;
        default:
                break;
        }
}
/* Antenna diversity and combining */
static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
{
        struct ath_hw_antcomb_conf div_ant_conf;
        struct ath_ant_comb *antcomb = &sc->ant_comb;
        int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
        int curr_main_set, curr_bias;
        int main_rssi = rs->rs_rssi_ctl0;
        int alt_rssi = rs->rs_rssi_ctl1;
        int rx_ant_conf, main_ant_conf;
        bool short_scan = false;

        rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
                      ATH_ANT_RX_MASK;
        main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
                        ATH_ANT_RX_MASK;

        /* Record packet only when alt_rssi is positive */
        if (alt_rssi > 0) {
                antcomb->total_pkt_count++;
                antcomb->main_total_rssi += main_rssi;
                antcomb->alt_total_rssi += alt_rssi;
                if (main_ant_conf == rx_ant_conf)
                        antcomb->main_recv_cnt++;
                else
                        antcomb->alt_recv_cnt++;
        }

        /* Short scan check */
        if (antcomb->scan && antcomb->alt_good) {
                if (time_after(jiffies, antcomb->scan_start_time +
                    msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
                        short_scan = true;
                else if (antcomb->total_pkt_count ==
                         ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
                        alt_ratio = ((antcomb->alt_recv_cnt * 100) /
                                     antcomb->total_pkt_count);
                        if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
                                short_scan = true;
                }
        }

        if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
             rs->rs_moreaggr) && !short_scan)
                return;

        if (antcomb->total_pkt_count) {
                alt_ratio = ((antcomb->alt_recv_cnt * 100) /
                             antcomb->total_pkt_count);
                main_rssi_avg = (antcomb->main_total_rssi /
                                 antcomb->total_pkt_count);
                alt_rssi_avg = (antcomb->alt_total_rssi /
                                antcomb->total_pkt_count);
        }

        ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
        curr_alt_set = div_ant_conf.alt_lna_conf;
        curr_main_set = div_ant_conf.main_lna_conf;
        curr_bias = div_ant_conf.fast_div_bias;

        antcomb->count++;

        if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
                if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
                        ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
                                                  main_rssi_avg);
                        antcomb->alt_good = true;
                } else {
                        antcomb->alt_good = false;
                }

                antcomb->count = 0;
                antcomb->scan = true;
                antcomb->scan_not_start = true;
        }

        if (!antcomb->scan) {
                if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
                        if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
                                /* Switch main and alt LNA */
                                div_ant_conf.main_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA2;
                                div_ant_conf.alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA1;
                        } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
                                div_ant_conf.main_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA1;
                                div_ant_conf.alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA2;
                        }

                        goto div_comb_done;
                } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
                           (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
                        /* Set alt to another LNA */
                        if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
                                div_ant_conf.alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA1;
                        else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
                                div_ant_conf.alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA2;

                        goto div_comb_done;
                }

                if ((alt_rssi_avg < (main_rssi_avg +
                     ATH_ANT_DIV_COMB_LNA1_LNA2_DELTA)))
                        goto div_comb_done;
        }

        if (!antcomb->scan_not_start) {
                switch (curr_alt_set) {
                case ATH_ANT_DIV_COMB_LNA2:
                        antcomb->rssi_lna2 = alt_rssi_avg;
                        antcomb->rssi_lna1 = main_rssi_avg;
                        antcomb->scan = true;
                        /* set to A+B */
                        div_ant_conf.main_lna_conf =
                                ATH_ANT_DIV_COMB_LNA1;
                        div_ant_conf.alt_lna_conf =
                                ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
                        break;
                case ATH_ANT_DIV_COMB_LNA1:
                        antcomb->rssi_lna1 = alt_rssi_avg;
                        antcomb->rssi_lna2 = main_rssi_avg;
                        antcomb->scan = true;
                        /* set to A+B */
                        div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
                        div_ant_conf.alt_lna_conf =
                                ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
                        break;
                case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
                        antcomb->rssi_add = alt_rssi_avg;
                        antcomb->scan = true;
                        /* set to A-B */
                        div_ant_conf.alt_lna_conf =
                                ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
                        break;
                case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
                        antcomb->rssi_sub = alt_rssi_avg;
                        antcomb->scan = false;
                        if (antcomb->rssi_lna2 >
                            (antcomb->rssi_lna1 +
                             ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
                                /* use LNA2 as main LNA */
                                if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
                                    (antcomb->rssi_add > antcomb->rssi_sub)) {
                                        /* set to A+B */
                                        div_ant_conf.main_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA2;
                                        div_ant_conf.alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
                                } else if (antcomb->rssi_sub >
                                           antcomb->rssi_lna1) {
                                        /* set to A-B */
                                        div_ant_conf.main_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA2;
                                        div_ant_conf.alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
                                } else {
                                        /* set to LNA1 */
                                        div_ant_conf.main_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA2;
                                        div_ant_conf.alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA1;
                                }
                        } else {
                                /* use LNA1 as main LNA */
                                if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
                                    (antcomb->rssi_add > antcomb->rssi_sub)) {
                                        /* set to A+B */
                                        div_ant_conf.main_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA1;
                                        div_ant_conf.alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
                                } else if (antcomb->rssi_sub >
                                           antcomb->rssi_lna1) {
                                        /* set to A-B */
                                        div_ant_conf.main_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA1;
                                        div_ant_conf.alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
                                } else {
                                        /* set to LNA2 */
                                        div_ant_conf.main_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA1;
                                        div_ant_conf.alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA2;
                                }
                        }
                        break;
                default:
                        break;
                }
        } else {
                if (!antcomb->alt_good) {
                        antcomb->scan_not_start = false;
                        /* Set alt to another LNA */
                        if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
                                div_ant_conf.main_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA2;
                                div_ant_conf.alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA1;
                        } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
                                div_ant_conf.main_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA1;
                                div_ant_conf.alt_lna_conf =
                                                ATH_ANT_DIV_COMB_LNA2;
                        }
                        goto div_comb_done;
                }
        }

        ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
                                           main_rssi_avg, alt_rssi_avg,
                                           alt_ratio);

        antcomb->quick_scan_cnt++;

div_comb_done:
        ath_ant_div_conf_fast_divbias(&div_ant_conf);

        ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);

        antcomb->scan_start_time = jiffies;
        antcomb->total_pkt_count = 0;
        antcomb->main_total_rssi = 0;
        antcomb->alt_total_rssi = 0;
        antcomb->main_recv_cnt = 0;
        antcomb->alt_recv_cnt = 0;
}
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
        struct ath_buf *bf;
        struct sk_buff *skb = NULL, *requeue_skb;
        struct ieee80211_rx_status *rxs;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        /*
         * The hw can technically differ from common->hw when using ath9k
         * virtual wiphy so to account for that we iterate over the active
         * wiphys and find the appropriate wiphy and therefore hw.
         */
        struct ieee80211_hw *hw = NULL;
        struct ieee80211_hdr *hdr;
        int retval;
        bool decrypt_error = false;
        struct ath_rx_status rs;
        enum ath9k_rx_qtype qtype;
        bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
        int dma_type;
        u8 rx_status_len = ah->caps.rx_status_len;
        u64 tsf = 0;
        u32 tsf_lower = 0;
        unsigned long flags;

        if (edma)
                dma_type = DMA_BIDIRECTIONAL;
        else
                dma_type = DMA_FROM_DEVICE;

        qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
        spin_lock_bh(&sc->rx.rxbuflock);

        tsf = ath9k_hw_gettsf64(ah);
        tsf_lower = tsf & 0xffffffff;

        do {
                /* If handling rx interrupt and flush is in progress => exit */
                if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
                        break;

                memset(&rs, 0, sizeof(rs));
                if (edma)
                        bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
                else
                        bf = ath_get_next_rx_buf(sc, &rs);

                if (!bf)
                        break;

                skb = bf->bf_mpdu;
                if (!skb)
                        continue;

                hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len);
                rxs = IEEE80211_SKB_RXCB(skb);

                hw = ath_get_virt_hw(sc, hdr);

                ath_debug_stat_rx(sc, &rs);

                /*
                 * If we're asked to flush receive queue, directly
                 * chain it back at the queue without processing it.
                 */
                if (flush)
                        goto requeue;

                retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
                                                 rxs, &decrypt_error);
                if (retval)
                        goto requeue;

                /*
                 * Extend the hardware's 32-bit rx timestamp to 64 bits
                 * using the current TSF, correcting for wraparound when
                 * the timestamp and tsf_lower lie on opposite sides of a
                 * 32-bit rollover.
                 */
                rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
                if (rs.rs_tstamp > tsf_lower &&
                    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
                        rxs->mactime -= 0x100000000ULL;

                if (rs.rs_tstamp < tsf_lower &&
                    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
                        rxs->mactime += 0x100000000ULL;

                /* Ensure we always have an skb to requeue once we are done
                 * processing the current buffer's skb */
                requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize,
                                              GFP_ATOMIC);

                /* If there is no memory we ignore the current RX'd frame,
                 * tell hardware it can give us a new frame using the old
                 * skb and put it at the tail of the sc->rx.rxbuf list for
                 * processing. */
                if (!requeue_skb)
                        goto requeue;

                /* Unmap the frame */
                dma_unmap_single(sc->dev, bf->bf_buf_addr,
                                 common->rx_bufsize,
                                 dma_type);

                skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
                if (ah->caps.rx_status_len)
                        skb_pull(skb, ah->caps.rx_status_len);

                ath9k_rx_skb_postprocess(common, skb, &rs,
                                         rxs, decrypt_error);

                /* We will now give hardware our shiny new allocated skb */
                bf->bf_mpdu = requeue_skb;
                bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
                                                 common->rx_bufsize,
                                                 dma_type);
                if (unlikely(dma_mapping_error(sc->dev,
                                               bf->bf_buf_addr))) {
                        dev_kfree_skb_any(requeue_skb);
                        bf->bf_mpdu = NULL;
                        bf->bf_buf_addr = 0;
                        ath_print(common, ATH_DBG_FATAL,
                                  "dma_mapping_error() on RX\n");
                        ath_rx_send_to_mac80211(hw, sc, skb, rxs);
                        break;
                }

                /*
                 * change the default rx antenna if rx diversity chooses the
                 * other antenna 3 times in a row.
                 */
                if (sc->rx.defant != rs.rs_antenna) {
                        if (++sc->rx.rxotherant >= 3)
                                ath_setdefantenna(sc, rs.rs_antenna);
                } else {
                        sc->rx.rxotherant = 0;
                }

                spin_lock_irqsave(&sc->sc_pm_lock, flags);
                if (unlikely(ath9k_check_auto_sleep(sc) ||
                             (sc->ps_flags & (PS_WAIT_FOR_BEACON |
                                              PS_WAIT_FOR_CAB |
                                              PS_WAIT_FOR_PSPOLL_DATA))))
                        ath_rx_ps(sc, skb);
                spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

                if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
                        ath_ant_comb_scan(sc, &rs);

                ath_rx_send_to_mac80211(hw, sc, skb, rxs);

requeue:
                if (edma) {
                        list_add_tail(&bf->list, &sc->rx.rxbuf);
                        ath_rx_edma_buf_link(sc, qtype);
                } else {
                        list_move_tail(&bf->list, &sc->rx.rxbuf);
                        ath_rx_buf_link(sc, bf);
                }
        } while (1);

        spin_unlock_bh(&sc->rx.rxbuflock);

        return 0;
}