net/mac80211/rx.c
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/skbuff.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/rcupdate.h>
18 #include <net/mac80211.h>
19 #include <net/ieee80211_radiotap.h>
21 #include "ieee80211_i.h"
22 #include "driver-ops.h"
23 #include "led.h"
24 #include "mesh.h"
25 #include "wep.h"
26 #include "wpa.h"
27 #include "tkip.h"
28 #include "wme.h"
30 static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
31 struct tid_ampdu_rx *tid_agg_rx,
32 struct sk_buff *skb,
33 struct ieee80211_rx_status *status,
34 u16 mpdu_seq_num,
35 int bar_req);
37 * monitor mode reception
39 * This function cleans up the SKB, i.e. it removes all the stuff
40 * only useful for monitoring.
42 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
43 struct sk_buff *skb,
44 int rtap_len)
46 skb_pull(skb, rtap_len);
48 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
49 if (likely(skb->len > FCS_LEN))
50 skb_trim(skb, skb->len - FCS_LEN);
51 else {
52 /* driver bug */
53 WARN_ON(1);
54 dev_kfree_skb(skb);
55 skb = NULL;
59 return skb;
62 static inline int should_drop_frame(struct ieee80211_rx_status *status,
63 struct sk_buff *skb,
64 int present_fcs_len,
65 int radiotap_len)
67 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
69 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
70 return 1;
71 if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len))
72 return 1;
73 if (ieee80211_is_ctl(hdr->frame_control) &&
74 !ieee80211_is_pspoll(hdr->frame_control) &&
75 !ieee80211_is_back_req(hdr->frame_control))
76 return 1;
77 return 0;
80 static int
81 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
82 struct ieee80211_rx_status *status)
84 int len;
86 /* always present fields */
87 len = sizeof(struct ieee80211_radiotap_header) + 9;
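/* i.e. flags (1) + rate (1) + channel (4) + antenna (1) + RX flags (2);
 * e.g. with none of the optional fields this gives 8 (radiotap header)
 * + 9 = 17 bytes, which becomes 18 after the RX_FLAGS padding below and
 * 24 after the final round-up to a multiple of 8 */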
89 if (status->flag & RX_FLAG_TSFT)
90 len += 8;
91 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
92 len += 1;
93 if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
94 len += 1;
96 if (len & 1) /* padding for RX_FLAGS if necessary */
97 len++;
99 /* make sure radiotap starts at a naturally aligned address */
100 if (len % 8)
101 len = roundup(len, 8);
103 return len;
107 * ieee80211_add_rx_radiotap_header - add radiotap header
109 * add a radiotap header containing all the fields which the hardware provided.
111 static void
112 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
113 struct sk_buff *skb,
114 struct ieee80211_rx_status *status,
115 struct ieee80211_rate *rate,
116 int rtap_len)
118 struct ieee80211_radiotap_header *rthdr;
119 unsigned char *pos;
121 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
122 memset(rthdr, 0, rtap_len);
124 /* radiotap header, set always present flags */
125 rthdr->it_present =
126 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
127 (1 << IEEE80211_RADIOTAP_CHANNEL) |
128 (1 << IEEE80211_RADIOTAP_ANTENNA) |
129 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
130 rthdr->it_len = cpu_to_le16(rtap_len);
132 pos = (unsigned char *)(rthdr+1);
134 /* the order of the following fields is important */
136 /* IEEE80211_RADIOTAP_TSFT */
137 if (status->flag & RX_FLAG_TSFT) {
138 *(__le64 *)pos = cpu_to_le64(status->mactime);
139 rthdr->it_present |=
140 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
141 pos += 8;
144 /* IEEE80211_RADIOTAP_FLAGS */
145 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
146 *pos |= IEEE80211_RADIOTAP_F_FCS;
147 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
148 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
149 if (status->flag & RX_FLAG_SHORTPRE)
150 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
151 pos++;
153 /* IEEE80211_RADIOTAP_RATE */
154 if (status->flag & RX_FLAG_HT) {
156 * TODO: add following information into radiotap header once
157 * suitable fields are defined for it:
158 * - MCS index (status->rate_idx)
159 * - HT40 (status->flag & RX_FLAG_40MHZ)
160 * - short-GI (status->flag & RX_FLAG_SHORT_GI)
162 *pos = 0;
163 } else {
164 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
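/* rate->bitrate is in units of 100 kbit/s while the radiotap rate
 * field is in units of 500 kbit/s, hence the division by 5 below */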
165 *pos = rate->bitrate / 5;
167 pos++;
169 /* IEEE80211_RADIOTAP_CHANNEL */
170 *(__le16 *)pos = cpu_to_le16(status->freq);
171 pos += 2;
172 if (status->band == IEEE80211_BAND_5GHZ)
173 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
174 IEEE80211_CHAN_5GHZ);
175 else if (rate->flags & IEEE80211_RATE_ERP_G)
176 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
177 IEEE80211_CHAN_2GHZ);
178 else
179 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_CCK |
180 IEEE80211_CHAN_2GHZ);
181 pos += 2;
183 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
184 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
185 *pos = status->signal;
186 rthdr->it_present |=
187 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
188 pos++;
191 /* IEEE80211_RADIOTAP_DBM_ANTNOISE */
192 if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
193 *pos = status->noise;
194 rthdr->it_present |=
195 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
196 pos++;
199 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
201 /* IEEE80211_RADIOTAP_ANTENNA */
202 *pos = status->antenna;
203 pos++;
205 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
207 /* IEEE80211_RADIOTAP_RX_FLAGS */
208 /* ensure 2 byte alignment for the 2 byte field as required */
209 if ((pos - (unsigned char *)rthdr) & 1)
210 pos++;
211 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
212 *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADPLCP);
213 pos += 2;
217 * This function copies a received frame to all monitor interfaces and
218 * returns a cleaned-up SKB that no longer includes the FCS nor the
219 * radiotap header the driver might have added.
221 static struct sk_buff *
222 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
223 struct ieee80211_rx_status *status,
224 struct ieee80211_rate *rate)
226 struct ieee80211_sub_if_data *sdata;
227 int needed_headroom = 0;
228 struct sk_buff *skb, *skb2;
229 struct net_device *prev_dev = NULL;
230 int present_fcs_len = 0;
231 int rtap_len = 0;
234 * First, we may need to make a copy of the skb because
235 * (1) we need to modify it for radiotap (if not present), and
236 * (2) the other RX handlers will modify the skb we got.
238 * We don't need to, of course, if we aren't going to return
239 * the SKB because it has a bad FCS/PLCP checksum.
241 if (status->flag & RX_FLAG_RADIOTAP)
242 rtap_len = ieee80211_get_radiotap_len(origskb->data);
243 else
244 /* room for the radiotap header based on driver features */
245 needed_headroom = ieee80211_rx_radiotap_len(local, status);
247 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
248 present_fcs_len = FCS_LEN;
250 if (!local->monitors) {
251 if (should_drop_frame(status, origskb, present_fcs_len,
252 rtap_len)) {
253 dev_kfree_skb(origskb);
254 return NULL;
257 return remove_monitor_info(local, origskb, rtap_len);
260 if (should_drop_frame(status, origskb, present_fcs_len, rtap_len)) {
261 /* only need to expand headroom if necessary */
262 skb = origskb;
263 origskb = NULL;
266 * This shouldn't trigger often because most devices have an
267 * RX header they pull before we get here, and that should
268 * be big enough for our radiotap information. We should
269 * probably export the length to drivers so that we can have
270 * them allocate enough headroom to start with.
272 if (skb_headroom(skb) < needed_headroom &&
273 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
274 dev_kfree_skb(skb);
275 return NULL;
277 } else {
279 * Need to make a copy and possibly remove radiotap header
280 * and FCS from the original.
282 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
284 origskb = remove_monitor_info(local, origskb, rtap_len);
286 if (!skb)
287 return origskb;
290 /* if necessary, prepend radiotap information */
291 if (!(status->flag & RX_FLAG_RADIOTAP))
292 ieee80211_add_rx_radiotap_header(local, skb, status, rate,
293 needed_headroom);
295 skb_reset_mac_header(skb);
296 skb->ip_summed = CHECKSUM_UNNECESSARY;
297 skb->pkt_type = PACKET_OTHERHOST;
298 skb->protocol = htons(ETH_P_802_2);
300 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
301 if (!netif_running(sdata->dev))
302 continue;
304 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
305 continue;
307 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
308 continue;
310 if (prev_dev) {
311 skb2 = skb_clone(skb, GFP_ATOMIC);
312 if (skb2) {
313 skb2->dev = prev_dev;
314 netif_rx(skb2);
318 prev_dev = sdata->dev;
319 sdata->dev->stats.rx_packets++;
320 sdata->dev->stats.rx_bytes += skb->len;
323 if (prev_dev) {
324 skb->dev = prev_dev;
325 netif_rx(skb);
326 } else
327 dev_kfree_skb(skb);
329 return origskb;
333 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
335 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
336 int tid;
338 /* does the frame have a qos control field? */
339 if (ieee80211_is_data_qos(hdr->frame_control)) {
340 u8 *qc = ieee80211_get_qos_ctl(hdr);
341 /* frame has qos control */
342 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
343 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
344 rx->flags |= IEEE80211_RX_AMSDU;
345 else
346 rx->flags &= ~IEEE80211_RX_AMSDU;
347 } else {
349 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
351 * Sequence numbers for management frames, QoS data
352 * frames with a broadcast/multicast address in the
353 * Address 1 field, and all non-QoS data frames sent
354 * by QoS STAs are assigned using an additional single
355 * modulo-4096 counter, [...]
357 * We also use that counter for non-QoS STAs.
359 tid = NUM_RX_DATA_QUEUES - 1;
362 rx->queue = tid;
363 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
364 * For now, set skb->priority to 0 for other cases. */
365 rx->skb->priority = (tid > 7) ? 0 : tid;
369 * DOC: Packet alignment
371 * Drivers always need to pass packets that are aligned to two-byte boundaries
372 * to the stack.
374 * Additionally, drivers should, if possible, align the payload data in a way that
375 * guarantees that the contained IP header is aligned to a four-byte
376 * boundary. In the case of regular frames, this simply means aligning the
377 * payload to a four-byte boundary (because either the IP header is directly
378 * contained, or IV/RFC1042 headers that have a length divisible by four are
379 * in front of it).
381 * With A-MSDU frames, however, the payload data address must be two modulo
382 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
383 * push the IP header further back to a multiple of four again. Thankfully, the
384 * specs were sane enough this time around to require padding each A-MSDU
385 * subframe to a length that is a multiple of four.
387 * Padding like the padding Atheros hardware adds between the 802.11 header and
388 * the payload is not supported; the driver is required to move the 802.11
389 * header so that it is directly in front of the payload in that case.
391 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
393 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
394 int hdrlen;
396 #ifndef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
397 return;
398 #endif
400 if (WARN_ONCE((unsigned long)rx->skb->data & 1,
401 "unaligned packet at 0x%p\n", rx->skb->data))
402 return;
404 if (!ieee80211_is_data_present(hdr->frame_control))
405 return;
407 hdrlen = ieee80211_hdrlen(hdr->frame_control);
408 if (rx->flags & IEEE80211_RX_AMSDU)
409 hdrlen += ETH_HLEN;
410 WARN_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3,
411 "unaligned IP payload at 0x%p\n", rx->skb->data + hdrlen);
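/*
 * Illustrative sketch only, not part of mac80211: one way a driver that
 * copies received frames out of device memory could satisfy the two-byte
 * alignment rule documented above.  The function and parameter names
 * (mydrv_build_rx_skb, buf, len) are made up for this example.
 */
static struct sk_buff *mydrv_build_rx_skb(const u8 *buf, unsigned int len)
{
	struct sk_buff *skb;

	/* one spare byte in case the data pointer needs shifting */
	skb = dev_alloc_skb(len + 1);
	if (!skb)
		return NULL;

	/* make sure the 802.11 header starts on a 2-byte boundary */
	if ((unsigned long)skb->data & 1)
		skb_reserve(skb, 1);

	memcpy(skb_put(skb, len), buf, len);
	return skb;
}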
415 /* rx handlers */
417 static ieee80211_rx_result debug_noinline
418 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
420 struct ieee80211_local *local = rx->local;
421 struct sk_buff *skb = rx->skb;
423 if (unlikely(local->hw_scanning))
424 return ieee80211_scan_rx(rx->sdata, skb, rx->status);
426 if (unlikely(local->sw_scanning)) {
427 /* drop all the other packets during a software scan anyway */
428 if (ieee80211_scan_rx(rx->sdata, skb, rx->status)
429 != RX_QUEUED)
430 dev_kfree_skb(skb);
431 return RX_QUEUED;
434 if (unlikely(rx->flags & IEEE80211_RX_IN_SCAN)) {
435 /* scanning finished during invoking of handlers */
436 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
437 return RX_DROP_UNUSABLE;
440 return RX_CONTINUE;
444 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
446 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
448 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
449 return 0;
451 return ieee80211_is_robust_mgmt_frame(hdr);
455 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
457 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
459 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
460 return 0;
462 return ieee80211_is_robust_mgmt_frame(hdr);
466 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
467 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
469 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
470 struct ieee80211_mmie *mmie;
472 if (skb->len < 24 + sizeof(*mmie) ||
473 !is_multicast_ether_addr(hdr->da))
474 return -1;
476 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
477 return -1; /* not a robust management frame */
479 mmie = (struct ieee80211_mmie *)
480 (skb->data + skb->len - sizeof(*mmie));
481 if (mmie->element_id != WLAN_EID_MMIE ||
482 mmie->length != sizeof(*mmie) - 2)
483 return -1;
485 return le16_to_cpu(mmie->key_id);
489 static ieee80211_rx_result
490 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
492 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
493 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
495 if (ieee80211_is_data(hdr->frame_control)) {
496 if (!ieee80211_has_a4(hdr->frame_control))
497 return RX_DROP_MONITOR;
498 if (memcmp(hdr->addr4, rx->dev->dev_addr, ETH_ALEN) == 0)
499 return RX_DROP_MONITOR;
502 /* If there is no established peer link and this is not a peer link
503 * establishment frame, beacon or probe, drop the frame.
506 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
507 struct ieee80211_mgmt *mgmt;
509 if (!ieee80211_is_mgmt(hdr->frame_control))
510 return RX_DROP_MONITOR;
512 if (ieee80211_is_action(hdr->frame_control)) {
513 mgmt = (struct ieee80211_mgmt *)hdr;
514 if (mgmt->u.action.category != PLINK_CATEGORY)
515 return RX_DROP_MONITOR;
516 return RX_CONTINUE;
519 if (ieee80211_is_probe_req(hdr->frame_control) ||
520 ieee80211_is_probe_resp(hdr->frame_control) ||
521 ieee80211_is_beacon(hdr->frame_control))
522 return RX_CONTINUE;
524 return RX_DROP_MONITOR;
528 #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
530 if (ieee80211_is_data(hdr->frame_control) &&
531 is_multicast_ether_addr(hdr->addr1) &&
532 mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->sdata))
533 return RX_DROP_MONITOR;
534 #undef msh_h_get
536 return RX_CONTINUE;
540 static ieee80211_rx_result debug_noinline
541 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
543 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
545 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
546 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
547 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
548 rx->sta->last_seq_ctrl[rx->queue] ==
549 hdr->seq_ctrl)) {
550 if (rx->flags & IEEE80211_RX_RA_MATCH) {
551 rx->local->dot11FrameDuplicateCount++;
552 rx->sta->num_duplicates++;
554 return RX_DROP_MONITOR;
555 } else
556 rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
559 if (unlikely(rx->skb->len < 16)) {
560 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
561 return RX_DROP_MONITOR;
564 /* Drop disallowed frame classes based on STA auth/assoc state;
565 * IEEE 802.11, Chap 5.5.
567 * mac80211 filters only based on association state, i.e. it drops
568 * Class 3 frames from stations that are not associated. hostapd sends
569 * deauth/disassoc frames when needed. In addition, hostapd is
570 * responsible for filtering on both auth and assoc states.
573 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
574 return ieee80211_rx_mesh_check(rx);
576 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
577 ieee80211_is_pspoll(hdr->frame_control)) &&
578 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
579 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
580 if ((!ieee80211_has_fromds(hdr->frame_control) &&
581 !ieee80211_has_tods(hdr->frame_control) &&
582 ieee80211_is_data(hdr->frame_control)) ||
583 !(rx->flags & IEEE80211_RX_RA_MATCH)) {
584 /* Drop IBSS frames and frames for other hosts
585 * silently. */
586 return RX_DROP_MONITOR;
589 return RX_DROP_MONITOR;
592 return RX_CONTINUE;
596 static ieee80211_rx_result debug_noinline
597 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
599 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
600 int keyidx;
601 int hdrlen;
602 ieee80211_rx_result result = RX_DROP_UNUSABLE;
603 struct ieee80211_key *stakey = NULL;
604 int mmie_keyidx = -1;
607 * Key selection 101
609 * There are four types of keys:
610 * - GTK (group keys)
611 * - IGTK (group keys for management frames)
612 * - PTK (pairwise keys)
613 * - STK (station-to-station pairwise keys)
615 * When selecting a key, we have to distinguish between multicast
616 * (including broadcast) and unicast frames, the latter can only
617 * use PTKs and STKs while the former always use GTKs and IGTKs.
618 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
619 * unicast frames can also use key indices like GTKs. Hence, if we
620 * don't have a PTK/STK we check the key index for a WEP key.
622 * Note that in a regular BSS, multicast frames are sent by the
623 * AP only, associated stations unicast the frame to the AP first
624 * which then multicasts it on their behalf.
626 * There is also a slight problem in IBSS mode: GTKs are negotiated
627 * with each station, that is something we don't currently handle.
628 * The spec seems to expect that one negotiates the same key with
629 * every station but there's no such requirement; VLANs could be
630 * possible.
634 * No point in finding a key and decrypting if the frame is neither
635 * addressed to us nor a multicast frame.
637 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
638 return RX_CONTINUE;
640 if (rx->sta)
641 stakey = rcu_dereference(rx->sta->key);
643 if (!ieee80211_has_protected(hdr->frame_control))
644 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
646 if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
647 rx->key = stakey;
648 /* Skip decryption if the frame is not protected. */
649 if (!ieee80211_has_protected(hdr->frame_control))
650 return RX_CONTINUE;
651 } else if (mmie_keyidx >= 0) {
652 /* Broadcast/multicast robust management frame / BIP */
653 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
654 (rx->status->flag & RX_FLAG_IV_STRIPPED))
655 return RX_CONTINUE;
657 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
658 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
659 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
660 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
661 } else if (!ieee80211_has_protected(hdr->frame_control)) {
663 * The frame was not protected, so skip decryption. However, we
664 * need to set rx->key if there is a key that could have been
665 * used so that the frame may be dropped if encryption would
666 * have been expected.
668 struct ieee80211_key *key = NULL;
669 if (ieee80211_is_mgmt(hdr->frame_control) &&
670 is_multicast_ether_addr(hdr->addr1) &&
671 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
672 rx->key = key;
673 else if ((key = rcu_dereference(rx->sdata->default_key)))
674 rx->key = key;
675 return RX_CONTINUE;
676 } else {
678 * The device doesn't give us the IV so we won't be
679 * able to look up the key. That's ok though, we
680 * don't need to decrypt the frame, we just won't
681 * be able to keep statistics accurate.
682 * Except for key threshold notifications, should
683 * we somehow allow the driver to tell us which key
684 * the hardware used if this flag is set?
686 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
687 (rx->status->flag & RX_FLAG_IV_STRIPPED))
688 return RX_CONTINUE;
690 hdrlen = ieee80211_hdrlen(hdr->frame_control);
692 if (rx->skb->len < 8 + hdrlen)
693 return RX_DROP_UNUSABLE; /* TODO: count this? */
696 * no need to call ieee80211_wep_get_keyidx,
697 * it verifies a bunch of things we've done already
699 keyidx = rx->skb->data[hdrlen + 3] >> 6;
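/* the Key ID octet read above (at offset hdrlen + 3) carries the
 * key index in its top two bits */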
701 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
704 * RSNA-protected unicast frames should always be sent with
705 * pairwise or station-to-station keys, but for WEP we allow
706 * using a key index as well.
708 if (rx->key && rx->key->conf.alg != ALG_WEP &&
709 !is_multicast_ether_addr(hdr->addr1))
710 rx->key = NULL;
713 if (rx->key) {
714 rx->key->tx_rx_count++;
715 /* TODO: add threshold stuff again */
716 } else {
717 return RX_DROP_MONITOR;
720 /* Check for weak IVs if possible */
721 if (rx->sta && rx->key->conf.alg == ALG_WEP &&
722 ieee80211_is_data(hdr->frame_control) &&
723 (!(rx->status->flag & RX_FLAG_IV_STRIPPED) ||
724 !(rx->status->flag & RX_FLAG_DECRYPTED)) &&
725 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
726 rx->sta->wep_weak_iv_count++;
728 switch (rx->key->conf.alg) {
729 case ALG_WEP:
730 result = ieee80211_crypto_wep_decrypt(rx);
731 break;
732 case ALG_TKIP:
733 result = ieee80211_crypto_tkip_decrypt(rx);
734 break;
735 case ALG_CCMP:
736 result = ieee80211_crypto_ccmp_decrypt(rx);
737 break;
738 case ALG_AES_CMAC:
739 result = ieee80211_crypto_aes_cmac_decrypt(rx);
740 break;
743 /* either the frame has been decrypted or will be dropped */
744 rx->status->flag |= RX_FLAG_DECRYPTED;
746 return result;
749 static ieee80211_rx_result debug_noinline
750 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
752 struct ieee80211_local *local;
753 struct ieee80211_hdr *hdr;
754 struct sk_buff *skb;
756 local = rx->local;
757 skb = rx->skb;
758 hdr = (struct ieee80211_hdr *) skb->data;
760 if (!local->pspolling)
761 return RX_CONTINUE;
763 if (!ieee80211_has_fromds(hdr->frame_control))
764 /* this is not from AP */
765 return RX_CONTINUE;
767 if (!ieee80211_is_data(hdr->frame_control))
768 return RX_CONTINUE;
770 if (!ieee80211_has_moredata(hdr->frame_control)) {
771 /* AP has no more frames buffered for us */
772 local->pspolling = false;
773 return RX_CONTINUE;
776 /* more data bit is set, let's request a new frame from the AP */
777 ieee80211_send_pspoll(local, rx->sdata);
779 return RX_CONTINUE;
782 static void ap_sta_ps_start(struct sta_info *sta)
784 struct ieee80211_sub_if_data *sdata = sta->sdata;
785 struct ieee80211_local *local = sdata->local;
787 atomic_inc(&sdata->bss->num_sta_ps);
788 set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL);
789 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta);
790 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
791 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
792 sdata->dev->name, sta->sta.addr, sta->sta.aid);
793 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
796 static int ap_sta_ps_end(struct sta_info *sta)
798 struct ieee80211_sub_if_data *sdata = sta->sdata;
799 struct ieee80211_local *local = sdata->local;
800 int sent, buffered;
802 atomic_dec(&sdata->bss->num_sta_ps);
804 clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL);
805 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta);
807 if (!skb_queue_empty(&sta->ps_tx_buf))
808 sta_info_clear_tim_bit(sta);
810 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
811 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
812 sdata->dev->name, sta->sta.addr, sta->sta.aid);
813 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
815 /* Send all buffered frames to the station */
816 sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered);
817 buffered = ieee80211_add_pending_skbs(local, &sta->ps_tx_buf);
818 sent += buffered;
819 local->total_ps_buffered -= buffered;
821 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
822 printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
823 "since STA not sleeping anymore\n", sdata->dev->name,
824 sta->sta.addr, sta->sta.aid, sent - buffered, buffered);
825 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
827 return sent;
830 static ieee80211_rx_result debug_noinline
831 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
833 struct sta_info *sta = rx->sta;
834 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
836 if (!sta)
837 return RX_CONTINUE;
839 /* Update last_rx only for IBSS packets which are for the current
840 * BSSID to avoid keeping the current IBSS network alive in cases where
841 * other STAs are using different BSSID. */
842 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
843 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
844 NL80211_IFTYPE_ADHOC);
845 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0)
846 sta->last_rx = jiffies;
847 } else
848 if (!is_multicast_ether_addr(hdr->addr1) ||
849 rx->sdata->vif.type == NL80211_IFTYPE_STATION) {
850 /* Update last_rx only for unicast frames in order to prevent
851 * the Probe Request frames (the only broadcast frames from a
852 * STA in infrastructure mode) from keeping a connection alive.
853 * Mesh beacons will update last_rx if they are found to
854 * match the current local configuration when processed.
856 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
857 ieee80211_is_beacon(hdr->frame_control)) {
858 rx->sdata->u.mgd.last_beacon = jiffies;
859 } else
860 sta->last_rx = jiffies;
863 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
864 return RX_CONTINUE;
866 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
867 ieee80211_sta_rx_notify(rx->sdata, hdr);
869 sta->rx_fragments++;
870 sta->rx_bytes += rx->skb->len;
871 sta->last_signal = rx->status->signal;
872 sta->last_qual = rx->status->qual;
873 sta->last_noise = rx->status->noise;
876 * Change STA power saving mode only at the end of a frame
877 * exchange sequence.
879 if (!ieee80211_has_morefrags(hdr->frame_control) &&
880 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
881 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
882 if (test_sta_flags(sta, WLAN_STA_PS)) {
884 * Ignore doze->wake transitions that are
885 * indicated by non-data frames; the standard
886 * is unclear here, but for example going to
887 * PS mode and then scanning would cause a
888 * doze->wake transition for the probe request,
889 * and that is clearly undesirable.
891 if (ieee80211_is_data(hdr->frame_control) &&
892 !ieee80211_has_pm(hdr->frame_control))
893 rx->sent_ps_buffered += ap_sta_ps_end(sta);
894 } else {
895 if (ieee80211_has_pm(hdr->frame_control))
896 ap_sta_ps_start(sta);
900 /* Drop data::nullfunc frames silently, since they are used only to
901 * control station power saving mode. */
902 if (ieee80211_is_nullfunc(hdr->frame_control)) {
903 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
904 /* Update counter and free packet here to avoid counting this
905 * as a dropped packet. */
906 sta->rx_packets++;
907 dev_kfree_skb(rx->skb);
908 return RX_QUEUED;
911 return RX_CONTINUE;
912 } /* ieee80211_rx_h_sta_process */
914 static inline struct ieee80211_fragment_entry *
915 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
916 unsigned int frag, unsigned int seq, int rx_queue,
917 struct sk_buff **skb)
919 struct ieee80211_fragment_entry *entry;
920 int idx;
922 idx = sdata->fragment_next;
923 entry = &sdata->fragments[sdata->fragment_next++];
924 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
925 sdata->fragment_next = 0;
927 if (!skb_queue_empty(&entry->skb_list)) {
928 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
929 struct ieee80211_hdr *hdr =
930 (struct ieee80211_hdr *) entry->skb_list.next->data;
931 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
932 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
933 "addr1=%pM addr2=%pM\n",
934 sdata->dev->name, idx,
935 jiffies - entry->first_frag_time, entry->seq,
936 entry->last_frag, hdr->addr1, hdr->addr2);
937 #endif
938 __skb_queue_purge(&entry->skb_list);
941 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
942 *skb = NULL;
943 entry->first_frag_time = jiffies;
944 entry->seq = seq;
945 entry->rx_queue = rx_queue;
946 entry->last_frag = frag;
947 entry->ccmp = 0;
948 entry->extra_len = 0;
950 return entry;
953 static inline struct ieee80211_fragment_entry *
954 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
955 unsigned int frag, unsigned int seq,
956 int rx_queue, struct ieee80211_hdr *hdr)
958 struct ieee80211_fragment_entry *entry;
959 int i, idx;
961 idx = sdata->fragment_next;
962 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
963 struct ieee80211_hdr *f_hdr;
965 idx--;
966 if (idx < 0)
967 idx = IEEE80211_FRAGMENT_MAX - 1;
969 entry = &sdata->fragments[idx];
970 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
971 entry->rx_queue != rx_queue ||
972 entry->last_frag + 1 != frag)
973 continue;
975 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
978 * Check ftype and addresses are equal, else check next fragment
980 if (((hdr->frame_control ^ f_hdr->frame_control) &
981 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
982 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
983 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
984 continue;
986 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
987 __skb_queue_purge(&entry->skb_list);
988 continue;
990 return entry;
993 return NULL;
996 static ieee80211_rx_result debug_noinline
997 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
999 struct ieee80211_hdr *hdr;
1000 u16 sc;
1001 __le16 fc;
1002 unsigned int frag, seq;
1003 struct ieee80211_fragment_entry *entry;
1004 struct sk_buff *skb;
1006 hdr = (struct ieee80211_hdr *)rx->skb->data;
1007 fc = hdr->frame_control;
1008 sc = le16_to_cpu(hdr->seq_ctrl);
1009 frag = sc & IEEE80211_SCTL_FRAG;
1011 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
1012 (rx->skb)->len < 24 ||
1013 is_multicast_ether_addr(hdr->addr1))) {
1014 /* not fragmented */
1015 goto out;
1017 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1019 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1021 if (frag == 0) {
1022 /* This is the first fragment of a new frame. */
1023 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1024 rx->queue, &(rx->skb));
1025 if (rx->key && rx->key->conf.alg == ALG_CCMP &&
1026 ieee80211_has_protected(fc)) {
1027 /* Store CCMP PN so that we can verify that the next
1028 * fragment has a sequential PN value. */
1029 entry->ccmp = 1;
1030 memcpy(entry->last_pn,
1031 rx->key->u.ccmp.rx_pn[rx->queue],
1032 CCMP_PN_LEN);
1034 return RX_QUEUED;
1037 /* This is a fragment for a frame that should already be pending in
1038 * fragment cache. Add this fragment to the end of the pending entry.
1040 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
1041 if (!entry) {
1042 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1043 return RX_DROP_MONITOR;
1046 /* Verify that MPDUs within one MSDU have sequential PN values.
1047 * (IEEE 802.11i, 8.3.3.4.5) */
1048 if (entry->ccmp) {
1049 int i;
1050 u8 pn[CCMP_PN_LEN], *rpn;
1051 if (!rx->key || rx->key->conf.alg != ALG_CCMP)
1052 return RX_DROP_UNUSABLE;
1053 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
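/* increment the stored PN by one, treating the last byte as least
 * significant and carrying into earlier bytes, e.g. ...00 ff -> ...01 00 */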
1054 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1055 pn[i]++;
1056 if (pn[i])
1057 break;
1059 rpn = rx->key->u.ccmp.rx_pn[rx->queue];
1060 if (memcmp(pn, rpn, CCMP_PN_LEN))
1061 return RX_DROP_UNUSABLE;
1062 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1065 skb_pull(rx->skb, ieee80211_hdrlen(fc));
1066 __skb_queue_tail(&entry->skb_list, rx->skb);
1067 entry->last_frag = frag;
1068 entry->extra_len += rx->skb->len;
1069 if (ieee80211_has_morefrags(fc)) {
1070 rx->skb = NULL;
1071 return RX_QUEUED;
1074 rx->skb = __skb_dequeue(&entry->skb_list);
1075 if (skb_tailroom(rx->skb) < entry->extra_len) {
1076 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1077 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1078 GFP_ATOMIC))) {
1079 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1080 __skb_queue_purge(&entry->skb_list);
1081 return RX_DROP_UNUSABLE;
1084 while ((skb = __skb_dequeue(&entry->skb_list))) {
1085 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1086 dev_kfree_skb(skb);
1089 /* Complete frame has been reassembled - process it now */
1090 rx->flags |= IEEE80211_RX_FRAGMENTED;
1092 out:
1093 if (rx->sta)
1094 rx->sta->rx_packets++;
1095 if (is_multicast_ether_addr(hdr->addr1))
1096 rx->local->dot11MulticastReceivedFrameCount++;
1097 else
1098 ieee80211_led_rx(rx->local);
1099 return RX_CONTINUE;
1102 static ieee80211_rx_result debug_noinline
1103 ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1105 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1106 struct sk_buff *skb;
1107 int no_pending_pkts;
1108 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
1110 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
1111 !(rx->flags & IEEE80211_RX_RA_MATCH)))
1112 return RX_CONTINUE;
1114 if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
1115 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1116 return RX_DROP_UNUSABLE;
1118 skb = skb_dequeue(&rx->sta->tx_filtered);
1119 if (!skb) {
1120 skb = skb_dequeue(&rx->sta->ps_tx_buf);
1121 if (skb)
1122 rx->local->total_ps_buffered--;
1124 no_pending_pkts = skb_queue_empty(&rx->sta->tx_filtered) &&
1125 skb_queue_empty(&rx->sta->ps_tx_buf);
1127 if (skb) {
1128 struct ieee80211_hdr *hdr =
1129 (struct ieee80211_hdr *) skb->data;
1132 * Tell TX path to send one frame even though the STA may
1133 * still remain in PS mode after this frame exchange.
1135 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
1137 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1138 printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n",
1139 rx->sta->sta.addr, rx->sta->sta.aid,
1140 skb_queue_len(&rx->sta->ps_tx_buf));
1141 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1143 /* Use MoreData flag to indicate whether there are more
1144 * buffered frames for this STA */
1145 if (no_pending_pkts)
1146 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
1147 else
1148 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1150 dev_queue_xmit(skb);
1152 if (no_pending_pkts)
1153 sta_info_clear_tim_bit(rx->sta);
1154 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1155 } else if (!rx->sent_ps_buffered) {
1157 * FIXME: This can be the result of a race condition between
1158 * us expiring a frame and the station polling for it.
1159 * Should we send it a null-func frame indicating we
1160 * have nothing buffered for it?
1162 printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
1163 "though there are no buffered frames for it\n",
1164 rx->dev->name, rx->sta->sta.addr);
1165 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1168 /* Free PS Poll skb here instead of returning RX_DROP that would
1169 * count as a dropped frame. */
1170 dev_kfree_skb(rx->skb);
1172 return RX_QUEUED;
1175 static ieee80211_rx_result debug_noinline
1176 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1178 u8 *data = rx->skb->data;
1179 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1181 if (!ieee80211_is_data_qos(hdr->frame_control))
1182 return RX_CONTINUE;
1184 /* remove the qos control field, update frame type and meta-data */
1185 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1186 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1187 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1188 /* change frame type to non-QoS */
1189 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1191 return RX_CONTINUE;
1194 static int
1195 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1197 if (unlikely(!rx->sta ||
1198 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
1199 return -EACCES;
1201 return 0;
1204 static int
1205 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1208 * Pass through unencrypted frames if the hardware has
1209 * decrypted them already.
1211 if (rx->status->flag & RX_FLAG_DECRYPTED)
1212 return 0;
1214 /* Drop unencrypted frames if key is set. */
1215 if (unlikely(!ieee80211_has_protected(fc) &&
1216 !ieee80211_is_nullfunc(fc) &&
1217 ieee80211_is_data(fc) &&
1218 (rx->key || rx->sdata->drop_unencrypted)))
1219 return -EACCES;
1220 if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
1221 if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1222 rx->key))
1223 return -EACCES;
1224 /* BIP does not use Protected field, so need to check MMIE */
1225 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb)
1226 && ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
1227 rx->key))
1228 return -EACCES;
1230 * When using MFP, Action frames are not allowed prior to
1231 * having configured keys.
1233 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
1234 ieee80211_is_robust_mgmt_frame(
1235 (struct ieee80211_hdr *) rx->skb->data)))
1236 return -EACCES;
1239 return 0;
1242 static int
1243 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1245 struct net_device *dev = rx->dev;
1246 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1248 return ieee80211_data_to_8023(rx->skb, dev->dev_addr, sdata->vif.type);
1252 * requires that rx->skb is a frame with ethernet header
1254 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1256 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1257 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1258 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1261 * Allow EAPOL frames to us/the PAE group address regardless
1262 * of whether the frame was encrypted or not.
1264 if (ehdr->h_proto == htons(ETH_P_PAE) &&
1265 (compare_ether_addr(ehdr->h_dest, rx->dev->dev_addr) == 0 ||
1266 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1267 return true;
1269 if (ieee80211_802_1x_port_control(rx) ||
1270 ieee80211_drop_unencrypted(rx, fc))
1271 return false;
1273 return true;
1277 * requires that rx->skb is a frame with ethernet header
1279 static void
1280 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1282 struct net_device *dev = rx->dev;
1283 struct ieee80211_local *local = rx->local;
1284 struct sk_buff *skb, *xmit_skb;
1285 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1286 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1287 struct sta_info *dsta;
1289 skb = rx->skb;
1290 xmit_skb = NULL;
1292 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1293 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1294 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1295 (rx->flags & IEEE80211_RX_RA_MATCH)) {
1296 if (is_multicast_ether_addr(ehdr->h_dest)) {
1298 * send multicast frames both to higher layers in
1299 * local net stack and back to the wireless medium
1301 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1302 if (!xmit_skb && net_ratelimit())
1303 printk(KERN_DEBUG "%s: failed to clone "
1304 "multicast frame\n", dev->name);
1305 } else {
1306 dsta = sta_info_get(local, skb->data);
1307 if (dsta && dsta->sdata->dev == dev) {
1309 * The destination station is associated to
1310 * this AP (in this VLAN), so send the frame
1311 * directly to it and do not pass it to local
1312 * net stack.
1314 xmit_skb = skb;
1315 skb = NULL;
1320 if (skb) {
1321 int align __maybe_unused;
1323 #if defined(CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT) || !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
1325 * 'align' will only take the values 0 or 2 here
1326 * since all frames are required to be aligned
1327 * to 2-byte boundaries when being passed to
1328 * mac80211. That also explains the __skb_push()
1329 * below.
1331 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1332 if (align) {
1333 if (WARN_ON(skb_headroom(skb) < 3)) {
1334 dev_kfree_skb(skb);
1335 skb = NULL;
1336 } else {
1337 u8 *data = skb->data;
1338 size_t len = skb->len;
1339 u8 *new = __skb_push(skb, align);
1340 memmove(new, data, len);
1341 __skb_trim(skb, len);
1344 #endif
1346 if (skb) {
1347 /* deliver to local stack */
1348 skb->protocol = eth_type_trans(skb, dev);
1349 memset(skb->cb, 0, sizeof(skb->cb));
1350 netif_rx(skb);
1354 if (xmit_skb) {
1355 /* send to wireless media */
1356 xmit_skb->protocol = htons(ETH_P_802_3);
1357 skb_reset_network_header(xmit_skb);
1358 skb_reset_mac_header(xmit_skb);
1359 dev_queue_xmit(xmit_skb);
1363 static ieee80211_rx_result debug_noinline
1364 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1366 struct net_device *dev = rx->dev;
1367 struct ieee80211_local *local = rx->local;
1368 u16 ethertype;
1369 u8 *payload;
1370 struct sk_buff *skb = rx->skb, *frame = NULL;
1371 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1372 __le16 fc = hdr->frame_control;
1373 const struct ethhdr *eth;
1374 int remaining, err;
1375 u8 dst[ETH_ALEN];
1376 u8 src[ETH_ALEN];
1378 if (unlikely(!ieee80211_is_data(fc)))
1379 return RX_CONTINUE;
1381 if (unlikely(!ieee80211_is_data_present(fc)))
1382 return RX_DROP_MONITOR;
1384 if (!(rx->flags & IEEE80211_RX_AMSDU))
1385 return RX_CONTINUE;
1387 err = __ieee80211_data_to_8023(rx);
1388 if (unlikely(err))
1389 return RX_DROP_UNUSABLE;
1391 skb->dev = dev;
1393 dev->stats.rx_packets++;
1394 dev->stats.rx_bytes += skb->len;
1396 /* skip the wrapping header */
1397 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
1398 if (!eth)
1399 return RX_DROP_UNUSABLE;
1401 while (skb != frame) {
1402 u8 padding;
1403 __be16 len = eth->h_proto;
1404 unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
1406 remaining = skb->len;
1407 memcpy(dst, eth->h_dest, ETH_ALEN);
1408 memcpy(src, eth->h_source, ETH_ALEN);
1410 padding = ((4 - subframe_len) & 0x3);
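/* e.g. subframe_len = 62 gives padding = 2, so the next subframe again
 * starts on a four-byte boundary */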
1411 /* the last MSDU has no padding */
1412 if (subframe_len > remaining)
1413 return RX_DROP_UNUSABLE;
1415 skb_pull(skb, sizeof(struct ethhdr));
1416 /* if last subframe reuse skb */
1417 if (remaining <= subframe_len + padding)
1418 frame = skb;
1419 else {
1421 * Allocate and reserve two bytes more for payload
1422 * alignment since sizeof(struct ethhdr) is 14.
1424 frame = dev_alloc_skb(
1425 ALIGN(local->hw.extra_tx_headroom, 4) +
1426 subframe_len + 2);
1428 if (frame == NULL)
1429 return RX_DROP_UNUSABLE;
1431 skb_reserve(frame,
1432 ALIGN(local->hw.extra_tx_headroom, 4) +
1433 sizeof(struct ethhdr) + 2);
1434 memcpy(skb_put(frame, ntohs(len)), skb->data,
1435 ntohs(len));
1437 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
1438 padding);
1439 if (!eth) {
1440 dev_kfree_skb(frame);
1441 return RX_DROP_UNUSABLE;
1445 skb_reset_network_header(frame);
1446 frame->dev = dev;
1447 frame->priority = skb->priority;
1448 rx->skb = frame;
1450 payload = frame->data;
1451 ethertype = (payload[6] << 8) | payload[7];
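/* bytes 6 and 7 of the payload hold the EtherType that follows the
 * 6-byte RFC 1042 / bridge-tunnel SNAP prefix checked below */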
1453 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1454 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1455 compare_ether_addr(payload,
1456 bridge_tunnel_header) == 0)) {
1457 /* remove RFC1042 or Bridge-Tunnel
1458 * encapsulation and replace EtherType */
1459 skb_pull(frame, 6);
1460 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1461 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1462 } else {
1463 memcpy(skb_push(frame, sizeof(__be16)),
1464 &len, sizeof(__be16));
1465 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1466 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1469 if (!ieee80211_frame_allowed(rx, fc)) {
1470 if (skb == frame) /* last frame */
1471 return RX_DROP_UNUSABLE;
1472 dev_kfree_skb(frame);
1473 continue;
1476 ieee80211_deliver_skb(rx);
1479 return RX_QUEUED;
1482 #ifdef CONFIG_MAC80211_MESH
1483 static ieee80211_rx_result
1484 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1486 struct ieee80211_hdr *hdr;
1487 struct ieee80211s_hdr *mesh_hdr;
1488 unsigned int hdrlen;
1489 struct sk_buff *skb = rx->skb, *fwd_skb;
1491 hdr = (struct ieee80211_hdr *) skb->data;
1492 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1493 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1495 if (!ieee80211_is_data(hdr->frame_control))
1496 return RX_CONTINUE;
1498 if (!mesh_hdr->ttl)
1499 /* illegal frame */
1500 return RX_DROP_MONITOR;
1502 if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6){
1503 struct ieee80211_sub_if_data *sdata;
1504 struct mesh_path *mppath;
1506 sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1507 rcu_read_lock();
1508 mppath = mpp_path_lookup(mesh_hdr->eaddr2, sdata);
1509 if (!mppath) {
1510 mpp_path_add(mesh_hdr->eaddr2, hdr->addr4, sdata);
1511 } else {
1512 spin_lock_bh(&mppath->state_lock);
1513 mppath->exp_time = jiffies;
1514 if (compare_ether_addr(mppath->mpp, hdr->addr4) != 0)
1515 memcpy(mppath->mpp, hdr->addr4, ETH_ALEN);
1516 spin_unlock_bh(&mppath->state_lock);
1518 rcu_read_unlock();
1521 if (compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0)
1522 return RX_CONTINUE;
1524 mesh_hdr->ttl--;
1526 if (rx->flags & IEEE80211_RX_RA_MATCH) {
1527 if (!mesh_hdr->ttl)
1528 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1529 dropped_frames_ttl);
1530 else {
1531 struct ieee80211_hdr *fwd_hdr;
1532 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1534 if (!fwd_skb && net_ratelimit())
1535 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1536 rx->dev->name);
1538 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1540 * Save TA to addr1 to send TA a path error if a
1541 * suitable next hop is not found
1543 memcpy(fwd_hdr->addr1, fwd_hdr->addr2, ETH_ALEN);
1544 memcpy(fwd_hdr->addr2, rx->dev->dev_addr, ETH_ALEN);
1545 fwd_skb->dev = rx->local->mdev;
1546 fwd_skb->iif = rx->dev->ifindex;
1547 dev_queue_xmit(fwd_skb);
1551 if (is_multicast_ether_addr(hdr->addr3) ||
1552 rx->dev->flags & IFF_PROMISC)
1553 return RX_CONTINUE;
1554 else
1555 return RX_DROP_MONITOR;
1557 #endif
1559 static ieee80211_rx_result debug_noinline
1560 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1562 struct net_device *dev = rx->dev;
1563 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1564 __le16 fc = hdr->frame_control;
1565 int err;
1567 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1568 return RX_CONTINUE;
1570 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1571 return RX_DROP_MONITOR;
1573 err = __ieee80211_data_to_8023(rx);
1574 if (unlikely(err))
1575 return RX_DROP_UNUSABLE;
1577 if (!ieee80211_frame_allowed(rx, fc))
1578 return RX_DROP_MONITOR;
1580 rx->skb->dev = dev;
1582 dev->stats.rx_packets++;
1583 dev->stats.rx_bytes += rx->skb->len;
1585 ieee80211_deliver_skb(rx);
1587 return RX_QUEUED;
1590 static ieee80211_rx_result debug_noinline
1591 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1593 struct ieee80211_local *local = rx->local;
1594 struct ieee80211_hw *hw = &local->hw;
1595 struct sk_buff *skb = rx->skb;
1596 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
1597 struct tid_ampdu_rx *tid_agg_rx;
1598 u16 start_seq_num;
1599 u16 tid;
1601 if (likely(!ieee80211_is_ctl(bar->frame_control)))
1602 return RX_CONTINUE;
1604 if (ieee80211_is_back_req(bar->frame_control)) {
1605 if (!rx->sta)
1606 return RX_CONTINUE;
1607 tid = le16_to_cpu(bar->control) >> 12;
1608 if (rx->sta->ampdu_mlme.tid_state_rx[tid]
1609 != HT_AGG_STATE_OPERATIONAL)
1610 return RX_CONTINUE;
1611 tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
1613 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
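/* the TID sits in the top four bits of the BAR control field and the
 * fragment number occupies the low four bits of the sequence control
 * field, hence the two shifts above */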
1615 /* reset session timer */
1616 if (tid_agg_rx->timeout)
1617 mod_timer(&tid_agg_rx->session_timer,
1618 TU_TO_EXP_TIME(tid_agg_rx->timeout));
1620 /* manage reordering buffer according to requested */
1621 /* sequence number */
1622 rcu_read_lock();
1623 ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, NULL,
1624 start_seq_num, 1);
1625 rcu_read_unlock();
1626 return RX_DROP_UNUSABLE;
1629 return RX_CONTINUE;
1632 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1633 struct ieee80211_mgmt *mgmt,
1634 size_t len)
1636 struct ieee80211_local *local = sdata->local;
1637 struct sk_buff *skb;
1638 struct ieee80211_mgmt *resp;
1640 if (compare_ether_addr(mgmt->da, sdata->dev->dev_addr) != 0) {
1641 /* Not addressed to our own unicast address */
1642 return;
1645 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
1646 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
1647 /* Not from the current AP. */
1648 return;
1651 if (sdata->u.mgd.state == IEEE80211_STA_MLME_ASSOCIATE) {
1652 /* Association in progress; ignore SA Query */
1653 return;
1656 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
1657 /* Too short SA Query request frame */
1658 return;
1661 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
1662 if (skb == NULL)
1663 return;
1665 skb_reserve(skb, local->hw.extra_tx_headroom);
1666 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
1667 memset(resp, 0, 24);
1668 memcpy(resp->da, mgmt->sa, ETH_ALEN);
1669 memcpy(resp->sa, sdata->dev->dev_addr, ETH_ALEN);
1670 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
1671 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1672 IEEE80211_STYPE_ACTION);
1673 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
1674 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
1675 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
1676 memcpy(resp->u.action.u.sa_query.trans_id,
1677 mgmt->u.action.u.sa_query.trans_id,
1678 WLAN_SA_QUERY_TR_ID_LEN);
1680 ieee80211_tx_skb(sdata, skb, 1);
1683 static ieee80211_rx_result debug_noinline
1684 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1686 struct ieee80211_local *local = rx->local;
1687 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1688 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1689 struct ieee80211_bss *bss;
1690 int len = rx->skb->len;
1692 if (!ieee80211_is_action(mgmt->frame_control))
1693 return RX_CONTINUE;
1695 if (!rx->sta)
1696 return RX_DROP_MONITOR;
1698 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1699 return RX_DROP_MONITOR;
1701 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
1702 return RX_DROP_MONITOR;
1704 /* all categories we currently handle have action_code */
1705 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1706 return RX_DROP_MONITOR;
1708 switch (mgmt->u.action.category) {
1709 case WLAN_CATEGORY_BACK:
1711 * The aggregation code is not prepared to handle
1712 * anything but STA/AP due to the BSSID handling;
1713 * IBSS could work in the code but isn't supported
1714 * by drivers or the standard.
1716 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
1717 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1718 sdata->vif.type != NL80211_IFTYPE_AP)
1719 return RX_DROP_MONITOR;
1721 switch (mgmt->u.action.u.addba_req.action_code) {
1722 case WLAN_ACTION_ADDBA_REQ:
1723 if (len < (IEEE80211_MIN_ACTION_SIZE +
1724 sizeof(mgmt->u.action.u.addba_req)))
1725 return RX_DROP_MONITOR;
1726 ieee80211_process_addba_request(local, rx->sta, mgmt, len);
1727 break;
1728 case WLAN_ACTION_ADDBA_RESP:
1729 if (len < (IEEE80211_MIN_ACTION_SIZE +
1730 sizeof(mgmt->u.action.u.addba_resp)))
1731 return RX_DROP_MONITOR;
1732 ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
1733 break;
1734 case WLAN_ACTION_DELBA:
1735 if (len < (IEEE80211_MIN_ACTION_SIZE +
1736 sizeof(mgmt->u.action.u.delba)))
1737 return RX_DROP_MONITOR;
1738 ieee80211_process_delba(sdata, rx->sta, mgmt, len);
1739 break;
1741 break;
1742 case WLAN_CATEGORY_SPECTRUM_MGMT:
1743 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
1744 return RX_DROP_MONITOR;
1746 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1747 return RX_DROP_MONITOR;
1749 switch (mgmt->u.action.u.measurement.action_code) {
1750 case WLAN_ACTION_SPCT_MSR_REQ:
1751 if (len < (IEEE80211_MIN_ACTION_SIZE +
1752 sizeof(mgmt->u.action.u.measurement)))
1753 return RX_DROP_MONITOR;
1754 ieee80211_process_measurement_req(sdata, mgmt, len);
1755 break;
1756 case WLAN_ACTION_SPCT_CHL_SWITCH:
1757 if (len < (IEEE80211_MIN_ACTION_SIZE +
1758 sizeof(mgmt->u.action.u.chan_switch)))
1759 return RX_DROP_MONITOR;
1761 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1762 return RX_DROP_MONITOR;
1764 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
1765 return RX_DROP_MONITOR;
1767 bss = ieee80211_rx_bss_get(local, sdata->u.mgd.bssid,
1768 local->hw.conf.channel->center_freq,
1769 sdata->u.mgd.ssid,
1770 sdata->u.mgd.ssid_len);
1771 if (!bss)
1772 return RX_DROP_MONITOR;
1774 ieee80211_sta_process_chanswitch(sdata,
1775 &mgmt->u.action.u.chan_switch.sw_elem, bss);
1776 ieee80211_rx_bss_put(local, bss);
1777 break;
1779 break;
1780 case WLAN_CATEGORY_SA_QUERY:
1781 if (len < (IEEE80211_MIN_ACTION_SIZE +
1782 sizeof(mgmt->u.action.u.sa_query)))
1783 return RX_DROP_MONITOR;
1784 switch (mgmt->u.action.u.sa_query.action) {
1785 case WLAN_ACTION_SA_QUERY_REQUEST:
1786 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1787 return RX_DROP_MONITOR;
1788 ieee80211_process_sa_query_req(sdata, mgmt, len);
1789 break;
1790 case WLAN_ACTION_SA_QUERY_RESPONSE:
1792 * SA Query response is currently only used in AP mode
1793 * and it is processed in user space.
1795 return RX_CONTINUE;
1797 break;
1798 default:
1799 return RX_CONTINUE;
1802 rx->sta->rx_packets++;
1803 dev_kfree_skb(rx->skb);
1804 return RX_QUEUED;
1807 static ieee80211_rx_result debug_noinline
1808 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1810 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1811 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1813 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1814 return RX_DROP_MONITOR;
1816 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
1817 return RX_DROP_MONITOR;
1819 if (ieee80211_vif_is_mesh(&sdata->vif))
1820 return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status);
1822 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
1823 return ieee80211_ibss_rx_mgmt(sdata, rx->skb, rx->status);
1825 if (sdata->vif.type == NL80211_IFTYPE_STATION)
1826 return ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status);
1828 return RX_DROP_MONITOR;
1831 static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1832 struct ieee80211_hdr *hdr,
1833 struct ieee80211_rx_data *rx)
1835 int keyidx;
1836 unsigned int hdrlen;
1838 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1839 if (rx->skb->len >= hdrlen + 4)
1840 keyidx = rx->skb->data[hdrlen + 3] >> 6;
1841 else
1842 keyidx = -1;
1844 if (!rx->sta) {
1846 * Some hardware seem to generate incorrect Michael MIC
1847 * reports; ignore them to avoid triggering countermeasures.
1849 goto ignore;
1852 if (!ieee80211_has_protected(hdr->frame_control))
1853 goto ignore;
1855 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
1857 * APs with pairwise keys should never receive Michael MIC
1858 * errors for non-zero keyidx because these are reserved for
1859 * group keys and only the AP is sending real multicast
1860 * frames in the BSS.
1862 goto ignore;
1865 if (!ieee80211_is_data(hdr->frame_control) &&
1866 !ieee80211_is_auth(hdr->frame_control))
1867 goto ignore;
1869 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL);
1870 ignore:
1871 dev_kfree_skb(rx->skb);
1872 rx->skb = NULL;
1875 /* TODO: use IEEE80211_RX_FRAGMENTED */
1876 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
1878 struct ieee80211_sub_if_data *sdata;
1879 struct ieee80211_local *local = rx->local;
1880 struct ieee80211_rtap_hdr {
1881 struct ieee80211_radiotap_header hdr;
1882 u8 flags;
1883 u8 rate;
1884 __le16 chan_freq;
1885 __le16 chan_flags;
1886 } __attribute__ ((packed)) *rthdr;
1887 struct sk_buff *skb = rx->skb, *skb2;
1888 struct net_device *prev_dev = NULL;
1889 struct ieee80211_rx_status *status = rx->status;
1891 if (rx->flags & IEEE80211_RX_CMNTR_REPORTED)
1892 goto out_free_skb;
1894 if (skb_headroom(skb) < sizeof(*rthdr) &&
1895 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
1896 goto out_free_skb;
1898 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
1899 memset(rthdr, 0, sizeof(*rthdr));
1900 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
1901 rthdr->hdr.it_present =
1902 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
1903 (1 << IEEE80211_RADIOTAP_RATE) |
1904 (1 << IEEE80211_RADIOTAP_CHANNEL));
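/* note: radiotap's rate field is in units of 500 kbps, while
 * ieee80211_rate.bitrate is in units of 100 kbps, hence the division by 5 */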
1906 rthdr->rate = rx->rate->bitrate / 5;
1907 rthdr->chan_freq = cpu_to_le16(status->freq);
1909 if (status->band == IEEE80211_BAND_5GHZ)
1910 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
1911 IEEE80211_CHAN_5GHZ);
1912 else
1913 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
1914 IEEE80211_CHAN_2GHZ);
1916 skb_set_mac_header(skb, 0);
1917 skb->ip_summed = CHECKSUM_UNNECESSARY;
1918 skb->pkt_type = PACKET_OTHERHOST;
1919 skb->protocol = htons(ETH_P_802_2);
1921 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1922 if (!netif_running(sdata->dev))
1923 continue;
1925 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
1926 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
1927 continue;
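/* deliver a clone to every cooked-monitor interface except the last one
 * found; the last one gets the original skb below to save one copy */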
1929 if (prev_dev) {
1930 skb2 = skb_clone(skb, GFP_ATOMIC);
1931 if (skb2) {
1932 skb2->dev = prev_dev;
1933 netif_rx(skb2);
1937 prev_dev = sdata->dev;
1938 sdata->dev->stats.rx_packets++;
1939 sdata->dev->stats.rx_bytes += skb->len;
1942 if (prev_dev) {
1943 skb->dev = prev_dev;
1944 netif_rx(skb);
1945 skb = NULL;
1946 } else
1947 goto out_free_skb;
1949 rx->flags |= IEEE80211_RX_CMNTR_REPORTED;
1950 return;
1952 out_free_skb:
1953 dev_kfree_skb(skb);
1957 static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
1958 struct ieee80211_rx_data *rx,
1959 struct sk_buff *skb)
1961 ieee80211_rx_result res = RX_DROP_MONITOR;
1963 rx->skb = skb;
1964 rx->sdata = sdata;
1965 rx->dev = sdata->dev;
1967 #define CALL_RXH(rxh) \
1968 do { \
1969 res = rxh(rx); \
1970 if (res != RX_CONTINUE) \
1971 goto rxh_done; \
1972 } while (0);
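/*
 * Run the RX handlers in sequence; the first handler that returns
 * anything other than RX_CONTINUE ends the chain, and the result is
 * dealt with in rxh_done below.
 */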
1974 CALL_RXH(ieee80211_rx_h_passive_scan)
1975 CALL_RXH(ieee80211_rx_h_check)
1976 CALL_RXH(ieee80211_rx_h_decrypt)
1977 CALL_RXH(ieee80211_rx_h_check_more_data)
1978 CALL_RXH(ieee80211_rx_h_sta_process)
1979 CALL_RXH(ieee80211_rx_h_defragment)
1980 CALL_RXH(ieee80211_rx_h_ps_poll)
1981 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
1982 /* must be after MMIC verify so header is counted in MPDU mic */
1983 CALL_RXH(ieee80211_rx_h_remove_qos_control)
1984 CALL_RXH(ieee80211_rx_h_amsdu)
1985 #ifdef CONFIG_MAC80211_MESH
1986 if (ieee80211_vif_is_mesh(&sdata->vif))
1987 CALL_RXH(ieee80211_rx_h_mesh_fwding);
1988 #endif
1989 CALL_RXH(ieee80211_rx_h_data)
1990 CALL_RXH(ieee80211_rx_h_ctrl)
1991 CALL_RXH(ieee80211_rx_h_action)
1992 CALL_RXH(ieee80211_rx_h_mgmt)
1994 #undef CALL_RXH
1996 rxh_done:
1997 switch (res) {
1998 case RX_DROP_MONITOR:
1999 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
2000 if (rx->sta)
2001 rx->sta->rx_dropped++;
2002 /* fall through */
2003 case RX_CONTINUE:
2004 ieee80211_rx_cooked_monitor(rx);
2005 break;
2006 case RX_DROP_UNUSABLE:
2007 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
2008 if (rx->sta)
2009 rx->sta->rx_dropped++;
2010 dev_kfree_skb(rx->skb);
2011 break;
2012 case RX_QUEUED:
2013 I802_DEBUG_INC(sdata->local->rx_handlers_queued);
2014 break;
2018 /* main receive path */
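/*
 * Decide whether a frame should be handled on this interface. Returns 0
 * when it should not; returns 1 to run the RX handlers on it, clearing
 * IEEE80211_RX_RA_MATCH in rx->flags when the frame is accepted only in
 * passing (scanning or promiscuous mode) rather than actually addressed
 * to this interface.
 */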
2020 static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2021 struct ieee80211_rx_data *rx,
2022 struct ieee80211_hdr *hdr)
2024 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, sdata->vif.type);
2025 int multicast = is_multicast_ether_addr(hdr->addr1);
2027 switch (sdata->vif.type) {
2028 case NL80211_IFTYPE_STATION:
2029 if (!bssid)
2030 return 0;
2031 if (!ieee80211_bssid_match(bssid, sdata->u.mgd.bssid)) {
2032 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2033 return 0;
2034 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2035 } else if (!multicast &&
2036 compare_ether_addr(sdata->dev->dev_addr,
2037 hdr->addr1) != 0) {
2038 if (!(sdata->dev->flags & IFF_PROMISC))
2039 return 0;
2040 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2042 break;
2043 case NL80211_IFTYPE_ADHOC:
2044 if (!bssid)
2045 return 0;
2046 if (ieee80211_is_beacon(hdr->frame_control)) {
2047 return 1;
2049 else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
2050 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2051 return 0;
2052 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2053 } else if (!multicast &&
2054 compare_ether_addr(sdata->dev->dev_addr,
2055 hdr->addr1) != 0) {
2056 if (!(sdata->dev->flags & IFF_PROMISC))
2057 return 0;
2058 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2059 } else if (!rx->sta) {
2060 int rate_idx;
2061 if (rx->status->flag & RX_FLAG_HT)
2062 rate_idx = 0; /* TODO: HT rates */
2063 else
2064 rate_idx = rx->status->rate_idx;
2065 rx->sta = ieee80211_ibss_add_sta(sdata, bssid, hdr->addr2,
2066 BIT(rate_idx));
2068 break;
2069 case NL80211_IFTYPE_MESH_POINT:
2070 if (!multicast &&
2071 compare_ether_addr(sdata->dev->dev_addr,
2072 hdr->addr1) != 0) {
2073 if (!(sdata->dev->flags & IFF_PROMISC))
2074 return 0;
2076 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2078 break;
2079 case NL80211_IFTYPE_AP_VLAN:
2080 case NL80211_IFTYPE_AP:
2081 if (!bssid) {
2082 if (compare_ether_addr(sdata->dev->dev_addr,
2083 hdr->addr1))
2084 return 0;
2085 } else if (!ieee80211_bssid_match(bssid,
2086 sdata->dev->dev_addr)) {
2087 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2088 return 0;
2089 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2091 break;
2092 case NL80211_IFTYPE_WDS:
2093 if (bssid || !ieee80211_is_data(hdr->frame_control))
2094 return 0;
2095 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2096 return 0;
2097 break;
2098 case NL80211_IFTYPE_MONITOR:
2099 /* take everything */
2100 break;
2101 case NL80211_IFTYPE_UNSPECIFIED:
2102 case __NL80211_IFTYPE_AFTER_LAST:
2103 /* should never get here */
2104 WARN_ON(1);
2105 break;
2108 return 1;
2111 /*
2112 * This is the actual Rx frames handler. As it belongs to the Rx path
2113 * it must be called with rcu_read_lock protection.
2114 */
2115 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2116 struct sk_buff *skb,
2117 struct ieee80211_rx_status *status,
2118 struct ieee80211_rate *rate)
2120 struct ieee80211_local *local = hw_to_local(hw);
2121 struct ieee80211_sub_if_data *sdata;
2122 struct ieee80211_hdr *hdr;
2123 struct ieee80211_rx_data rx;
2124 int prepares;
2125 struct ieee80211_sub_if_data *prev = NULL;
2126 struct sk_buff *skb_new;
2128 hdr = (struct ieee80211_hdr *)skb->data;
2129 memset(&rx, 0, sizeof(rx));
2130 rx.skb = skb;
2131 rx.local = local;
2133 rx.status = status;
2134 rx.rate = rate;
2136 if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
2137 local->dot11ReceivedFragmentCount++;
2139 rx.sta = sta_info_get(local, hdr->addr2);
2140 if (rx.sta) {
2141 rx.sdata = rx.sta->sdata;
2142 rx.dev = rx.sta->sdata->dev;
2145 if ((status->flag & RX_FLAG_MMIC_ERROR)) {
2146 ieee80211_rx_michael_mic_report(local->mdev, hdr, &rx);
2147 return;
2150 if (unlikely(local->sw_scanning || local->hw_scanning))
2151 rx.flags |= IEEE80211_RX_IN_SCAN;
2153 ieee80211_parse_qos(&rx);
2154 ieee80211_verify_alignment(&rx);
2156 skb = rx.skb;
2158 if (rx.sdata && ieee80211_is_data(hdr->frame_control)) {
2159 rx.flags |= IEEE80211_RX_RA_MATCH;
2160 prepares = prepare_for_handlers(rx.sdata, &rx, hdr);
2161 if (prepares)
2162 prev = rx.sdata;
2163 } else list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2164 if (!netif_running(sdata->dev))
2165 continue;
2167 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2168 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2169 continue;
2171 rx.flags |= IEEE80211_RX_RA_MATCH;
2172 prepares = prepare_for_handlers(sdata, &rx, hdr);
2174 if (!prepares)
2175 continue;
2177 /*
2178 * frame is destined for this interface, but if it's not
2179 * also for the previous one we handle that after the
2180 * loop to avoid copying the SKB once too much
2181 */
2183 if (!prev) {
2184 prev = sdata;
2185 continue;
2188 /*
2189 * frame was destined for the previous interface
2190 * so invoke RX handlers for it
2191 */
2193 skb_new = skb_copy(skb, GFP_ATOMIC);
2194 if (!skb_new) {
2195 if (net_ratelimit())
2196 printk(KERN_DEBUG "%s: failed to copy "
2197 "multicast frame for %s\n",
2198 wiphy_name(local->hw.wiphy),
2199 prev->dev->name);
2200 continue;
2202 ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
2203 prev = sdata;
2205 if (prev)
2206 ieee80211_invoke_rx_handlers(prev, &rx, skb);
2207 else
2208 dev_kfree_skb(skb);
2211 #define SEQ_MODULO 0x1000
2212 #define SEQ_MASK 0xfff
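/*
 * 802.11 sequence numbers are 12 bits wide, so all of the arithmetic
 * below is done modulo 4096 and "less than" means "less than half the
 * sequence space behind". For example, seq_less(4090, 10) is true
 * (4090 precedes 10 across the wrap-around) while seq_less(10, 4090)
 * is false.
 */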
2214 static inline int seq_less(u16 sq1, u16 sq2)
2216 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
2219 static inline u16 seq_inc(u16 sq)
2221 return (sq + 1) & SEQ_MASK;
2224 static inline u16 seq_sub(u16 sq1, u16 sq2)
2226 return (sq1 - sq2) & SEQ_MASK;
2230 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
2231 struct tid_ampdu_rx *tid_agg_rx,
2232 int index)
2234 struct ieee80211_supported_band *sband;
2235 struct ieee80211_rate *rate;
2236 struct ieee80211_rx_status status;
2238 if (!tid_agg_rx->reorder_buf[index])
2239 goto no_frame;
2241 /* release the reordered frames to stack */
2242 memcpy(&status, tid_agg_rx->reorder_buf[index]->cb, sizeof(status));
2243 sband = hw->wiphy->bands[status.band];
2244 if (status.flag & RX_FLAG_HT)
2245 rate = sband->bitrates; /* TODO: HT rates */
2246 else
2247 rate = &sband->bitrates[status.rate_idx];
2248 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index],
2249 &status, rate);
2250 tid_agg_rx->stored_mpdu_num--;
2251 tid_agg_rx->reorder_buf[index] = NULL;
2253 no_frame:
2254 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
2258 /*
2259 * Timeout (in jiffies) for skbs waiting in the RX reorder buffer. If an
2260 * skb has been in the buffer for longer than this, the earlier frames
2261 * that have not yet been received are assumed to be lost and the skb
2262 * can be released for processing. This may also release other skbs from
2263 * the reorder buffer if there are no further gaps between the frames.
2264 */
2265 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
2267 /*
2268 * As this function belongs to the Rx path it must be called with
2269 * the proper rcu_read_lock protection for its flow.
2270 */
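/*
 * Returns non-zero when the frame has been consumed here (dropped,
 * buffered or released to the stack); returns zero when the caller
 * should process the frame itself.
 */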
2271 static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2272 struct tid_ampdu_rx *tid_agg_rx,
2273 struct sk_buff *skb,
2274 struct ieee80211_rx_status *rxstatus,
2275 u16 mpdu_seq_num,
2276 int bar_req)
2278 u16 head_seq_num, buf_size;
2279 int index;
2281 buf_size = tid_agg_rx->buf_size;
2282 head_seq_num = tid_agg_rx->head_seq_num;
2284 /* frame with out of date sequence number */
2285 if (seq_less(mpdu_seq_num, head_seq_num)) {
2286 dev_kfree_skb(skb);
2287 return 1;
2290 /* if frame sequence number exceeds our buffering window size or
2291 * block Ack Request arrived - release stored frames */
2292 if ((!seq_less(mpdu_seq_num, head_seq_num + buf_size)) || (bar_req)) {
2293 /* new head of the reordering buffer */
2294 if (bar_req)
2295 head_seq_num = mpdu_seq_num;
2296 else
2297 head_seq_num =
2298 seq_inc(seq_sub(mpdu_seq_num, buf_size));
2299 /* release stored frames up to new head to stack */
2300 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
2301 index = seq_sub(tid_agg_rx->head_seq_num,
2302 tid_agg_rx->ssn)
2303 % tid_agg_rx->buf_size;
2304 ieee80211_release_reorder_frame(hw, tid_agg_rx,
2305 index);
2307 if (bar_req)
2308 return 1;
2311 /* now the new frame is always within the
2312 * reordering buffer window */
2313 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn)
2314 % tid_agg_rx->buf_size;
2315 /* check if we already stored this frame */
2316 if (tid_agg_rx->reorder_buf[index]) {
2317 dev_kfree_skb(skb);
2318 return 1;
2321 /* if the arriving MPDU is in order and nothing else is stored,
2322 * release it immediately */
2323 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
2324 tid_agg_rx->stored_mpdu_num == 0) {
2325 tid_agg_rx->head_seq_num =
2326 seq_inc(tid_agg_rx->head_seq_num);
2327 return 0;
2330 /* put the frame in the reordering buffer */
2331 tid_agg_rx->reorder_buf[index] = skb;
2332 tid_agg_rx->reorder_time[index] = jiffies;
2333 memcpy(tid_agg_rx->reorder_buf[index]->cb, rxstatus,
2334 sizeof(*rxstatus));
2335 tid_agg_rx->stored_mpdu_num++;
2337 /* release buffered frames up to the next missing frame */
2337 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
2338 % tid_agg_rx->buf_size;
2339 if (!tid_agg_rx->reorder_buf[index] &&
2340 tid_agg_rx->stored_mpdu_num > 1) {
2341 /*
2342 * No buffers ready to be released, but check whether any
2343 * frames in the reorder buffer have timed out.
2344 */
2345 int j;
2346 int skipped = 1;
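/* skipped counts the empty reorder slots (starting with the one at
 * index) that head_seq_num has to jump over when a timed-out frame
 * further along in the buffer is released */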
2347 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
2348 j = (j + 1) % tid_agg_rx->buf_size) {
2349 if (tid_agg_rx->reorder_buf[j] == NULL) {
2350 skipped++;
2351 continue;
2353 if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
2354 HT_RX_REORDER_BUF_TIMEOUT))
2355 break;
2357 #ifdef CONFIG_MAC80211_HT_DEBUG
2358 if (net_ratelimit())
2359 printk(KERN_DEBUG "%s: release an RX reorder "
2360 "frame due to timeout on earlier "
2361 "frames\n",
2362 wiphy_name(hw->wiphy));
2363 #endif
2364 ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
2366 /*
2367 * Increment the head seq# also for the skipped slots.
2368 */
2369 tid_agg_rx->head_seq_num =
2370 (tid_agg_rx->head_seq_num + skipped) &
2371 SEQ_MASK;
2372 skipped = 0;
2374 } else while (tid_agg_rx->reorder_buf[index]) {
2375 ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
2376 index = seq_sub(tid_agg_rx->head_seq_num,
2377 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
2379 return 1;
2382 static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2383 struct sk_buff *skb,
2384 struct ieee80211_rx_status *status)
2386 struct ieee80211_hw *hw = &local->hw;
2387 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2388 struct sta_info *sta;
2389 struct tid_ampdu_rx *tid_agg_rx;
2390 u16 sc;
2391 u16 mpdu_seq_num;
2392 u8 ret = 0;
2393 int tid;
2395 sta = sta_info_get(local, hdr->addr2);
2396 if (!sta)
2397 return ret;
2399 /* filter the QoS data rx stream according to
2400 * STA/TID and check if this STA/TID is on aggregation */
2401 if (!ieee80211_is_data_qos(hdr->frame_control))
2402 goto end_reorder;
2404 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
2406 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
2407 goto end_reorder;
2409 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
2411 /* qos null data frames are excluded */
2412 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
2413 goto end_reorder;
2415 /* new un-ordered ampdu frame - process it */
2417 /* reset session timer */
2418 if (tid_agg_rx->timeout)
2419 mod_timer(&tid_agg_rx->session_timer,
2420 TU_TO_EXP_TIME(tid_agg_rx->timeout));
2422 /* if this mpdu is fragmented - terminate rx aggregation session */
2423 sc = le16_to_cpu(hdr->seq_ctrl);
2424 if (sc & IEEE80211_SCTL_FRAG) {
2425 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
2426 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
2427 ret = 1;
2428 goto end_reorder;
2431 /* run the MPDU through the reordering buffer according to its sequence number */
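/* the sequence control field carries a 4-bit fragment number in its low
 * bits, hence the shift by four below */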
2432 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
2433 ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, status,
2434 mpdu_seq_num, 0);
2435 end_reorder:
2436 return ret;
2439 /*
2440 * This is the receive path handler. It is called by a low-level driver
2441 * when an 802.11 MPDU is received from the hardware.
2442 */
2443 void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2444 struct ieee80211_rx_status *status)
2446 struct ieee80211_local *local = hw_to_local(hw);
2447 struct ieee80211_rate *rate = NULL;
2448 struct ieee80211_supported_band *sband;
2450 if (status->band < 0 ||
2451 status->band >= IEEE80211_NUM_BANDS) {
2452 WARN_ON(1);
2453 return;
2456 sband = local->hw.wiphy->bands[status->band];
2457 if (!sband) {
2458 WARN_ON(1);
2459 return;
2462 /*
2463 * If we're suspending, it is possible although not too likely
2464 * that we'd be receiving frames after having already partially
2465 * quiesced the stack. We can't process such frames then since
2466 * that might, for example, cause stations to be added or other
2467 * driver callbacks to be invoked.
2468 */
2469 if (unlikely(local->quiescing || local->suspended)) {
2470 kfree_skb(skb);
2471 return;
2474 if (status->flag & RX_FLAG_HT) {
2475 /* rate_idx is MCS index */
2476 if (WARN_ON(status->rate_idx < 0 ||
2477 status->rate_idx >= 76))
2478 return;
2479 /* HT rates are not in the table - use the highest legacy rate
2480 * for now since other parts of mac80211 may not yet be fully
2481 * MCS aware. */
2482 rate = &sband->bitrates[sband->n_bitrates - 1];
2483 } else {
2484 if (WARN_ON(status->rate_idx < 0 ||
2485 status->rate_idx >= sband->n_bitrates))
2486 return;
2487 rate = &sband->bitrates[status->rate_idx];
2490 /*
2491 * key references and virtual interfaces are protected using RCU
2492 * and this requires that we are in a read-side RCU section during
2493 * receive processing
2494 */
2495 rcu_read_lock();
2497 /*
2498 * Frames with a failed FCS/PLCP checksum are not returned; all other
2499 * frames are returned with the radiotap header removed if one was
2500 * previously present.
2501 * Also, frames shorter than 16 bytes are dropped.
2502 */
2503 skb = ieee80211_rx_monitor(local, skb, status, rate);
2504 if (!skb) {
2505 rcu_read_unlock();
2506 return;
2509 /*
2510 * In theory, the block ack reordering should happen after duplicate
2511 * removal (ieee80211_rx_h_check(), which is an RX handler). As such,
2512 * the call to ieee80211_rx_reorder_ampdu() should really be moved to
2513 * happen as a new RX handler between ieee80211_rx_h_check and
2514 * ieee80211_rx_h_decrypt. This cleanup may eventually happen, but for
2515 * the time being, the call can be here since RX reorder buf processing
2516 * will implicitly skip duplicates. We could, in theory at least,
2517 * process frames that ieee80211_rx_h_passive_scan would drop (e.g.,
2518 * frames from other than operational channel), but that should not
2519 * happen in normal networks.
2520 */
2521 if (!ieee80211_rx_reorder_ampdu(local, skb, status))
2522 __ieee80211_rx_handle_packet(hw, skb, status, rate);
2524 rcu_read_unlock();
2526 EXPORT_SYMBOL(__ieee80211_rx);
2528 /* This is a version of the rx handler that can be called from hard irq
2529 * context. Post the skb on the queue and schedule the tasklet */
2530 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb,
2531 struct ieee80211_rx_status *status)
2533 struct ieee80211_local *local = hw_to_local(hw);
2535 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
2537 skb->dev = local->mdev;
2538 /* copy status into skb->cb for use by tasklet */
2539 memcpy(skb->cb, status, sizeof(*status));
2540 skb->pkt_type = IEEE80211_RX_MSG;
2541 skb_queue_tail(&local->skb_queue, skb);
2542 tasklet_schedule(&local->tasklet);
2544 EXPORT_SYMBOL(ieee80211_rx_irqsafe);
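/*
 * Illustrative sketch (not part of mac80211 itself): a driver would
 * typically fill in a struct ieee80211_rx_status for each received MPDU
 * and hand the frame over from its interrupt handler, e.g.:
 *
 *	struct ieee80211_rx_status status;
 *
 *	memset(&status, 0, sizeof(status));
 *	status.band = IEEE80211_BAND_2GHZ;
 *	status.freq = 2412;		(channel 1, in MHz)
 *	status.signal = rssi;		(hypothetical value read from hw)
 *	status.rate_idx = rate;		(hypothetical index into the band's bitrates)
 *	ieee80211_rx_irqsafe(hw, skb, &status);
 *
 * ieee80211_rx_irqsafe() may be called from hard interrupt context;
 * __ieee80211_rx() above must not.
 */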