/* drivers/net/wireless/madwifi/net80211/ieee80211_output.c */
1 /*-
2 * Copyright (c) 2001 Atsushi Onoe
3 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
4 * All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * Alternatively, this software may be distributed under the terms of the
18 * GNU General Public License ("GPL") version 2 as published by the Free
19 * Software Foundation.
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * $Id: ieee80211_output.c 3783 2008-07-17 03:44:41Z proski $
34 #ifndef EXPORT_SYMTAB
35 #define EXPORT_SYMTAB
36 #endif
/*
 * IEEE 802.11 output handling.
 */
41 #ifndef AUTOCONF_INCLUDED
42 #include <linux/config.h>
43 #endif
44 #include <linux/version.h>
45 #include <linux/module.h>
46 #include <linux/skbuff.h>
47 #include <linux/netdevice.h>
48 #include <linux/if_vlan.h>
50 #include <linux/ip.h> /* XXX for TOS */
52 #include "if_llc.h"
53 #include "if_ethersubr.h"
54 #include "if_media.h"
56 #include <net80211/ieee80211_var.h>
57 #include <net80211/ieee80211_monitor.h>
58 #include <net80211/if_athproto.h>
60 #ifdef IEEE80211_DEBUG
/*
 * Decide if an outbound management frame should be
 * printed when debugging is enabled. This filters some
 * of the less interesting frames that come frequently
 * (e.g. beacons).
 */
67 static __inline int
68 doprint(struct ieee80211vap *vap, int subtype)
70 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
71 return (vap->iv_opmode == IEEE80211_M_IBSS);
72 return 1;
74 #endif
/*
 * Determine the priority based on VLAN and/or IP TOS. The priority is
 * written into the skb->priority field. On success, returns 0. Failure
 * due to a bad or mismatched VLAN tag is indicated by a non-zero return.
 */
82 static int
83 ieee80211_classify(struct ieee80211_node *ni, struct sk_buff *skb)
85 struct ieee80211vap *vap = ni->ni_vap;
86 struct ether_header *eh = (struct ether_header *)skb->data;
87 int v_wme_ac = 0, d_wme_ac = 0;
89 /* default priority */
90 skb->priority = WME_AC_BE;
92 if (!(ni->ni_flags & IEEE80211_NODE_QOS))
93 return 0;
/*
 * If the node has a vlan tag then all traffic
 * to it must have a matching vlan id.
 */
99 if (ni->ni_vlan != 0 && vlan_tx_tag_present(skb)) {
100 u_int32_t tag=0;
101 int v_pri;
103 if (vap->iv_vlgrp == NULL) {
104 IEEE80211_NODE_STAT(ni, tx_novlantag);
105 ni->ni_stats.ns_tx_novlantag++;
106 return 1;
108 if (((tag = vlan_tx_tag_get(skb)) & VLAN_VID_MASK) !=
109 (ni->ni_vlan & VLAN_VID_MASK)) {
110 IEEE80211_NODE_STAT(ni, tx_vlanmismatch);
111 ni->ni_stats.ns_tx_vlanmismatch++;
112 return 1;
114 if (ni->ni_flags & IEEE80211_NODE_QOS) {
115 v_pri = (tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
116 switch (v_pri) {
117 case 1:
118 case 2: /* Background (BK) */
119 v_wme_ac = WME_AC_BK;
120 break;
121 case 0:
122 case 3: /* Best Effort (BE) */
123 v_wme_ac = WME_AC_BE;
124 break;
125 case 4:
126 case 5: /* Video (VI) */
127 v_wme_ac = WME_AC_VI;
128 break;
129 case 6:
130 case 7: /* Voice (VO) */
131 v_wme_ac = WME_AC_VO;
132 break;
137 if (eh->ether_type == __constant_htons(ETHERTYPE_IP)) {
138 const struct iphdr *ip = (struct iphdr *)
139 (skb->data + sizeof (struct ether_header));
/*
 * IP frame, map the TOS field.
 *
 * XXX: fill out these mappings???
 */
145 switch (ip->tos) {
146 case 0x08: /* Background */
147 case 0x20:
148 d_wme_ac = WME_AC_BK;
149 break;
150 case 0x28: /* Video */
151 case 0xa0:
152 d_wme_ac = WME_AC_VI;
153 break;
154 case 0x30: /* Voice */
155 case 0xe0:
156 case 0x88: /* XXX UPSD */
157 case 0xb8:
158 d_wme_ac = WME_AC_VO;
159 break;
160 default: /* All others */
161 d_wme_ac = WME_AC_BE;
162 break;
164 } else {
165 d_wme_ac = WME_AC_BE;
167 skb->priority = d_wme_ac;
168 if (v_wme_ac > d_wme_ac)
169 skb->priority = v_wme_ac;
171 /* Applying ACM policy */
172 if (vap->iv_opmode == IEEE80211_M_STA) {
173 struct ieee80211com *ic = ni->ni_ic;
175 while (skb->priority != WME_AC_BK &&
176 ic->ic_wme.wme_wmeBssChanParams.cap_wmeParams[skb->priority].wmep_acm) {
177 switch (skb->priority) {
178 case WME_AC_BE:
179 skb->priority = WME_AC_BK;
180 break;
181 case WME_AC_VI:
182 skb->priority = WME_AC_BE;
183 break;
184 case WME_AC_VO:
185 skb->priority = WME_AC_VI;
186 break;
187 default:
188 skb->priority = WME_AC_BK;
189 break;
194 return 0;
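/*
 * A minimal sketch of how the classifier is used by a caller, assuming
 * an outbound skb and a node reference "ni" as obtained in
 * ieee80211_hardstart() below; on success skb->priority carries one of
 * WME_AC_BK/BE/VI/VO, which the driver uses to pick a transmit queue.
 */
#if 0
	if (ieee80211_classify(ni, skb) != 0) {
		/* bad or mismatched VLAN tag; drop the frame */
		ieee80211_dev_kfree_skb(&skb);
		return NETDEV_TX_OK;
	}
	/* hand the skb to the driver queue selected by skb->priority */
#endif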
/*
 * Transmit entry point for data frames handed down by the stack.
 * Context: process context (BHs disabled).
 * It must return either NETDEV_TX_OK or NETDEV_TX_BUSY.
 */
static int
ieee80211_hardstart(struct sk_buff *skb, struct net_device *dev)
{
204 struct ieee80211vap *vap = dev->priv;
205 struct ieee80211com *ic = vap->iv_ic;
206 struct net_device *parent = ic->ic_dev;
207 struct ieee80211_node *ni = NULL;
208 struct ether_header *eh;
210 /* Reset the SKB of new frames reaching this layer BEFORE
211 * we invoke ieee80211_skb_track. */
212 memset(SKB_CB(skb), 0, sizeof(struct ieee80211_cb));
214 /* If an SKB is passed in directly from the kernel,
215 * we take responsibility for the reference. */
216 ieee80211_skb_track(skb);
218 /* NB: parent must be up and running. */
219 if ((parent->flags & (IFF_RUNNING|IFF_UP)) != (IFF_RUNNING|IFF_UP))
220 goto bad;
222 /* No data frames go out unless we're running. */
223 if (vap->iv_state != IEEE80211_S_RUN) {
224 IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
225 "%s: ignore data packet, state %u\n",
226 __func__, vap->iv_state);
227 #if 0
228 vap->iv_stats.ist_tx_discard++;
229 #endif
230 goto bad;
233 if (vap->iv_opmode == IEEE80211_M_MONITOR) {
234 ieee80211_monitor_encap(vap, skb);
235 ieee80211_parent_queue_xmit(skb);
236 return NETDEV_TX_OK;
239 /* Cancel any running BG scan */
240 ieee80211_cancel_scan(vap);
242 /* Find the node for the destination so we can do
243 * things like power save. */
244 eh = (struct ether_header *)skb->data;
245 if (vap->iv_opmode == IEEE80211_M_WDS)
246 ni = ieee80211_find_txnode(vap, vap->wds_mac);
247 else
248 ni = ieee80211_find_txnode(vap, eh->ether_dhost);
249 if (ni == NULL) {
250 /* NB: ieee80211_find_txnode does stat+msg */
251 goto bad;
254 /* Calculate priority so drivers can find the TX queue. */
255 if (ieee80211_classify(ni, skb)) {
256 IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT, ni,
257 "%s: discard, classification failure", __func__);
258 goto bad;
261 SKB_NI(skb) = ieee80211_ref_node(ni);
263 /* Power-save checks. */
264 if (WME_UAPSD_AC_CAN_TRIGGER(skb->priority, ni)) {
265 /* U-APSD power save queue */
266 /* XXXAPSD: assuming triggerable means deliverable */
267 M_FLAG_SET(skb, M_UAPSD);
268 } else if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT)) {
269 /* Station in power save mode; stick the frame
270 * on the STA's power save queue and continue.
271 * We'll get the frame back when the time is right. */
272 ieee80211_unref_node(&ni);
273 return ieee80211_pwrsave(skb);
276 dev->trans_start = jiffies;
278 #ifdef ATH_SUPERG_XR
279 /* Broadcast/multicast packets need to be sent on XR vap in addition to
280 * normal vap. */
281 if (vap->iv_xrvap && (ni == vap->iv_bss) &&
282 vap->iv_xrvap->iv_sta_assoc) {
283 struct sk_buff *skb1 = skb_copy(skb, GFP_ATOMIC);
284 if (skb1) {
285 memset(SKB_CB(skb1), 0, sizeof(struct ieee80211_cb));
286 #ifdef IEEE80211_DEBUG_REFCNT
287 M_FLAG_SET(skb1, M_SKB_TRACKED);
288 #endif /* #ifdef IEEE80211_DEBUG_REFCNT */
289 SKB_NI(skb1) = ieee80211_find_txnode(vap->iv_xrvap,
290 eh->ether_dhost);
291 /* Ignore this return code. */
292 ieee80211_parent_queue_xmit(skb1);
295 #endif
296 ieee80211_unref_node(&ni);
297 ieee80211_parent_queue_xmit(skb);
298 return NETDEV_TX_OK;
300 bad:
301 if (skb != NULL)
302 ieee80211_dev_kfree_skb(&skb);
303 if (ni != NULL)
304 ieee80211_unref_node(&ni);
305 return NETDEV_TX_OK;
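/*
 * A minimal sketch of how this handler is attached, assuming the
 * pre-2.6.29 net_device layout this driver targets (the assignment is
 * made where the VAP's net_device is set up, not in this file):
 */
#if 0
	dev->hard_start_xmit = ieee80211_hardstart;
#endif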
/*
 * The SKB is consumed in all cases.
 */
311 void ieee80211_parent_queue_xmit(struct sk_buff *skb) {
312 struct ieee80211vap *vap = skb->dev->priv;
314 vap->iv_devstats.tx_packets++;
315 vap->iv_devstats.tx_bytes += skb->len;
316 vap->iv_ic->ic_lastdata = jiffies;
318 /* Dispatch the packet to the parent device */
319 skb->dev = vap->iv_ic->ic_dev;
321 if (dev_queue_xmit(skb) == NET_XMIT_DROP)
322 vap->iv_devstats.tx_dropped++;
/*
 * Set the direction field and address fields of an outgoing
 * non-QoS frame. Note this should be called early on in
 * constructing a frame as it sets i_fc[1]; other bits can
 * then be or'd in.
 */
331 static void
332 ieee80211_send_setup(struct ieee80211vap *vap,
333 struct ieee80211_node *ni,
334 struct ieee80211_frame *wh,
335 int type,
336 const u_int8_t sa[IEEE80211_ADDR_LEN],
337 const u_int8_t da[IEEE80211_ADDR_LEN],
338 const u_int8_t bssid[IEEE80211_ADDR_LEN])
340 #define WH4(wh) ((struct ieee80211_frame_addr4 *)wh)
342 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | type;
343 if ((type & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_DATA) {
344 switch (vap->iv_opmode) {
345 case IEEE80211_M_STA:
346 wh->i_fc[1] = IEEE80211_FC1_DIR_TODS;
347 IEEE80211_ADDR_COPY(wh->i_addr1, bssid);
348 IEEE80211_ADDR_COPY(wh->i_addr2, sa);
349 IEEE80211_ADDR_COPY(wh->i_addr3, da);
350 break;
351 case IEEE80211_M_IBSS:
352 case IEEE80211_M_AHDEMO:
353 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
354 IEEE80211_ADDR_COPY(wh->i_addr1, da);
355 IEEE80211_ADDR_COPY(wh->i_addr2, sa);
356 IEEE80211_ADDR_COPY(wh->i_addr3, bssid);
357 break;
358 case IEEE80211_M_HOSTAP:
359 wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
360 IEEE80211_ADDR_COPY(wh->i_addr1, da);
361 IEEE80211_ADDR_COPY(wh->i_addr2, bssid);
362 IEEE80211_ADDR_COPY(wh->i_addr3, sa);
363 break;
364 case IEEE80211_M_WDS:
365 wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS;
366 /* XXX cheat, bssid holds RA */
367 IEEE80211_ADDR_COPY(wh->i_addr1, bssid);
368 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
369 IEEE80211_ADDR_COPY(wh->i_addr3, da);
370 IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, sa);
371 break;
372 case IEEE80211_M_MONITOR: /* NB: to quiet compiler */
373 break;
375 } else {
376 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
377 IEEE80211_ADDR_COPY(wh->i_addr1, da);
378 IEEE80211_ADDR_COPY(wh->i_addr2, sa);
379 IEEE80211_ADDR_COPY(wh->i_addr3, bssid);
381 wh->i_dur = 0;
382 /* NB: use non-QoS tid */
383 *(__le16 *)&wh->i_seq[0] =
384 htole16(ni->ni_txseqs[0] << IEEE80211_SEQ_SEQ_SHIFT);
385 ni->ni_txseqs[0]++;
386 #undef WH4
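/*
 * Summary of the address assignments made above for non-QoS data
 * frames (management and other non-data frames always use the NODS
 * form):
 *
 *	mode	i_fc[1]		addr1	addr2	addr3	addr4
 *	STA	TODS		BSSID	SA	DA	-
 *	IBSS	NODS		DA	SA	BSSID	-
 *	HOSTAP	FROMDS		DA	BSSID	SA	-
 *	WDS	DSTODS		RA	TA	DA	SA
 */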
/*
 * Send a management frame to the specified node. The node pointer
 * must have a reference as the pointer will be passed to the driver
 * and potentially held for a long time. If the frame is successfully
 * dispatched to the driver, then it is responsible for freeing the
 * reference (and potentially freeing up any associated storage).
 */
396 static void
397 ieee80211_mgmt_output(struct ieee80211_node *ni, struct sk_buff *skb, int type)
399 struct ieee80211vap *vap = ni->ni_vap;
400 struct ieee80211com *ic = ni->ni_ic;
401 struct ieee80211_frame *wh;
403 KASSERT(ni != NULL, ("null node"));
405 SKB_NI(skb) = ni;
407 wh = (struct ieee80211_frame *)
408 skb_push(skb, sizeof(struct ieee80211_frame));
409 ieee80211_send_setup(vap, ni, wh,
410 IEEE80211_FC0_TYPE_MGT | type,
411 vap->iv_myaddr, ni->ni_macaddr, vap->iv_bssid);
412 /* XXX power management */
414 if ((SKB_CB(skb)->flags & M_LINK0) != 0 && ni->ni_challenge != NULL) {
415 SKB_CB(skb)->flags &= ~M_LINK0;
416 IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH, wh->i_addr1,
417 "encrypting frame (%s)", __func__);
418 wh->i_fc[1] |= IEEE80211_FC1_PROT;
421 if (IEEE80211_VAP_IS_SLEEPING(ni->ni_vap))
422 wh->i_fc[1] |= IEEE80211_FC1_PWR_MGT;
424 #ifdef IEEE80211_DEBUG
425 /* avoid printing too many frames */
426 if ((ieee80211_msg_debug(vap) && doprint(vap, type)) ||
427 ieee80211_msg_dumppkts(vap)) {
428 printk(KERN_DEBUG "[" MAC_FMT "] send %s on channel %u\n",
429 MAC_ADDR(wh->i_addr1),
430 ieee80211_mgt_subtype_name[
431 (type & IEEE80211_FC0_SUBTYPE_MASK) >>
432 IEEE80211_FC0_SUBTYPE_SHIFT],
433 ieee80211_chan2ieee(ic, ic->ic_curchan));
435 #endif
436 IEEE80211_NODE_STAT(ni, tx_mgmt);
438 (void) ic->ic_mgtstart(ic, skb);
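/*
 * A minimal sketch of the calling convention, assuming the caller holds
 * a node reference it is willing to hand over along with the skb
 * ("payload_len" is a placeholder for the space needed by the IEs):
 */
#if 0
	skb = ieee80211_getmgtframe(&frm, payload_len);
	if (skb == NULL)
		return -ENOMEM;
	/* ... append fixed fields and information elements at frm ... */
	skb_trim(skb, frm - skb->data);
	ieee80211_mgmt_output(ieee80211_ref_node(ni), skb,
		IEEE80211_FC0_SUBTYPE_AUTH);
#endif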
441 /* Send a null data frame to the specified node.
443 * NB: the caller provides us with our own node reference this must not be
444 * leaked; this is necessary to deal with a race condition when
445 * probing for inactive stations. */
int
ieee80211_send_nulldata(struct ieee80211_node *ni)
{
449 struct ieee80211vap *vap = ni->ni_vap;
450 struct ieee80211com *ic = ni->ni_ic;
451 struct sk_buff *skb;
452 struct ieee80211_frame *wh;
453 u_int8_t *frm;
455 skb = ieee80211_getmgtframe(&frm, 0);
456 if (skb == NULL) {
457 /* XXX debug msg */
458 vap->iv_stats.is_tx_nobuf++;
459 ieee80211_unref_node(&ni);
460 return -ENOMEM;
463 wh = (struct ieee80211_frame *)
464 skb_push(skb, sizeof(struct ieee80211_frame));
465 ieee80211_send_setup(vap, ni, wh,
466 IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_NULL,
467 vap->iv_myaddr, ni->ni_macaddr, vap->iv_bssid);
468 /* NB: power management bit is never sent by an AP */
469 if ((IEEE80211_VAP_IS_SLEEPING(ni->ni_vap)) &&
470 vap->iv_opmode != IEEE80211_M_HOSTAP &&
471 vap->iv_opmode != IEEE80211_M_WDS)
472 wh->i_fc[1] |= IEEE80211_FC1_PWR_MGT;
474 IEEE80211_NODE_STAT(ni, tx_data);
476 IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS,
477 "[" MAC_FMT "] send null data frame on channel %u, pwr mgt %s\n",
478 MAC_ADDR(ni->ni_macaddr),
479 ieee80211_chan2ieee(ic, ic->ic_curchan),
480 wh->i_fc[1] & IEEE80211_FC1_PWR_MGT ? "ena" : "dis");
482 /* XXX assign some priority; this probably is wrong */
483 skb->priority = WME_AC_BE;
484 SKB_NI(skb) = PASS_NODE(ni);
486 (void) ic->ic_mgtstart(ic, skb); /* cheat */
488 return 0;
/*
 * Send a QoS null data frame to the specified node for the given
 * access category.
 *
 * NB: unlike ieee80211_send_nulldata(), the node refcnt is
 * bumped within this function.
 */
int
ieee80211_send_qosnulldata(struct ieee80211_node *ni, int ac)
{
498 struct ieee80211vap *vap = ni->ni_vap;
499 struct ieee80211com *ic = ni->ni_ic;
500 struct sk_buff *skb;
501 struct ieee80211_qosframe *qwh;
502 u_int8_t *frm;
503 int tid;
505 skb = ieee80211_getmgtframe(&frm, 2);
506 if (skb == NULL) {
507 /* XXX debug msg */
508 vap->iv_stats.is_tx_nobuf++;
509 return -ENOMEM;
511 SKB_NI(skb) = ieee80211_ref_node(ni);
513 skb->priority = ac;
514 qwh = (struct ieee80211_qosframe *)skb_push(skb, sizeof(struct ieee80211_qosframe));
516 qwh = (struct ieee80211_qosframe *)skb->data;
518 ieee80211_send_setup(vap, ni, (struct ieee80211_frame *)qwh,
519 IEEE80211_FC0_TYPE_DATA,
520 vap->iv_myaddr, /* SA */
521 ni->ni_macaddr, /* DA */
522 vap->iv_bssid);
524 qwh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA |
525 IEEE80211_FC0_SUBTYPE_QOS_NULL;
527 if (IEEE80211_VAP_IS_SLEEPING(ni->ni_vap))
528 qwh->i_fc[1] |= IEEE80211_FC1_PWR_MGT;
530 /* map from access class/queue to 11e header priority value */
531 tid = WME_AC_TO_TID(ac);
532 qwh->i_qos[0] = tid & IEEE80211_QOS_TID;
533 if (ic->ic_wme.wme_wmeChanParams.cap_wmeParams[ac].wmep_noackPolicy)
534 qwh->i_qos[0] |= (1 << IEEE80211_QOS_ACKPOLICY_S) & IEEE80211_QOS_ACKPOLICY;
535 qwh->i_qos[1] = 0;
537 IEEE80211_NODE_STAT(ni, tx_data);
539 if (WME_UAPSD_AC_CAN_TRIGGER(skb->priority, ni)) {
540 /* U-APSD power save queue */
541 /* XXXAPSD: assuming triggerable means deliverable */
542 M_FLAG_SET(skb, M_UAPSD);
545 (void) ic->ic_mgtstart(ic, skb); /* cheat */
547 return 0;
549 EXPORT_SYMBOL(ieee80211_send_qosnulldata);
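/*
 * A minimal sketch contrasting the two null-data helpers, assuming a
 * node pointer "ni" the caller continues to use afterwards: the plain
 * variant consumes the reference passed in, the QoS variant takes its
 * own reference internally.
 */
#if 0
	ieee80211_send_nulldata(ieee80211_ref_node(ni));	/* ref handed over */
	ieee80211_send_qosnulldata(ni, WME_AC_VO);		/* ref taken inside */
#endif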
/*
 * Ensure there is sufficient headroom and tailroom to
 * encapsulate the 802.11 data frame. If room isn't
 * already there, reallocate so there is enough space.
 * Drivers and cipher modules assume we have done the
 * necessary work and fail rudely if they don't find
 * the space they need.
 */
559 static struct sk_buff *
560 ieee80211_skbhdr_adjust(struct ieee80211vap *vap, int hdrsize,
561 struct ieee80211_key *key, struct sk_buff *skb, int ismulticast)
563 /* XXX pre-calculate per node? */
564 int need_headroom = LLC_SNAPFRAMELEN + hdrsize + IEEE80211_ADDR_LEN;
565 int need_tailroom = 0;
566 #ifdef ATH_SUPERG_FF
567 int isff = ATH_FF_MAGIC_PRESENT(skb);
568 int inter_headroom = sizeof(struct ether_header) + LLC_SNAPFRAMELEN + ATH_FF_MAX_HDR_PAD;
569 struct sk_buff *skb2 = NULL;
571 if (isff) {
572 need_headroom += sizeof(struct athl2p_tunnel_hdr) + ATH_FF_MAX_HDR_PAD +
573 inter_headroom;
574 skb2 = skb->next;
576 #endif
578 if (key != NULL) {
579 const struct ieee80211_cipher *cip = key->wk_cipher;
/*
 * Adjust for crypto needs. When hardware crypto is
 * being used we assume the hardware/driver will deal
 * with any padding (on the fly, without needing to
 * expand the frame contents). When software crypto
 * is used we need to ensure room is available at the
 * front and back and also for any per-MSDU additions.
 */
588 /* XXX belongs in crypto code? */
589 need_headroom += cip->ic_header;
590 /* XXX pre-calculate per key */
591 if (key->wk_flags & IEEE80211_KEY_SWCRYPT)
592 need_tailroom += cip->ic_trailer;
/*
 * If TX fragmentation is needed and the cipher is TKIP,
 * then allocate the additional tailroom for SW MIC computation.
 */
597 if (skb->len > vap->iv_fragthreshold &&
598 !ismulticast &&
599 cip->ic_cipher == IEEE80211_CIPHER_TKIP)
600 need_tailroom += cip->ic_miclen;
601 else
602 if (key->wk_flags & IEEE80211_KEY_SWMIC)
603 need_tailroom += cip->ic_miclen;
606 if (skb_shared(skb)) {
607 /* Take our own reference to the node in the clone */
608 ieee80211_ref_node(SKB_NI(skb));
609 /* Unshare the node, decrementing users in the old skb */
610 skb = skb_unshare(skb, GFP_ATOMIC);
613 #ifdef ATH_SUPERG_FF
614 if (isff) {
615 if (skb == NULL) {
616 IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
617 "%s: cannot unshare for encapsulation\n",
618 __func__);
619 vap->iv_stats.is_tx_nobuf++;
620 ieee80211_dev_kfree_skb(&skb2);
622 return NULL;
625 /* first skb header */
626 if (skb_headroom(skb) < need_headroom) {
627 struct sk_buff *tmp = skb;
628 skb = skb_realloc_headroom(skb, need_headroom);
629 if (skb == NULL) {
630 IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
631 "%s: cannot expand storage (head1)\n",
632 __func__);
633 vap->iv_stats.is_tx_nobuf++;
634 ieee80211_dev_kfree_skb(&skb2);
635 return NULL;
636 } else
637 ieee80211_skb_copy_noderef(tmp, skb);
638 ieee80211_dev_kfree_skb(&tmp);
639 /* NB: cb[] area was copied, but not next ptr. must do that
640 * prior to return on success. */
643 /* second skb with header and tail adjustments possible */
644 if (skb_tailroom(skb2) < need_tailroom) {
645 int n = 0;
646 if (inter_headroom > skb_headroom(skb2))
647 n = inter_headroom - skb_headroom(skb2);
648 if (pskb_expand_head(skb2, n,
649 need_tailroom - skb_tailroom(skb2), GFP_ATOMIC)) {
650 ieee80211_dev_kfree_skb(&skb2);
651 IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
652 "%s: cannot expand storage (tail2)\n",
653 __func__);
654 vap->iv_stats.is_tx_nobuf++;
655 /* this shouldn't happen, but don't send first ff either */
656 ieee80211_dev_kfree_skb(&skb);
658 } else if (skb_headroom(skb2) < inter_headroom) {
659 struct sk_buff *tmp = skb2;
661 skb2 = skb_realloc_headroom(skb2, inter_headroom);
662 if (skb2 == NULL) {
663 IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
664 "%s: cannot expand storage (head2)\n",
665 __func__);
666 vap->iv_stats.is_tx_nobuf++;
667 /* this shouldn't happen, but don't send first ff either */
668 ieee80211_dev_kfree_skb(&skb);
669 skb = NULL;
670 } else
671 ieee80211_skb_copy_noderef(tmp, skb);
672 ieee80211_dev_kfree_skb(&tmp);
674 if (skb) {
675 skb->next = skb2;
677 return skb;
679 #endif /* ATH_SUPERG_FF */
680 if (skb == NULL) {
681 IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
682 "%s: cannot unshare for encapsulation\n", __func__);
683 vap->iv_stats.is_tx_nobuf++;
684 } else if (skb_tailroom(skb) < need_tailroom) {
685 int n = 0;
686 if (need_headroom > skb_headroom(skb))
687 n = need_headroom - skb_headroom(skb);
688 if (pskb_expand_head(skb, n, need_tailroom -
689 skb_tailroom(skb), GFP_ATOMIC)) {
690 IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
691 "%s: cannot expand storage (tail)\n", __func__);
692 vap->iv_stats.is_tx_nobuf++;
693 ieee80211_dev_kfree_skb(&skb);
695 } else if (skb_headroom(skb) < need_headroom) {
696 struct sk_buff *tmp = skb;
697 skb = skb_realloc_headroom(skb, need_headroom);
698 /* Increment reference count after copy */
699 if (skb == NULL) {
700 IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
701 "%s: cannot expand storage (head)\n", __func__);
702 vap->iv_stats.is_tx_nobuf++;
703 } else
704 ieee80211_skb_copy_noderef(tmp, skb);
705 ieee80211_dev_kfree_skb(&tmp);
708 return skb;
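/*
 * Worked example of the adjustment above, assuming software CCMP on a
 * non-fast-frame build: need_headroom = LLC_SNAPFRAMELEN (8) + hdrsize
 * (26 for a QoS frame before any driver padding) + IEEE80211_ADDR_LEN
 * (6) + cip->ic_header (8) = 48 bytes, and need_tailroom =
 * cip->ic_trailer (8) when IEEE80211_KEY_SWCRYPT is set.
 */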
711 #define KEY_UNDEFINED(k) ((k).wk_cipher == &ieee80211_cipher_none)
/*
 * Return the transmit key to use in sending a unicast frame.
 * If a unicast key is set we use that. When no unicast key is set
 * we fall back to the default transmit key.
 */
717 static __inline struct ieee80211_key *
718 ieee80211_crypto_getucastkey(struct ieee80211vap *vap, struct ieee80211_node *ni)
720 if (KEY_UNDEFINED(ni->ni_ucastkey)) {
721 if (vap->iv_def_txkey == IEEE80211_KEYIX_NONE ||
722 KEY_UNDEFINED(vap->iv_nw_keys[vap->iv_def_txkey]))
723 return NULL;
724 return &vap->iv_nw_keys[vap->iv_def_txkey];
725 } else
726 return &ni->ni_ucastkey;
/*
 * Return the transmit key to use in sending a multicast frame.
 * Multicast traffic always uses the group key, which is installed as
 * the default tx key.
 */
734 static __inline struct ieee80211_key *
735 ieee80211_crypto_getmcastkey(struct ieee80211vap *vap, struct ieee80211_node *ni)
737 if (vap->iv_def_txkey == IEEE80211_KEYIX_NONE ||
738 KEY_UNDEFINED(vap->iv_nw_keys[vap->iv_def_txkey]))
739 return NULL;
740 return &vap->iv_nw_keys[vap->iv_def_txkey];
/*
 * Encapsulate an outbound data frame. The sk_buff chain is updated and
 * a reference to the destination node is returned. If an error is
 * encountered NULL is returned and the node reference will also be NULL.
 *
 * NB: The caller is responsible for freeing a returned node reference.
 * The convention is ic_bss is not reference counted; the caller must
 * maintain that.
 */
752 struct sk_buff *
753 ieee80211_encap(struct ieee80211_node *ni, struct sk_buff *skb, int *framecnt)
755 #define WH4(wh) ((struct ieee80211_frame_addr4 *)wh)
756 struct ieee80211vap *vap = ni->ni_vap;
757 struct ieee80211com *ic = ni->ni_ic;
758 struct ether_header eh;
759 struct ieee80211_frame *wh, *twh;
760 struct ieee80211_key *key;
761 struct llc *llc;
762 int hdrsize, datalen, addqos;
763 int hdrsize_nopad;
764 struct sk_buff *framelist = NULL;
765 struct sk_buff *tskb;
766 int fragcnt = 1;
767 int pdusize = 0;
768 int ismulticast = 0;
769 int use4addr = 0;
770 #ifdef ATH_SUPERG_FF
771 struct sk_buff *skb2 = NULL;
772 struct ether_header eh2;
773 int isff = ATH_FF_MAGIC_PRESENT(skb);
775 if (isff) {
776 #if 0
777 IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPG,
778 "%s: handling fast-frame skb (%p)\n", __func__, skb);
779 #endif
780 skb2 = skb->next;
781 if (skb2 == NULL) {
782 IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPG,
783 "%s: fast-frame error, only 1 skb\n", __func__);
784 goto bad;
786 memcpy(&eh2, skb2->data, sizeof(struct ether_header));
787 skb_pull(skb2, sizeof(struct ether_header));
789 #endif
790 memcpy(&eh, skb->data, sizeof(struct ether_header));
791 skb_pull(skb, sizeof(struct ether_header));
/*
 * Ensure space for additional headers. First identify
 * transmit key to use in calculating any buffer adjustments
 * required. This is also used below to do privacy
 * encapsulation work. Then calculate the 802.11 header
 * size and any padding required by the driver.
 *
 * Note key may be NULL if we fall back to the default
 * transmit key and that is not set. In that case the
 * buffer may not be expanded as needed by the cipher
 * routines, but they will/should discard it.
 */
805 if (vap->iv_flags & IEEE80211_F_PRIVACY) {
806 if (vap->iv_opmode == IEEE80211_M_STA ||
807 !IEEE80211_IS_MULTICAST(eh.ether_dhost))
808 key = ieee80211_crypto_getucastkey(vap, ni);
809 else
810 key = ieee80211_crypto_getmcastkey(vap, ni);
811 if ((key == NULL) && (eh.ether_type != htons(ETHERTYPE_PAE))) {
812 IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO,
813 eh.ether_dhost,
814 "no default transmit key (%s) deftxkey %u",
815 __func__, vap->iv_def_txkey);
816 vap->iv_stats.is_tx_nodefkey++;
817 goto bad;
819 } else
820 key = NULL;
822 addqos = (ni->ni_flags & IEEE80211_NODE_QOS) &&
823 (eh.ether_type != htons(ETHERTYPE_PAE));
824 if (addqos)
825 hdrsize = sizeof(struct ieee80211_qosframe);
826 else
827 hdrsize = sizeof(struct ieee80211_frame);
829 switch (vap->iv_opmode) {
830 case IEEE80211_M_IBSS:
831 case IEEE80211_M_AHDEMO:
832 ismulticast = IEEE80211_IS_MULTICAST(eh.ether_dhost);
833 break;
834 case IEEE80211_M_WDS:
835 use4addr = 1;
836 ismulticast = IEEE80211_IS_MULTICAST(ni->ni_macaddr);
837 break;
838 case IEEE80211_M_HOSTAP:
839 if (!IEEE80211_IS_MULTICAST(eh.ether_dhost) &&
840 !IEEE80211_ADDR_EQ(eh.ether_dhost, ni->ni_macaddr)) {
841 use4addr = 1;
842 ismulticast = IEEE80211_IS_MULTICAST(ni->ni_macaddr);
843 } else
844 ismulticast = IEEE80211_IS_MULTICAST(eh.ether_dhost);
845 break;
846 case IEEE80211_M_STA:
847 if ((vap->iv_flags_ext & IEEE80211_FEXT_WDS) &&
848 !IEEE80211_ADDR_EQ(eh.ether_shost, vap->iv_myaddr)) {
849 use4addr = 1;
850 ismulticast = IEEE80211_IS_MULTICAST(ni->ni_macaddr);
851 /* Add a WDS entry to the station VAP */
852 if (IEEE80211_IS_MULTICAST(eh.ether_dhost)) {
853 struct ieee80211_node_table *nt = &ic->ic_sta;
854 struct ieee80211_node *ni_wds
855 = ieee80211_find_wds_node(nt, eh.ether_shost);
856 if (ni_wds)
857 ieee80211_unref_node(&ni_wds);
858 else
859 ieee80211_add_wds_addr(nt, ni, eh.ether_shost, 0);
861 } else
862 ismulticast = IEEE80211_IS_MULTICAST(vap->iv_bssid);
863 break;
864 default:
865 break;
868 if (use4addr)
869 hdrsize += IEEE80211_ADDR_LEN;
871 hdrsize_nopad = hdrsize;
872 if (ic->ic_flags & IEEE80211_F_DATAPAD)
873 hdrsize = roundup(hdrsize, sizeof(u_int32_t));
875 skb = ieee80211_skbhdr_adjust(vap, hdrsize, key, skb, ismulticast);
876 if (skb == NULL) {
877 /* NB: ieee80211_skbhdr_adjust handles msgs+statistics */
878 goto bad;
881 #ifdef ATH_SUPERG_FF
882 if (isff) {
883 struct ether_header *eh_inter;
884 struct athl2p_tunnel_hdr *ffhdr;
885 u_int16_t payload = skb->len + LLC_SNAPFRAMELEN;
886 int padded_len = payload + LLC_SNAPFRAMELEN + sizeof(struct ether_header);
888 /* in case header adjustments altered skb2 */
889 skb2 = skb->next;
890 if (skb2 == NULL) {
891 IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPG,
892 "%s: skb (%p) hdr adjust dropped 2nd skb\n",
893 __func__, skb);
894 goto bad;
/*
 * Add the first skb's tunnel headers.
 */
901 llc = (struct llc *)skb_push(skb, LLC_SNAPFRAMELEN);
902 llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
903 llc->llc_control = LLC_UI;
904 llc->llc_snap.org_code[0] = 0;
905 llc->llc_snap.org_code[1] = 0;
906 llc->llc_snap.org_code[2] = 0;
907 llc->llc_snap.ether_type = eh.ether_type;
909 eh_inter = (struct ether_header *)skb_push(skb, sizeof(struct ether_header));
910 memcpy(eh_inter, &eh, sizeof(struct ether_header) - sizeof eh.ether_type);
911 eh_inter->ether_type = htons(payload);
913 /* overall ff encap header */
914 /* XXX: the offset of 2, below, should be computed. but... it will not
915 * practically ever change.
917 ffhdr = (struct athl2p_tunnel_hdr *)skb_push(skb, sizeof(struct athl2p_tunnel_hdr) + 2);
918 memset(ffhdr, 0, sizeof(struct athl2p_tunnel_hdr) + 2);
/*
 * Add the second skb's tunnel headers.
 */
924 payload = skb2->len + LLC_SNAPFRAMELEN;
926 llc = (struct llc *)skb_push(skb2, LLC_SNAPFRAMELEN);
927 if (llc == NULL) {
928 IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPG,
929 "%s: failed to push llc for 2nd skb (%p)\n",
930 __func__, skb);
931 return NULL;
933 llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
934 llc->llc_control = LLC_UI;
935 llc->llc_snap.org_code[0] = 0;
936 llc->llc_snap.org_code[1] = 0;
937 llc->llc_snap.org_code[2] = 0;
938 llc->llc_snap.ether_type = eh2.ether_type;
940 eh_inter = (struct ether_header *)skb_push(skb2, sizeof(struct ether_header));
941 if (eh_inter == NULL) {
942 IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPG,
943 "%s: failed to push eth hdr for 2nd skb (%p)\n",
944 __func__, skb);
945 return NULL;
948 memcpy(eh_inter, &eh2, sizeof(struct ether_header) - sizeof eh2.ether_type);
949 eh_inter->ether_type = htons(payload);
951 /* variable length pad */
952 skb_push(skb2, roundup(padded_len, 4) - padded_len);
954 #endif
956 llc = (struct llc *)skb_push(skb, LLC_SNAPFRAMELEN);
957 llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
958 llc->llc_control = LLC_UI;
959 #ifndef ATH_SUPERG_FF
960 llc->llc_snap.org_code[0] = 0;
961 llc->llc_snap.org_code[1] = 0;
962 llc->llc_snap.org_code[2] = 0;
963 llc->llc_snap.ether_type = eh.ether_type;
964 #else /* ATH_SUPERG_FF */
965 if (isff) {
966 llc->llc_snap.org_code[0] = ATH_SNAP_ORGCODE_0;
967 llc->llc_snap.org_code[1] = ATH_SNAP_ORGCODE_1;
968 llc->llc_snap.org_code[2] = ATH_SNAP_ORGCODE_2;
969 llc->llc_snap.ether_type = htons(ATH_ETH_TYPE);
970 } else {
971 llc->llc_snap.org_code[0] = 0;
972 llc->llc_snap.org_code[1] = 0;
973 llc->llc_snap.org_code[2] = 0;
974 llc->llc_snap.ether_type = eh.ether_type;
976 #endif /* ATH_SUPERG_FF */
977 datalen = skb->len; /* NB: w/o 802.11 header */
979 wh = (struct ieee80211_frame *)skb_push(skb, hdrsize);
980 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA;
981 wh->i_dur = 0;
982 if (use4addr) {
983 wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS;
984 IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_macaddr);
985 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
986 IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_dhost);
987 IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, eh.ether_shost);
988 } else {
989 switch (vap->iv_opmode) {
990 case IEEE80211_M_IBSS:
991 case IEEE80211_M_AHDEMO:
992 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
993 IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost);
994 IEEE80211_ADDR_COPY(wh->i_addr2, eh.ether_shost);
/*
 * NB: always use the bssid from iv_bssid as the
 * neighbor's may be stale after an ibss merge
 */
999 IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_bssid);
1000 break;
1001 case IEEE80211_M_STA:
1002 wh->i_fc[1] = IEEE80211_FC1_DIR_TODS;
1003 IEEE80211_ADDR_COPY(wh->i_addr1, vap->iv_bssid);
1004 IEEE80211_ADDR_COPY(wh->i_addr2, eh.ether_shost);
1005 IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_dhost);
1006 break;
1007 case IEEE80211_M_HOSTAP:
1008 wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
1009 IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost);
1010 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_bssid);
1011 IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_shost);
1012 if (M_PWR_SAV_GET(skb)) {
1013 if (IEEE80211_NODE_SAVEQ_QLEN(ni)) {
1014 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
1015 M_PWR_SAV_CLR(skb);
1018 break;
1019 case IEEE80211_M_WDS:
1020 wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS;
1021 IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_macaddr);
1022 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
1023 IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_dhost);
1024 IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, eh.ether_shost);
1025 break;
1026 case IEEE80211_M_MONITOR:
1027 goto bad;
1030 if (IEEE80211_VAP_IS_SLEEPING(vap))
1031 wh->i_fc[1] |= IEEE80211_FC1_PWR_MGT;
1032 if (addqos) {
1033 struct ieee80211_qosframe *qwh =
1034 (struct ieee80211_qosframe *)wh;
1035 u_int8_t *qos;
1036 int tid;
1038 qos = &qwh->i_qos[0];
1039 if (use4addr)
1040 qos += IEEE80211_ADDR_LEN;
1041 /* map from access class/queue to 11e header priority value */
1042 tid = WME_AC_TO_TID(skb->priority);
1043 qos[0] = tid & IEEE80211_QOS_TID;
1044 if (ic->ic_wme.wme_wmeChanParams.cap_wmeParams[skb->priority].wmep_noackPolicy)
1045 qos[0] |= (1 << IEEE80211_QOS_ACKPOLICY_S) & IEEE80211_QOS_ACKPOLICY;
1046 qos[1] = 0;
1047 qwh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_QOS;
1049 *(__le16 *)&wh->i_seq[0] =
1050 htole16(ni->ni_txseqs[tid] << IEEE80211_SEQ_SEQ_SHIFT);
1051 ni->ni_txseqs[tid]++;
1052 } else {
1053 *(__le16 *)wh->i_seq =
1054 htole16(ni->ni_txseqs[0] << IEEE80211_SEQ_SEQ_SHIFT);
1055 ni->ni_txseqs[0]++;
1058 /* Is transmit fragmentation needed? */
1059 if (skb->len > vap->iv_fragthreshold &&
1060 !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1061 int pktlen, skbcnt, tailsize, ciphdrsize;
1062 struct ieee80211_cipher *cip;
1064 pktlen = skb->len;
1065 ciphdrsize = 0;
1066 tailsize = IEEE80211_CRC_LEN;
1068 if (key != NULL) {
1069 cip = (struct ieee80211_cipher *)key->wk_cipher;
1070 ciphdrsize = cip->ic_header;
1071 tailsize += (cip->ic_trailer + cip->ic_miclen);
1073 /* Add the 8 bytes MIC length. */
1074 if (cip->ic_cipher == IEEE80211_CIPHER_TKIP)
1075 pktlen += IEEE80211_WEP_MICLEN;
1078 pdusize = vap->iv_fragthreshold - (hdrsize_nopad + ciphdrsize);
1079 fragcnt = *framecnt =
1080 ((pktlen - hdrsize_nopad) / pdusize) +
1081 (((pktlen - hdrsize_nopad) % pdusize == 0) ? 0 : 1);
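/*
 * Worked example, assuming iv_fragthreshold = 512, no cipher header
 * (ciphdrsize = 0) and hdrsize_nopad = 24: pdusize = 512 - 24 = 488,
 * so a 1548-byte skb (24-byte header + 1524 bytes of payload) yields
 * 1524 / 488 = 3 full MPDUs plus a 60-byte remainder, i.e. fragcnt = 4.
 */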
/*
 * Allocate an sk_buff for each subsequent fragment; the first
 * fragment reuses the input skb.
 */
1087 for (skbcnt = 1; skbcnt < fragcnt; skbcnt++) {
1088 tskb = ieee80211_dev_alloc_skb(hdrsize + ciphdrsize + pdusize + tailsize);
1089 if (tskb == NULL)
1090 break;
1092 tskb->next = framelist;
1093 framelist = tskb;
1096 if (skbcnt != fragcnt)
1097 goto bad;
1099 else
1100 *framecnt = fragcnt;
1102 if (key != NULL) {
/*
 * IEEE 802.1X: send EAPOL frames always in the clear.
 * WPA/WPA2: encrypt EAPOL keys when pairwise keys are set.
 */
1107 if (eh.ether_type != __constant_htons(ETHERTYPE_PAE) ||
1108 ((vap->iv_flags & IEEE80211_F_WPA) &&
1109 (vap->iv_opmode == IEEE80211_M_STA ?
1110 !KEY_UNDEFINED(*key) : !KEY_UNDEFINED(ni->ni_ucastkey)))) {
1111 int force_swmic = (fragcnt > 1) ? 1 : 0;
1113 wh->i_fc[1] |= IEEE80211_FC1_PROT;
1115 if (!ieee80211_crypto_enmic(vap, key, skb, force_swmic)) {
1116 IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_OUTPUT,
1117 eh.ether_dhost,
1118 "%s", "enmic failed, discard frame");
1119 vap->iv_stats.is_crypto_enmicfail++;
1120 goto bad;
1125 if (fragcnt > 1) {
1126 int fragnum, offset, pdulen;
1127 void *pdu;
1129 fragnum = 0;
1130 wh = twh = (struct ieee80211_frame *)skb->data;
/*
 * Setup WLAN headers as fragment headers.
 */
1135 wh->i_fc[1] |= IEEE80211_FC1_MORE_FRAG;
1137 *(__le16 *)&wh->i_seq[0] |=
1138 htole16((fragnum & IEEE80211_SEQ_FRAG_MASK) <<
1139 IEEE80211_SEQ_FRAG_SHIFT);
1140 fragnum++;
1142 offset = hdrsize + pdusize;
1143 datalen = (skb->len - hdrsize) - pdusize;
1145 IEEE80211_NODE_STAT(ni, tx_data);
1146 IEEE80211_NODE_STAT_ADD(ni, tx_bytes, pdusize);
1148 for (tskb = framelist; tskb != NULL; tskb = tskb->next) {
/*
 * Copy the WLAN header into each fragment's header skb.
 */
1152 twh = (struct ieee80211_frame *)skb_put(tskb, hdrsize);
1153 memcpy((void *)twh, (void *)wh, hdrsize);
1155 *(__le16 *)&twh->i_seq[0] |=
1156 htole16((fragnum & IEEE80211_SEQ_FRAG_MASK) <<
1157 IEEE80211_SEQ_FRAG_SHIFT);
1158 fragnum++;
1160 if (pdusize <= datalen)
1161 pdulen = pdusize;
1162 else
1163 pdulen = datalen;
/*
 * Copy the fragment payload from the input skb.
 * Doing copies isn't intuitive from a performance
 * perspective; however, for this case it is believed
 * to be more efficient than cloning skbs.
 */
1172 pdu = skb_put(tskb, pdulen);
1173 memcpy(pdu, (void *)skb->data + offset, pdulen);
1175 offset += pdusize;
1176 datalen -= pdusize;
1178 IEEE80211_NODE_STAT(ni, tx_data);
1179 IEEE80211_NODE_STAT_ADD(ni, tx_bytes, pdulen);
1182 twh->i_fc[1] &= ~IEEE80211_FC1_MORE_FRAG;
1183 skb_trim(skb, hdrsize + pdusize);
1184 skb->next = framelist;
1185 } else {
1186 IEEE80211_NODE_STAT(ni, tx_data);
1187 IEEE80211_NODE_STAT_ADD(ni, tx_bytes, datalen);
1189 #ifdef ATH_SUPERG_FF
1190 /* Account for a second skb in the same packet when FF is on */
1191 if (skb->next) {
1192 datalen = skb->next->len;
1193 IEEE80211_NODE_STAT(ni, tx_data);
1194 IEEE80211_NODE_STAT_ADD(ni, tx_bytes, datalen);
1196 #endif
1199 return skb;
1200 bad:
1201 if (framelist != NULL) {
1202 ieee80211_dev_kfree_skb_list(&framelist);
1205 if (skb != NULL) {
1206 ieee80211_dev_kfree_skb_list(&skb);
1208 return NULL;
1209 #undef WH4
1211 EXPORT_SYMBOL(ieee80211_encap);
1212 #undef KEY_UNDEFINED
/*
 * Add a supported rates element to a frame.
 */
1217 u_int8_t *
1218 ieee80211_add_rates(u_int8_t *frm, const struct ieee80211_rateset *rs)
1220 int nrates;
1222 *frm++ = IEEE80211_ELEMID_RATES;
1223 nrates = rs->rs_nrates;
1224 if (nrates > IEEE80211_RATE_SIZE)
1225 nrates = IEEE80211_RATE_SIZE;
1226 *frm++ = nrates;
1227 memcpy(frm, rs->rs_rates, nrates);
1228 return frm + nrates;
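/*
 * A minimal sketch of the element this emits, assuming an 802.11b rate
 * set with all four rates marked basic (rates are in 500 kb/s units,
 * the MSB flags a basic rate):
 *
 *	01 04 82 84 8b 96
 *	|  |  +----------- 1, 2, 5.5 and 11 Mb/s
 *	|  +-------------- length
 *	+----------------- IEEE80211_ELEMID_RATES
 */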
/*
 * Add an extended supported rates element to a frame.
 */
1234 u_int8_t *
1235 ieee80211_add_xrates(u_int8_t *frm, const struct ieee80211_rateset *rs)
/*
 * Add an extended supported rates element if operating in 11g mode.
 */
1240 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
1241 int nrates = rs->rs_nrates - IEEE80211_RATE_SIZE;
1242 *frm++ = IEEE80211_ELEMID_XRATES;
1243 *frm++ = nrates;
1244 memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates);
1245 frm += nrates;
1247 return frm;
/*
 * Add an SSID element to a frame.
 */
1253 static u_int8_t *
1254 ieee80211_add_ssid(u_int8_t *frm, const u_int8_t *ssid, u_int len)
1256 *frm++ = IEEE80211_ELEMID_SSID;
1257 *frm++ = len;
1258 memcpy(frm, ssid, len);
1259 return frm + len;
/*
 * Add an ERP element to a frame.
 */
1265 u_int8_t *
1266 ieee80211_add_erp(u_int8_t *frm, struct ieee80211com *ic)
1268 u_int8_t erp;
1270 *frm++ = IEEE80211_ELEMID_ERP;
1271 *frm++ = 1;
1272 erp = 0;
1273 if (ic->ic_nonerpsta != 0)
1274 erp |= IEEE80211_ERP_NON_ERP_PRESENT;
1275 if (ic->ic_flags & IEEE80211_F_USEPROT)
1276 erp |= IEEE80211_ERP_USE_PROTECTION;
1277 if ((ic->ic_flags & IEEE80211_F_USEBARKER) || (ic->ic_nonerpsta > 0))
1278 erp |= IEEE80211_ERP_LONG_PREAMBLE;
1279 *frm++ = erp;
1280 return frm;
/*
 * Add a country information element to a frame.
 */
1286 u_int8_t *
1287 ieee80211_add_country(u_int8_t *frm, struct ieee80211com *ic)
1289 /* add country code */
1290 memcpy(frm, (u_int8_t *)&ic->ic_country_ie,
1291 ic->ic_country_ie.country_len + 2);
1292 frm += ic->ic_country_ie.country_len + 2;
1293 return frm;
/*
 * Add a Power Constraint information element to a frame.
 */
1299 u_int8_t *
1300 ieee80211_add_pwrcnstr(u_int8_t *frm, struct ieee80211com *ic)
1302 struct ieee80211_ie_pwrcnstr *ie =
1303 (struct ieee80211_ie_pwrcnstr *)frm;
1304 ie->pc_id = IEEE80211_ELEMID_PWRCNSTR;
1305 ie->pc_len = 1;
1306 ie->pc_lpc = IEEE80211_PWRCONSTRAINT_VAL(ic);
1307 frm += sizeof(*ie);
1308 return frm;
/*
 * Add a Power Capability information element to a frame.
 */
1314 static u_int8_t *
1315 ieee80211_add_pwrcap(u_int8_t *frm, struct ieee80211com *ic)
1317 struct ieee80211_ie_pwrcap *ie =
1318 (struct ieee80211_ie_pwrcap *)frm;
1319 ie->pc_id = IEEE80211_ELEMID_PWRCAP;
1320 ie->pc_len = 2;
1321 ie->pc_mintxpow = ic->ic_bsschan->ic_minpower;
1322 ie->pc_maxtxpow = ic->ic_bsschan->ic_maxpower;
1323 frm += sizeof(*ie);
1324 return frm;
/*
 * Add a Supported Channels information element to a frame.
 */
1330 static u_int8_t *
1331 ieee80211_add_suppchan(u_int8_t *frm, struct ieee80211com *ic)
1333 memcpy(frm, (u_int8_t *)&ic->ic_sc_ie,
1334 ic->ic_sc_ie.sc_len + 2);
1335 frm += ic->ic_sc_ie.sc_len + 2;
1336 return frm;
1339 static u_int8_t *
1340 ieee80211_setup_wpa_ie(struct ieee80211vap *vap, u_int8_t *ie)
1342 #define WPA_OUI_BYTES 0x00, 0x50, 0xf2
1343 #define ADDSHORT(frm, v) do { \
1344 frm[0] = (v) & 0xff; \
1345 frm[1] = (v) >> 8; \
1346 frm += 2; \
1347 } while (0)
1348 #define ADDSELECTOR(frm, sel) do { \
1349 memcpy(frm, sel, 4); \
1350 frm += 4; \
1351 } while (0)
1352 static const u_int8_t oui[4] = { WPA_OUI_BYTES, WPA_OUI_TYPE };
1353 static const u_int8_t cipher_suite[][4] = {
1354 { WPA_OUI_BYTES, WPA_CSE_WEP40 }, /* NB: 40-bit */
1355 { WPA_OUI_BYTES, WPA_CSE_TKIP },
1356 { 0x00, 0x00, 0x00, 0x00 }, /* XXX WRAP */
1357 { WPA_OUI_BYTES, WPA_CSE_CCMP },
1358 { 0x00, 0x00, 0x00, 0x00 }, /* XXX CKIP */
1359 { WPA_OUI_BYTES, WPA_CSE_NULL },
1361 static const u_int8_t wep104_suite[4] =
1362 { WPA_OUI_BYTES, WPA_CSE_WEP104 };
1363 static const u_int8_t key_mgt_unspec[4] =
1364 { WPA_OUI_BYTES, WPA_ASE_8021X_UNSPEC };
1365 static const u_int8_t key_mgt_psk[4] =
1366 { WPA_OUI_BYTES, WPA_ASE_8021X_PSK };
1367 const struct ieee80211_rsnparms *rsn = &vap->iv_bss->ni_rsn;
1368 u_int8_t *frm = ie;
1369 u_int8_t *selcnt;
1371 *frm++ = IEEE80211_ELEMID_VENDOR;
1372 *frm++ = 0; /* length filled in below */
1373 memcpy(frm, oui, sizeof(oui)); /* WPA OUI */
1374 frm += sizeof(oui);
1375 ADDSHORT(frm, WPA_VERSION);
1377 /* XXX filter out CKIP */
1379 /* multicast cipher */
1380 if (rsn->rsn_mcastcipher == IEEE80211_CIPHER_WEP &&
1381 rsn->rsn_mcastkeylen >= 13)
1382 ADDSELECTOR(frm, wep104_suite);
1383 else
1384 ADDSELECTOR(frm, cipher_suite[rsn->rsn_mcastcipher]);
1386 /* unicast cipher list */
1387 selcnt = frm;
1388 ADDSHORT(frm, 0); /* selector count */
1389 if (rsn->rsn_ucastcipherset & (1 << IEEE80211_CIPHER_AES_CCM)) {
1390 selcnt[0]++;
1391 ADDSELECTOR(frm, cipher_suite[IEEE80211_CIPHER_AES_CCM]);
1393 if (rsn->rsn_ucastcipherset & (1 << IEEE80211_CIPHER_TKIP)) {
1394 selcnt[0]++;
1395 ADDSELECTOR(frm, cipher_suite[IEEE80211_CIPHER_TKIP]);
1398 /* authenticator selector list */
1399 selcnt = frm;
1400 ADDSHORT(frm, 0); /* selector count */
1401 if (rsn->rsn_keymgmtset & WPA_ASE_8021X_UNSPEC) {
1402 selcnt[0]++;
1403 ADDSELECTOR(frm, key_mgt_unspec);
1405 if (rsn->rsn_keymgmtset & WPA_ASE_8021X_PSK) {
1406 selcnt[0]++;
1407 ADDSELECTOR(frm, key_mgt_psk);
1410 /* optional capabilities */
1411 if ((rsn->rsn_caps != 0) && (rsn->rsn_caps != RSN_CAP_PREAUTH))
1412 ADDSHORT(frm, rsn->rsn_caps);
1414 /* calculate element length */
1415 ie[1] = frm - ie - 2;
1416 KASSERT(ie[1] + 2 <= sizeof(struct ieee80211_ie_wpa),
1417 ("WPA IE too big, %u > %u",
1418 ie[1] + 2, (int)sizeof(struct ieee80211_ie_wpa)));
1419 return frm;
1420 #undef ADDSHORT
1421 #undef ADDSELECTOR
1422 #undef WPA_OUI_BYTES
1425 static u_int8_t *
1426 ieee80211_setup_rsn_ie(struct ieee80211vap *vap, u_int8_t *ie)
1428 #define RSN_OUI_BYTES 0x00, 0x0f, 0xac
1429 #define ADDSHORT(frm, v) do { \
1430 frm[0] = (v) & 0xff; \
1431 frm[1] = (v) >> 8; \
1432 frm += 2; \
1433 } while (0)
1434 #define ADDSELECTOR(frm, sel) do { \
1435 memcpy(frm, sel, 4); \
1436 frm += 4; \
1437 } while (0)
1438 static const u_int8_t cipher_suite[][4] = {
1439 { RSN_OUI_BYTES, RSN_CSE_WEP40 }, /* NB: 40-bit */
1440 { RSN_OUI_BYTES, RSN_CSE_TKIP },
1441 { RSN_OUI_BYTES, RSN_CSE_WRAP },
1442 { RSN_OUI_BYTES, RSN_CSE_CCMP },
1443 { 0x00, 0x00, 0x00, 0x00 }, /* XXX CKIP */
1444 { RSN_OUI_BYTES, RSN_CSE_NULL },
1446 static const u_int8_t wep104_suite[4] =
1447 { RSN_OUI_BYTES, RSN_CSE_WEP104 };
1448 static const u_int8_t key_mgt_unspec[4] =
1449 { RSN_OUI_BYTES, RSN_ASE_8021X_UNSPEC };
1450 static const u_int8_t key_mgt_psk[4] =
1451 { RSN_OUI_BYTES, RSN_ASE_8021X_PSK };
1452 const struct ieee80211_rsnparms *rsn = &vap->iv_bss->ni_rsn;
1453 u_int8_t *frm = ie;
1454 u_int8_t *selcnt;
1456 *frm++ = IEEE80211_ELEMID_RSN;
1457 *frm++ = 0; /* length filled in below */
1458 ADDSHORT(frm, RSN_VERSION);
1460 /* XXX filter out CKIP */
1462 /* multicast cipher */
1463 if (rsn->rsn_mcastcipher == IEEE80211_CIPHER_WEP &&
1464 rsn->rsn_mcastkeylen >= 13)
1465 ADDSELECTOR(frm, wep104_suite);
1466 else
1467 ADDSELECTOR(frm, cipher_suite[rsn->rsn_mcastcipher]);
1469 /* unicast cipher list */
1470 selcnt = frm;
1471 ADDSHORT(frm, 0); /* selector count */
1472 if (rsn->rsn_ucastcipherset & (1 << IEEE80211_CIPHER_AES_CCM)) {
1473 selcnt[0]++;
1474 ADDSELECTOR(frm, cipher_suite[IEEE80211_CIPHER_AES_CCM]);
1476 if (rsn->rsn_ucastcipherset & (1 << IEEE80211_CIPHER_TKIP)) {
1477 selcnt[0]++;
1478 ADDSELECTOR(frm, cipher_suite[IEEE80211_CIPHER_TKIP]);
1481 /* authenticator selector list */
1482 selcnt = frm;
1483 ADDSHORT(frm, 0); /* selector count */
1484 if (rsn->rsn_keymgmtset & WPA_ASE_8021X_UNSPEC) {
1485 selcnt[0]++;
1486 ADDSELECTOR(frm, key_mgt_unspec);
1488 if (rsn->rsn_keymgmtset & WPA_ASE_8021X_PSK) {
1489 selcnt[0]++;
1490 ADDSELECTOR(frm, key_mgt_psk);
1493 /* capabilities */
1494 ADDSHORT(frm, rsn->rsn_caps);
1495 /* XXX PMKID */
1497 /* calculate element length */
1498 ie[1] = frm - ie - 2;
1499 KASSERT(ie[1] + 2 <= sizeof(struct ieee80211_ie_wpa),
1500 ("RSN IE too big, %u > %u",
1501 ie[1] + 2, (int)sizeof(struct ieee80211_ie_wpa)));
1502 return frm;
1503 #undef ADDSELECTOR
1504 #undef ADDSHORT
1505 #undef RSN_OUI_BYTES
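/*
 * A minimal sketch of the element built above for a typical
 * WPA2-PSK/CCMP-only configuration (multi-byte fields little-endian):
 *
 *	30 14 01 00		element id 48, length 20, version 1
 *	00 0f ac 04		group cipher: CCMP
 *	01 00 00 0f ac 04	pairwise cipher count 1: CCMP
 *	01 00 00 0f ac 02	AKM count 1: PSK
 *	xx xx			RSN capabilities (rsn_caps)
 */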
/*
 * Add a WPA/RSN element to a frame.
 */
1511 u_int8_t *
1512 ieee80211_add_wpa(u_int8_t *frm, struct ieee80211vap *vap)
1515 KASSERT(vap->iv_flags & IEEE80211_F_WPA, ("no WPA/RSN!"));
1516 if (vap->iv_flags & IEEE80211_F_WPA2)
1517 frm = ieee80211_setup_rsn_ie(vap, frm);
1518 if (vap->iv_flags & IEEE80211_F_WPA1)
1519 frm = ieee80211_setup_wpa_ie(vap, frm);
1520 return frm;
1523 #define WME_OUI_BYTES 0x00, 0x50, 0xf2
/*
 * Add a WME Info element to a frame.
 */
1527 static u_int8_t *
1528 ieee80211_add_wme(u_int8_t *frm, struct ieee80211_node *ni)
1530 static const u_int8_t oui[4] = { WME_OUI_BYTES, WME_OUI_TYPE };
1531 struct ieee80211_ie_wme *ie = (struct ieee80211_ie_wme *)frm;
1532 struct ieee80211_wme_state *wme = &ni->ni_ic->ic_wme;
1533 struct ieee80211vap *vap = ni->ni_vap;
1535 *frm++ = IEEE80211_ELEMID_VENDOR;
1536 *frm++ = 0; /* length filled in below */
1537 memcpy(frm, oui, sizeof(oui)); /* WME OUI */
1538 frm += sizeof(oui);
1539 *frm++ = WME_INFO_OUI_SUBTYPE; /* OUI subtype */
1540 *frm++ = WME_VERSION; /* protocol version */
1541 /* QoS Info field depends on operating mode */
1542 switch (vap->iv_opmode) {
1543 case IEEE80211_M_HOSTAP:
1544 *frm = wme->wme_bssChanParams.cap_info_count;
1545 if (IEEE80211_VAP_UAPSD_ENABLED(vap))
1546 *frm |= WME_CAPINFO_UAPSD_EN;
1547 frm++;
1548 break;
1549 case IEEE80211_M_STA:
1550 *frm++ = vap->iv_uapsdinfo;
1551 break;
1552 default:
1553 *frm++ = 0;
1556 ie->wme_len = frm - &ie->wme_oui[0];
1558 return frm;
/*
 * Add a WME Parameter element to a frame.
 */
1564 u_int8_t *
1565 ieee80211_add_wme_param(u_int8_t *frm, struct ieee80211_wme_state *wme,
1566 int uapsd_enable)
1568 #define SM(_v, _f) (((_v) << _f##_S) & _f)
1569 #define ADDSHORT(frm, v) do { \
1570 frm[0] = (v) & 0xff; \
1571 frm[1] = (v) >> 8; \
1572 frm += 2; \
1573 } while (0)
1574 static const u_int8_t oui[4] = { WME_OUI_BYTES, WME_OUI_TYPE };
1575 struct ieee80211_wme_param *ie = (struct ieee80211_wme_param *)frm;
1576 int i;
1578 *frm++ = IEEE80211_ELEMID_VENDOR;
1579 *frm++ = 0; /* length filled in below */
1580 memcpy(frm, oui, sizeof(oui)); /* WME OUI */
1581 frm += sizeof(oui);
1582 *frm++ = WME_PARAM_OUI_SUBTYPE; /* OUI subtype */
1583 *frm++ = WME_VERSION; /* protocol version */
1584 *frm = wme->wme_bssChanParams.cap_info_count;
1585 if (uapsd_enable)
1586 *frm |= WME_CAPINFO_UAPSD_EN;
1587 frm++;
1588 *frm++ = 0; /* reserved field */
1589 for (i = 0; i < WME_NUM_AC; i++) {
1590 const struct wmeParams *ac =
1591 &wme->wme_bssChanParams.cap_wmeParams[i];
1592 *frm++ = SM(i, WME_PARAM_ACI) |
1593 SM(ac->wmep_acm, WME_PARAM_ACM) |
1594 SM(ac->wmep_aifsn, WME_PARAM_AIFSN);
1595 *frm++ = SM(ac->wmep_logcwmax, WME_PARAM_LOGCWMAX) |
1596 SM(ac->wmep_logcwmin, WME_PARAM_LOGCWMIN);
1597 ADDSHORT(frm, ac->wmep_txopLimit);
1600 ie->param_len = frm - &ie->param_oui[0];
1602 return frm;
1603 #undef ADDSHORT
1605 #undef WME_OUI_BYTES
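/*
 * Worked example of the per-AC packing above, assuming the standard
 * WMM defaults for AC_BE (ACI 0, ACM 0, AIFSN 3, ECWmin 4, ECWmax 10,
 * TXOP 0): the first byte is 0x03 (ACI/ACM/AIFSN), the second 0xa4
 * (ECWmax in the high nibble, ECWmin in the low), followed by the
 * 16-bit TXOP limit of 0x0000.
 */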
/*
 * Add an Atheros Advanced Capability element to a frame.
 */
1610 u_int8_t *
1611 ieee80211_add_athAdvCap(u_int8_t *frm, u_int8_t capability, u_int16_t defaultKey)
1613 static const u_int8_t oui[6] = {(ATH_OUI & 0xff), ((ATH_OUI >>8) & 0xff),
1614 ((ATH_OUI >> 16) & 0xff), ATH_OUI_TYPE,
1615 ATH_OUI_SUBTYPE, ATH_OUI_VERSION};
1616 struct ieee80211_ie_athAdvCap *ie = (struct ieee80211_ie_athAdvCap *)frm;
1618 *frm++ = IEEE80211_ELEMID_VENDOR;
1619 *frm++ = 0; /* Length filled in below */
1620 memcpy(frm, oui, sizeof(oui)); /* Atheros OUI, type, subtype, and version for adv capabilities */
1621 frm += sizeof(oui);
1622 *frm++ = capability;
1624 /* Setup default key index in little endian byte order */
1625 *frm++ = (defaultKey & 0xff);
1626 *frm++ = ((defaultKey >> 8) & 0xff);
1627 ie->athAdvCap_len = frm - &ie->athAdvCap_oui[0];
1629 return frm;
/*
 * Add an XR information element to a frame.
 */
1635 #ifdef ATH_SUPERG_XR
1636 u_int8_t *
1637 ieee80211_add_xr_param(u_int8_t *frm, struct ieee80211vap *vap)
1639 static const u_int8_t oui[6] = {(ATH_OUI & 0xff), ((ATH_OUI >>8) & 0xff),
1640 ((ATH_OUI >> 16) & 0xff), ATH_OUI_TYPE_XR,
1641 ATH_OUI_SUBTYPE_XR, ATH_OUI_VER_XR};
1642 struct ieee80211_xr_param *ie = (struct ieee80211_xr_param *)frm;
1644 *frm++ = IEEE80211_ELEMID_VENDOR;
1645 *frm++ = 0; /* Length filled in below */
1646 memcpy(frm, oui, sizeof(oui)); /* Atheros OUI, type, subtype, and version for adv capabilities */
1647 frm += sizeof(oui);
1648 *frm++ = 0; /* XR info */
1650 /* copy the BSSIDs */
1651 if (vap->iv_flags & IEEE80211_F_XR) {
1652 IEEE80211_ADDR_COPY(frm, vap->iv_xrvap->iv_bssid); /* Base BSSID */
1653 frm += IEEE80211_ADDR_LEN;
1654 IEEE80211_ADDR_COPY(frm, vap->iv_bssid); /* XR BSSID */
1655 frm += IEEE80211_ADDR_LEN;
1656 *(__le16 *)frm = htole16(vap->iv_bss->ni_intval); /* XR beacon interval */
1657 frm += 2;
1658 *frm++ = vap->iv_xrvap->iv_ath_cap; /* Base mode capability */
1659 *frm++ = vap->iv_ath_cap; /* XR mode capability */
1660 } else {
1661 IEEE80211_ADDR_COPY(frm, vap->iv_bssid);
1662 frm += IEEE80211_ADDR_LEN;
1663 IEEE80211_ADDR_COPY(frm, vap->iv_xrvap->iv_bssid);
1664 frm += IEEE80211_ADDR_LEN;
1665 *(__le16 *)frm = htole16(vap->iv_bss->ni_intval);
1666 frm += 2;
1667 *frm++ = vap->iv_ath_cap;
1668 *frm++ = vap->iv_xrvap->iv_ath_cap;
1670 ie->param_len = frm - &ie->param_oui[0];
1671 return frm;
1673 #endif
/*
 * Send a probe request frame with the specified ssid
 * and any optional information element data.
 */
1679 ieee80211_send_probereq(struct ieee80211_node *ni,
1680 const u_int8_t sa[IEEE80211_ADDR_LEN],
1681 const u_int8_t da[IEEE80211_ADDR_LEN],
1682 const u_int8_t bssid[IEEE80211_ADDR_LEN],
1683 const u_int8_t *ssid, size_t ssidlen,
1684 const void *optie, size_t optielen)
1686 struct ieee80211vap *vap = ni->ni_vap;
1687 struct ieee80211com *ic = ni->ni_ic;
1688 enum ieee80211_phymode mode;
1689 struct ieee80211_frame *wh;
1690 struct sk_buff *skb;
1691 u_int8_t *frm;
/*
 * prreq frame format
 *	[tlv] ssid
 *	[tlv] supported rates
 *	[tlv] extended supported rates
 *	[tlv] user-specified IEs
 */
1700 skb = ieee80211_getmgtframe(&frm, 2 + IEEE80211_NWID_LEN +
1701 2 + IEEE80211_RATE_SIZE +
1702 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) +
1703 (optie != NULL ? optielen : 0) +
1704 vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_REQ].length);
1705 if (skb == NULL) {
1706 vap->iv_stats.is_tx_nobuf++;
1707 return -ENOMEM;
1710 frm = ieee80211_add_ssid(frm, ssid, ssidlen);
1711 mode = ieee80211_chan2mode(ic->ic_curchan);
1712 frm = ieee80211_add_rates(frm, &ic->ic_sup_rates[mode]);
1713 frm = ieee80211_add_xrates(frm, &ic->ic_sup_rates[mode]);
1715 if (optie != NULL) {
1716 memcpy(frm, optie, optielen);
1717 frm += optielen;
1720 if (vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_REQ].ie) {
1721 memcpy(frm, vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_REQ].ie,
1722 vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_REQ].length);
1723 frm += vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_REQ].length;
1726 skb_trim(skb, frm - skb->data);
1728 SKB_NI(skb) = ieee80211_ref_node(ni);
1730 wh = (struct ieee80211_frame *)
1731 skb_push(skb, sizeof(struct ieee80211_frame));
1732 ieee80211_send_setup(vap, ni, wh,
1733 IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_REQ,
1734 sa, da, bssid);
1735 /* XXX power management? */
1737 IEEE80211_NODE_STAT(ni, tx_probereq);
1738 IEEE80211_NODE_STAT(ni, tx_mgmt);
1740 IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS,
1741 "[" MAC_FMT "] send probe req on channel %u\n",
1742 MAC_ADDR(wh->i_addr1),
1743 ieee80211_chan2ieee(ic, ic->ic_curchan));
1745 (void)ic->ic_mgtstart(ic, skb);
1746 return 0;
/*
 * Send a management frame. The node is for the destination (or ic_bss
 * when in station mode). Nodes other than ic_bss have their reference
 * count bumped to reflect our use for an indeterminate time.
 */
1755 ieee80211_send_mgmt(struct ieee80211_node *ni, int type, int arg)
1757 #define senderr(_x, _v) do { vap->iv_stats._v++; ret = _x; goto bad; } while (0)
1758 struct ieee80211vap *vap = ni->ni_vap;
1759 struct ieee80211com *ic = ni->ni_ic;
1760 struct sk_buff *skb;
1761 u_int8_t *frm;
1762 int frm_len;
1763 u_int16_t capinfo;
1764 ieee80211_keyix_t def_keyindex;
1765 int has_challenge, is_shared_key, ret, timer, status;
1767 KASSERT(ni != NULL, ("null node"));
1769 timer = 0;
1770 switch (type) {
1771 case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
/*
 * probe response frame format
 *	[8] time stamp
 *	[2] beacon interval
 *	[2] capability information
 *	[tlv] ssid
 *	[tlv] supported rates
 *	[7] FH/DS parameter set
 *	[tlv] IBSS parameter set
 *	[tlv] country code
 *	[3] power constraint
 *	[3] extended rate phy (ERP)
 *	[tlv] extended supported rates
 *	[tlv] WME parameters
 *	[tlv] WPA/RSN parameters
 *	[tlv] Atheros Advanced Capabilities
 *	[tlv] AtherosXR parameters
 */
1790 frm_len = 8
1791 + sizeof(u_int16_t)
1792 + sizeof(u_int16_t)
1793 + 2 + IEEE80211_NWID_LEN
1794 + 2 + IEEE80211_RATE_SIZE
1795 + 7 /* max(7,3) */
1796 /* XXX allocate max size */
1797 + 2 + ic->ic_country_ie.country_len
1800 + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE)
1801 + sizeof(struct ieee80211_wme_param)
1802 /* XXX !WPA1+WPA2 fits w/o a cluster */
1803 + (vap->iv_flags & IEEE80211_F_WPA ?
1804 2 * sizeof(struct ieee80211_ie_wpa) : 0)
1805 + sizeof(struct ieee80211_ie_athAdvCap)
1806 + vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_RESP].length;
1807 #ifdef ATH_SUPERG_XR
1808 if (vap->iv_ath_cap & IEEE80211_ATHC_XR)
1809 frm_len += sizeof(struct ieee80211_xr_param);
1810 #endif
1811 skb = ieee80211_getmgtframe(&frm, frm_len);
1812 if (skb == NULL)
1813 senderr(ENOMEM, is_tx_nobuf);
1815 /* timestamp should be filled later */
1816 memset(frm, 0, 8);
1817 frm += 8;
1819 /* beacon interval */
1820 *(__le16 *)frm = htole16(vap->iv_bss->ni_intval);
1821 frm += 2;
1823 /* cap. info */
1824 if (vap->iv_opmode == IEEE80211_M_IBSS)
1825 capinfo = IEEE80211_CAPINFO_IBSS;
1826 else
1827 capinfo = IEEE80211_CAPINFO_ESS;
1828 if (vap->iv_flags & IEEE80211_F_PRIVACY)
1829 capinfo |= IEEE80211_CAPINFO_PRIVACY;
1830 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
1831 IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
1832 capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
1833 if (ic->ic_flags & IEEE80211_F_SHSLOT)
1834 capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME;
1835 *(__le16 *)frm = htole16(capinfo);
1836 frm += 2;
1838 /* ssid */
1839 frm = ieee80211_add_ssid(frm, vap->iv_bss->ni_essid,
1840 vap->iv_bss->ni_esslen);
1842 /* supported rates */
1843 frm = ieee80211_add_rates(frm, &ni->ni_rates);
1845 /* XXX: FH/DS parameter set, correct ? */
1846 if (ic->ic_phytype == IEEE80211_T_FH) {
1847 *frm++ = IEEE80211_ELEMID_FHPARMS;
1848 *frm++ = 5;
1849 *frm++ = ni->ni_fhdwell & 0x00ff;
1850 *frm++ = (ni->ni_fhdwell >> 8) & 0x00ff;
1851 *frm++ = IEEE80211_FH_CHANSET(
1852 ieee80211_chan2ieee(ic, ic->ic_curchan));
1853 *frm++ = IEEE80211_FH_CHANPAT(
1854 ieee80211_chan2ieee(ic, ic->ic_curchan));
1855 *frm++ = ni->ni_fhindex;
1856 } else {
1857 *frm++ = IEEE80211_ELEMID_DSPARMS;
1858 *frm++ = 1;
1859 *frm++ = ieee80211_chan2ieee(ic, ic->ic_curchan);
1862 if (vap->iv_opmode == IEEE80211_M_IBSS) {
1863 *frm++ = IEEE80211_ELEMID_IBSSPARMS;
1864 *frm++ = 2;
1865 *frm++ = 0;
1866 *frm++ = 0; /* TODO: ATIM window */
1869 /* country code */
1870 if ((ic->ic_flags & IEEE80211_F_DOTH) ||
1871 (ic->ic_flags_ext & IEEE80211_FEXT_COUNTRYIE))
1872 frm = ieee80211_add_country(frm, ic);
1874 /* power constraint */
1875 if (ic->ic_flags & IEEE80211_F_DOTH)
1876 frm = ieee80211_add_pwrcnstr(frm, ic);
1878 /* ERP */
1879 if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
1880 frm = ieee80211_add_erp(frm, ic);
1882 /* Ext. Supp. Rates */
1883 frm = ieee80211_add_xrates(frm, &ni->ni_rates);
1885 /* WME */
1886 if (vap->iv_flags & IEEE80211_F_WME)
1887 frm = ieee80211_add_wme_param(frm, &ic->ic_wme,
1888 IEEE80211_VAP_UAPSD_ENABLED(vap));
1890 /* WPA */
1891 if (vap->iv_flags & IEEE80211_F_WPA)
1892 frm = ieee80211_add_wpa(frm, vap);
1894 /* AthAdvCaps */
1895 if (vap->iv_bss && vap->iv_bss->ni_ath_flags)
1896 frm = ieee80211_add_athAdvCap(frm, vap->iv_bss->ni_ath_flags,
1897 vap->iv_bss->ni_ath_defkeyindex);
1898 #ifdef ATH_SUPERG_XR
1899 /* XR params */
1900 if (vap->iv_xrvap && vap->iv_ath_cap & IEEE80211_ATHC_XR)
1901 frm = ieee80211_add_xr_param(frm, vap);
1902 #endif
1903 if (vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_RESP].ie) {
1904 memcpy(frm, vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_RESP].ie,
1905 vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_RESP].length);
1906 frm += vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_RESP].length;
}
1909 skb_trim(skb, frm - skb->data);
1910 break;
1912 case IEEE80211_FC0_SUBTYPE_AUTH:
1913 status = arg >> 16;
1914 arg &= 0xffff;
1915 has_challenge = ((arg == IEEE80211_AUTH_SHARED_CHALLENGE ||
1916 arg == IEEE80211_AUTH_SHARED_RESPONSE) &&
1917 ni->ni_challenge != NULL);
/*
1920 * Deduce whether we're doing open authentication or
1921 * shared key authentication. We do the latter if
1922 * we're in the middle of a shared key authentication
1923 * handshake or if we're initiating an authentication
1924 * request and configured to use shared key.
 */
1926 is_shared_key = has_challenge ||
1927 arg >= IEEE80211_AUTH_SHARED_RESPONSE ||
1928 (arg == IEEE80211_AUTH_SHARED_REQUEST &&
1929 vap->iv_bss->ni_authmode == IEEE80211_AUTH_SHARED);
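/*
 * NB: arg doubles as the 802.11 authentication sequence number (shared key
 * exchanges use sequence numbers 1-4, open system 1-2), while the status
 * code travels in the upper 16 bits, as unpacked above.
 */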
1931 skb = ieee80211_getmgtframe(&frm,
1932 3 * sizeof(u_int16_t)
1933 + (has_challenge && status == IEEE80211_STATUS_SUCCESS ?
1934 sizeof(u_int16_t)+IEEE80211_CHALLENGE_LEN : 0));
1935 if (skb == NULL)
1936 senderr(ENOMEM, is_tx_nobuf);
1938 ((__le16 *)frm)[0] =
1939 (is_shared_key) ? htole16(IEEE80211_AUTH_ALG_SHARED)
1940 : htole16(IEEE80211_AUTH_ALG_OPEN);
1941 ((__le16 *)frm)[1] = htole16(arg); /* sequence number */
1942 ((__le16 *)frm)[2] = htole16(status); /* status */
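/*
 * The challenge text is carried as an information element; the single
 * little-endian 16-bit store below packs the element ID in the low byte
 * and the length (IEEE80211_CHALLENGE_LEN) in the high byte.
 */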
1944 if (has_challenge && status == IEEE80211_STATUS_SUCCESS) {
1945 ((__le16 *)frm)[3] =
1946 htole16((IEEE80211_CHALLENGE_LEN << 8) |
1947 IEEE80211_ELEMID_CHALLENGE);
1948 memcpy(&((__le16 *)frm)[4], ni->ni_challenge,
1949 IEEE80211_CHALLENGE_LEN);
1950 if (arg == IEEE80211_AUTH_SHARED_RESPONSE) {
1951 IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni,
1952 "request encrypt frame (%s)", __func__);
1953 SKB_CB(skb)->flags |= M_LINK0; /* WEP-encrypt, please */
}
}
1957 /* XXX not right for shared key */
1958 if (status == IEEE80211_STATUS_SUCCESS)
1959 IEEE80211_NODE_STAT(ni, tx_auth);
1960 else
1961 IEEE80211_NODE_STAT(ni, tx_auth_fail);
1963 if (vap->iv_opmode == IEEE80211_M_STA)
1964 timer = IEEE80211_TRANS_WAIT;
1965 break;
1967 case IEEE80211_FC0_SUBTYPE_DEAUTH:
1968 IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni,
1969 "send station deauthenticate (reason %d)", arg);
1970 skb = ieee80211_getmgtframe(&frm, sizeof(u_int16_t));
1971 if (skb == NULL)
1972 senderr(ENOMEM, is_tx_nobuf);
1973 *(__le16 *)frm = htole16(arg); /* reason */
1975 IEEE80211_NODE_STAT(ni, tx_deauth);
1976 IEEE80211_NODE_STAT_SET(ni, tx_deauth_code, arg);
1978 ieee80211_node_unauthorize(ni); /* port closed */
1979 break;
1981 case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
1982 case IEEE80211_FC0_SUBTYPE_REASSOC_REQ:
/*
1984 * asreq frame format
1985 * [2] capability information
1986 * [2] listen interval
1987 * [6*] current AP address (reassoc only)
1988 * [tlv] ssid
1989 * [tlv] supported rates
1990 * [4] power capability (802.11h)
1991 * [tlv] supported channels element (802.11h)
1992 * [tlv] extended supported rates
1993 * [tlv] WME [if enabled and AP capable]
1994 * [tlv] Atheros advanced capabilities
1995 * [tlv] user-specified IEs
 */
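/*
 * NB: as with the probe response above, the skb is sized for the largest
 * possible request; the 802.11h elements (power capability, supported
 * channels) are emitted only when IEEE80211_F_DOTH is set, and the frame
 * is trimmed to its final length below.
 */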
1997 skb = ieee80211_getmgtframe(&frm,
1998 sizeof(u_int16_t) +
1999 sizeof(u_int16_t) +
2000 IEEE80211_ADDR_LEN +
2001 2 + IEEE80211_NWID_LEN +
2002 2 + IEEE80211_RATE_SIZE +
2003 4 + (2 + ic->ic_sc_ie.sc_len) +
2004 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) +
2005 sizeof(struct ieee80211_ie_wme) +
2006 sizeof(struct ieee80211_ie_athAdvCap) +
2007 (vap->iv_opt_ie != NULL ? vap->iv_opt_ie_len : 0) +
2008 vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_REQ].length);
2009 if (skb == NULL)
2010 senderr(ENOMEM, is_tx_nobuf);
2012 capinfo = 0;
2013 if (vap->iv_opmode == IEEE80211_M_IBSS)
2014 capinfo |= IEEE80211_CAPINFO_IBSS;
2015 else /* IEEE80211_M_STA */
2016 capinfo |= IEEE80211_CAPINFO_ESS;
2017 if (vap->iv_flags & IEEE80211_F_PRIVACY)
2018 capinfo |= IEEE80211_CAPINFO_PRIVACY;
/*
2020 * NB: Some 11a APs reject the request when
2021 * short preamble is set.
 */
2023 /* Capability information */
2024 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
2025 IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
2026 capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
2027 if ((ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME) &&
2028 (ic->ic_caps & IEEE80211_C_SHSLOT))
2029 capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME;
2030 *(__le16 *)frm = htole16(capinfo);
2031 frm += 2;
2033 /* listen interval */
2034 *(__le16 *)frm = htole16(ic->ic_lintval);
2035 frm += 2;
2037 /* Current AP address */
2038 if (type == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
2039 IEEE80211_ADDR_COPY(frm, vap->iv_bssid);
2040 frm += IEEE80211_ADDR_LEN;
}
2042 /* ssid */
2043 frm = ieee80211_add_ssid(frm, ni->ni_essid, ni->ni_esslen);
2044 /* supported rates */
2045 frm = ieee80211_add_rates(frm, &ni->ni_rates);
2046 /* power capability/supported channels */
2047 if (ic->ic_flags & IEEE80211_F_DOTH) {
2048 frm = ieee80211_add_pwrcap(frm, ic);
2049 frm = ieee80211_add_suppchan(frm, ic);
}
2051 /* ext. supp. rates */
2052 frm = ieee80211_add_xrates(frm, &ni->ni_rates);
2054 /* wme */
2055 if ((vap->iv_flags & IEEE80211_F_WME) && ni->ni_wme_ie != NULL)
2056 frm = ieee80211_add_wme(frm, ni);
2057 /* ath adv. cap */
2058 if (ni->ni_ath_flags & vap->iv_ath_cap) {
2059 IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
2060 "Adding ath adv cap ie: ni_ath_flags = %02x, "
2061 "iv_ath_cap = %02x", ni->ni_ath_flags,
2062 vap->iv_ath_cap);
2064 /* Setup default key index for static wep case */
2065 def_keyindex = IEEE80211_INVAL_DEFKEY;
2066 if (((vap->iv_flags & IEEE80211_F_WPA) == 0) &&
2067 (ni->ni_authmode != IEEE80211_AUTH_8021X) &&
2068 (vap->iv_def_txkey != IEEE80211_KEYIX_NONE))
2069 def_keyindex = vap->iv_def_txkey;
2071 frm = ieee80211_add_athAdvCap(frm,
2072 ni->ni_ath_flags & vap->iv_ath_cap,
2073 def_keyindex);
}
2076 /* User-spec */
2077 if (vap->iv_opt_ie != NULL) {
2078 memcpy(frm, vap->iv_opt_ie, vap->iv_opt_ie_len);
2079 frm += vap->iv_opt_ie_len;
}
2082 if (vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_REQ].ie) {
2083 memcpy(frm, vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_REQ].ie,
2084 vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_REQ].length);
2085 frm += vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_REQ].length;
}
2088 skb_trim(skb, frm - skb->data);
2090 timer = IEEE80211_TRANS_WAIT;
2091 break;
2093 case IEEE80211_FC0_SUBTYPE_ASSOC_RESP:
2094 case IEEE80211_FC0_SUBTYPE_REASSOC_RESP:
/*
2096 * asresp frame format
2097 * [2] capability information
2098 * [2] status
2099 * [2] association ID
2100 * [tlv] supported rates
2101 * [tlv] extended supported rates
2102 * [tlv] WME (if enabled and STA enabled)
2103 * [tlv] Atheros Advanced Capabilities
 */
2105 skb = ieee80211_getmgtframe(&frm,
2106 3 * sizeof(u_int16_t) +
2107 2 + IEEE80211_RATE_SIZE +
2108 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) +
2109 sizeof(struct ieee80211_wme_param) +
2110 (vap->iv_ath_cap ? sizeof(struct ieee80211_ie_athAdvCap):0) +
2111 vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_RESP].length);
2112 if (skb == NULL)
2113 senderr(ENOMEM, is_tx_nobuf);
2115 /* Capability Information */
2116 capinfo = IEEE80211_CAPINFO_ESS;
2117 if (vap->iv_flags & IEEE80211_F_PRIVACY)
2118 capinfo |= IEEE80211_CAPINFO_PRIVACY;
2119 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
2120 IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
2121 capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
2122 if (ic->ic_flags & IEEE80211_F_SHSLOT)
2123 capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME;
2124 *(__le16 *)frm = htole16(capinfo);
2125 frm += 2;
2127 /* status */
2128 *(__le16 *)frm = htole16(arg);
2129 frm += 2;
2131 /* Assoc ID */
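/*
 * NB: ni_associd is presumed to already carry the AID with its two most
 * significant bits set (0xc000), as assigned at association time, so it
 * can be written verbatim here (assumption about the node setup code).
 */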
2132 if (arg == IEEE80211_STATUS_SUCCESS) {
2133 *(__le16 *)frm = htole16(ni->ni_associd);
2134 IEEE80211_NODE_STAT(ni, tx_assoc);
2135 } else
2136 IEEE80211_NODE_STAT(ni, tx_assoc_fail);
2137 frm += 2;
2139 /* supported rates */
2140 frm = ieee80211_add_rates(frm, &ni->ni_rates);
2142 /* ext. supp. rates */
2143 frm = ieee80211_add_xrates(frm, &ni->ni_rates);
2145 /* wme */
2146 if ((vap->iv_flags & IEEE80211_F_WME) && ni->ni_wme_ie != NULL)
2147 frm = ieee80211_add_wme_param(frm, &ic->ic_wme,
2148 IEEE80211_VAP_UAPSD_ENABLED(vap));
2150 /* athAdvCap */
2151 if (vap->iv_ath_cap)
2152 frm = ieee80211_add_athAdvCap(frm,
2153 vap->iv_ath_cap & ni->ni_ath_flags,
2154 ni->ni_ath_defkeyindex);
2156 if (vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_RESP].ie) {
2157 memcpy(frm, vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_RESP].ie,
2158 vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_RESP].length);
2159 frm += vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_RESP].length;
}
2162 skb_trim(skb, frm - skb->data);
2163 break;
2165 case IEEE80211_FC0_SUBTYPE_DISASSOC:
2166 IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
2167 "send station disassociate (reason %d)", arg);
2168 skb = ieee80211_getmgtframe(&frm, sizeof(u_int16_t));
2169 if (skb == NULL)
2170 senderr(ENOMEM, is_tx_nobuf);
2171 *(__le16 *)frm = htole16(arg); /* reason */
2173 IEEE80211_NODE_STAT(ni, tx_disassoc);
2174 IEEE80211_NODE_STAT_SET(ni, tx_disassoc_code, arg);
2175 break;
2177 default:
2178 IEEE80211_NOTE(vap, IEEE80211_MSG_ANY, ni,
2179 "invalid mgmt frame type %u", type);
2180 senderr(EINVAL, is_tx_unknownmgt);
2181 /* NOTREACHED */
}
2184 ieee80211_mgmt_output(ieee80211_ref_node(ni), skb, type);
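/*
 * For STA-mode auth and (re)assoc requests a response is expected; arm the
 * management transaction timer so the state machine can time out the
 * exchange if no response arrives.
 */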
2185 if (timer)
2186 mod_timer(&vap->iv_mgtsend, jiffies + timer * HZ);
2187 return 0;
2188 bad:
2189 return ret;
2190 #undef senderr
}

/*
2194 * Send a PS-POLL to the BSS. This should only be called when operating as a STA.
 */
2196 void
2197 ieee80211_send_pspoll(struct ieee80211_node *ni)
{
2199 struct ieee80211vap *vap = ni->ni_vap;
2200 struct ieee80211com *ic = ni->ni_ic;
2201 struct sk_buff *skb;
2202 struct ieee80211_ctlframe_addr2 *wh;
2204 skb = ieee80211_dev_alloc_skb(sizeof(struct ieee80211_ctlframe_addr2));
2205 if (skb == NULL) return;
2207 SKB_NI(skb) = ieee80211_ref_node(ni);
2208 skb->priority = WME_AC_VO;
2210 wh = (struct ieee80211_ctlframe_addr2 *)skb_put(skb, sizeof(struct ieee80211_ctlframe_addr2));
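/*
 * The Duration/ID field of a PS-Poll carries the association ID with its
 * two most significant bits set, hence the 0xc000 mask below.
 */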
2212 wh->i_aidordur = htole16(0xc000 | IEEE80211_NODE_AID(ni));
2213 IEEE80211_ADDR_COPY(wh->i_addr1, vap->iv_bssid);
2214 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
2215 wh->i_fc[0] = 0;
2216 wh->i_fc[1] = 0;
2217 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
2218 IEEE80211_FC0_SUBTYPE_PS_POLL;
2219 if (IEEE80211_VAP_IS_SLEEPING(ni->ni_vap))
2220 wh->i_fc[1] |= IEEE80211_FC1_PWR_MGT;
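/*
 * PS-Poll is a control frame, but it is handed to the driver through the
 * management start path (hence the "cheat" note below).
 */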
2222 (void) ic->ic_mgtstart(ic, skb); /* cheat */
}
2226 #ifdef ATH_SUPERG_XR
/*
2228 * Constructs and returns a contention-free frame,
2229 * currently used for the group poll in XR mode.
 */
2231 struct sk_buff *
2232 ieee80211_getcfframe(struct ieee80211vap *vap, int type)
{
2234 u_int8_t *frm;
2235 struct sk_buff *skb;
2236 struct ieee80211_frame *wh;
2237 struct ieee80211com *ic = vap->iv_ic;
2240 skb = ieee80211_getmgtframe(&frm, 0);
2241 if (skb == NULL)
2242 return NULL;
2243 wh = (struct ieee80211_frame *)
2244 skb_push(skb, sizeof(struct ieee80211_frame));
2245 if (type == IEEE80211_FC0_SUBTYPE_CFPOLL) {
2246 wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
2247 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA | type;
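/*
 * Frames sent during the contention-free period carry a fixed Duration/ID
 * value of 32768 (0x8000); the CF-End branch below uses zero instead.
 */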
2248 wh->i_dur = htole16(0x8000);
2249 } else if (type == IEEE80211_FC0_SUBTYPE_CF_END) {
2250 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
2251 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL | type;
2252 wh->i_dur = 0;
}
2254 IEEE80211_ADDR_COPY(wh->i_addr1, ic->ic_dev->broadcast);
2255 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
2256 IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_bssid);
2257 return skb;
}
2259 EXPORT_SYMBOL(ieee80211_getcfframe);
2260 #endif