/******************************************************************************

  Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA 02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  James P. Ketrenos <ipw2100-admin@linux.intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

******************************************************************************

  A few modifications for Realtek's Wi-Fi drivers by
  Andrea Merello <andrea.merello@gmail.com>

  A special thanks goes to Realtek for their support!

******************************************************************************/

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
#include <linux/if_vlan.h>

#include "rtllib.h"

/*

802.11 Data Frame


802.11 frame_control for data frames - 2 bytes
     ,-----------------------------------------------------------------------------------------.
bits | 0  |  1  |  2  |  3  |  4  |  5  |  6  |  7  |  8  |  9  |  a  |  b  |  c  |  d  |  e   |
     |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
val  | 0  |  0  |  0  |  1  |  x  |  0  |  0  |  0  |  1  |  0  |  x  |  x  |  x  |  x  |  x   |
     |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
desc | ^-ver-^  |  ^type-^  |  ^-----subtype-----^  | to  |from |more |retry| pwr |more |wep   |
     |          |           | x=0 data,x=1 data+ack | DS  | DS  |frag |     | mgm |data |      |
     '-----------------------------------------------------------------------------------------'
                                                           /\
                                                           |
802.11 Data Frame                                          |
           ,--------- 'ctrl' expands to >-----------------'
           |
      ,--'---,-------------------------------------------------------------.
Bytes |  2   |  2   |    6    |    6    |    6    |  2   | 0..2312 |   4  |
      |------|------|---------|---------|---------|------|---------|------|
Desc. | ctrl | dura |  DA/RA  |   TA    |    SA   | Sequ |  Frame  |  fcs |
      |      | tion | (BSSID) |         |         | ence |  data   |      |
      `--------------------------------------------------|         |------'
Total: 28 non-data bytes                                  `----.----'
                                                               |
       .- 'Frame data' expands to <----------------------------'
       |
       V
      ,---------------------------------------------------.
Bytes |  1   |  1   |    1    |    3     |  2   |  0-2304 |
      |------|------|---------|----------|------|---------|
Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP      |
      | DSAP | SSAP |         |          |      | Packet  |
      | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8|      |         |
      `-----------------------------------------|         |
Total: 8 non-data bytes                         `----.----'
                                                     |
       .- 'IP Packet' expands, if WEP enabled, to <--'
       |
       V
      ,-----------------------.
Bytes |  4  |   0-2296  |  4  |
      |-----|-----------|-----|
Desc. | IV  | Encrypted | ICV |
      |     | IP Packet |     |
      `-----------------------'
Total: 8 non-data bytes


802.3 Ethernet Data Frame

      ,-----------------------------------------.
Bytes |   6   |   6   |  2   |  Variable |   4  |
      |-------|-------|------|-----------|------|
Desc. | Dest. | Source| Type | IP Packet |  fcs |
      |  MAC  |  MAC  |      |           |      |
      `-----------------------------------------'
Total: 18 non-data bytes

In the event that fragmentation is required, the incoming payload is split into
N parts of size ieee->fts.  The first fragment carries the SNAP header and the
remaining fragments carry only data.

If encryption is enabled, each fragment payload is reduced by enough space to
add the prefix and postfix (IV and ICV, totalling 8 bytes in the case of WEP).
So 1500 bytes of payload with ieee->fts set to 500 takes 3 frames without
encryption; with WEP it takes 4 frames, as the usable payload of each frame
drops to 492 bytes.
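
A worked example of the arithmetic above (illustrative figures, assuming plain
WEP with a 4-byte IV and a 4-byte ICV and ignoring the 802.11 header itself):

    payload           = 1500 bytes
    ieee->fts         = 500
    WEP overhead      = 4 (IV) + 4 (ICV) = 8 bytes per fragment
    usable per frame  = 500 - 8 = 492 bytes
    frames needed     = ceil(1500 / 492) = 4   (492 + 492 + 492 + 24)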

 * SKB visualization
 *
 * ,- skb->data
 * |
 * |    ETHERNET HEADER        ,-<-- PAYLOAD
 * |                           |     14 bytes from skb->data
 * |  2 bytes for Type --> ,T. |     (sizeof ethhdr)
 * |                       | | |
 * |,-Dest.--. ,--Src.---. | | |
 * |  6 bytes| | 6 bytes | | | |
 * v         | |         | | | |
 * 0         | v       1 | v | v           2
 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
 *     ^     | ^         | ^ |
 *     |     | |         | | |
 *     |     | |         | `T' <---- 2 bytes for Type
 *     |     | |         |
 *     |     | '---SNAP--' <-------- 6 bytes for SNAP
 *     |     |
 *     `-IV--' <-------------------- 4 bytes for IV (WEP)
 *
 *      SNAP HEADER
 *
 */

static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };

inline int rtllib_put_snap(u8 *data, u16 h_proto)
{
	struct rtllib_snap_hdr *snap;
	u8 *oui;

	snap = (struct rtllib_snap_hdr *)data;
	snap->dsap = 0xaa;
	snap->ssap = 0xaa;
	snap->ctrl = 0x03;

	/* IPX and AppleTalk AARP use the 802.1H bridge-tunnel OUI */
	if (h_proto == 0x8137 || h_proto == 0x80f3)
		oui = P802_1H_OUI;
	else
		oui = RFC1042_OUI;
	snap->oui[0] = oui[0];
	snap->oui[1] = oui[1];
	snap->oui[2] = oui[2];

	*(__be16 *)(data + SNAP_SIZE) = htons(h_proto);

	return SNAP_SIZE + sizeof(u16);
}

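/*
 * Illustrative note (not from the original source): for an ordinary IPv4
 * payload, h_proto is ETH_P_IP (0x0800), so the eight bytes written above
 * form the RFC 1042 encapsulation:
 *
 *	dsap=0xaa ssap=0xaa ctrl=0x03 oui=00:00:00 ethertype=0x0800
 */
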
int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag,
			    int hdr_len)
{
	struct lib80211_crypt_data *crypt = NULL;
	int res;

	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];

	if (!(crypt && crypt->ops)) {
		printk(KERN_INFO "=========>%s(), crypt is null\n", __func__);
		return -1;
	}
	/* To encrypt, frame format is:
	 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes)
	 */

	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
	 * call both MSDU and MPDU encryption functions from here.
	 */
	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops->encrypt_msdu)
		res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
	if (res == 0 && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);

	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
		       ieee->dev->name, frag->len);
		ieee->ieee_stats.tx_discards++;
		return -1;
	}

	return 0;
}

void rtllib_txb_free(struct rtllib_txb *txb)
{
	if (unlikely(!txb))
		return;
	kfree(txb);
}

static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size,
					   gfp_t gfp_mask)
{
	struct rtllib_txb *txb;
	int i;

	txb = kmalloc(sizeof(struct rtllib_txb) + (sizeof(u8 *) * nr_frags),
		      gfp_mask);
	if (!txb)
		return NULL;

	memset(txb, 0, sizeof(struct rtllib_txb));
	txb->nr_frags = nr_frags;
	txb->frag_size = cpu_to_le16(txb_size);

	for (i = 0; i < nr_frags; i++) {
		txb->fragments[i] = dev_alloc_skb(txb_size);
		if (unlikely(!txb->fragments[i])) {
			i--;
			break;
		}
		memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
	}
	if (unlikely(i != nr_frags)) {
		while (i >= 0)
			dev_kfree_skb_any(txb->fragments[i--]);
		kfree(txb);
		return NULL;
	}
	return txb;
}

static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu)
{
	struct ethhdr *eth;
	struct iphdr *ip;

	eth = (struct ethhdr *)skb->data;
	if (eth->h_proto != htons(ETH_P_IP))
		return 0;

	RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA, skb->data, skb->len);
	ip = ip_hdr(skb);
	switch (ip->tos & 0xfc) {
	case 0x20:
		return 2;
	case 0x40:
		return 1;
	case 0x60:
		return 3;
	case 0x80:
		return 4;
	case 0xa0:
		return 5;
	case 0xc0:
		return 6;
	case 0xe0:
		return 7;
	default:
		return 0;
	}
}

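/*
 * Illustrative note (not from the original source): the switch above maps the
 * IP precedence bits (TOS with the two ECN bits masked off) onto an 802.11e
 * user priority.  Only class-selector style values are matched, so a TOS of
 * 0xe0 (CS7) classifies as priority 7, while 0xb8 (DSCP EF) falls through to
 * the default and is sent as best effort (priority 0).
 */
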
static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
				    struct sk_buff *skb,
				    struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
	struct tx_ts_record *pTxTs = NULL;
	struct rtllib_hdr_1addr *hdr = (struct rtllib_hdr_1addr *)skb->data;

	if (rtllib_act_scanning(ieee, false))
		return;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;
	if (!IsQoSDataFrame(skb->data))
		return;
	if (is_multicast_ether_addr(hdr->addr1))
		return;

	if (tcb_desc->bdhcp || ieee->CntAfterLink < 2)
		return;

	if (pHTInfo->IOTAction & HT_IOT_ACT_TX_NO_AGGREGATION)
		return;

	if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
		return;
	if (pHTInfo->bCurrentAMPDUEnable) {
		if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1,
			   skb->priority, TX_DIR, true)) {
			printk(KERN_INFO "%s: can't get TS\n", __func__);
			return;
		}
		if (pTxTs->TxAdmittedBARecord.bValid == false) {
			if (ieee->wpa_ie_len && (ieee->pairwise_key_type ==
						 KEY_TYPE_NA)) {
				;
			} else if (tcb_desc->bdhcp == 1) {
				;
			} else if (!pTxTs->bDisable_AddBa) {
				TsStartAddBaProcess(ieee, pTxTs);
			}
			goto FORCED_AGG_SETTING;
		} else if (pTxTs->bUsingBa == false) {
			if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum,
				    (pTxTs->TxCurSeq+1)%4096))
				pTxTs->bUsingBa = true;
			else
				goto FORCED_AGG_SETTING;
		}
		if (ieee->iw_mode == IW_MODE_INFRA) {
			tcb_desc->bAMPDUEnable = true;
			tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
			tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
		}
	}
FORCED_AGG_SETTING:
	switch (pHTInfo->ForcedAMPDUMode) {
	case HT_AGG_AUTO:
		break;

	case HT_AGG_FORCE_ENABLE:
		tcb_desc->bAMPDUEnable = true;
		tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
		tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
		break;

	case HT_AGG_FORCE_DISABLE:
		tcb_desc->bAMPDUEnable = false;
		tcb_desc->ampdu_density = 0;
		tcb_desc->ampdu_factor = 0;
		break;
	}
}

static void rtllib_qurey_ShortPreambleMode(struct rtllib_device *ieee,
					   struct cb_desc *tcb_desc)
{
	tcb_desc->bUseShortPreamble = false;
	if (tcb_desc->data_rate == 2)
		/* 1 Mb/s CCK always uses the long preamble */
		return;
	else if (ieee->current_network.capability &
		 WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
}

static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
				      struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	tcb_desc->bUseShortGI = false;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;

	if (pHTInfo->bForcedShortGI) {
		tcb_desc->bUseShortGI = true;
		return;
	}

	if ((pHTInfo->bCurBW40MHz == true) && pHTInfo->bCurShortGI40MHz)
		tcb_desc->bUseShortGI = true;
	else if ((pHTInfo->bCurBW40MHz == false) && pHTInfo->bCurShortGI20MHz)
		tcb_desc->bUseShortGI = true;
}

static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
				       struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	tcb_desc->bPacketBW = false;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;

	if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
		return;

	/* only HT (MCS) rates, which have bit 7 set, may use a 40 MHz packet */
	if ((tcb_desc->data_rate & 0x80) == 0)
		return;
	if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz &&
	    !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
		tcb_desc->bPacketBW = true;
}

static void rtllib_query_protectionmode(struct rtllib_device *ieee,
					struct cb_desc *tcb_desc,
					struct sk_buff *skb)
{
	tcb_desc->bRTSSTBC = false;
	tcb_desc->bRTSUseShortGI = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->RTSSC = 0;
	tcb_desc->bRTSBW = false;

	if (tcb_desc->bBroadcast || tcb_desc->bMulticast)
		return;

	if (is_broadcast_ether_addr(skb->data+16))
		return;

	if (ieee->mode < IEEE_N_24G) {
		if (skb->len > ieee->rts) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		} else if (ieee->current_network.buseprotection) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		}
		return;
	} else {
		struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

		while (true) {
			if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
				tcb_desc->bCTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = true;
				break;
			} else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS |
				   HT_IOT_ACT_PURE_N_MODE)) {
				tcb_desc->bRTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				break;
			}
			if (ieee->current_network.buseprotection) {
				tcb_desc->bRTSEnable = true;
				tcb_desc->bCTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				break;
			}
			if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
				u8 HTOpMode = pHTInfo->CurrentOpMode;

				if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 ||
							      HTOpMode == 3)) ||
				    (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) {
					tcb_desc->rts_rate = MGN_24M;
					tcb_desc->bRTSEnable = true;
					break;
				}
			}
			if (skb->len > ieee->rts) {
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = true;
				break;
			}
			if (tcb_desc->bAMPDUEnable) {
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = false;
				break;
			}
			goto NO_PROTECTION;
		}
	}
	if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
	if (ieee->iw_mode == IW_MODE_MASTER)
		goto NO_PROTECTION;
	return;
NO_PROTECTION:
	tcb_desc->bRTSEnable = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->rts_rate = 0;
	tcb_desc->RTSSC = 0;
	tcb_desc->bRTSBW = false;
}

static void rtllib_txrate_selectmode(struct rtllib_device *ieee,
				     struct cb_desc *tcb_desc)
{
	if (ieee->bTxDisableRateFallBack)
		tcb_desc->bTxDisableRateFallBack = true;

	if (ieee->bTxUseDriverAssingedRate)
		tcb_desc->bTxUseDriverAssingedRate = true;
	if (!tcb_desc->bTxDisableRateFallBack ||
	    !tcb_desc->bTxUseDriverAssingedRate) {
		if (ieee->iw_mode == IW_MODE_INFRA ||
		    ieee->iw_mode == IW_MODE_ADHOC)
			tcb_desc->RATRIndex = 0;
	}
}

u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
			u8 *dst)
{
	u16 seqnum = 0;

	if (is_multicast_ether_addr(dst))
		return 0;
	if (IsQoSDataFrame(skb->data)) {
		struct tx_ts_record *pTS = NULL;

		if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst,
			   skb->priority, TX_DIR, true))
			return 0;
		seqnum = pTS->TxCurSeq;
		pTS->TxCurSeq = (pTS->TxCurSeq+1)%4096;
		return seqnum;
	}
	return 0;
}

static int wme_downgrade_ac(struct sk_buff *skb)
{
	switch (skb->priority) {
	case 6:
	case 7:
		skb->priority = 5; /* VO -> VI */
		return 0;
	case 4:
	case 5:
		skb->priority = 3; /* VI -> BE */
		return 0;
	case 0:
	case 3:
		skb->priority = 1; /* BE -> BK */
		return 0;
	default:
		return -1;
	}
}

int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
{
	struct rtllib_device *ieee = (struct rtllib_device *)
				     netdev_priv_rsl(dev);
	struct rtllib_txb *txb = NULL;
	struct rtllib_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	int qos_actived = ieee->current_network.qos_data.active;
	struct lib80211_crypt_data *crypt = NULL;
	struct cb_desc *tcb_desc;
	u8 bIsMulticast = false;
	u8 IsAmsdu = false;
	bool bdhcp = false;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it...
	 */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features &
	     IEEE_SOFTMAC_TX_QUEUE)) ||
	    ((!ieee->softmac_data_hard_start_xmit &&
	     (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}

	if (likely(ieee->raw_tx == 0)) {
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}
		/* Save source and destination addresses */
		memcpy(dest, skb->data, ETH_ALEN);
		memcpy(src, skb->data + ETH_ALEN, ETH_ALEN);

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		if (ieee->iw_mode == IW_MODE_MONITOR) {
			txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
			if (unlikely(!txb)) {
				printk(KERN_WARNING "%s: Could not allocate TXB\n",
				       ieee->dev->name);
				goto failed;
			}

			txb->encrypted = 0;
			txb->payload_size = cpu_to_le16(skb->len);
			memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
			       skb->len);

			goto success;
		}

		if (skb->len > 282) {
			if (ETH_P_IP == ether_type) {
				const struct iphdr *ip = (struct iphdr *)
					((u8 *)skb->data + 14);

				if (IPPROTO_UDP == ip->protocol) {
					struct udphdr *udp;

					udp = (struct udphdr *)((u8 *)ip +
					      (ip->ihl << 2));
					/* DHCP talks on UDP ports 68 (client)
					 * and 67 (server); only the low-order
					 * byte of each port is checked here.
					 */
					if (((((u8 *)udp)[1] == 68) &&
					     (((u8 *)udp)[3] == 67)) ||
					    ((((u8 *)udp)[1] == 67) &&
					     (((u8 *)udp)[3] == 68))) {
						bdhcp = true;
						ieee->LPSDelayCnt = 200;
					}
				}
			} else if (ETH_P_ARP == ether_type) {
				printk(KERN_INFO "=================>DHCP Protocol start tx ARP pkt!!\n");
				bdhcp = true;
				ieee->LPSDelayCnt =
					 ieee->current_network.tim.tim_count;
			}
		}

		skb->priority = rtllib_classify(skb, IsAmsdu);
		crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			  ieee->host_encrypt && crypt && crypt->ops;
		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE -
				sizeof(u16));
			RTLLIB_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
					 eap_get_type(eap->type));
		}

		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP;
		else
			fc = RTLLIB_FTYPE_DATA;

		if (qos_actived)
			fc |= RTLLIB_STYPE_QOS_DATA;
		else
			fc |= RTLLIB_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= RTLLIB_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA,
			 * Addr3 = DA
			 */
			memcpy(&header.addr1, ieee->current_network.bssid,
			       ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			if (IsAmsdu)
				memcpy(&header.addr3,
				       ieee->current_network.bssid, ETH_ALEN);
			else
				memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			 * Addr3 = BSSID
			 */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid,
			       ETH_ALEN);
		}

		bIsMulticast = is_multicast_ether_addr(header.addr1);

		header.frame_ctl = cpu_to_le16(fc);

		/* Determine fragmentation size based on destination (multicast
		 * and broadcast are not fragmented)
		 */
		if (bIsMulticast) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		} else {
			frag_size = ieee->fts;
			qos_ctl = 0;
		}

		if (qos_actived) {
			hdr_len = RTLLIB_3ADDR_LEN + 2;

			/* in case we are a client verify acm is not set for this ac */
			while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
				printk(KERN_INFO "skb->priority = %x\n", skb->priority);
				if (wme_downgrade_ac(skb))
					break;
				printk(KERN_INFO "converted skb->priority = %x\n",
				       skb->priority);
			}
			qos_ctl |= skb->priority;
			header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);
		} else {
			hdr_len = RTLLIB_3ADDR_LEN;
		}
		/* Determine amount of payload per fragment.  Regardless of if
		 * this stack is providing the full 802.11 header, one will
		 * eventually be affixed to this fragment -- so we must account
		 * for it when determining the amount of payload space.
		 */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		    (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
			bytes_per_frag -= RTLLIB_FCS_LEN;

		/* Each fragment may need to have room for encrypting
		 * pre/postfix
		 */
		if (encrypt) {
			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
				crypt->ops->extra_mpdu_postfix_len +
				crypt->ops->extra_msdu_prefix_len +
				crypt->ops->extra_msdu_postfix_len;
		}
		/* Number of fragments is the total bytes divided by the
		 * payload per fragment
		 */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;
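
		/*
		 * Worked example (illustrative, not from the original code):
		 * with ieee->fts = 500, a non-QoS 3-address header
		 * (hdr_len = 24), no FCS reservation and WEP in use (4-byte
		 * IV + 4-byte ICV, no MSDU overhead), bytes_per_frag is
		 * 500 - 24 - 8 = 468.  A 1500-byte MSDU gives bytes = 1508
		 * once the SNAP header is counted, so nr_frags = 4 and
		 * bytes_last_frag = 1508 - 3 * 468 = 104.
		 */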

		/* When we allocate the TXB we allocate enough space for the
		 * reserve and full fragment bytes (bytes_per_frag doesn't
		 * include prefix, postfix, header, FCS, etc.)
		 */
		txb = rtllib_alloc_txb(nr_frags, frag_size +
				       ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = cpu_to_le16(bytes);

		if (qos_actived)
			txb->queue_index = UP2AC(skb->priority);
		else
			txb->queue_index = WME_AC_BE;

		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (struct cb_desc *)(skb_frag->cb +
				    MAX_DEV_ADDR_SIZE);
			if (qos_actived) {
				skb_frag->priority = skb->priority;
				tcb_desc->queue_index = UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BE;
				tcb_desc->queue_index = WME_AC_BE;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt) {
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				skb_reserve(skb_frag,
					    crypt->ops->extra_mpdu_prefix_len +
					    crypt->ops->extra_msdu_prefix_len);
			} else {
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = (struct rtllib_hdr_3addrqos *)
				   skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* If this is not the last fragment, then add the
			 * MOREFRAGS bit to the frame control
			 */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | RTLLIB_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;
			} else {
				/* The last fragment has the remaining length */
				bytes = bytes_last_frag;
			}
			if ((qos_actived) && (!bIsMulticast)) {
				frag_hdr->seq_ctl =
					 cpu_to_le16(rtllib_query_seqnum(ieee,
							skb_frag,
							header.addr1));
				frag_hdr->seq_ctl =
					 cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctl)<<4 | i);
			} else {
				frag_hdr->seq_ctl =
					 cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}
			/* Put a SNAP header on the first fragment */
			if (i == 0) {
				rtllib_put_snap(skb_put(skb_frag,
						SNAP_SIZE + sizeof(u16)),
						ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the SKB... */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in
			 * order to insert the IV between the header and the
			 * payload
			 */
			if (encrypt)
				rtllib_encrypt_fragment(ieee, skb_frag,
							hdr_len);
			if (ieee->config &
			    (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		if ((qos_actived) && (!bIsMulticast)) {
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
	} else {
		if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
		if (!txb) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = cpu_to_le16(skb->len);
		memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
		       skb->len);
	}

success:
	if (txb) {
		struct cb_desc *tcb_desc = (struct cb_desc *)
				(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		tcb_desc->priority = skb->priority;

		if (ether_type == ETH_P_PAE) {
			if (ieee->pHTInfo->IOTAction &
			    HT_IOT_ACT_WA_IOT_Broadcom) {
				tcb_desc->data_rate =
					 MgntQuery_TxRateExcludeCCKRates(ieee);
				tcb_desc->bTxDisableRateFallBack = false;
			} else {
				tcb_desc->data_rate = ieee->basic_rate;
				tcb_desc->bTxDisableRateFallBack = 1;
			}

			tcb_desc->RATRIndex = 7;
			tcb_desc->bTxUseDriverAssingedRate = 1;
		} else {
			if (is_multicast_ether_addr(header.addr1))
				tcb_desc->bMulticast = 1;
			if (is_broadcast_ether_addr(header.addr1))
				tcb_desc->bBroadcast = 1;
			rtllib_txrate_selectmode(ieee, tcb_desc);
			if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
				tcb_desc->data_rate = ieee->basic_rate;
			else
				tcb_desc->data_rate = CURRENT_RATE(ieee->mode,
					 ieee->rate, ieee->HTCurrentOperaRate);

			if (bdhcp) {
				if (ieee->pHTInfo->IOTAction &
				    HT_IOT_ACT_WA_IOT_Broadcom) {
					tcb_desc->data_rate =
					   MgntQuery_TxRateExcludeCCKRates(ieee);
					tcb_desc->bTxDisableRateFallBack = false;
				} else {
					tcb_desc->data_rate = MGN_1M;
					tcb_desc->bTxDisableRateFallBack = 1;
				}

				tcb_desc->RATRIndex = 7;
				tcb_desc->bTxUseDriverAssingedRate = 1;
				tcb_desc->bdhcp = 1;
			}

			rtllib_qurey_ShortPreambleMode(ieee, tcb_desc);
			rtllib_tx_query_agg_cap(ieee, txb->fragments[0],
						tcb_desc);
			rtllib_query_HTCapShortGI(ieee, tcb_desc);
			rtllib_query_BandwidthMode(ieee, tcb_desc);
			rtllib_query_protectionmode(ieee, tcb_desc,
						    txb->fragments[0]);
		}
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += le16_to_cpu(txb->payload_size);
			rtllib_softmac_xmit(txb, ieee);
		} else {
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += le16_to_cpu(txb->payload_size);
				return 0;
			}
			rtllib_txb_free(txb);
		}
	}

	return 0;

failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;
}

int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
{
	memset(skb->cb, 0, sizeof(skb->cb));
	return rtllib_xmit_inter(skb, dev);
}
EXPORT_SYMBOL(rtllib_xmit);