mwifiex: correction in MSDU padding logic
drivers/net/wireless/mwifiex/11n_aggr.c
/*
 * Marvell Wireless LAN device driver: 802.11n Aggregation
 *
 * Copyright (C) 2011, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License"). You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
 * this warranty disclaimer.
 */
#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"
#include "11n_aggr.h"
/*
 * Creates an AMSDU subframe for aggregation into one AMSDU packet.
 *
 * The resultant AMSDU subframe format is -
 *
 * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+
 * |     DA     |     SA      |   Length   | SNAP header |    MSDU    |
 * | data[0..5] | data[6..11] |            |             | data[14..] |
 * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+
 * <--6-bytes--> <--6-bytes--> <--2-bytes--><--8-bytes--> <--n-bytes-->
 *
 * This function also computes the amount of padding required to make the
 * buffer length a multiple of 4 bytes.
 *
 * Data => |DA|SA|SNAP-TYPE|........    .|
 * MSDU => |DA|SA|Length|SNAP|......   ..|
 */
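/*
 * Worked example of the padding computation (values are illustrative,
 * not taken from the driver): the padding returned through *pad is
 * (4 - (tail & 0x3)) % 4, evaluated on the aggregate buffer's tail
 * address.  If the tail address has its low two bits equal to 3, the
 * result is (4 - 3) % 4 = 1, so one padding byte precedes the next
 * subframe; if the tail is already 4-byte aligned, the result is
 * (4 - 0) % 4 = 0 and no padding is added.
 */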
static int
mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
                           struct sk_buff *skb_src, int *pad)
{
        int dt_offset;
        struct rfc_1042_hdr snap = {
                0xaa,                   /* LLC DSAP */
                0xaa,                   /* LLC SSAP */
                0x03,                   /* LLC CTRL */
                {0x00, 0x00, 0x00},     /* SNAP OUI */
                0x0000                  /* SNAP type */
                        /*
                         * This field will be overwritten
                         * later with ethertype
                         */
        };
        struct tx_packet_hdr *tx_header;

        tx_header = (void *)skb_put(skb_aggr, sizeof(*tx_header));

        /* Copy DA and SA */
        dt_offset = 2 * ETH_ALEN;
        memcpy(&tx_header->eth803_hdr, skb_src->data, dt_offset);

        /* Copy SNAP header */
        snap.snap_type = *(u16 *) ((u8 *)skb_src->data + dt_offset);
        dt_offset += sizeof(u16);

        memcpy(&tx_header->rfc1042_hdr, &snap, sizeof(struct rfc_1042_hdr));

        skb_pull(skb_src, dt_offset);

        /* Update Length field */
        tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN);

        /* Add payload */
        memcpy(skb_put(skb_aggr, skb_src->len), skb_src->data, skb_src->len);

        /* Add padding for new MSDU to start from 4 byte boundary */
        *pad = (4 - ((unsigned long)skb_aggr->tail & 0x3)) % 4;

        return skb_aggr->len + *pad;
}
/*
 * Adds TxPD to AMSDU header.
 *
 * Each AMSDU packet will contain one TxPD at the beginning,
 * followed by multiple AMSDU subframes.
 */
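/*
 * Note on buffer layout (descriptive only): the caller,
 * mwifiex_11n_aggregate_pkt() below, reserves headroom plus
 * sizeof(struct txpd) before building the subframes, so the skb_push()
 * in this helper only reclaims already-reserved space and places the
 * descriptor immediately in front of the first AMSDU subframe.
 */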
static void
mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
                            struct sk_buff *skb)
{
        struct txpd *local_tx_pd;

        skb_push(skb, sizeof(*local_tx_pd));

        local_tx_pd = (struct txpd *) skb->data;
        memset(local_tx_pd, 0, sizeof(struct txpd));

        /* Original priority has been overwritten */
        local_tx_pd->priority = (u8) skb->priority;
        local_tx_pd->pkt_delay_2ms =
                mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
        local_tx_pd->bss_num = priv->bss_num;
        local_tx_pd->bss_type = priv->bss_type;
        /* Always zero as the data is followed by struct txpd */
        local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
        local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU);
        local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
                                                 sizeof(*local_tx_pd));

        if (local_tx_pd->tx_control == 0)
                /* TxCtrl set by user or default */
                local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);

        if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
            priv->adapter->pps_uapsd_mode) {
                if (true == mwifiex_check_last_packet_indication(priv)) {
                        priv->adapter->tx_lock_flag = true;
                        local_tx_pd->flags =
                                MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET;
                }
        }
}
/*
 * Create aggregated packet.
 *
 * This function creates an aggregated MSDU packet, by combining buffers
 * from the RA list. Each individual buffer is encapsulated as an AMSDU
 * subframe and all such subframes are concatenated together to form the
 * AMSDU packet.
 *
 * A TxPD is also added to the front of the resultant AMSDU packets for
 * transmission. The resultant packet format is -
 *
 * +---- ~ ----+------ ~ ------+------ ~ ------+-..-+------ ~ ------+
 * |    TxPD   |AMSDU sub-frame|AMSDU sub-frame| .. |AMSDU sub-frame|
 * |           |       1       |       2       | .. |       n       |
 * +---- ~ ----+------ ~ ------+------ ~ ------+ .. +------ ~ ------+
 */
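/*
 * Illustrative layout (sizes hypothetical) for an aggregate carrying
 * two MSDUs, as built by the function below:
 *
 *   | TxPD | DA/SA/Len + LLC/SNAP | MSDU 1 | pad | DA/SA/Len + LLC/SNAP | MSDU 2 |
 *
 * Each subframe header is filled in by mwifiex_11n_form_amsdu_pkt(),
 * "pad" aligns the following subframe to a 4-byte boundary, and the
 * padding after the final subframe is trimmed off before the buffer is
 * handed to host_to_card().
 */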
int
mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
                          struct mwifiex_ra_list_tbl *pra_list, int headroom,
                          int ptrindex, unsigned long ra_list_flags)
                          __releases(&priv->wmm.ra_list_spinlock)
{
        struct mwifiex_adapter *adapter = priv->adapter;
        struct sk_buff *skb_aggr, *skb_src;
        struct mwifiex_txinfo *tx_info_aggr, *tx_info_src;
        int pad = 0, ret;
        struct mwifiex_tx_param tx_param;
        struct txpd *ptx_pd = NULL;

        skb_src = skb_peek(&pra_list->skb_head);
        if (!skb_src) {
                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                       ra_list_flags);
                return 0;
        }

        tx_info_src = MWIFIEX_SKB_TXCB(skb_src);
        skb_aggr = dev_alloc_skb(adapter->tx_buf_size);
        if (!skb_aggr) {
                dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__);
                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                       ra_list_flags);
                return -1;
        }
        skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
        tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr);

        tx_info_aggr->bss_type = tx_info_src->bss_type;
        tx_info_aggr->bss_num = tx_info_src->bss_num;
        skb_aggr->priority = skb_src->priority;
        do {
                /* Check if AMSDU can accommodate this MSDU */
                if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN))
                        break;

                skb_src = skb_dequeue(&pra_list->skb_head);

                pra_list->total_pkts_size -= skb_src->len;

                atomic_dec(&priv->wmm.tx_pkts_queued);

                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                       ra_list_flags);
                mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad);

                mwifiex_write_data_complete(adapter, skb_src, 0);

                spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);

                if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
                        spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                               ra_list_flags);
                        return -1;
                }

                if (skb_tailroom(skb_aggr) < pad) {
                        pad = 0;
                        break;
                }
                skb_put(skb_aggr, pad);

                skb_src = skb_peek(&pra_list->skb_head);

        } while (skb_src);

        spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);

        /* Last AMSDU packet does not need padding */
        skb_trim(skb_aggr, skb_aggr->len - pad);

        /* Form AMSDU */
        mwifiex_11n_form_amsdu_txpd(priv, skb_aggr);

        if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
                ptx_pd = (struct txpd *)skb_aggr->data;

        skb_push(skb_aggr, headroom);
        if (adapter->iface_type == MWIFIEX_USB) {
                adapter->data_sent = true;
                ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
                                                   skb_aggr, NULL);
        } else {
                /*
                 * Padding per MSDU will affect the length of the next
                 * packet and hence the exact length of the next packet
                 * is uncertain here.
                 *
                 * Also, aggregation of the transmission buffer, while
                 * downloading the data to the card, won't gain much
                 * on the AMSDU packets as the AMSDU packets utilize
                 * the transmission buffer space to the maximum
                 * (adapter->tx_buf_size).
                 */
                tx_param.next_pkt_len = 0;

                ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
                                                   skb_aggr, &tx_param);
        }
        switch (ret) {
        case -EBUSY:
                spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
                if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
                        spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                               ra_list_flags);
                        mwifiex_write_data_complete(adapter, skb_aggr, -1);
                        return -1;
                }
                if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
                    adapter->pps_uapsd_mode && adapter->tx_lock_flag) {
                        priv->adapter->tx_lock_flag = false;
                        if (ptx_pd)
                                ptx_pd->flags = 0;
                }

                skb_queue_tail(&pra_list->skb_head, skb_aggr);

                pra_list->total_pkts_size += skb_aggr->len;

                atomic_inc(&priv->wmm.tx_pkts_queued);

                tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                       ra_list_flags);
                dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
                break;
        case -1:
                adapter->data_sent = false;
                dev_err(adapter->dev, "%s: host_to_card failed: %#x\n",
                        __func__, ret);
                adapter->dbg.num_tx_host_to_card_failure++;
                mwifiex_write_data_complete(adapter, skb_aggr, ret);
                return 0;
        case -EINPROGRESS:
                adapter->data_sent = false;
                break;
        case 0:
                mwifiex_write_data_complete(adapter, skb_aggr, ret);
                break;
        default:
                break;
        }
        if (ret != -EBUSY) {
                spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
                if (mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
                        priv->wmm.packets_out[ptrindex]++;
                        priv->wmm.tid_tbl_ptr[ptrindex].ra_list_curr = pra_list;
                }
                /* Now bss_prio_cur pointer points to next node */
                adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
                        list_first_entry(
                                &adapter->bss_prio_tbl[priv->bss_priority]
                                .bss_prio_cur->list,
                                struct mwifiex_bss_prio_node, list);
                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                       ra_list_flags);
        }

        return 0;
}