release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/uwb/i1480/i1480u-wlp/rx.c
/*
 * WUSB Wire Adapter: WLP interface
 * Driver for the Linux Network stack.
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * i1480u's RX handling is simple. i1480u will send the received
 * network packets broken up in fragments; 1 to N fragments make a
 * packet; we assemble them together and deliver the packet with netif_rx().
 *
 * Because each USB transfer is a *single* fragment (except when the
 * transfer contains a first fragment), each URB called back thus
 * contains one or two fragments. So we queue N URBs, each with its own
 * fragment buffer. When a URB is done, we process it (adding to the
 * current skb from the fragment buffer until complete). Once
 * processed, we requeue the URB. There is always a bunch of URBs
 * ready to take data, so the gap between transfers should be minimal.
 *
 * An URB's transfer buffer is the data field of a socket buffer. This
 * reduces copying, as data can be passed directly to the network layer. If a
 * complete packet or 1st fragment is received, the URB's transfer buffer is
 * taken away from it and used to send data to the network layer. In this
 * case a new transfer buffer is allocated to the URB before being requeued.
 * If a "NEXT" or "LAST" fragment is received, the fragment contents are
 * appended to the RX packet under construction and the transfer buffer
 * is reused. To be able to use this buffer to assemble complete packets
 * we set each buffer's size to that of the MAX ethernet packet that can
 * be received. There is thus room for improvement in memory usage.
 *
 * When the max tx fragment size increases, we should be able to read
 * data into the skbs directly with very simple code.
 *
 * ROADMAP:
 *
 *   ENTRY POINTS:
 *
 *     i1480u_rx_setup(): setup RX context [from i1480u_open()]
 *
 *     i1480u_rx_release(): release RX context [from i1480u_stop()]
 *
 *     i1480u_rx_cb(): called when the RX USB URB receives a
 *       packet. It removes the header and pushes it up
 *       the Linux netdev stack with netif_rx().
 *
 *       i1480u_rx_buffer()
 *         i1480u_drop() and i1480u_fix()
 *         i1480u_skb_deliver()
 */
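
/*
 * Informal sketch of the reassembly sequence, inferred from the fragment
 * handlers in i1480u_rx_buffer() below (not from a spec):
 *
 *   CMP                     complete packet in a single fragment
 *   1ST [NXT ...] LST       multi-fragment packet
 *
 * 1ST and CMP fragments steal the URB's skb for delivery up the stack;
 * NXT and LST fragments are copied into the packet under construction
 * and their transfer buffer is reused.
 */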

#include <linux/gfp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "i1480u-wlp.h"

/*
 * Setup the RX context
 *
 * Each URB is provided with a transfer_buffer that is the data field
 * of a new socket buffer.
 */
int i1480u_rx_setup(struct i1480u *i1480u)
{
	int result, cnt;
	struct device *dev = &i1480u->usb_iface->dev;
	struct net_device *net_dev = i1480u->net_dev;
	struct usb_endpoint_descriptor *epd;
	struct sk_buff *skb;

	/* Alloc RX stuff */
	i1480u->rx_skb = NULL;	/* not in process of receiving packet */
	result = -ENOMEM;
	epd = &i1480u->usb_iface->cur_altsetting->endpoint[1].desc;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		struct i1480u_rx_buf *rx_buf = &i1480u->rx_buf[cnt];
		rx_buf->i1480u = i1480u;
		skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
		if (!skb) {
			dev_err(dev,
				"RX: cannot allocate RX buffer %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		skb->dev = net_dev;
		skb->ip_summed = CHECKSUM_NONE;
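		/* Reserve 2 bytes of headroom so that, after the 14-byte
		 * Ethernet header, the IP header ends up 4-byte aligned
		 * (the usual NET_IP_ALIGN trick); the URB below transfers
		 * at most i1480u_MAX_RX_PKT_SIZE - 2 bytes to match. */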
		skb_reserve(skb, 2);
		rx_buf->data = skb;
		rx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (unlikely(rx_buf->urb == NULL)) {
			dev_err(dev, "RX: cannot allocate URB %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		usb_fill_bulk_urb(rx_buf->urb, i1480u->usb_dev,
			usb_rcvbulkpipe(i1480u->usb_dev, epd->bEndpointAddress),
			rx_buf->data->data, i1480u_MAX_RX_PKT_SIZE - 2,
			i1480u_rx_cb, rx_buf);
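		/* We are in process context here (called from
		 * i1480u_open()), so GFP_NOIO is acceptable; the completion
		 * handler resubmits with GFP_ATOMIC instead. */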
		result = usb_submit_urb(rx_buf->urb, GFP_NOIO);
		if (unlikely(result < 0)) {
			dev_err(dev, "RX: cannot submit URB %d: %d\n",
				cnt, result);
			goto error;
		}
	}
	return 0;

error:
	i1480u_rx_release(i1480u);
	return result;
}

/* Release resources associated to the rx context */
void i1480u_rx_release(struct i1480u *i1480u)
{
	int cnt;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		if (i1480u->rx_buf[cnt].data)
			dev_kfree_skb(i1480u->rx_buf[cnt].data);
		if (i1480u->rx_buf[cnt].urb) {
			usb_kill_urb(i1480u->rx_buf[cnt].urb);
			usb_free_urb(i1480u->rx_buf[cnt].urb);
		}
	}
	if (i1480u->rx_skb != NULL)
		dev_kfree_skb(i1480u->rx_skb);
}

static
void i1480u_rx_unlink_urbs(struct i1480u *i1480u)
{
	int cnt;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		if (i1480u->rx_buf[cnt].urb)
			usb_unlink_urb(i1480u->rx_buf[cnt].urb);
	}
}

/* Fix an out-of-sequence packet */
#define i1480u_fix(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	dev_kfree_skb_irq(i1480u->rx_skb);		\
	i1480u->rx_skb = NULL;				\
	i1480u->rx_untd_pkt_size = 0;			\
} while (0)
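
/* Note the difference: "fixing" frees the half-assembled packet so a
 * following 1ST/CMP fragment can start a fresh one, while "dropping"
 * (below) only bumps the rx_dropped counter for the offending buffer. */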

/* Drop an out-of-sequence packet */
#define i1480u_drop(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	i1480u->net_dev->stats.rx_dropped++;		\
} while (0)

/* Finalizes setting up the SKB and delivers it
 *
 * We first pass the incoming frame to the WLP substack for verification. It
 * may also be a WLP association frame, in which case WLP will take over the
 * processing. If WLP does not take it over, it will still verify it; if the
 * frame is invalid, the skb will be freed by WLP and we will not continue
 * parsing.
 */
static
void i1480u_skb_deliver(struct i1480u *i1480u)
{
	int should_parse;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;

	should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb,
					 &i1480u->rx_srcaddr);
	if (!should_parse)
		goto out;
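	/* From here on the skb is ours to deliver: per the comment above,
	 * WLP frees the skb itself when it consumes or rejects the frame. */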
	i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += i1480u->rx_untd_pkt_size;

	netif_rx(i1480u->rx_skb);	/* deliver */
out:
	i1480u->rx_skb = NULL;
	i1480u->rx_untd_pkt_size = 0;
}

/*
 * Process a buffer of data received from the USB RX endpoint
 *
 * First fragment arrives with next or last fragment. All other fragments
 * arrive alone.
 *
 * /me hates long functions.
 */
static
void i1480u_rx_buffer(struct i1480u_rx_buf *rx_buf)
{
	unsigned pkt_completed = 0;	/* !0 when we got all pkt fragments */
	size_t untd_hdr_size, untd_frg_size;
	size_t i1480u_hdr_size;
	struct wlp_rx_hdr *i1480u_hdr = NULL;

	struct i1480u *i1480u = rx_buf->i1480u;
	struct sk_buff *skb = rx_buf->data;
	int size_left = rx_buf->urb->actual_length;
	void *ptr = rx_buf->urb->transfer_buffer;	/* also rx_buf->data->data */
	struct untd_hdr *untd_hdr;

	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;
	struct sk_buff *new_skb;

	i1480u_hdr_size = sizeof(struct wlp_rx_hdr);
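
	/* The transfer buffer holds a sequence of [UNTD header | payload]
	 * records; walk it until size_left is exhausted or a packet
	 * completes. */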
	while (size_left > 0) {
		if (pkt_completed) {
			i1480u_drop(i1480u, "RX: fragment follows completed "
				    "packet in same buffer. Dropping\n");
			break;
		}
		untd_hdr = ptr;
		if (size_left < sizeof(*untd_hdr)) {	/* Check the UNTD header */
			i1480u_drop(i1480u, "RX: short UNTD header! Dropping\n");
			goto out;
		}
		if (unlikely(untd_hdr_rx_tx(untd_hdr) == 0)) {	/* Paranoia: TX set? */
			i1480u_drop(i1480u, "RX: TX bit set! Dropping\n");
			goto out;
		}
		switch (untd_hdr_type(untd_hdr)) {	/* Check the UNTD header type */
		case i1480u_PKT_FRAG_1ST: {
			struct untd_hdr_1st *untd_hdr_1st = (void *) untd_hdr;
			dev_dbg(dev, "1st fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_1st);
			if (i1480u->rx_skb != NULL)
				i1480u_fix(i1480u, "RX: 1st fragment out of "
					   "sequence! Fixing\n");
			if (size_left < untd_hdr_size + i1480u_hdr_size) {
				i1480u_drop(i1480u, "RX: short 1st fragment! "
					    "Dropping\n");
				goto out;
			}
			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len)
						   - i1480u_hdr_size;
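			/* untd_hdr->len of a 1ST fragment apparently counts
			 * the whole packet including the WLP RX header that
			 * is stripped below, hence the subtraction (the CMP
			 * case applies the same correction at the end). */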
			untd_frg_size = le16_to_cpu(untd_hdr_1st->fragment_len);
			if (size_left < untd_hdr_size + untd_frg_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			i1480u->rx_skb = skb;
			i1480u_hdr = (void *) untd_hdr_1st + untd_hdr_size;
			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
			skb_put(i1480u->rx_skb, untd_hdr_size + untd_frg_size);
			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
			rx_buf->data = NULL;	/* need to create new buffer */
			break;
		}
		case i1480u_PKT_FRAG_NXT: {
			dev_dbg(dev, "nxt fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_rst);
			if (i1480u->rx_skb == NULL) {
				i1480u_drop(i1480u, "RX: next fragment out of "
					    "sequence! Dropping\n");
				goto out;
			}
			if (size_left < untd_hdr_size) {
				i1480u_drop(i1480u, "RX: short NXT fragment! "
					    "Dropping\n");
				goto out;
			}
			untd_frg_size = le16_to_cpu(untd_hdr->len);
			if (size_left < untd_hdr_size + untd_frg_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
				ptr + untd_hdr_size, untd_frg_size);
			break;
		}
		case i1480u_PKT_FRAG_LST: {
			dev_dbg(dev, "Lst fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_rst);
			if (i1480u->rx_skb == NULL) {
				i1480u_drop(i1480u, "RX: last fragment out of "
					    "sequence! Dropping\n");
				goto out;
			}
			if (size_left < untd_hdr_size) {
				i1480u_drop(i1480u, "RX: short LST fragment! "
					    "Dropping\n");
				goto out;
			}
			untd_frg_size = le16_to_cpu(untd_hdr->len);
			if (size_left < untd_frg_size + untd_hdr_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
				ptr + untd_hdr_size, untd_frg_size);
			pkt_completed = 1;
			break;
		}
		case i1480u_PKT_FRAG_CMP: {
			dev_dbg(dev, "cmp fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_cmp);
			if (i1480u->rx_skb != NULL)
				i1480u_fix(i1480u, "RX: fix out-of-sequence CMP"
					   " fragment!\n");
			if (size_left < untd_hdr_size + i1480u_hdr_size) {
				i1480u_drop(i1480u, "RX: short CMP fragment! "
					    "Dropping\n");
				goto out;
			}
			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len);
			untd_frg_size = i1480u->rx_untd_pkt_size;
			if (size_left < i1480u->rx_untd_pkt_size + untd_hdr_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			i1480u->rx_skb = skb;
			i1480u_hdr = (void *) untd_hdr + untd_hdr_size;
			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
			skb_put(i1480u->rx_skb, untd_hdr_size + i1480u->rx_untd_pkt_size);
			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
			rx_buf->data = NULL;	/* to hand off skb to network stack */
			pkt_completed = 1;
			i1480u->rx_untd_pkt_size -= i1480u_hdr_size;	/* accurate stat */
			break;
		}
		default:
			i1480u_drop(i1480u, "RX: unknown packet type %u! "
				    "Dropping\n", untd_hdr_type(untd_hdr));
			goto out;
		}
		size_left -= untd_hdr_size + untd_frg_size;
		if (size_left > 0)
			ptr += untd_hdr_size + untd_frg_size;
	}
	if (pkt_completed)
		i1480u_skb_deliver(i1480u);
out:
	/* recreate needed RX buffers */
	if (rx_buf->data == NULL) {
		/* buffer is being used to receive packet, create new */
		new_skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
		if (!new_skb) {
			if (printk_ratelimit())
				dev_err(dev,
					"RX: cannot allocate RX buffer\n");
		} else {
			new_skb->dev = net_dev;
			new_skb->ip_summed = CHECKSUM_NONE;
			skb_reserve(new_skb, 2);
			rx_buf->data = new_skb;
		}
	}
	return;
}

/*
 * Called when an RX URB has finished receiving or has found some kind
 * of error condition.
 *
 * LIMITATIONS:
 *
 *  - We read USB-transfers; each transfer contains a SINGLE fragment
 *    (it can contain a complete packet, or a 1st, next, or last fragment
 *    of a packet).
 *    Looks like a transfer can contain more than one fragment (07/18/06)
 *
 *  - Each transfer buffer is the size of the maximum packet size (minus
 *    headroom), i1480u_MAX_RX_PKT_SIZE - 2
 *
 *  - We always read the full USB-transfer, no partials.
 *
 *  - Each transfer is read directly into a skb. This skb will be used to
 *    send data to the upper layers if it is the first fragment or a complete
 *    packet. In the other cases the data will be copied from the skb to
 *    another skb that is being prepared for the upper layers from a prev
 *    first fragment.
 *
 * It is simply too much of a pain. Gosh, there should be a unified
 * SG infrastructure for *everything* [so that I could declare a SG
 * buffer, pass it to USB for receiving, append some space to it if
 * I wish, receive more until I have the whole chunk, adapt
 * pointers on each fragment to remove hardware headers and then
 * attach that to an skbuff and netif_rx()].
 */
void i1480u_rx_cb(struct urb *urb)
{
	int result;
	int do_parse_buffer = 1;
	struct i1480u_rx_buf *rx_buf = urb->context;
	struct i1480u *i1480u = rx_buf->i1480u;
	struct device *dev = &i1480u->usb_iface->dev;
	unsigned long flags;
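	/* Recover this buffer's index in the rx_buf[] ring by pointer
	 * arithmetic; it is only used to label log messages below. */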
	u8 rx_buf_idx = rx_buf - i1480u->rx_buf;

	switch (urb->status) {
	case 0:
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_err(dev, "RX URB[%u]: going down %d\n",
			rx_buf_idx, urb->status);
		goto error;
	default:
		dev_err(dev, "RX URB[%u]: unknown status %d\n",
			rx_buf_idx, urb->status);
		if (edc_inc(&i1480u->rx_errors, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "RX: max acceptable errors exceeded,"
				" resetting device.\n");
			i1480u_rx_unlink_urbs(i1480u);
			wlp_reset_all(&i1480u->wlp);
			goto error;
		}
		do_parse_buffer = 0;
		break;
	}
	spin_lock_irqsave(&i1480u->lock, flags);
	/* chew the data fragments, extract network packets */
	if (do_parse_buffer) {
		i1480u_rx_buffer(rx_buf);
		if (rx_buf->data) {
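			/* i1480u_rx_buffer() attached a fresh skb (or kept
			 * the old one), so point the URB back at its data
			 * area before resubmitting. */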
			rx_buf->urb->transfer_buffer = rx_buf->data->data;
			result = usb_submit_urb(rx_buf->urb, GFP_ATOMIC);
			if (result < 0) {
				dev_err(dev, "RX URB[%u]: cannot submit %d\n",
					rx_buf_idx, result);
			}
		}
	}
	spin_unlock_irqrestore(&i1480u->lock, flags);
error:
	return;
}