/*
 * drivers/uwb/i1480/i1480u-wlp/tx.c
 *
 * Intel Wireless UWB Link 1480 (i1480u) WLP driver: TX path.
 */
#include <linux/slab.h>
#include "i1480u-wlp.h"
enum {
	/* This is only for Next and Last TX packets */
	i1480u_MAX_PL_SIZE = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_rst),
};
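/*
 * Worked example (hypothetical numbers; the real constants live in
 * i1480u-wlp.h): if i1480u_MAX_FRG_SIZE were 512 and
 * sizeof(struct untd_hdr_rst) were 4, then i1480u_MAX_PL_SIZE would
 * be 508 bytes of payload per Next/Last fragment, and a 2000-byte
 * remainder would need (2000 + 508 - 1) / 508 = 4 such fragments.
 */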
/* Free resources allocated to a i1480u tx context. */
static
void i1480u_tx_free(struct i1480u_tx *wtx)
{
	kfree(wtx->buf);
	if (wtx->skb)
		dev_kfree_skb_irq(wtx->skb);
	usb_free_urb(wtx->urb);
	kfree(wtx);
}
static
void i1480u_tx_destroy(struct i1480u *i1480u, struct i1480u_tx *wtx)
{
	unsigned long flags;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);	/* not active any more */
	list_del(&wtx->list_node);
	i1480u_tx_free(wtx);
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
}
static
void i1480u_tx_unlink_urbs(struct i1480u *i1480u)
{
	unsigned long flags;
	struct i1480u_tx *wtx, *next;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
		usb_unlink_urb(wtx->urb);
	}
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
}
/* Callback for a completed TX URB: update stats, destroy the TX
 * context and, if needed, restart a throttled TX queue. */
static
void i1480u_tx_cb(struct urb *urb)
{
	struct i1480u_tx *wtx = urb->context;
	struct i1480u *i1480u = wtx->i1480u;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;
	unsigned long flags;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&i1480u->lock, flags);
		net_dev->stats.tx_packets++;
		net_dev->stats.tx_bytes += urb->actual_length;
		spin_unlock_irqrestore(&i1480u->lock, flags);
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
		dev_dbg(dev, "notif endp: reset/noent %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "notif endp: down %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	default:
		dev_err(dev, "TX: unknown URB status %d\n", urb->status);
		if (edc_inc(&i1480u->tx_errors, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "TX: max acceptable errors exceeded. "
				"Reset device.\n");
			netif_stop_queue(net_dev);
			i1480u_tx_unlink_urbs(i1480u);
			wlp_reset_all(&i1480u->wlp);
		}
		break;
	}
	i1480u_tx_destroy(i1480u, wtx);
	if (atomic_dec_return(&i1480u->tx_inflight.count)
	    <= i1480u->tx_inflight.threshold
	    && netif_queue_stopped(net_dev)
	    && i1480u->tx_inflight.threshold != 0) {
		netif_start_queue(net_dev);
		atomic_inc(&i1480u->tx_inflight.restart_count);
	}
}
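/*
 * Flow-control sketch (values are illustrative, the real ones come
 * from i1480u->tx_inflight): i1480u_xmit_frame() increments
 * tx_inflight.count per submitted URB and stops the queue when it
 * reaches tx_inflight.max; the completion path above decrements it
 * and restarts the queue once the count drops to
 * tx_inflight.threshold. With, say, max = 64 and threshold = 32, the
 * queue wakes up only after half of the in-flight URBs complete,
 * which avoids thrashing netif_stop_queue()/netif_start_queue() on
 * every packet.
 */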
/*
 * Given a buffer that doesn't fit in a single fragment, create a
 * scatter/gather structure for delivery to the USB pipe.
 *
 * Implements functionality of i1480u_tx_create().
 *
 * @wtx:      tx descriptor
 * @skb:      skb to send
 * @gfp_mask: gfp allocation mask
 * @returns:  0 if ok, < 0 errno code on error.
 *
 * Sorry, TOO LONG a function, but breaking it up is kind of hard
 *
 * This will break the buffer in chunks smaller than
 * i1480u_MAX_FRG_SIZE (including the header) and add proper headers
 * to each:
 *
 *   1st header          \
 *   i1480 tx header     |  fragment 1
 *   fragment data       /
 *   nxt header          \  fragment 2
 *   fragment data       /
 *   ..
 *   ..
 *   last header         \  fragment N
 *   last fragment data  /
 *
 * This does not fill the i1480 TX header; it is left up to the
 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
 *
 * This function consumes the skb unless there is an error.
 */
static
int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb,
		       gfp_t gfp_mask)
{
	int result;
	void *pl;
	size_t pl_size;

	void *pl_itr, *buf_itr;
	size_t pl_size_left, frgs, pl_size_1st, frg_pl_size = 0;
	struct untd_hdr_1st *untd_hdr_1st;
	struct wlp_tx_hdr *wlp_tx_hdr;
	struct untd_hdr_rst *untd_hdr_rst;

	wtx->skb = NULL;
	pl = skb->data;
	pl_itr = pl;
	pl_size = skb->len;
	pl_size_left = pl_size;	/* payload size */
	/* First fragment; fits as much as i1480u_MAX_FRG_SIZE minus
	 * the headers */
	pl_size_1st = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_1st) - sizeof(struct wlp_tx_hdr);
	BUG_ON(pl_size_1st > pl_size);
	pl_size_left -= pl_size_1st;
	/* The rest have a smaller header (no i1480 TX header). We
	 * need to break up the payload in blocks smaller than
	 * i1480u_MAX_PL_SIZE (payload excluding header). */
	frgs = (pl_size_left + i1480u_MAX_PL_SIZE - 1) / i1480u_MAX_PL_SIZE;
	/* Allocate space for the new buffer. In this new buffer we'll
	 * place the headers followed by the data fragment, headers,
	 * data fragments, etc.. */
	result = -ENOMEM;
	wtx->buf_size = sizeof(*untd_hdr_1st)
		+ sizeof(*wlp_tx_hdr)
		+ frgs * sizeof(*untd_hdr_rst)
		+ pl_size;
	wtx->buf = kmalloc(wtx->buf_size, gfp_mask);
	if (wtx->buf == NULL)
		goto error_buf_alloc;
	buf_itr = wtx->buf;	/* We got the space, let's fill it up */
	/* Fill 1st fragment */
	untd_hdr_1st = buf_itr;
	buf_itr += sizeof(*untd_hdr_1st);
	untd_hdr_set_type(&untd_hdr_1st->hdr, i1480u_PKT_FRAG_1ST);
	untd_hdr_set_rx_tx(&untd_hdr_1st->hdr, 0);
	untd_hdr_1st->hdr.len = cpu_to_le16(pl_size + sizeof(*wlp_tx_hdr));
	untd_hdr_1st->fragment_len =
		cpu_to_le16(pl_size_1st + sizeof(*wlp_tx_hdr));
	memset(untd_hdr_1st->padding, 0, sizeof(untd_hdr_1st->padding));
	/* Set up i1480 header info */
	wlp_tx_hdr = wtx->wlp_tx_hdr = buf_itr;
	buf_itr += sizeof(*wlp_tx_hdr);
	/* Copy the first fragment */
	memcpy(buf_itr, pl_itr, pl_size_1st);
	pl_itr += pl_size_1st;
	buf_itr += pl_size_1st;

	/* Now do each remaining fragment */
	result = -EINVAL;
	while (pl_size_left > 0) {
		if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for header\n");
			goto error_bug;
		}
		untd_hdr_rst = buf_itr;
		buf_itr += sizeof(*untd_hdr_rst);
		if (pl_size_left > i1480u_MAX_PL_SIZE) {
			frg_pl_size = i1480u_MAX_PL_SIZE;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_NXT);
		} else {
			frg_pl_size = pl_size_left;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST);
		}
		untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0);
		untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size);
		untd_hdr_rst->padding = 0;
		if (buf_itr + frg_pl_size - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for payload\n");
			goto error_bug;
		}
		memcpy(buf_itr, pl_itr, frg_pl_size);
		buf_itr += frg_pl_size;
		pl_itr += frg_pl_size;
		pl_size_left -= frg_pl_size;
	}
	dev_kfree_skb_irq(skb);
	return 0;
error_bug:
	printk(KERN_ERR
	       "BUG: skb %u bytes\n"
	       "BUG: frg_pl_size %zu i1480u_MAX_FRG_SIZE %u\n"
	       "BUG: buf_itr %td buf_size %zu pl_size_left %zu\n",
	       skb->len,
	       frg_pl_size, i1480u_MAX_FRG_SIZE,
	       buf_itr - wtx->buf, wtx->buf_size, pl_size_left);
	kfree(wtx->buf);
error_buf_alloc:
	return result;
}
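/*
 * Buffer layout sketch for i1480u_tx_create_n(): with a payload of
 * pl_size bytes, pl_size_1st bytes travel in fragment 1 (after the
 * untd_hdr_1st and wlp_tx_hdr headers) and the remaining
 * pl_size_left bytes are split into
 * frgs = DIV_ROUND_UP(pl_size_left, i1480u_MAX_PL_SIZE) chunks, each
 * prefixed by one untd_hdr_rst. The single kmalloc() above is sized
 * for all headers plus the whole payload, so the while loop can only
 * overrun the buffer if this arithmetic is wrong; hence the "BUG"
 * bound checks before every write.
 */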
/*
 * Given a buffer that fits in a single fragment, fill out a @wtx
 * struct for transmitting it down the USB pipe.
 *
 * Uses the fact that we have space reserved in front of the skbuff
 * for hardware headers :]
 *
 * This does not fill the i1480 TX header; it is left up to the
 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
 *
 * @wtx:      tx descriptor
 * @skb:      skb to send
 * @gfp_mask: gfp allocation mask
 *
 * This function does not consume the @skb.
 */
static
int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb,
		       gfp_t gfp_mask)
{
	struct untd_hdr_cmp *untd_hdr_cmp;
	struct wlp_tx_hdr *wlp_tx_hdr;

	wtx->buf = NULL;
	wtx->skb = skb;
	BUG_ON(skb_headroom(skb) < sizeof(*wlp_tx_hdr));
	wlp_tx_hdr = (void *) __skb_push(skb, sizeof(*wlp_tx_hdr));
	wtx->wlp_tx_hdr = wlp_tx_hdr;
	BUG_ON(skb_headroom(skb) < sizeof(*untd_hdr_cmp));
	untd_hdr_cmp = (void *) __skb_push(skb, sizeof(*untd_hdr_cmp));

	untd_hdr_set_type(&untd_hdr_cmp->hdr, i1480u_PKT_FRAG_CMP);
	untd_hdr_set_rx_tx(&untd_hdr_cmp->hdr, 0);
	untd_hdr_cmp->hdr.len = cpu_to_le16(skb->len - sizeof(*untd_hdr_cmp));
	untd_hdr_cmp->padding = 0;
	return 0;
}
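/*
 * Headroom sketch (illustrative, not part of this driver): a caller
 * building its own skb must reserve room for both headers pushed
 * above, along these lines:
 *
 *	skb = dev_alloc_skb(payload_len + sizeof(struct untd_hdr_cmp)
 *			    + sizeof(struct wlp_tx_hdr));
 *	skb_reserve(skb, sizeof(struct untd_hdr_cmp)
 *			 + sizeof(struct wlp_tx_hdr));
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 *
 * (payload/payload_len are hypothetical.) Without that reserved
 * headroom the BUG_ON(skb_headroom(...)) checks above will trigger.
 */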
/*
 * Given a skb to transmit, massage it to become palatable for the TX pipe.
 *
 * This will break the buffer in chunks smaller than
 * i1480u_MAX_FRG_SIZE and add proper headers to each:
 *
 *   1st header          \
 *   i1480 tx header     |  fragment 1
 *   fragment data       /
 *   nxt header          \  fragment 2
 *   fragment data       /
 *   ..
 *   ..
 *   last header         \  fragment N
 *   last fragment data  /
 *
 * Each fragment will always be smaller than or equal to i1480u_MAX_FRG_SIZE.
 *
 * If the whole payload fits in a single fragment, the following is
 * composed instead:
 *
 *   complete header     \
 *   i1480 tx header     |  single fragment
 *   packet data         /
 *
 * We were going to use s/g support, but because the interface is
 * synchronous and at the end there is plenty of overhead to do it, it
 * didn't seem worth it for data that is going to be smaller than
 * one page.
 */
static
struct i1480u_tx *i1480u_tx_create(struct i1480u *i1480u,
				   struct sk_buff *skb, gfp_t gfp_mask)
{
	int result;
	struct usb_endpoint_descriptor *epd;
	int usb_pipe;
	unsigned long flags;

	struct i1480u_tx *wtx;
	const size_t pl_max_size =
		i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_cmp)
		- sizeof(struct wlp_tx_hdr);

	wtx = kmalloc(sizeof(*wtx), gfp_mask);
	if (wtx == NULL)
		goto error_wtx_alloc;
	wtx->urb = usb_alloc_urb(0, gfp_mask);
	if (wtx->urb == NULL)
		goto error_urb_alloc;
	epd = &i1480u->usb_iface->cur_altsetting->endpoint[2].desc;
	usb_pipe = usb_sndbulkpipe(i1480u->usb_dev, epd->bEndpointAddress);
	/* Fits in a single complete packet or need to split? */
	if (skb->len > pl_max_size) {
		result = i1480u_tx_create_n(wtx, skb, gfp_mask);
		if (result < 0)
			goto error_create;
		usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
				  wtx->buf, wtx->buf_size, i1480u_tx_cb, wtx);
	} else {
		result = i1480u_tx_create_1(wtx, skb, gfp_mask);
		if (result < 0)
			goto error_create;
		usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
				  skb->data, skb->len, i1480u_tx_cb, wtx);
	}
	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_add(&wtx->list_node, &i1480u->tx_list);
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
	return wtx;

error_create:
	usb_free_urb(wtx->urb);	/* URBs are freed with usb_free_urb(), not kfree() */
error_urb_alloc:
	kfree(wtx);
error_wtx_alloc:
	return NULL;
}
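/*
 * Decision sketch: pl_max_size is the largest payload that still
 * fits in one i1480u_MAX_FRG_SIZE fragment once untd_hdr_cmp and
 * wlp_tx_hdr are prepended. Anything larger goes through
 * i1480u_tx_create_n() and gets its own bounce buffer; anything
 * smaller is sent in place from the skb via i1480u_tx_create_1().
 */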
/*
 * Actual fragmentation and transmission of frame
 *
 * @wlp:  WLP substack data structure
 * @skb:  To be transmitted
 * @dst:  Device address of destination
 * @returns: 0 on success, <0 on failure
 *
 * This function can also be called directly (not just from
 * hard_start_xmit), so we also check here if the interface is up
 * before sending anything.
 */
int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb,
		      struct uwb_dev_addr *dst)
{
	int result = -ENXIO;
	struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
	struct device *dev = &i1480u->usb_iface->dev;
	struct net_device *net_dev = i1480u->net_dev;
	struct i1480u_tx *wtx;
	struct wlp_tx_hdr *wlp_tx_hdr;
	static unsigned char dev_bcast[2] = { 0xff, 0xff };

	BUG_ON(i1480u->wlp.rc == NULL);
	if ((net_dev->flags & IFF_UP) == 0)
		goto out;
	result = -EBUSY;
	if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) {
		netif_stop_queue(net_dev);
		goto error_max_inflight;
	}
	result = -ENOMEM;
	wtx = i1480u_tx_create(i1480u, skb, GFP_ATOMIC);
	if (unlikely(wtx == NULL)) {
		if (printk_ratelimit())
			dev_err(dev, "TX: no memory for WLP TX URB, "
				"dropping packet (in flight %d)\n",
				atomic_read(&i1480u->tx_inflight.count));
		netif_stop_queue(net_dev);
		goto error_wtx_alloc;
	}
	wtx->i1480u = i1480u;
	/* Fill out the i1480 header; @i1480u->def_tx_hdr read without
	 * locking. We do so because they are kind of orthogonal to
	 * each other (and thus not changed in an atomic batch).
	 * The ETH header is right after the WLP TX header. */
	wlp_tx_hdr = wtx->wlp_tx_hdr;
	*wlp_tx_hdr = i1480u->options.def_tx_hdr;
	wlp_tx_hdr->dstaddr = *dst;
	if (!memcmp(&wlp_tx_hdr->dstaddr, dev_bcast, sizeof(dev_bcast))
	    && (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)) {
		/* Broadcast message directed to DRP host. Send as best
		 * effort on PCA. */
		wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr,
						i1480u->options.pca_base_priority);
	}

	result = usb_submit_urb(wtx->urb, GFP_ATOMIC);	/* Go baby */
	if (result < 0) {
		dev_err(dev, "TX: cannot submit URB: %d\n", result);
		/* We leave the freeing of skb to calling function */
		wtx->skb = NULL;
		goto error_tx_urb_submit;
	}
	atomic_inc(&i1480u->tx_inflight.count);
	net_dev->trans_start = jiffies;
	return result;

error_tx_urb_submit:
	i1480u_tx_destroy(i1480u, wtx);
error_wtx_alloc:
error_max_inflight:
out:
	return result;
}
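/*
 * Usage sketch (illustrative): besides the hard_start_xmit path, the
 * WLP substack may call this directly, e.g. when it expands a
 * broadcast into per-member unicasts:
 *
 *	result = i1480u_xmit_frame(&i1480u->wlp, skb, &member_addr);
 *
 * where member_addr is a hypothetical per-member struct uwb_dev_addr.
 * On error the skb is not consumed, so the caller remains
 * responsible for freeing it; this is also why the IFF_UP check is
 * repeated here and not done only in i1480u_hard_start_xmit().
 */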
/*
 * Transmit an skb. Called when an skbuff has to be transmitted.
 *
 * The skb is first passed to the WLP substack to ensure this is a valid
 * frame. If valid, the device address of the destination will be filled in
 * and the WLP header prepended to the skb. If this step fails we fake
 * sending the frame; if we returned an error the network stack would just
 * keep trying.
 *
 * Broadcast frames inside a WSS need to be treated specially as multicast
 * is not supported. A broadcast frame is sent as unicast to each member of
 * the WSS - this is done by the WLP substack when it finds a broadcast
 * frame. So, we test if the WLP substack took over the skb and only
 * transmit it if it has not (been taken over).
 *
 * @net_dev->xmit_lock is held
 */
netdev_tx_t i1480u_hard_start_xmit(struct sk_buff *skb,
				   struct net_device *net_dev)
{
	int result;
	struct i1480u *i1480u = netdev_priv(net_dev);
	struct device *dev = &i1480u->usb_iface->dev;
	struct uwb_dev_addr dst;

	if ((net_dev->flags & IFF_UP) == 0)
		goto error;
	result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst);
	if (result < 0) {
		dev_err(dev, "WLP verification of TX frame failed (%d). "
			"Dropping packet.\n", result);
		goto error;
	} else if (result == 1) {
		/* trans_start time will be set when WLP actually transmits
		 * the frame */
		goto out;
	}
	result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst);
	if (result < 0) {
		dev_err(dev, "Frame TX failed (%d).\n", result);
		goto error;
	}
	return NETDEV_TX_OK;
error:
	dev_kfree_skb_any(skb);
	net_dev->stats.tx_dropped++;
out:
	return NETDEV_TX_OK;
}
/*
 * Called when a pkt transmission doesn't complete in a reasonable period.
 * Device reset may sleep - do it outside of interrupt context (delayed).
 */
void i1480u_tx_timeout(struct net_device *net_dev)
{
	struct i1480u *i1480u = netdev_priv(net_dev);

	wlp_reset_all(&i1480u->wlp);
}
void i1480u_tx_release(struct i1480u *i1480u)
{
	unsigned long flags;
	struct i1480u_tx *wtx, *next;
	int count = 0, empty;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
		count++;
		usb_unlink_urb(wtx->urb);
	}
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
	count = count * 10;	/* wait 200ms per unlinked urb (intervals of 20ms) */
	/*
	 * We don't like this solution too much (dirty as it is), but
	 * it is cheaper than putting a refcount on each i1480u_tx and
	 * waiting for all of them to go away...
	 *
	 * Called when no more packets can be added to tx_list, so we
	 * can wait for it to become empty.
	 */
	while (1) {
		spin_lock_irqsave(&i1480u->tx_list_lock, flags);
		empty = list_empty(&i1480u->tx_list);
		spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
		if (empty)
			break;
		count--;
		BUG_ON(count == 0);
		msleep(20);
	}
}
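/*
 * Sketch of the cleaner alternative alluded to above (illustrative
 * only, not wired into this driver): signal a completion from the
 * destroy path when tx_list drains and block on it here, e.g.:
 *
 *	wait_for_completion_timeout(&i1480u_tx_list_empty,
 *				    msecs_to_jiffies(count * 20));
 *
 * with i1480u_tx_list_empty a hypothetical struct completion
 * signalled by i1480u_tx_destroy(). That trades the 20ms polling
 * granularity for one extra field and a complete() call.
 */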