/*
 * Data transfer and URB enqueuing
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * How transfers work: get a buffer, break it up in segments (segment
 * size is a multiple of the maxpacket size). For each segment issue a
 * segment request (struct wa_xfer_*), then send the data buffer if
 * out or nothing if in (all over the DTO endpoint).
 *
 * For each submitted segment request, a notification will come over
 * the NEP endpoint and a transfer result (struct xfer_result) will
 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
 * data coming (inbound transfer), schedule a read and handle it.
 *
 * Sounds simple, but it is a pain to implement.
 *
 * LIFE CYCLE / STATE DIAGRAM
 *
 * THIS CODE IS DISGUSTING
 *
 * You have been warned: it's my second try and I'm still not happy
 * with it.
 *
 * - Supports DMA xfers, control, bulk and maybe interrupt
 *
 * - Does not recycle unused rpipes
 *
 *   An rpipe is assigned to an endpoint the first time it is used,
 *   and it then stays assigned until the endpoint is disabled
 *   (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
 *   rpipe to the endpoint is done under the wa->rpipe_sem semaphore
 *   (should be a mutex).
 *
 *   Two ways this could be done:
 *
 *   (a) set up a timer every time an rpipe's use count drops to 1
 *       (which means it is unused) or when a transfer ends. Reset the
 *       timer when an xfer is queued. If the timer expires, release
 *       the rpipe [see rpipe_ep_disable()].
 *
 *   (b) when looking for free rpipes to attach [rpipe_get_by_ep()]
 *       and none are found, go over the list, check each rpipe's
 *       endpoint and its activity record (no last-xfer-done timestamp
 *       in the last x seconds) and take it.
 *
 *   However, because we have a limited set of resources
 *   (max segments-at-the-same-time per xfer, xfers-per-rpipe,
 *   blocks-per-rpipe, rpipes-per-host), in the end we are going to
 *   have to rebuild all this around a scheduler, where we keep a list
 *   of transactions to do and, based on the availability of the
 *   different required components (blocks, rpipes, segment slots,
 *   etc.), we schedule them. Painful.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
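/*
 * Illustrative sketch, not part of the original driver: how the "break
 * it up in segments" scheme from the header comment plays out. The
 * helper name is made up; the real computation lives further down in
 * __wa_xfer_setup_sizes().
 */
static inline unsigned example_nr_segments(size_t buf_len, size_t seg_size)
{
	/* every segment but (possibly) the last carries seg_size bytes */
	return (buf_len + seg_size - 1) / seg_size;
}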
static void wa_xfer_delayed_run(struct wa_rpipe *);
/*
 * Life cycle governed by 'struct urb' (the refcount of the struct is
 * that of the 'struct urb' and usb_free_urb() would free the whole
 * struct).
 */
	struct urb *dto_urb;		/* for data output? */
	struct list_head list_node;	/* for rpipe->req_list */
	struct wa_xfer *xfer;		/* out xfer */
	u8 index;			/* which segment we are */
	enum wa_seg_status status;
	ssize_t result;			/* bytes xfered or error */
	struct wa_xfer_hdr xfer_hdr;
	u8 xfer_extra[];		/* xtra space for xfer_hdr_ctl */
static void wa_seg_init(struct wa_seg *seg)
{
	/* usb_init_urb() repeats a lot of work, so we do it here */
	kref_init(&seg->urb.kref);
}
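/*
 * Illustrative sketch (hypothetical helpers, not in the original file):
 * as the life-cycle comment above implies, a segment has no refcount of
 * its own; callers pin and release it through the embedded URB, so the
 * final usb_put_urb() releases the whole wa_seg.
 */
static inline void example_seg_get(struct wa_seg *seg)
{
	usb_get_urb(&seg->urb);		/* pin the segment */
}

static inline void example_seg_put(struct wa_seg *seg)
{
	usb_put_urb(&seg->urb);		/* may free the containing wa_seg */
}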
/*
 * Protected by xfer->lock
 */
	struct list_head list_node;
	struct wahc *wa;		/* Wire adapter we are plugged to */
	struct usb_host_endpoint *ep;
	struct urb *urb;		/* URB we are transferring for */
	struct wa_seg **seg;		/* transfer segments */
	u8 segs, segs_submitted, segs_done;
	unsigned is_inbound:1;
	gfp_t gfp;			/* allocation mask */
	struct wusb_dev *wusb_dev;	/* for activity timestamps */
static inline void wa_xfer_init(struct wa_xfer *xfer)
{
	kref_init(&xfer->refcnt);
	INIT_LIST_HEAD(&xfer->list_node);
	spin_lock_init(&xfer->lock);
}
/*
 * Destroy a transfer structure
 *
 * Note that the xfer->seg[index] thingies follow the URB life cycle,
 * so we need to put them, not free them.
 */
static void wa_xfer_destroy(struct kref *_xfer)
{
	struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		if (xfer->is_inbound)
			usb_put_urb(xfer->seg[cnt]->dto_urb);
		usb_put_urb(&xfer->seg[cnt]->urb);
	}
static void wa_xfer_get(struct wa_xfer *xfer)
{
	kref_get(&xfer->refcnt);
}

static void wa_xfer_put(struct wa_xfer *xfer)
{
	kref_put(&xfer->refcnt, wa_xfer_destroy);
}
/*
 * xfer->lock has to be unlocked
 *
 * We take xfer->lock for setting the result; this is a barrier
 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 * reference to the transfer.
 */
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
	/* FIXME: segmentation broken -- kills DWA */
	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
/*
 * xfer->lock has to be unlocked
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
	wusb_dev_put(xfer->wusb_dev);
	rpipe_put(xfer->ep->hcpriv);
	wa_xfer_giveback(xfer);
/*
 * If transfer is done, wrap it up and return true
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
	struct device *dev = &xfer->wa->usb_iface->dev;
	unsigned result, cnt;
	struct urb *urb = xfer->urb;
	unsigned found_short = 0;

	result = xfer->segs_done == xfer->segs_submitted;
	urb->actual_length = 0;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
			if (found_short && seg->result > 0) {
				dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
					xfer, cnt, seg->result);
				urb->status = -EINVAL;
			urb->actual_length += seg->result;
			if (seg->result < xfer->seg_size
			    && cnt != xfer->segs-1)
			dev_dbg(dev, "xfer %p#%u: DONE short %d "
				"result %zu urb->actual_length %d\n",
				xfer, seg->index, found_short, seg->result,
			xfer->result = seg->result;
			dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
				xfer, seg->index, seg->result);
			dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
				xfer, seg->index, urb->status);
			xfer->result = urb->status;
			dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
				 xfer, cnt, seg->status);
			xfer->result = -EINVAL;
/*
 * Initialize a transfer's ID
 *
 * We need to use a sequential number; if we use the pointer or the
 * hash of the pointer, it can repeat over sequential transfers and
 * then it will confuse the HWA.... wonder why in hell they put a 32
 * bit handle in there then.
 */
static void wa_xfer_id_init(struct wa_xfer *xfer)
{
	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
}

/*
 * Return the xfer's ID associated with xfer
 */
static u32 wa_xfer_id(struct wa_xfer *xfer)
/*
 * Search for a transfer list ID on the HCD's URB list
 *
 * For 32 bit architectures, we use the pointer itself; for 64 bits, a
 * 32-bit hash of the pointer.
 *
 * @returns NULL if not found.
 */
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
{
	struct wa_xfer *xfer_itr;
	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
		if (id == xfer_itr->id) {
			wa_xfer_get(xfer_itr);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
struct wa_xfer_abort_buffer {
	struct urb urb;
	struct wa_xfer_abort cmd;
};

static void __wa_xfer_abort_cb(struct urb *urb)
{
	struct wa_xfer_abort_buffer *b = urb->context;
	usb_put_urb(&b->urb);
}
/*
 * Aborts an ongoing transaction
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see above) does nothing but freeing up the data by
 * putting the URB. Because the URB is allocated at the head of the
 * struct, the whole space we allocated is kfreed.
 *
 * We'll get an 'aborted transaction' xfer result on DTI, which we'll
 * politely ignore because at this point the transaction has already
 * been marked as aborted.
 */
static void __wa_xfer_abort(struct wa_xfer *xfer)
{
	struct device *dev = &xfer->wa->usb_iface->dev;
	struct wa_xfer_abort_buffer *b;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	b = kmalloc(sizeof(*b), GFP_ATOMIC);
	b->cmd.bLength = sizeof(b->cmd);
	b->cmd.bRequestType = WA_XFER_ABORT;
	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
	b->cmd.dwTransferID = wa_xfer_id(xfer);

	usb_init_urb(&b->urb);
	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
		usb_sndbulkpipe(xfer->wa->usb_dev,
				xfer->wa->dto_epd->bEndpointAddress),
		&b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
	result = usb_submit_urb(&b->urb, GFP_ATOMIC);
		return;				/* callback frees! */

	if (printk_ratelimit())
		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
/*
 * @returns < 0 on error, transfer segment request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
				     enum wa_xfer_type *pxfer_type)
{
	struct device *dev = &xfer->wa->usb_iface->dev;
	struct urb *urb = xfer->urb;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	switch (rpipe->descr.bmAttribute & 0x3) {
	case USB_ENDPOINT_XFER_CONTROL:
		*pxfer_type = WA_XFER_TYPE_CTL;
		result = sizeof(struct wa_xfer_ctl);
	case USB_ENDPOINT_XFER_INT:
	case USB_ENDPOINT_XFER_BULK:
		*pxfer_type = WA_XFER_TYPE_BI;
		result = sizeof(struct wa_xfer_bi);
	case USB_ENDPOINT_XFER_ISOC:
		dev_err(dev, "FIXME: ISOC not implemented\n");
		result = -EINVAL;		/* shut gcc up */
	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
	/* Compute the segment size and make sure it is a multiple of
	 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
	 * a problem. */
	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
	if (xfer->seg_size < maxpktsize) {
		dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
			"%zu\n", xfer->seg_size, maxpktsize);
	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
	xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
		/ xfer->seg_size;
	if (xfer->segs >= WA_SEGS_MAX) {
		dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n",
			(int)(urb->transfer_buffer_length / xfer->seg_size),
	if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
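/*
 * Worked example for the sizing above (illustrative helper; name and
 * numbers made up): an rpipe advertising wBlocks = 5 with 128-byte
 * blocks (bRPipeBlockSize = 8) yields a raw segment size of 640 bytes;
 * with a wMaxPacketSize of 512 that is rounded down to 512, so an
 * 8000-byte URB would be split into 16 segments.
 */
static inline size_t example_seg_size(u16 blocks, u8 block_size_enc,
				      u16 maxpktsize)
{
	size_t seg_size = blocks * (1 << (block_size_enc - 1));

	return (seg_size / maxpktsize) * maxpktsize;	/* e.g. 640 -> 512 */
}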
/* Fill in the common request header and xfer-type specific data. */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
				 struct wa_xfer_hdr *xfer_hdr0,
				 enum wa_xfer_type xfer_type,
				 size_t xfer_hdr_size)
{
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	xfer_hdr0->bLength = xfer_hdr_size;
	xfer_hdr0->bRequestType = xfer_type;
	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
	xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
	xfer_hdr0->bTransferSegment = 0;
	case WA_XFER_TYPE_CTL: {
		struct wa_xfer_ctl *xfer_ctl =
			container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
		xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
		BUG_ON(xfer->urb->transfer_flags & URB_NO_SETUP_DMA_MAP
		       && xfer->urb->setup_packet == NULL);
		memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
		       sizeof(xfer_ctl->baSetupData));
	case WA_XFER_TYPE_BI:
	case WA_XFER_TYPE_ISO:
		printk(KERN_ERR "FIXME: ISOC not implemented\n");
/*
 * Callback for the OUT data phase of the segment request
 *
 * Check wa_seg_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely together.
 *
 * If the seg request has failed but this DTO phase has succeeded,
 * wa_seg_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_dto_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wa_rpipe *rpipe;
	unsigned rpipe_ready = 0;

	switch (urb->status) {
		spin_lock_irqsave(&xfer->lock, flags);
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
			xfer, seg->index, urb->actual_length);
		if (seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		seg->result = urb->actual_length;
		spin_unlock_irqrestore(&xfer->lock, flags);
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data out error %d\n",
			xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
		if (seg->status != WA_SEG_ERROR) {
			seg->status = WA_SEG_ERROR;
			seg->result = urb->status;
			__wa_xfer_abort(xfer);
			rpipe_ready = rpipe_avail_inc(rpipe);
			done = __wa_xfer_is_done(xfer);
			spin_unlock_irqrestore(&xfer->lock, flags);
			wa_xfer_completion(xfer);
			wa_xfer_delayed_run(rpipe);
/*
 * Callback for the segment request
 *
 * If successful, transition state (unless already transitioned or
 * outbound transfer); otherwise, take note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback (geeeeeeze), so it might happen that we are already in
 * another state. As well, we don't set it if the transfer is
 * outbound, as in that case wa_seg_dto_cb will do it when the OUT
 * data phase completes.
 */
static void wa_seg_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wa_rpipe *rpipe;
	unsigned rpipe_ready;

	switch (urb->status) {
		spin_lock_irqsave(&xfer->lock, flags);
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		spin_unlock_irqrestore(&xfer->lock, flags);
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: request error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
		usb_unlink_urb(seg->dto_urb);
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		__wa_xfer_abort(xfer);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		wa_xfer_completion(xfer);
		wa_xfer_delayed_run(rpipe);
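/*
 * Illustrative sketch, not part of this file: the edc_inc() calls in
 * the error paths above implement a simple "too many errors within a
 * time window" policy; once the error-density counter trips, the whole
 * wire adapter is reset instead of retrying forever.
 */
static int example_too_many_errors(struct edc *err_hist)
{
	/* non-zero once EDC_MAX_ERRORS occur within EDC_ERROR_TIMEFRAME */
	return edc_inc(err_hist, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME);
}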
/*
 * Allocate the segs array and initialize each of them
 *
 * The segments are freed by wa_xfer_destroy() when the xfer use count
 * drops to zero; however, because each segment is given the same life
 * cycle as the USB URB it contains, it is actually freed by
 * usb_put_urb() on the contained USB URB (twisted, eh?).
 */
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
	size_t alloc_size = sizeof(*xfer->seg[0])
		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
	struct usb_device *usb_dev = xfer->wa->usb_dev;
	const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
	size_t buf_itr, buf_size, buf_itr_size;

	xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
	if (xfer->seg == NULL)
		goto error_segs_kzalloc;
	buf_size = xfer->urb->transfer_buffer_length;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
			goto error_seg_kzalloc;
		usb_fill_bulk_urb(&seg->urb, usb_dev,
			usb_sndbulkpipe(usb_dev,
					dto_epd->bEndpointAddress),
			&seg->xfer_hdr, xfer_hdr_size,
		buf_itr_size = buf_size > xfer->seg_size ?
			xfer->seg_size : buf_size;
		if (xfer->is_inbound == 0 && buf_size > 0) {
			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
			if (seg->dto_urb == NULL)
				goto error_dto_alloc;
				seg->dto_urb, usb_dev,
				usb_sndbulkpipe(usb_dev,
						dto_epd->bEndpointAddress),
				NULL, 0, wa_seg_dto_cb, seg);
				seg->dto_urb->transfer_dma =
					xfer->urb->transfer_dma + buf_itr;
				seg->dto_urb->transfer_flags |=
					URB_NO_TRANSFER_DMA_MAP;
				seg->dto_urb->transfer_buffer =
					xfer->urb->transfer_buffer + buf_itr;
				seg->dto_urb->transfer_buffer_length = buf_itr_size;
		seg->status = WA_SEG_READY;
		buf_itr += buf_itr_size;
		buf_size -= buf_itr_size;

		kfree(xfer->seg[cnt]);
	/* use the fact that cnt is left where it failed */
	for (; cnt > 0; cnt--) {
		if (xfer->is_inbound == 0)
			kfree(xfer->seg[cnt]->dto_urb);
		kfree(xfer->seg[cnt]);
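/*
 * Illustrative sketch (hypothetical helper): the allocation-size trick
 * used above. Each wa_seg embeds a fixed wa_xfer_hdr plus a flexible
 * xfer_extra[] tail, so allocating sizeof(*seg) minus the fixed header
 * plus the type-specific header size leaves exactly enough room for,
 * e.g., a struct wa_xfer_ctl overlaid on xfer_hdr/xfer_extra.
 */
static inline size_t example_seg_alloc_size(size_t xfer_hdr_size)
{
	return sizeof(struct wa_seg) - sizeof(struct wa_xfer_hdr)
		+ xfer_hdr_size;
}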
/*
 * Allocates all the stuff needed to submit a transfer
 *
 * Breaks the whole data buffer in a list of segments, each one has a
 * structure allocated to it and linked in xfer->seg[index]
 *
 * FIXME: merge setup_segs() and the last part of this function, no
 * need to do two for loops when we could run everything in a single
 * loop.
 */
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
{
	struct device *dev = &xfer->wa->usb_iface->dev;
	enum wa_xfer_type xfer_type = 0;	/* shut up GCC */
	size_t xfer_hdr_size, cnt, transfer_size;
	struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;

	result = __wa_xfer_setup_sizes(xfer, &xfer_type);
		goto error_setup_sizes;
	xfer_hdr_size = result;
	result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
		dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
			xfer, xfer->segs, result);
		goto error_setup_segs;
	/* Fill the first header */
	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	wa_xfer_id_init(xfer);
	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);

	/* Fill remaining headers */
	xfer_hdr = xfer_hdr0;
	transfer_size = urb->transfer_buffer_length;
	xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
		xfer->seg_size : transfer_size;
	transfer_size -= xfer->seg_size;
	for (cnt = 1; cnt < xfer->segs; cnt++) {
		xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
		memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
		xfer_hdr->bTransferSegment = cnt;
		xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
			cpu_to_le32(xfer->seg_size)
			: cpu_to_le32(transfer_size);
		xfer->seg[cnt]->status = WA_SEG_READY;
		transfer_size -= xfer->seg_size;
	xfer_hdr->bTransferSegment |= 0x80;	/* this is the last segment */
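/*
 * Worked example for the header-filling loop above (illustrative
 * helper; name and numbers made up): with an 8000-byte URB and a
 * 512-byte seg_size, segments 0..14 each carry 512 bytes and segment
 * 15 carries the remaining 320 bytes; the values go into
 * dwTransferLength via cpu_to_le32() and only the last header gets
 * bit 0x80 set in bTransferSegment.
 */
static inline size_t example_seg_len(size_t total, size_t seg_size,
				     unsigned cnt)
{
	size_t remaining = total - cnt * seg_size;

	return remaining > seg_size ? seg_size : remaining;
}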
/*
 * rpipe->seg_lock is held!
 */
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
			   struct wa_seg *seg)
{
	result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
		printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
		       xfer, seg->index, result);
		goto error_seg_submit;
	result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
		printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
		       xfer, seg->index, result);
		goto error_dto_submit;
	seg->status = WA_SEG_SUBMITTED;
	rpipe_avail_dec(rpipe);

	usb_unlink_urb(&seg->urb);
	seg->status = WA_SEG_ERROR;
	seg->result = result;
/*
 * Execute more queued request segments until the maximum concurrent allowed
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not vice versa.
 */
static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
	struct device *dev = &rpipe->wa->usb_iface->dev;
	struct wa_xfer *xfer;

	spin_lock_irqsave(&rpipe->seg_lock, flags);
	while (atomic_read(&rpipe->segs_available) > 0
	      && !list_empty(&rpipe->seg_list)) {
		seg = list_entry(rpipe->seg_list.next, struct wa_seg,
				 list_node);
		list_del(&seg->list_node);
		result = __wa_seg_submit(rpipe, xfer, seg);
		dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
			xfer, seg->index, atomic_read(&rpipe->segs_available), result);
		if (unlikely(result < 0)) {
			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
			spin_lock_irqsave(&xfer->lock, flags);
			__wa_xfer_abort(xfer);
			spin_unlock_irqrestore(&xfer->lock, flags);
			spin_lock_irqsave(&rpipe->seg_lock, flags);
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
/*
 * xfer->lock is taken
 *
 * On failure submitting we just stop submitting and return error;
 * wa_urb_enqueue_b() will execute the completion path
 */
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
	struct wahc *wa = xfer->wa;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
	size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);

	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_add_tail(&xfer->list_node, &wa->xfer_list);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);

	BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
	spin_lock_irqsave(&rpipe->seg_lock, flags);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		available = atomic_read(&rpipe->segs_available);
		empty = list_empty(&rpipe->seg_list);
		seg = xfer->seg[cnt];
		dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
			xfer, cnt, available, empty,
			available == 0 || !empty ? "delayed" : "submitted");
		if (available == 0 || !empty) {
			dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
			seg->status = WA_SEG_DELAYED;
			list_add_tail(&seg->list_node, &rpipe->seg_list);
			result = __wa_seg_submit(rpipe, xfer, seg);
				__wa_xfer_abort(xfer);
				goto error_seg_submit;
		xfer->segs_submitted++;
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
/*
 * Second part of a URB/transfer enqueuement
 *
 * Assumes this comes from wa_urb_enqueue() [maybe through
 * wa_urb_enqueue_run()]. At this point:
 *
 * xfer->wa	filled and refcounted
 * xfer->ep	filled with rpipe refcounted if
 * xfer->urb	filled and refcounted (this is the case when called
 *		from wa_urb_enqueue() as we come from usb_submit_urb()
 *		and when called by wa_urb_enqueue_run(), as we took an
 *		extra ref dropped by _run() after we return).
 *
 * If we fail at __wa_xfer_submit(), then we just check if we are done
 * and if so, we run the completion procedure. However, if we are not
 * yet done, we do nothing and wait for the completion handlers from
 * the submitted URBs or from the xfer-result path to kick in. If xfer
 * result never kicks in, the xfer will timeout from the USB code and
 * dequeue() will be called.
 */
static void wa_urb_enqueue_b(struct wa_xfer *xfer)
{
	struct urb *urb = xfer->urb;
	struct wahc *wa = xfer->wa;
	struct wusbhc *wusbhc = wa->wusb;
	struct wusb_dev *wusb_dev;

	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
		goto error_rpipe_get;
	/* FIXME: segmentation broken -- kills DWA */
	mutex_lock(&wusbhc->mutex);		/* get a WUSB dev */
	if (urb->dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
	if (wusb_dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
	mutex_unlock(&wusbhc->mutex);

	spin_lock_irqsave(&xfer->lock, flags);
	xfer->wusb_dev = wusb_dev;
	result = urb->status;
	if (urb->status != -EINPROGRESS)
	result = __wa_xfer_setup(xfer, urb);
		goto error_xfer_setup;
	result = __wa_xfer_submit(xfer);
		goto error_xfer_submit;
	spin_unlock_irqrestore(&xfer->lock, flags);

	/* this is basically wa_xfer_completion() broken up: wa_xfer_giveback()
	 * does a wa_xfer_put() that will call wa_xfer_destroy() and clean
	spin_unlock_irqrestore(&xfer->lock, flags);
	/* FIXME: segmentation broken, kills DWA */
	wusb_dev_put(wusb_dev);
	rpipe_put(xfer->ep->hcpriv);
	xfer->result = result;
	wa_xfer_giveback(xfer);

	done = __wa_xfer_is_done(xfer);
	xfer->result = result;
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_completion(xfer);
/*
 * Execute the delayed transfers in the Wire Adapter @wa
 *
 * We need to be careful here, as dequeue() could be called in the
 * middle. That's why we do the whole thing under the
 * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock
 * and then checks the list -- so as we would be acquiring in inverse
 * order, we just drop the lock once we have the xfer and reacquire it
 * later.
 */
void wa_urb_enqueue_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_work);
	struct wa_xfer *xfer, *next;

	spin_lock_irq(&wa->xfer_list_lock);
	list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
				 list_node) {
		list_del_init(&xfer->list_node);
		spin_unlock_irq(&wa->xfer_list_lock);
		wa_urb_enqueue_b(xfer);
		usb_put_urb(urb);	/* taken when queuing */
		spin_lock_irq(&wa->xfer_list_lock);
	spin_unlock_irq(&wa->xfer_list_lock);
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
/*
 * Submit a transfer to the Wire Adapter in a delayed way
 *
 * The process of enqueuing involves possible sleeps() [see
 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
 * in an atomic section, we defer the enqueue_b() call; otherwise we
 * call it directly.
 *
 * @urb: We own a reference to it done by the HCI Linux USB stack that
 *       will be given up by calling usb_hcd_giveback_urb() or by
 *       returning error from this function -> ergo we don't have to
 */
int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
		   struct urb *urb, gfp_t gfp)
{
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer *xfer;
	unsigned long my_flags;
	unsigned cant_sleep = irqs_disabled() | in_atomic();

	if (urb->transfer_buffer == NULL
	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
	    && urb->transfer_buffer_length != 0) {
		dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
	xfer = kzalloc(sizeof(*xfer), gfp);
	if (urb->status != -EINPROGRESS)	/* cancelled */
		goto error_dequeued;		/* before starting? */
	xfer->wa = wa_get(wa);
	dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
		xfer, urb, urb->pipe, urb->transfer_buffer_length,
		urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
		urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
		cant_sleep ? "deferred" : "inline");
		spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
		list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
		spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
		queue_work(wusbd, &wa->xfer_work);
		wa_urb_enqueue_b(xfer);
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue);
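/*
 * Illustrative caller sketch (hypothetical helper, not in this file):
 * a host-controller driver's ->urb_enqueue path simply forwards the
 * URB, its host endpoint and the allocation mask to wa_urb_enqueue().
 */
static int example_hc_urb_enqueue(struct wahc *wa, struct urb *urb, gfp_t gfp)
{
	return wa_urb_enqueue(wa, urb->ep, urb, gfp);
}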
/*
 * Dequeue a URB and make sure uwb_hcd_giveback_urb() [completion
 * handler] is called.
 *
 * Until a transfer goes successfully through wa_urb_enqueue() it
 * needs to be dequeued with completion calling; when stuck in delayed
 * or before wa_xfer_setup() is called, we need to do completion.
 *
 *  not setup  If there is no hcpriv yet, that means that the enqueue
 *             still had no time to set the xfer up. Because
 *             urb->status should be other than -EINPROGRESS,
 *             enqueue() will catch that and bail out.
 *
 * If the transfer has gone through setup, we just need to clean it
 * up. If it has gone through submit(), we have to abort it [with an
 * asynch request] and then make sure we cancel each segment.
 */
int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
{
	unsigned long flags, flags2;
	struct wa_xfer *xfer;
	struct wa_rpipe *rpipe;
	unsigned rpipe_ready = 0;

	/* Nothing has been set up yet: enqueue() will see urb->status !=
	 * -EINPROGRESS (set by the hcd layer) and bail out with an
	 * error; no need to do completion
	 */
	BUG_ON(urb->status == -EINPROGRESS);

	spin_lock_irqsave(&xfer->lock, flags);
	rpipe = xfer->ep->hcpriv;
	/* Check the delayed list -> if there, release and complete */
	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
		goto dequeue_delayed;
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	if (xfer->seg == NULL)		/* still hasn't reached */
		goto out_unlock;	/* setup(), enqueue_b() completes */
	/* Ok, the xfer is in flight already, it's been setup and submitted.*/
	__wa_xfer_abort(xfer);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_NOTREADY:
			printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
			       xfer, cnt, seg->status);
		case WA_SEG_DELAYED:
			seg->status = WA_SEG_ABORTED;
			spin_lock_irqsave(&rpipe->seg_lock, flags2);
			list_del(&seg->list_node);
			rpipe_ready = rpipe_avail_inc(rpipe);
			spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
		case WA_SEG_SUBMITTED:
			seg->status = WA_SEG_ABORTED;
			usb_unlink_urb(&seg->urb);
			if (xfer->is_inbound == 0)
				usb_unlink_urb(seg->dto_urb);
			rpipe_ready = rpipe_avail_inc(rpipe);
		case WA_SEG_PENDING:
			seg->status = WA_SEG_ABORTED;
			rpipe_ready = rpipe_avail_inc(rpipe);
		case WA_SEG_DTI_PENDING:
			usb_unlink_urb(wa->dti_urb);
			seg->status = WA_SEG_ABORTED;
			rpipe_ready = rpipe_avail_inc(rpipe);
		case WA_SEG_ABORTED:
	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */
	__wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_completion(xfer);
	wa_xfer_delayed_run(rpipe);

out_unlock:
	spin_unlock_irqrestore(&xfer->lock, flags);

dequeue_delayed:
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	xfer->result = urb->status;
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_giveback(xfer);
	usb_put_urb(urb);	/* we got a ref in enqueue() */
}
EXPORT_SYMBOL_GPL(wa_urb_dequeue);
/*
 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
 *
 * Positive errno values are internal inconsistencies and should be
 * flagged louder. Negative are to be passed up to the user in the
 * normal way.
 *
 * @status: USB WA status code -- high two bits are stripped.
 */
static int wa_xfer_status_to_errno(u8 status)
{
	u8 real_status = status;
	static int xlat[] = {
		[WA_XFER_STATUS_SUCCESS] =		0,
		[WA_XFER_STATUS_HALTED] =		-EPIPE,
		[WA_XFER_STATUS_DATA_BUFFER_ERROR] =	-ENOBUFS,
		[WA_XFER_STATUS_BABBLE] =		-EOVERFLOW,
		[WA_XFER_RESERVED] =			EINVAL,
		[WA_XFER_STATUS_NOT_FOUND] =		0,
		[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
		[WA_XFER_STATUS_TRANSACTION_ERROR] =	-EILSEQ,
		[WA_XFER_STATUS_ABORTED] =		-EINTR,
		[WA_XFER_STATUS_RPIPE_NOT_READY] =	EINVAL,
		[WA_XFER_INVALID_FORMAT] =		EINVAL,
		[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =	EINVAL,
		[WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =	EINVAL,
	};

	if (status >= ARRAY_SIZE(xlat)) {
		if (printk_ratelimit())
			printk(KERN_ERR "%s(): BUG? "
			       "Unknown WA transfer status 0x%02x\n",
			       __func__, real_status);
	errno = xlat[status];
	if (unlikely(errno > 0)) {
		if (printk_ratelimit())
			printk(KERN_ERR "%s(): BUG? "
			       "Inconsistent WA status: 0x%02x\n",
			       __func__, real_status);
/*
 * Process a xfer result completion message
 *
 * inbound transfers: need to schedule a DTI read
 *
 * FIXME: this function needs to be broken up into parts
 */
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
{
	struct device *dev = &wa->usb_iface->dev;
	unsigned long flags;
	struct wa_rpipe *rpipe;
	struct wa_xfer_result *xfer_result = wa->xfer_result;
	unsigned rpipe_ready = 0;

	spin_lock_irqsave(&xfer->lock, flags);
	seg_idx = xfer_result->bTransferSegment & 0x7f;
	if (unlikely(seg_idx >= xfer->segs))
	seg = xfer->seg[seg_idx];
	rpipe = xfer->ep->hcpriv;
	usb_status = xfer_result->bTransferStatus;
	dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
		xfer, seg_idx, usb_status, seg->status);
	if (seg->status == WA_SEG_ABORTED
	    || seg->status == WA_SEG_ERROR)	/* already handled */
		goto segment_aborted;
	if (seg->status == WA_SEG_SUBMITTED)	/* ops, got here */
		seg->status = WA_SEG_PENDING;	/* before wa_seg{_dto}_cb() */
	if (seg->status != WA_SEG_PENDING) {
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
				xfer, seg_idx, seg->status);
		seg->status = WA_SEG_PENDING;	/* workaround/"fix" it */
	if (usb_status & 0x80) {
		seg->result = wa_xfer_status_to_errno(usb_status);
		dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
			xfer, seg->index, usb_status);
		goto error_complete;
	/* FIXME: we ignore warnings, tally them for stats */
	if (usb_status & 0x40)		/* Warning?... */
		usb_status = 0;		/* ... pass */
	if (xfer->is_inbound) {		/* IN data phase: read to buffer */
		seg->status = WA_SEG_DTI_PENDING;
		BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
			wa->buf_in_urb->transfer_dma =
				xfer->urb->transfer_dma
				+ seg_idx * xfer->seg_size;
			wa->buf_in_urb->transfer_flags
				|= URB_NO_TRANSFER_DMA_MAP;
			wa->buf_in_urb->transfer_buffer =
				xfer->urb->transfer_buffer
				+ seg_idx * xfer->seg_size;
			wa->buf_in_urb->transfer_flags
				&= ~URB_NO_TRANSFER_DMA_MAP;
		wa->buf_in_urb->transfer_buffer_length =
			le32_to_cpu(xfer_result->dwTransferLength);
		wa->buf_in_urb->context = seg;
		result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
			goto error_submit_buf_in;
		/* OUT data phase, complete it -- */
		seg->status = WA_SEG_DONE;
		seg->result = le32_to_cpu(xfer_result->dwTransferLength);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
		wa_xfer_completion(xfer);
		wa_xfer_delayed_run(rpipe);

error_submit_buf_in:
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
			xfer, seg_idx, result);
	seg->result = result;
error_complete:
	seg->status = WA_SEG_ERROR;
	rpipe_ready = rpipe_avail_inc(rpipe);
	__wa_xfer_abort(xfer);
	done = __wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_completion(xfer);
	wa_xfer_delayed_run(rpipe);

	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_urb_dequeue(wa, xfer->urb);
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");

segment_aborted:
	/* nothing to do, as the aborter did the completion */
	spin_unlock_irqrestore(&xfer->lock, flags);
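/*
 * Illustrative sketch (hypothetical helper) of the DMA/non-DMA split
 * used above when aiming the shared buf-in URB at the right chunk of
 * the caller's buffer: DMA-mapped URBs get an offset transfer_dma plus
 * the URB_NO_TRANSFER_DMA_MAP flag, PIO ones get an offset
 * transfer_buffer instead.
 */
static void example_point_buf_in(struct urb *buf_in_urb, struct urb *urb,
				 unsigned seg_idx, size_t seg_size, int is_dma)
{
	if (is_dma) {
		buf_in_urb->transfer_dma = urb->transfer_dma
			+ seg_idx * seg_size;
		buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	} else {
		buf_in_urb->transfer_buffer = urb->transfer_buffer
			+ seg_idx * seg_size;
		buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
	}
}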
/*
 * Callback for the IN data phase
 *
 * If successful, transition state; otherwise, take note of the error,
 * mark this segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 */
static void wa_buf_in_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wa_rpipe *rpipe;
	unsigned rpipe_ready;
	unsigned long flags;

	switch (urb->status) {
		spin_lock_irqsave(&xfer->lock, flags);
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
			xfer, seg->index, (size_t)urb->actual_length);
		seg->status = WA_SEG_DONE;
		seg->result = urb->actual_length;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		wa_xfer_completion(xfer);
		wa_xfer_delayed_run(rpipe);
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: data in error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		rpipe_ready = rpipe_avail_inc(rpipe);
		__wa_xfer_abort(xfer);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		wa_xfer_completion(xfer);
		wa_xfer_delayed_run(rpipe);
/*
 * Handle an incoming transfer result buffer
 *
 * Given a transfer result buffer, it completes the transfer (possibly
 * scheduling a buffer-in read) and then resubmits the DTI URB for a
 * new transfer result read.
 *
 * The xfer_result DTI URB state machine
 *
 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
 *
 * We start in OFF mode, the first xfer_result notification [through
 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
 *
 * We receive a buffer -- if it is not a xfer_result, we complain and
 * repost the DTI-URB. If it is a xfer_result then do the xfer seg
 * request accounting. If it is an IN segment, we move to RBI and post
 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
 * repost the DTI-URB and move to RXR state. If there was no IN
 * segment, it will repost the DTI-URB.
 *
 * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
 * errors) in the URBs.
 */
static void wa_xfer_result_cb(struct urb *urb)
{
	struct wahc *wa = urb->context;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer_result *xfer_result;
	struct wa_xfer *xfer;

	BUG_ON(wa->dti_urb != urb);
	switch (wa->dti_urb->status) {
		/* We have a xfer result buffer; check it */
		dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
			urb->actual_length, urb->transfer_buffer);
		if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--bad size "
				"xfer result (%d bytes vs %zu needed)\n",
				urb->actual_length, sizeof(*xfer_result));
		xfer_result = wa->xfer_result;
		if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header length %u\n",
				xfer_result->hdr.bLength);
		if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header type 0x%02x\n",
				xfer_result->hdr.bNotifyType);
		usb_status = xfer_result->bTransferStatus & 0x3f;
		if (usb_status == WA_XFER_STATUS_ABORTED
		    || usb_status == WA_XFER_STATUS_NOT_FOUND)
			/* taken care of already */
		xfer_id = xfer_result->dwTransferID;
		xfer = wa_xfer_get_by_id(wa, xfer_id);
			/* FIXME: transaction might have been cancelled */
			dev_err(dev, "DTI Error: xfer result--"
				"unknown xfer 0x%08x (status 0x%02x)\n",
				xfer_id, usb_status);
		wa_xfer_result_chew(wa, xfer);
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTI: URB max acceptable errors "
				"exceeded, resetting device\n");
		if (printk_ratelimit())
			dev_err(dev, "DTI: URB error %d\n", urb->status);
	/* Resubmit the DTI URB */
	result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
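/*
 * Illustrative sketch, not in the driver (which keeps this state
 * machine implicit in which URB is currently posted): the DTI states
 * described in the comment above wa_xfer_result_cb().
 */
enum example_dti_state {
	EXAMPLE_DTI_OFF,	/* no DTI URB posted yet */
	EXAMPLE_DTI_RXR,	/* DTI URB posted, reading an xfer result */
	EXAMPLE_DTI_RBI,	/* buf-in URB posted, reading IN data */
};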
/*
 * Transfer complete notification
 *
 * Called from the notif.c code. We get a notification on EP2 saying
 * that some endpoint has some transfer result data available. We are
 *
 * To speed up things, we always have a URB reading the DTI URB; we
 * don't really set it up and start it until the first xfer complete
 * notification arrives, which is what we do here.
 *
 * Follow up in wa_xfer_result_cb(), as that's where the whole state
 *
 * So here we just initialize the DTI URB for reading transfer result
 * notifications and also the buffer-in URB, for reading buffers. Then
 * we just submit the DTI URB.
 *
 * @wa shall be referenced
 */
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
	struct device *dev = &wa->usb_iface->dev;
	struct wa_notif_xfer *notif_xfer;
	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;

	notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
	BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);

	if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
		/* FIXME: hardcoded limitation, adapt */
		dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
	if (wa->dti_urb != NULL)	/* DTI URB already started */

	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->dti_urb == NULL) {
		dev_err(dev, "Can't allocate DTI URB\n");
		goto error_dti_urb_alloc;
	usb_fill_bulk_urb(
		wa->dti_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		wa->xfer_result, wa->xfer_result_size,
		wa_xfer_result_cb, wa);

	wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->buf_in_urb == NULL) {
		dev_err(dev, "Can't allocate BUF-IN URB\n");
		goto error_buf_in_urb_alloc;
	usb_fill_bulk_urb(
		wa->buf_in_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		NULL, 0, wa_buf_in_cb, wa);
	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		goto error_dti_urb_submit;

error_dti_urb_submit:
	usb_put_urb(wa->buf_in_urb);
error_buf_in_urb_alloc:
	usb_put_urb(wa->dti_urb);
error_dti_urb_alloc: