/*
 * WUSB Wire Adapter
 * Data transfer and URB enqueuing
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * How transfers work: get a buffer, break it up in segments (segment
 * size is a multiple of the maxpacket size). For each segment issue a
 * segment request (struct wa_xfer_*), then send the data buffer if
 * out or nothing if in (all over the DTO endpoint).
 *
 * For each submitted segment request, a notification will come over
 * the NEP endpoint and a transfer result (struct xfer_result) will
 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
 * data coming (inbound transfer), schedule a read and handle it.
 *
 * Sounds simple, but it is a pain to implement.
 *
 * LIFE CYCLE / STATE DIAGRAM
 *
 *   FIXME
 *
 * THIS CODE IS DISGUSTING
 *
 *   You have been warned; this is my second try and I am still not
 *   happy with it.
 *
 * NOTES:
 *
 * - Supports DMA xfers, control, bulk and maybe interrupt
 *
 * - Does not recycle unused rpipes
 *
 *   An rpipe is assigned to an endpoint the first time it is used,
 *   and then it's there, assigned, until the endpoint is disabled
 *   (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
 *   rpipe to the endpoint is done under the wa->rpipe_sem semaphore
 *   (should be a mutex).
 *
 *   Two ways it could be done:
 *
 *   (a) set up a timer every time an rpipe's use count drops to 1
 *       (which means unused) or when a transfer ends. Reset the
 *       timer when a xfer is queued. If the timer expires, release
 *       the rpipe [see rpipe_ep_disable()]. (A sketch of this idea
 *       follows this comment block.)
 *
 *   (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
 *       when none are found go over the list, check their endpoint
 *       and their activity record (if no last-xfer-done-ts in the
 *       last x seconds) take it
 *
 *   However, due to the fact that we have a set of limited
 *   resources (max-segments-at-the-same-time per xfer,
 *   xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), at the end
 *   we are going to have to rebuild all this based on a scheduler,
 *   where we have a list of transactions to do and based on the
 *   availability of the different required components (blocks,
 *   rpipes, segment slots, etc), we go scheduling them. Painful.
 */
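/*
 * Illustrative sketch of idea (a) above -- NOT part of the driver; the
 * timer plumbing is an assumption made up for illustration. A use
 * count of 1 means only the endpoint assignment holds the rpipe:
 *
 *	static void rpipe_idle_timeout(unsigned long data)
 *	{
 *		struct wa_rpipe *rpipe = (struct wa_rpipe *)data;
 *
 *		if (atomic_read(&rpipe->refcnt.refcount) == 1)
 *			rpipe_ep_disable(rpipe->wa, rpipe->ep);
 *	}
 *
 * re-armed with mod_timer() whenever a transfer completes and the use
 * count drops back to 1.
 */
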
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/scatterlist.h>

#include "wa-hc.h"
#include "wusbhc.h"

enum wa_seg_status {
	WA_SEG_NOTREADY,
	WA_SEG_READY,
	WA_SEG_DELAYED,
	WA_SEG_SUBMITTED,
	WA_SEG_PENDING,
	WA_SEG_DTI_PENDING,
	WA_SEG_DONE,
	WA_SEG_ERROR,
	WA_SEG_ABORTED,
};

/* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
#define WA_SEGS_MAX 128

static void wa_xfer_delayed_run(struct wa_rpipe *);
static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);

/*
 * Life cycle governed by 'struct urb' (the refcount of the struct is
 * that of the 'struct urb' and usb_free_urb() would free the whole
 * struct).
 */
struct wa_seg {
	struct urb tr_urb;		/* transfer request urb. */
	struct urb *isoc_pack_desc_urb;	/* for isoc packet descriptor. */
	struct urb *dto_urb;		/* for data output. */
	struct list_head list_node;	/* for rpipe->req_list */
	struct wa_xfer *xfer;		/* out xfer */
	u8 index;			/* which segment we are */
	int isoc_frame_count;	/* number of isoc frames in this segment. */
	int isoc_frame_offset;	/* starting frame offset in the xfer URB. */
	int isoc_size;	/* size of all isoc frames sent by this seg. */
	enum wa_seg_status status;
	ssize_t result;			/* bytes xfered or error */
	struct wa_xfer_hdr xfer_hdr;
};

static inline void wa_seg_init(struct wa_seg *seg)
{
	usb_init_urb(&seg->tr_urb);

	/* set the remaining memory to 0. */
	memset(((void *)seg) + sizeof(seg->tr_urb), 0,
		sizeof(*seg) - sizeof(seg->tr_urb));
}

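/*
 * Note (illustrative, not in the original): wa_seg_init() assumes that
 * tr_urb is the first member of struct wa_seg, so everything after it
 * can be zeroed with a single memset. A compile-time guard for that
 * layout assumption could read:
 *
 *	BUILD_BUG_ON(offsetof(struct wa_seg, tr_urb) != 0);
 */
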
/*
 * Protected by xfer->lock
 */
struct wa_xfer {
	struct kref refcnt;
	struct list_head list_node;
	spinlock_t lock;
	u32 id;

	struct wahc *wa;		/* Wire adapter we are plugged to */
	struct usb_host_endpoint *ep;
	struct urb *urb;		/* URB we are transferring for */
	struct wa_seg **seg;		/* transfer segments */
	u8 segs, segs_submitted, segs_done;
	unsigned is_inbound:1;
	unsigned is_dma:1;
	size_t seg_size;
	int result;
	/* Isoc frame that the current transfer buffer corresponds to. */
	int dto_isoc_frame_index;

	gfp_t gfp;			/* allocation mask */

	struct wusb_dev *wusb_dev;	/* for activity timestamps */
};

static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
	struct wa_seg *seg, int curr_iso_frame);

static inline void wa_xfer_init(struct wa_xfer *xfer)
{
	kref_init(&xfer->refcnt);
	INIT_LIST_HEAD(&xfer->list_node);
	spin_lock_init(&xfer->lock);
}

/*
 * Destroy a transfer structure
 *
 * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
 * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
 */
static void wa_xfer_destroy(struct kref *_xfer)
{
	struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
	if (xfer->seg) {
		unsigned cnt;
		for (cnt = 0; cnt < xfer->segs; cnt++) {
			struct wa_seg *seg = xfer->seg[cnt];
			if (seg) {
				usb_free_urb(seg->isoc_pack_desc_urb);
				if (seg->dto_urb) {
					kfree(seg->dto_urb->sg);
					usb_free_urb(seg->dto_urb);
				}
				usb_free_urb(&seg->tr_urb);
			}
		}
		kfree(xfer->seg);
	}
	kfree(xfer);
}

static void wa_xfer_get(struct wa_xfer *xfer)
{
	kref_get(&xfer->refcnt);
}

static void wa_xfer_put(struct wa_xfer *xfer)
{
	kref_put(&xfer->refcnt, wa_xfer_destroy);
}

/*
 * Try to get exclusive access to the DTO endpoint resource. Return true
 * if successful.
 */
static inline int __wa_dto_try_get(struct wahc *wa)
{
	return (test_and_set_bit(0, &wa->dto_in_use) == 0);
}

/* Release the DTO endpoint resource. */
static inline void __wa_dto_put(struct wahc *wa)
{
	clear_bit_unlock(0, &wa->dto_in_use);
}

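/*
 * Typical usage of the DTO resource bit (sketch of the pattern the
 * submit paths below follow, shown here only for illustration):
 *
 *	if (__wa_dto_try_get(wa)) {
 *		... submit URBs on the DTO endpoint ...
 *		__wa_dto_put(wa);
 *		wa_check_for_delayed_rpipes(wa);
 *	} else {
 *		wa_add_delayed_rpipe(wa, rpipe);
 *	}
 */
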
/* Service RPIPEs that are waiting on the DTO resource. */
static void wa_check_for_delayed_rpipes(struct wahc *wa)
{
	unsigned long flags;
	int dto_waiting = 0;
	struct wa_rpipe *rpipe;

	spin_lock_irqsave(&wa->rpipe_lock, flags);
	while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) {
		rpipe = list_first_entry(&wa->rpipe_delayed_list,
				struct wa_rpipe, list_node);
		__wa_xfer_delayed_run(rpipe, &dto_waiting);
		/* remove this RPIPE from the list if it is not waiting. */
		if (!dto_waiting) {
			pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n",
				__func__,
				le16_to_cpu(rpipe->descr.wRPipeIndex));
			list_del_init(&rpipe->list_node);
		}
	}
	spin_unlock_irqrestore(&wa->rpipe_lock, flags);
}

/* add this RPIPE to the end of the delayed RPIPE list. */
static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe)
{
	unsigned long flags;

	spin_lock_irqsave(&wa->rpipe_lock, flags);
	/* add rpipe to the list if it is not already on it. */
	if (list_empty(&rpipe->list_node)) {
		pr_debug("%s: adding RPIPE %d to the delayed list.\n",
			__func__, le16_to_cpu(rpipe->descr.wRPipeIndex));
		list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list);
	}
	spin_unlock_irqrestore(&wa->rpipe_lock, flags);
}

/*
 * xfer->lock has to be unlocked
 *
 * We take xfer->lock for setting the result; this is a barrier
 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 * reference to the transfer.
 */
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
	/* FIXME: segmentation broken -- kills DWA */
	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
	wa_put(xfer->wa);
	wa_xfer_put(xfer);
}

/*
 * xfer->lock has to be unlocked
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
	if (xfer->wusb_dev)
		wusb_dev_put(xfer->wusb_dev);
	rpipe_put(xfer->ep->hcpriv);
	wa_xfer_giveback(xfer);
}

/*
 * Initialize a transfer's ID
 *
 * We need to use a sequential number; if we use the pointer or the
 * hash of the pointer, it can repeat over sequential transfers and
 * then it will confuse the HWA....wonder why in hell they put a 32
 * bit handle in there then.
 */
static void wa_xfer_id_init(struct wa_xfer *xfer)
{
	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
}

/* Return the xfer's ID. */
static inline u32 wa_xfer_id(struct wa_xfer *xfer)
{
	return xfer->id;
}

/* Return the xfer's ID in transport format (little endian). */
static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
{
	return cpu_to_le32(xfer->id);
}

/*
 * If transfer is done, wrap it up and return true
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
	struct device *dev = &xfer->wa->usb_iface->dev;
	unsigned result, cnt;
	struct wa_seg *seg;
	struct urb *urb = xfer->urb;
	unsigned found_short = 0;

	result = xfer->segs_done == xfer->segs_submitted;
	if (result == 0)
		goto out;
	urb->actual_length = 0;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_DONE:
			if (found_short && seg->result > 0) {
				dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
					xfer, wa_xfer_id(xfer), cnt,
					seg->result);
				urb->status = -EINVAL;
				goto out;
			}
			urb->actual_length += seg->result;
			if (!(usb_pipeisoc(xfer->urb->pipe))
				&& seg->result < xfer->seg_size
				&& cnt != xfer->segs-1)
				found_short = 1;
			dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
				"result %zu urb->actual_length %d\n",
				xfer, wa_xfer_id(xfer), seg->index, found_short,
				seg->result, urb->actual_length);
			break;
		case WA_SEG_ERROR:
			xfer->result = seg->result;
			dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zu(0x%08zX)\n",
				xfer, wa_xfer_id(xfer), seg->index, seg->result,
				seg->result);
			goto out;
		case WA_SEG_ABORTED:
			dev_dbg(dev, "xfer %p ID %08X#%u ABORTED: result %d\n",
				xfer, wa_xfer_id(xfer), seg->index,
				urb->status);
			xfer->result = urb->status;
			goto out;
		default:
			dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
				 xfer, wa_xfer_id(xfer), cnt, seg->status);
			xfer->result = -EINVAL;
			goto out;
		}
	}
	xfer->result = 0;
out:
	return result;
}

/*
 * Search for a transfer by ID on the HCD's URB list
 *
 * The ID is the sequential number assigned by wa_xfer_id_init(); see
 * the comment there for why a pointer or a pointer hash is not used.
 *
 * @returns NULL if not found.
 */
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
{
	unsigned long flags;
	struct wa_xfer *xfer_itr;
	struct wa_xfer *xfer = NULL;
	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
		if (id == xfer_itr->id) {
			wa_xfer_get(xfer_itr);
			xfer = xfer_itr;
			goto out;
		}
	}
out:
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
	return xfer;
}

struct wa_xfer_abort_buffer {
	struct urb urb;
	struct wa_xfer_abort cmd;
};

static void __wa_xfer_abort_cb(struct urb *urb)
{
	struct wa_xfer_abort_buffer *b = urb->context;
	usb_put_urb(&b->urb);
}

/*
 * Aborts an ongoing transaction
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see above) does nothing but freeing up the data by
 * putting the URB. Because the URB is allocated at the head of the
 * struct, the whole space we allocated is kfree'd.
 */
static int __wa_xfer_abort(struct wa_xfer *xfer)
{
	int result = -ENOMEM;
	struct device *dev = &xfer->wa->usb_iface->dev;
	struct wa_xfer_abort_buffer *b;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	b = kmalloc(sizeof(*b), GFP_ATOMIC);
	if (b == NULL)
		goto error_kmalloc;
	b->cmd.bLength = sizeof(b->cmd);
	b->cmd.bRequestType = WA_XFER_ABORT;
	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
	b->cmd.dwTransferID = wa_xfer_id_le32(xfer);

	usb_init_urb(&b->urb);
	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
		usb_sndbulkpipe(xfer->wa->usb_dev,
				xfer->wa->dto_epd->bEndpointAddress),
		&b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
	result = usb_submit_urb(&b->urb, GFP_ATOMIC);
	if (result < 0)
		goto error_submit;
	return result;				/* callback frees! */

error_submit:
	if (printk_ratelimit())
		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
			xfer, result);
	kfree(b);
error_kmalloc:
	return result;
}

/*
 * Calculate the number of isoc frames starting from isoc_frame_offset
 * that will fit in a transfer segment.
 */
static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
	int isoc_frame_offset, int *total_size)
{
	int segment_size = 0, frame_count = 0;
	int index = isoc_frame_offset;
	struct usb_iso_packet_descriptor *iso_frame_desc =
		xfer->urb->iso_frame_desc;

	while ((index < xfer->urb->number_of_packets)
		&& ((segment_size + iso_frame_desc[index].length)
				<= xfer->seg_size)) {
		/*
		 * For Alereon HWA devices, only include an isoc frame in a
		 * segment if it is physically contiguous with the previous
		 * frame. This is required because those devices expect
		 * the isoc frames to be sent as a single USB transaction as
		 * opposed to one transaction per frame with standard HWA.
		 */
		if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
			&& (index > isoc_frame_offset)
			&& ((iso_frame_desc[index - 1].offset +
				iso_frame_desc[index - 1].length) !=
				iso_frame_desc[index].offset))
			break;

		/* this frame fits. count it. */
		++frame_count;
		segment_size += iso_frame_desc[index].length;

		/* move to the next isoc frame. */
		++index;
	}

	*total_size = segment_size;
	return frame_count;
}

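/*
 * Worked example (illustrative numbers only): with xfer->seg_size =
 * 3072 and four isoc frames of 1024 bytes each, the first call packs
 * frames 0-2 and returns 3 with *total_size = 3072; a second call
 * starting at frame 3 packs the last frame and returns 1 with
 * *total_size = 1024.
 */
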
/*
 * @returns < 0 on error, transfer segment request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
	enum wa_xfer_type *pxfer_type)
{
	ssize_t result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	size_t maxpktsize;
	struct urb *urb = xfer->urb;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	switch (rpipe->descr.bmAttribute & 0x3) {
	case USB_ENDPOINT_XFER_CONTROL:
		*pxfer_type = WA_XFER_TYPE_CTL;
		result = sizeof(struct wa_xfer_ctl);
		break;
	case USB_ENDPOINT_XFER_INT:
	case USB_ENDPOINT_XFER_BULK:
		*pxfer_type = WA_XFER_TYPE_BI;
		result = sizeof(struct wa_xfer_bi);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (usb_pipeout(urb->pipe)) {
			*pxfer_type = WA_XFER_TYPE_ISO;
			result = sizeof(struct wa_xfer_hwaiso);
		} else {
			dev_err(dev, "FIXME: ISOC IN not implemented\n");
			result = -ENOSYS;
			goto error;
		}
		break;
	default:
		/* never happens */
		BUG();
		result = -EINVAL;	/* shut gcc up */
	}
	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;

	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
	if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
		int index = 0;

		xfer->seg_size = maxpktsize;
		xfer->segs = 0;
		/*
		 * loop over urb->number_of_packets to determine how many
		 * xfer segments will be needed to send the isoc frames.
		 */
		while (index < urb->number_of_packets) {
			int seg_size; /* don't care. */
			index += __wa_seg_calculate_isoc_frame_count(xfer,
					index, &seg_size);
			++xfer->segs;
		}
	} else {
		xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
			* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
		/* Compute the segment size and make sure it is a multiple of
		 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
		 * a requirement. */
		if (xfer->seg_size < maxpktsize) {
			dev_err(dev,
				"HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
				xfer->seg_size, maxpktsize);
			result = -EINVAL;
			goto error;
		}
		xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
		xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
						xfer->seg_size);
		if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
			xfer->segs = 1;
		if (xfer->segs > WA_SEGS_MAX) {
			dev_err(dev, "BUG? oops, number of segments %zu bigger than %d\n",
				(urb->transfer_buffer_length/xfer->seg_size),
				WA_SEGS_MAX);
			result = -EINVAL;
			goto error;
		}
	}
error:
	return result;
}

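/*
 * Worked example (illustrative numbers only): with wBlocks = 16 and
 * bRPipeBlockSize = 8, seg_size = 16 * 2^(8-1) = 2048 bytes. With a
 * maxpktsize of 512 that is already a multiple, so a 5000 byte bulk
 * transfer needs DIV_ROUND_UP(5000, 2048) = 3 segments carrying 2048,
 * 2048 and 904 bytes respectively.
 */
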
static void __wa_setup_isoc_packet_descr(
		struct wa_xfer_packet_info_hwaiso *packet_desc,
		struct wa_xfer *xfer,
		struct wa_seg *seg) {
	struct usb_iso_packet_descriptor *iso_frame_desc =
		xfer->urb->iso_frame_desc;
	int frame_index;

	/* populate isoc packet descriptor. */
	packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
	packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) +
		(sizeof(packet_desc->PacketLength[0]) *
			seg->isoc_frame_count));
	for (frame_index = 0; frame_index < seg->isoc_frame_count;
		++frame_index) {
		int offset_index = frame_index + seg->isoc_frame_offset;
		packet_desc->PacketLength[frame_index] =
			cpu_to_le16(iso_frame_desc[offset_index].length);
	}
}

/* Fill in the common request header and xfer-type specific data. */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
				 struct wa_xfer_hdr *xfer_hdr0,
				 enum wa_xfer_type xfer_type,
				 size_t xfer_hdr_size)
{
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
	struct wa_seg *seg = xfer->seg[0];

	xfer_hdr0 = &seg->xfer_hdr;
	xfer_hdr0->bLength = xfer_hdr_size;
	xfer_hdr0->bRequestType = xfer_type;
	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
	xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
	xfer_hdr0->bTransferSegment = 0;
	switch (xfer_type) {
	case WA_XFER_TYPE_CTL: {
		struct wa_xfer_ctl *xfer_ctl =
			container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
		xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
		memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
		       sizeof(xfer_ctl->baSetupData));
		break;
	}
	case WA_XFER_TYPE_BI:
		break;
	case WA_XFER_TYPE_ISO: {
		struct wa_xfer_hwaiso *xfer_iso =
			container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr);
		struct wa_xfer_packet_info_hwaiso *packet_desc =
			((void *)xfer_iso) + xfer_hdr_size;

		/* populate the isoc section of the transfer request. */
		xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count);
		/* populate isoc packet descriptor. */
		__wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
		break;
	}
	default:
		BUG();
	}
}

/*
 * Callback for the OUT data phase of the segment request
 *
 * Check wa_seg_tr_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this DTO phase has succeeded,
 * wa_seg_tr_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_dto_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready = 0;
	int data_send_done = 1, release_dto = 0, holding_dto = 0;
	u8 done = 0;
	int result;

	/* free the sg if it was used. */
	kfree(urb->sg);
	urb->sg = NULL;

	spin_lock_irqsave(&xfer->lock, flags);
	wa = xfer->wa;
	dev = &wa->usb_iface->dev;
	if (usb_pipeisoc(xfer->urb->pipe)) {
		/* Alereon HWA sends all isoc frames in a single transfer. */
		if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
			xfer->dto_isoc_frame_index += seg->isoc_frame_count;
		else
			xfer->dto_isoc_frame_index += 1;
		if (xfer->dto_isoc_frame_index < seg->isoc_frame_count) {
			data_send_done = 0;
			holding_dto = 1; /* checked in error cases. */
			/*
			 * if this is the last isoc frame of the segment, we
			 * can release DTO after sending this frame.
			 */
			if ((xfer->dto_isoc_frame_index + 1) >=
				seg->isoc_frame_count)
				release_dto = 1;
		}
		dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n",
			wa_xfer_id(xfer), seg->index,
			xfer->dto_isoc_frame_index, holding_dto, release_dto);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		seg->result += urb->actual_length;
		if (data_send_done) {
			dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n",
				wa_xfer_id(xfer), seg->index, seg->result);
			if (seg->status < WA_SEG_PENDING)
				seg->status = WA_SEG_PENDING;
		} else {
			/* should only hit this for isoc xfers. */
			/*
			 * Populate the dto URB with the next isoc frame buffer,
			 * send the URB and release DTO if we no longer need it.
			 */
			__wa_populate_dto_urb_isoc(xfer, seg,
				seg->isoc_frame_offset +
				xfer->dto_isoc_frame_index);

			/* resubmit the URB with the next isoc frame. */
			result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
			if (result < 0) {
				dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
					wa_xfer_id(xfer), seg->index, result);
				spin_unlock_irqrestore(&xfer->lock, flags);
				goto error_dto_submit;
			}
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (release_dto) {
			__wa_dto_put(wa);
			wa_check_for_delayed_rpipes(wa);
		}
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		if (holding_dto) {
			__wa_dto_put(wa);
			wa_check_for_delayed_rpipes(wa);
		}
		break;
	default:		/* Other errors ... */
		dev_err(dev, "xfer 0x%08X#%u: data out error %d\n",
			wa_xfer_id(xfer), seg->index, urb->status);
		goto error_default;
	}

	return;

error_dto_submit:
error_default:
	spin_lock_irqsave(&xfer->lock, flags);
	rpipe = xfer->ep->hcpriv;
	if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
		    EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	if (seg->status != WA_SEG_ERROR) {
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		__wa_xfer_abort(xfer);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (holding_dto) {
		__wa_dto_put(wa);
		wa_check_for_delayed_rpipes(wa);
	}
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
}

/*
 * Callback for the isoc packet descriptor phase of the segment request
 *
 * Check wa_seg_tr_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this phase has succeeded,
 * wa_seg_tr_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_iso_pack_desc_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready = 0;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n",
			wa_xfer_id(xfer), seg->index);
		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n",
			wa_xfer_id(xfer), seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		if (seg->status != WA_SEG_ERROR) {
			usb_unlink_urb(seg->dto_urb);
			seg->status = WA_SEG_ERROR;
			seg->result = urb->status;
			xfer->segs_done++;
			__wa_xfer_abort(xfer);
			rpipe_ready = rpipe_avail_inc(rpipe);
			done = __wa_xfer_is_done(xfer);
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}

/*
 * Callback for the segment request
 *
 * If successful transition state (unless already transitioned or
 * outbound transfer); otherwise, take a note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback (geeeeeeze), so it might happen that we are already in
 * another state. As well, we don't set it if the transfer is not inbound,
 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
 * finishes.
 */
static void wa_seg_tr_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n",
			xfer, wa_xfer_id(xfer), seg->index);
		if (xfer->is_inbound &&
			seg->status < WA_SEG_PENDING &&
			!(usb_pipeisoc(xfer->urb->pipe)))
			seg->status = WA_SEG_PENDING;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
				xfer, wa_xfer_id(xfer), seg->index,
				urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		usb_unlink_urb(seg->isoc_pack_desc_urb);
		usb_unlink_urb(seg->dto_urb);
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		__wa_xfer_abort(xfer);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}

/*
 * Allocate an SG list to store bytes_to_transfer bytes and copy the
 * subset of the in_sg that matches the buffer subset
 * we are about to transfer.
 */
static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
	const unsigned int bytes_transferred,
	const unsigned int bytes_to_transfer, unsigned int *out_num_sgs)
{
	struct scatterlist *out_sg;
	unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
		nents;
	struct scatterlist *current_xfer_sg = in_sg;
	struct scatterlist *current_seg_sg, *last_seg_sg;

	/* skip previously transferred pages. */
	while ((current_xfer_sg) &&
			(bytes_processed < bytes_transferred)) {
		bytes_processed += current_xfer_sg->length;

		/* advance the sg if current segment starts on or past the
			next page. */
		if (bytes_processed <= bytes_transferred)
			current_xfer_sg = sg_next(current_xfer_sg);
	}

	/* the data for the current segment starts in current_xfer_sg.
		calculate the offset. */
	if (bytes_processed > bytes_transferred) {
		offset_into_current_page_data = current_xfer_sg->length -
			(bytes_processed - bytes_transferred);
	}

	/* calculate the number of pages needed by this segment. */
	nents = DIV_ROUND_UP((bytes_to_transfer +
		offset_into_current_page_data +
		current_xfer_sg->offset),
		PAGE_SIZE);

	out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
	if (out_sg) {
		sg_init_table(out_sg, nents);

		/* copy the portion of the incoming SG that correlates to the
		 * data to be transferred by this segment to the segment SG. */
		last_seg_sg = current_seg_sg = out_sg;
		bytes_processed = 0;

		/* reset nents and calculate the actual number of sg entries
			needed. */
		nents = 0;
		while ((bytes_processed < bytes_to_transfer) &&
				current_seg_sg && current_xfer_sg) {
			unsigned int page_len = min((current_xfer_sg->length -
				offset_into_current_page_data),
				(bytes_to_transfer - bytes_processed));

			sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
				page_len,
				current_xfer_sg->offset +
				offset_into_current_page_data);

			bytes_processed += page_len;

			last_seg_sg = current_seg_sg;
			current_seg_sg = sg_next(current_seg_sg);
			current_xfer_sg = sg_next(current_xfer_sg);

			/* only the first page may require additional offset. */
			offset_into_current_page_data = 0;
			nents++;
		}

		/* update num_sgs and terminate the list since we may have
		 * concatenated pages. */
		sg_mark_end(last_seg_sg);
		*out_num_sgs = nents;
	}

	return out_sg;
}

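/*
 * Worked example (illustrative numbers only): asking for the second
 * 2048 byte segment (bytes_transferred = 2048, bytes_to_transfer =
 * 2048) of an in_sg whose first entry is 4096 bytes long, the skip
 * loop stops inside that first entry with bytes_processed = 4096, so
 * offset_into_current_page_data = 4096 - (4096 - 2048) = 2048 and the
 * copy loop emits a single entry covering the second half of the page.
 */
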
/*
 * Populate DMA buffer info for the isoc dto urb.
 */
static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
	struct wa_seg *seg, int curr_iso_frame)
{
	seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	seg->dto_urb->sg = NULL;
	seg->dto_urb->num_sgs = 0;
	/* dto urb buffer address pulled from iso_frame_desc. */
	seg->dto_urb->transfer_dma = xfer->urb->transfer_dma +
		xfer->urb->iso_frame_desc[curr_iso_frame].offset;
	/* The Alereon HWA sends a single URB with all isoc segs. */
	if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
		seg->dto_urb->transfer_buffer_length = seg->isoc_size;
	else
		seg->dto_urb->transfer_buffer_length =
			xfer->urb->iso_frame_desc[curr_iso_frame].length;
}

/*
 * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
 */
static int __wa_populate_dto_urb(struct wa_xfer *xfer,
	struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
{
	int result = 0;

	if (xfer->is_dma) {
		seg->dto_urb->transfer_dma =
			xfer->urb->transfer_dma + buf_itr_offset;
		seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		seg->dto_urb->sg = NULL;
		seg->dto_urb->num_sgs = 0;
	} else {
		/* do buffer or SG processing. */
		seg->dto_urb->transfer_flags &=
			~URB_NO_TRANSFER_DMA_MAP;
		/* this should always be 0 before a resubmit. */
		seg->dto_urb->num_mapped_sgs = 0;

		if (xfer->urb->transfer_buffer) {
			seg->dto_urb->transfer_buffer =
				xfer->urb->transfer_buffer +
				buf_itr_offset;
			seg->dto_urb->sg = NULL;
			seg->dto_urb->num_sgs = 0;
		} else {
			seg->dto_urb->transfer_buffer = NULL;

			/*
			 * allocate an SG list to store seg_size bytes
			 * and copy the subset of the xfer->urb->sg that
			 * matches the buffer subset we are about to
			 * read.
			 */
			seg->dto_urb->sg = wa_xfer_create_subset_sg(
				xfer->urb->sg,
				buf_itr_offset, buf_itr_size,
				&(seg->dto_urb->num_sgs));
			if (!(seg->dto_urb->sg))
				result = -ENOMEM;
		}
	}
	seg->dto_urb->transfer_buffer_length = buf_itr_size;

	return result;
}

/*
 * Allocate the segs array and initialize each of them
 *
 * The segments are freed by wa_xfer_destroy() when the xfer use count
 * drops to zero; however, because each segment is given the same life
 * cycle as the USB URB it contains, it is actually freed by
 * usb_put_urb() on the contained USB URB (twisted, eh?).
 */
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
	int result, cnt, iso_frame_offset;
	size_t alloc_size = sizeof(*xfer->seg[0])
		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
	struct usb_device *usb_dev = xfer->wa->usb_dev;
	const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
	struct wa_seg *seg;
	size_t buf_itr, buf_size, buf_itr_size;
	int xfer_isoc_frame_offset = 0;

	result = -ENOMEM;
	xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
	if (xfer->seg == NULL)
		goto error_segs_kzalloc;
	buf_itr = 0;
	buf_size = xfer->urb->transfer_buffer_length;
	iso_frame_offset = 0;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		size_t iso_pkt_descr_size = 0;
		int seg_isoc_frame_count = 0, seg_isoc_size = 0;

		if (usb_pipeisoc(xfer->urb->pipe)) {
			seg_isoc_frame_count =
				__wa_seg_calculate_isoc_frame_count(xfer,
					xfer_isoc_frame_offset, &seg_isoc_size);

			iso_pkt_descr_size =
				sizeof(struct wa_xfer_packet_info_hwaiso) +
				(seg_isoc_frame_count * sizeof(__le16));
		}
		seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
						GFP_ATOMIC);
		if (seg == NULL)
			goto error_seg_kmalloc;
		wa_seg_init(seg);
		seg->xfer = xfer;
		seg->index = cnt;
		seg->isoc_frame_count = seg_isoc_frame_count;
		seg->isoc_frame_offset = xfer_isoc_frame_offset;
		seg->isoc_size = seg_isoc_size;
		usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
			usb_sndbulkpipe(usb_dev,
					dto_epd->bEndpointAddress),
			&seg->xfer_hdr, xfer_hdr_size,
			wa_seg_tr_cb, seg);
		buf_itr_size = min(buf_size, xfer->seg_size);
		if (xfer->is_inbound == 0 && buf_size > 0) {
			/* outbound data. */
			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
			if (seg->dto_urb == NULL)
				goto error_dto_alloc;
			usb_fill_bulk_urb(
				seg->dto_urb, usb_dev,
				usb_sndbulkpipe(usb_dev,
					dto_epd->bEndpointAddress),
				NULL, 0, wa_seg_dto_cb, seg);

			if (usb_pipeisoc(xfer->urb->pipe)) {
				/* iso packet descriptor. */
				seg->isoc_pack_desc_urb =
						usb_alloc_urb(0, GFP_ATOMIC);
				if (seg->isoc_pack_desc_urb == NULL)
					goto error_iso_pack_desc_alloc;
				/*
				 * The buffer for the isoc packet descriptor
				 * comes after the transfer request header in
				 * the segment object memory buffer.
				 */
				usb_fill_bulk_urb(
					seg->isoc_pack_desc_urb, usb_dev,
					usb_sndbulkpipe(usb_dev,
						dto_epd->bEndpointAddress),
					(void *)(&seg->xfer_hdr) +
						xfer_hdr_size,
					iso_pkt_descr_size,
					wa_seg_iso_pack_desc_cb, seg);

				/*
				 * Fill in the xfer buffer information for the
				 * first isoc frame. Subsequent frames in this
				 * segment will be filled in and sent from the
				 * DTO completion routine, if needed.
				 */
				__wa_populate_dto_urb_isoc(xfer, seg,
					xfer_isoc_frame_offset);
				/* adjust starting frame offset for next seg. */
				xfer_isoc_frame_offset += seg_isoc_frame_count;
			} else {
				/* fill in the xfer buffer information. */
				result = __wa_populate_dto_urb(xfer, seg,
							buf_itr, buf_itr_size);
				if (result < 0)
					goto error_seg_outbound_populate;

				buf_itr += buf_itr_size;
				buf_size -= buf_itr_size;
			}
		}
		seg->status = WA_SEG_READY;
	}
	return 0;

	/*
	 * Free the memory for the current segment which failed to init.
	 * Use the fact that cnt is left at where it failed. The remaining
	 * segments will be cleaned up by wa_xfer_destroy.
	 */
error_iso_pack_desc_alloc:
error_seg_outbound_populate:
	usb_free_urb(xfer->seg[cnt]->dto_urb);
error_dto_alloc:
	kfree(xfer->seg[cnt]);
	xfer->seg[cnt] = NULL;
error_seg_kmalloc:
error_segs_kzalloc:
	return result;
}

/*
 * Allocates all the stuff needed to submit a transfer
 *
 * Breaks the whole data buffer in a list of segments, each one has a
 * structure allocated to it and linked in xfer->seg[index]
 *
 * FIXME: merge setup_segs() and the last part of this function, no
 *        need to do two for loops when we could run everything in a
 *        single one
 */
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	enum wa_xfer_type xfer_type = 0; /* shut up GCC */
	size_t xfer_hdr_size, cnt, transfer_size;
	struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;

	result = __wa_xfer_setup_sizes(xfer, &xfer_type);
	if (result < 0)
		goto error_setup_sizes;
	xfer_hdr_size = result;
	result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
	if (result < 0) {
		dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
			xfer, xfer->segs, result);
		goto error_setup_segs;
	}
	/* Fill the first header */
	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	wa_xfer_id_init(xfer);
	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);

	/* Fill remaining headers */
	xfer_hdr = xfer_hdr0;
	if (xfer_type == WA_XFER_TYPE_ISO) {
		xfer_hdr0->dwTransferLength =
			cpu_to_le32(xfer->seg[0]->isoc_size);
		for (cnt = 1; cnt < xfer->segs; cnt++) {
			struct wa_xfer_packet_info_hwaiso *packet_desc;
			struct wa_seg *seg = xfer->seg[cnt];

			xfer_hdr = &seg->xfer_hdr;
			packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
			/*
			 * Copy values from the 0th header. Segment specific
			 * values are set below.
			 */
			memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
			xfer_hdr->bTransferSegment = cnt;
			xfer_hdr->dwTransferLength =
				cpu_to_le32(seg->isoc_size);
			__wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
			seg->status = WA_SEG_READY;
		}
	} else {
		transfer_size = urb->transfer_buffer_length;
		xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
			cpu_to_le32(xfer->seg_size) :
			cpu_to_le32(transfer_size);
		transfer_size -= xfer->seg_size;
		for (cnt = 1; cnt < xfer->segs; cnt++) {
			xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
			memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
			xfer_hdr->bTransferSegment = cnt;
			xfer_hdr->dwTransferLength =
				transfer_size > xfer->seg_size ?
					cpu_to_le32(xfer->seg_size)
					: cpu_to_le32(transfer_size);
			xfer->seg[cnt]->status = WA_SEG_READY;
			transfer_size -= xfer->seg_size;
		}
	}
	xfer_hdr->bTransferSegment |= 0x80;	/* this is the last segment */
	result = 0;
error_setup_segs:
error_setup_sizes:
	return result;
}

/*
 * rpipe->seg_lock is held!
 */
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
			   struct wa_seg *seg, int *dto_done)
{
	int result;

	/* default to done unless we encounter a multi-frame isoc segment. */
	*dto_done = 1;

	/* submit the transfer request. */
	result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
	if (result < 0) {
		pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
		       __func__, xfer, seg->index, result);
		goto error_seg_submit;
	}
	/* submit the isoc packet descriptor if present. */
	if (seg->isoc_pack_desc_urb) {
		struct wahc *wa = xfer->wa;

		result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
		if (result < 0) {
			pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
			       __func__, xfer, seg->index, result);
			goto error_iso_pack_desc_submit;
		}
		xfer->dto_isoc_frame_index = 0;
		/*
		 * If this segment contains more than one isoc frame, hold
		 * onto the dto resource until we send all frames.
		 * Only applies to non-Alereon devices.
		 */
		if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
			&& (seg->isoc_frame_count > 1))
			*dto_done = 0;
	}
	/* submit the out data if this is an out request. */
	if (seg->dto_urb) {
		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
		if (result < 0) {
			pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
			       __func__, xfer, seg->index, result);
			goto error_dto_submit;
		}
	}
	seg->status = WA_SEG_SUBMITTED;
	rpipe_avail_dec(rpipe);
	return 0;

error_dto_submit:
	usb_unlink_urb(seg->isoc_pack_desc_urb);
error_iso_pack_desc_submit:
	usb_unlink_urb(&seg->tr_urb);
error_seg_submit:
	seg->status = WA_SEG_ERROR;
	seg->result = result;
	*dto_done = 1;
	return result;
}

/*
 * Execute more queued request segments until the maximum concurrent allowed.
 * Return true if the DTO resource was acquired and released.
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not vice versa.
 */
static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
{
	int result, dto_acquired = 0, dto_done = 0;
	struct device *dev = &rpipe->wa->usb_iface->dev;
	struct wa_seg *seg;
	struct wa_xfer *xfer;
	unsigned long flags;

	*dto_waiting = 0;

	spin_lock_irqsave(&rpipe->seg_lock, flags);
	while (atomic_read(&rpipe->segs_available) > 0
			&& !list_empty(&rpipe->seg_list)
			&& (dto_acquired = __wa_dto_try_get(rpipe->wa))) {
		seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
				 list_node);
		list_del(&seg->list_node);
		xfer = seg->xfer;
		result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
		/* release the dto resource if this RPIPE is done with it. */
		if (dto_done)
			__wa_dto_put(rpipe->wa);
		dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
			xfer, wa_xfer_id(xfer), seg->index,
			atomic_read(&rpipe->segs_available), result);
		if (unlikely(result < 0)) {
			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
			spin_lock_irqsave(&xfer->lock, flags);
			__wa_xfer_abort(xfer);
			xfer->segs_done++;
			spin_unlock_irqrestore(&xfer->lock, flags);
			spin_lock_irqsave(&rpipe->seg_lock, flags);
		}
	}
	/*
	 * Mark this RPIPE as waiting if dto was not acquired, there are
	 * delayed segs and no active transfers to wake us up later.
	 */
	if (!dto_acquired && !list_empty(&rpipe->seg_list)
		&& (atomic_read(&rpipe->segs_available) ==
			le16_to_cpu(rpipe->descr.wRequests)))
		*dto_waiting = 1;

	spin_unlock_irqrestore(&rpipe->seg_lock, flags);

	return dto_done;
}

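/*
 * Lock ordering note (illustrative): the normal nesting is
 *
 *	spin_lock_irqsave(&xfer->lock, flags);
 *	spin_lock_irqsave(&rpipe->seg_lock, flags2);
 *	...
 *	spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
 *	spin_unlock_irqrestore(&xfer->lock, flags);
 *
 * which is why the error path above must drop seg_lock before taking
 * xfer->lock: taking xfer->lock while holding seg_lock could deadlock
 * against a path holding the locks in the normal order.
 */
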
static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
	int dto_waiting;
	int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting);

	/*
	 * If this RPIPE is waiting on the DTO resource, add it to the tail of
	 * the delayed RPIPE list.
	 * Otherwise, if the WA DTO resource was acquired and released by
	 * __wa_xfer_delayed_run, another RPIPE may have attempted to acquire
	 * DTO and failed during that time. Check the delayed list and process
	 * any waiters. Start searching from the next RPIPE index.
	 */
	if (dto_waiting)
		wa_add_delayed_rpipe(rpipe->wa, rpipe);
	else if (dto_done)
		wa_check_for_delayed_rpipes(rpipe->wa);
}

/*
 * xfer->lock is taken
 *
 * On failure submitting we just stop submitting and return error;
 * wa_urb_enqueue_b() will execute the completion path
 */
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
	int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;
	struct wahc *wa = xfer->wa;
	struct device *dev = &wa->usb_iface->dev;
	unsigned cnt;
	struct wa_seg *seg;
	unsigned long flags;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
	size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
	u8 available;
	u8 empty;

	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_add_tail(&xfer->list_node, &wa->xfer_list);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);

	BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
	result = 0;
	spin_lock_irqsave(&rpipe->seg_lock, flags);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		int delay_seg = 1;

		available = atomic_read(&rpipe->segs_available);
		empty = list_empty(&rpipe->seg_list);
		seg = xfer->seg[cnt];
		if (available && empty) {
			/*
			 * Only attempt to acquire DTO if we have a segment
			 * to send.
			 */
			dto_acquired = __wa_dto_try_get(rpipe->wa);
			if (dto_acquired) {
				delay_seg = 0;
				result = __wa_seg_submit(rpipe, xfer, seg,
							&dto_done);
				dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n",
					xfer, wa_xfer_id(xfer), cnt, available,
					empty);
				if (dto_done)
					__wa_dto_put(rpipe->wa);

				if (result < 0) {
					__wa_xfer_abort(xfer);
					goto error_seg_submit;
				}
			}
		}

		if (delay_seg) {
			dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n",
				xfer, wa_xfer_id(xfer), cnt, available, empty);
			seg->status = WA_SEG_DELAYED;
			list_add_tail(&seg->list_node, &rpipe->seg_list);
		}
		xfer->segs_submitted++;
	}
error_seg_submit:
	/*
	 * Mark this RPIPE as waiting if dto was not acquired, there are
	 * delayed segs and no active transfers to wake us up later.
	 */
	if (!dto_acquired && !list_empty(&rpipe->seg_list)
		&& (atomic_read(&rpipe->segs_available) ==
			le16_to_cpu(rpipe->descr.wRequests)))
		dto_waiting = 1;
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);

	if (dto_waiting)
		wa_add_delayed_rpipe(rpipe->wa, rpipe);
	else if (dto_done)
		wa_check_for_delayed_rpipes(rpipe->wa);

	return result;
}

/*
 * Second part of a URB/transfer enqueuement
 *
 * Assumes this comes from wa_urb_enqueue() [maybe through
 * wa_urb_enqueue_run()]. At this point:
 *
 * xfer->wa	filled and refcounted
 * xfer->ep	filled with rpipe refcounted if
 *		delayed == 0
 * xfer->urb	filled and refcounted (this is the case when called
 *		from wa_urb_enqueue() as we come from usb_submit_urb()
 *		and when called by wa_urb_enqueue_run(), as we took an
 *		extra ref dropped by _run() after we return).
 * xfer->gfp	filled
 *
 * If we fail at __wa_xfer_submit(), then we just check if we are done
 * and if so, we run the completion procedure. However, if we are not
 * yet done, we do nothing and wait for the completion handlers from
 * the submitted URBs or from the xfer-result path to kick in. If xfer
 * result never kicks in, the xfer will timeout from the USB code and
 * dequeue() will be called.
 */
static int wa_urb_enqueue_b(struct wa_xfer *xfer)
{
	int result;
	unsigned long flags;
	struct urb *urb = xfer->urb;
	struct wahc *wa = xfer->wa;
	struct wusbhc *wusbhc = wa->wusb;
	struct wusb_dev *wusb_dev;
	unsigned done;

	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
	if (result < 0) {
		pr_err("%s: error_rpipe_get\n", __func__);
		goto error_rpipe_get;
	}
	result = -ENODEV;
	/* FIXME: segmentation broken -- kills DWA */
	mutex_lock(&wusbhc->mutex);		/* get a WUSB dev */
	if (urb->dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		pr_err("%s: error usb dev gone\n", __func__);
		goto error_dev_gone;
	}
	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
	if (wusb_dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		pr_err("%s: error wusb dev gone\n", __func__);
		goto error_dev_gone;
	}
	mutex_unlock(&wusbhc->mutex);

	spin_lock_irqsave(&xfer->lock, flags);
	xfer->wusb_dev = wusb_dev;
	result = urb->status;
	if (urb->status != -EINPROGRESS) {
		pr_err("%s: error_dequeued\n", __func__);
		goto error_dequeued;
	}

	result = __wa_xfer_setup(xfer, urb);
	if (result < 0) {
		pr_err("%s: error_xfer_setup\n", __func__);
		goto error_xfer_setup;
	}
	result = __wa_xfer_submit(xfer);
	if (result < 0) {
		pr_err("%s: error_xfer_submit\n", __func__);
		goto error_xfer_submit;
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	return 0;

	/*
	 * this is basically wa_xfer_completion() broken up: wa_xfer_giveback()
	 * does a wa_xfer_put() that will call wa_xfer_destroy() and undo
	 * setup().
	 */
error_xfer_setup:
error_dequeued:
	spin_unlock_irqrestore(&xfer->lock, flags);
	/* FIXME: segmentation broken, kills DWA */
	if (wusb_dev)
		wusb_dev_put(wusb_dev);
error_dev_gone:
	rpipe_put(xfer->ep->hcpriv);
error_rpipe_get:
	xfer->result = result;
	return result;

error_xfer_submit:
	done = __wa_xfer_is_done(xfer);
	xfer->result = result;
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	/* return success since the completion routine will run. */
	return 0;
}

/*
 * Execute the delayed transfers in the Wire Adapter @wa
 *
 * We need to be careful here, as dequeue() could be called in the
 * middle. That's why we do the whole thing under the
 * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
 * and then checks the list -- so as we would be acquiring in inverse
 * order, we move the delayed list to a separate list while locked and then
 * submit them without the list lock held.
 */
void wa_urb_enqueue_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
	struct wa_xfer *xfer, *next;
	struct urb *urb;
	LIST_HEAD(tmp_list);

	/* Create a copy of the wa->xfer_delayed_list while holding the lock */
	spin_lock_irq(&wa->xfer_list_lock);
	list_cut_position(&tmp_list, &wa->xfer_delayed_list,
			wa->xfer_delayed_list.prev);
	spin_unlock_irq(&wa->xfer_list_lock);

	/*
	 * enqueue from temp list without list lock held since wa_urb_enqueue_b
	 * can take xfer->lock as well as lock mutexes.
	 */
	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
		list_del_init(&xfer->list_node);

		urb = xfer->urb;
		if (wa_urb_enqueue_b(xfer) < 0)
			wa_xfer_giveback(xfer);
		usb_put_urb(urb);	/* taken when queuing */
	}
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);

/*
 * Process the errored transfers on the Wire Adapter outside of interrupt.
 */
void wa_process_errored_transfers_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
	struct wa_xfer *xfer, *next;
	LIST_HEAD(tmp_list);

	pr_info("%s: Run delayed STALL processing.\n", __func__);

	/* Create a copy of the wa->xfer_errored_list while holding the lock */
	spin_lock_irq(&wa->xfer_list_lock);
	list_cut_position(&tmp_list, &wa->xfer_errored_list,
			wa->xfer_errored_list.prev);
	spin_unlock_irq(&wa->xfer_list_lock);

	/*
	 * run rpipe_clear_feature_stalled from temp list without list lock
	 * held.
	 */
	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
		struct usb_host_endpoint *ep;
		unsigned long flags;
		struct wa_rpipe *rpipe;

		spin_lock_irqsave(&xfer->lock, flags);
		ep = xfer->ep;
		rpipe = ep->hcpriv;
		spin_unlock_irqrestore(&xfer->lock, flags);

		/* clear RPIPE feature stalled without holding a lock. */
		rpipe_clear_feature_stalled(wa, ep);

		/* complete the xfer. This removes it from the tmp list. */
		wa_xfer_completion(xfer);

		/* check for work. */
		wa_xfer_delayed_run(rpipe);
	}
}
EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);

/*
 * Submit a transfer to the Wire Adapter in a delayed way
 *
 * The process of enqueuing involves possible sleeps() [see
 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
 * in an atomic section, we defer the enqueue_b() call--else we call direct.
 *
 * @urb: We own a reference to it done by the HCI Linux USB stack that
 *       will be given up by calling usb_hcd_giveback_urb() or by
 *       returning error from this function -> ergo we don't have to
 *       refcount it.
 */
int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
		   struct urb *urb, gfp_t gfp)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer *xfer;
	unsigned long my_flags;
	unsigned cant_sleep = irqs_disabled() | in_atomic();

	if ((urb->transfer_buffer == NULL)
	    && (urb->sg == NULL)
	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
	    && urb->transfer_buffer_length != 0) {
		dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
		dump_stack();
	}

	result = -ENOMEM;
	xfer = kzalloc(sizeof(*xfer), gfp);
	if (xfer == NULL)
		goto error_kmalloc;

	result = -ENOENT;
	if (urb->status != -EINPROGRESS)	/* cancelled */
		goto error_dequeued;		/* before starting? */
	wa_xfer_init(xfer);
	xfer->wa = wa_get(wa);
	xfer->urb = urb;
	xfer->gfp = gfp;
	xfer->ep = ep;
	urb->hcpriv = xfer;

	dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
		xfer, urb, urb->pipe, urb->transfer_buffer_length,
		urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
		urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
		cant_sleep ? "deferred" : "inline");

	if (cant_sleep) {
		usb_get_urb(urb);
		spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
		list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
		spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
		queue_work(wusbd, &wa->xfer_enqueue_work);
	} else {
		result = wa_urb_enqueue_b(xfer);
		if (result < 0) {
			/*
			 * URB submit/enqueue failed. Clean up, return an
			 * error and do not run the callback. This avoids
			 * an infinite submit/complete loop.
			 */
			dev_err(dev, "%s: URB enqueue failed: %d\n",
				__func__, result);
			wa_put(xfer->wa);
			wa_xfer_put(xfer);
			return result;
		}
	}
	return 0;

error_dequeued:
	kfree(xfer);
error_kmalloc:
	return result;
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue);

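/*
 * Typical caller (sketch only; the hwahc glue names are assumptions
 * based on the HWA host driver and are not defined in this file):
 *
 *	static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd,
 *					struct urb *urb, gfp_t gfp)
 *	{
 *		struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
 *		struct hwahc *hwahc =
 *			container_of(wusbhc, struct hwahc, wusbhc);
 *
 *		return wa_urb_enqueue(&hwahc->wa, urb->ep, urb, gfp);
 *	}
 */
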
/*
 * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
 * handler] is called.
 *
 * Until a transfer goes successfully through wa_urb_enqueue() it
 * needs to be dequeued with completion calling; when stuck in delayed
 * or before wa_xfer_setup() is called, we need to do completion.
 *
 *  not setup  If there is no hcpriv yet, that means that enqueue
 *             still had no time to set the xfer up. Because
 *             urb->status should be other than -EINPROGRESS,
 *             enqueue() will catch that and bail out.
 *
 * If the transfer has gone through setup, we just need to clean it
 * up. If it has gone through submit(), we have to abort it [with an
 * asynch request] and then make sure we cancel each segment.
 */
int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
{
	unsigned long flags, flags2;
	struct wa_xfer *xfer;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	unsigned cnt, done = 0, xfer_abort_pending;
	unsigned rpipe_ready = 0;

	xfer = urb->hcpriv;
	if (xfer == NULL) {
		/*
		 * Nothing setup yet enqueue will see urb->status !=
		 * -EINPROGRESS (by hcd layer) and bail out with
		 * error, no need to do completion
		 */
		BUG_ON(urb->status == -EINPROGRESS);
		goto out;
	}
	spin_lock_irqsave(&xfer->lock, flags);
	pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
	rpipe = xfer->ep->hcpriv;
	if (rpipe == NULL) {
		pr_debug("%s: xfer id 0x%08X has no RPIPE. %s",
			__func__, wa_xfer_id(xfer),
			"Probably already aborted.\n" );
		goto out_unlock;
	}
	/* Check the delayed list -> if there, release and complete */
	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
		goto dequeue_delayed;
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	if (xfer->seg == NULL)		/* still hasn't reached */
		goto out_unlock;	/* setup(), enqueue_b() completes */
	/* Ok, the xfer is in flight already, it's been setup and submitted.*/
	xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
			__func__, wa_xfer_id(xfer), cnt, seg->status);
		switch (seg->status) {
		case WA_SEG_NOTREADY:
		case WA_SEG_READY:
			printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
			       xfer, cnt, seg->status);
			WARN_ON(1);
			break;
		case WA_SEG_DELAYED:
			/*
			 * delete from rpipe delayed list. If no segments on
			 * this xfer have been submitted, __wa_xfer_is_done will
			 * trigger a giveback below. Otherwise, the submitted
			 * segments will be completed in the DTI interrupt.
			 */
			seg->status = WA_SEG_ABORTED;
			spin_lock_irqsave(&rpipe->seg_lock, flags2);
			list_del(&seg->list_node);
			xfer->segs_done++;
			spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
			break;
		case WA_SEG_DONE:
		case WA_SEG_ERROR:
		case WA_SEG_ABORTED:
			break;
			/*
			 * In the states below, the HWA device already knows
			 * about the transfer. If an abort request was sent,
			 * allow the HWA to process it and wait for the
			 * results. Otherwise, the DTI state and seg completed
			 * counts can get out of sync.
			 */
		case WA_SEG_SUBMITTED:
		case WA_SEG_PENDING:
		case WA_SEG_DTI_PENDING:
			/*
			 * Check if the abort was successfully sent. This could
			 * be false if the HWA has been removed but we haven't
			 * gotten the disconnect notification yet.
			 */
			if (!xfer_abort_pending) {
				seg->status = WA_SEG_ABORTED;
				rpipe_ready = rpipe_avail_inc(rpipe);
				xfer->segs_done++;
			}
			break;
		}
	}
	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */
	done = __wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return 0;

out_unlock:
	spin_unlock_irqrestore(&xfer->lock, flags);
out:
	return 0;

dequeue_delayed:
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	xfer->result = urb->status;
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_giveback(xfer);
	usb_put_urb(urb);		/* we got a ref in enqueue() */
	return 0;
}
EXPORT_SYMBOL_GPL(wa_urb_dequeue);

/*
 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
 * codes.
 *
 * Positive errno values are internal inconsistencies and should be
 * flagged louder. Negative are to be passed up to the user in the
 * normal way.
 *
 * @status: USB WA status code -- high two bits are stripped.
 */
static int wa_xfer_status_to_errno(u8 status)
{
	int errno;
	u8 real_status = status;
	static int xlat[] = {
		[WA_XFER_STATUS_SUCCESS] =		0,
		[WA_XFER_STATUS_HALTED] =		-EPIPE,
		[WA_XFER_STATUS_DATA_BUFFER_ERROR] =	-ENOBUFS,
		[WA_XFER_STATUS_BABBLE] =		-EOVERFLOW,
		[WA_XFER_RESERVED] =			EINVAL,
		[WA_XFER_STATUS_NOT_FOUND] =		0,
		[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
		[WA_XFER_STATUS_TRANSACTION_ERROR] =	-EILSEQ,
		[WA_XFER_STATUS_ABORTED] =		-EINTR,
		[WA_XFER_STATUS_RPIPE_NOT_READY] =	EINVAL,
		[WA_XFER_INVALID_FORMAT] =		EINVAL,
		[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =	EINVAL,
		[WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =	EINVAL,
	};
	status &= 0x3f;

	if (status == 0)
		return 0;
	if (status >= ARRAY_SIZE(xlat)) {
		printk_ratelimited(KERN_ERR "%s(): BUG? "
			       "Unknown WA transfer status 0x%02x\n",
			       __func__, real_status);
		return -EINVAL;
	}
	errno = xlat[status];
	if (unlikely(errno > 0)) {
		printk_ratelimited(KERN_ERR "%s(): BUG? "
			       "Inconsistent WA status: 0x%02x\n",
			       __func__, real_status);
		errno = -errno;
	}
	return errno;
}

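/*
 * Example: a bTransferStatus of 0x81 (error bit set, code
 * WA_XFER_STATUS_HALTED) is masked to 0x01 and translates to -EPIPE,
 * which the caller stores in seg->result; a positive entry in xlat[]
 * (the reserved/internal codes) is instead flagged as an internal
 * inconsistency and negated before being returned.
 */
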
/*
 * If a last segment flag and/or a transfer result error is encountered,
 * no other segment transfer results will be returned from the device.
 * Mark the remaining submitted or pending xfers as completed so that
 * the xfer will complete cleanly.
 */
static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
		struct wa_seg *incoming_seg)
{
	int index;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	for (index = incoming_seg->index + 1; index < xfer->segs_submitted;
		index++) {
		struct wa_seg *current_seg = xfer->seg[index];

		BUG_ON(current_seg == NULL);

		switch (current_seg->status) {
		case WA_SEG_SUBMITTED:
		case WA_SEG_PENDING:
		case WA_SEG_DTI_PENDING:
			rpipe_avail_inc(rpipe);
		/*
		 * do not increment RPIPE avail for the WA_SEG_DELAYED case
		 * since it has not been submitted to the RPIPE.
		 */
		case WA_SEG_DELAYED:
			xfer->segs_done++;
			current_seg->status = incoming_seg->status;
			break;
		case WA_SEG_ABORTED:
			break;
		default:
			WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
				__func__, wa_xfer_id(xfer), index,
				current_seg->status);
			break;
		}
	}
}

/*
 * Process a xfer result completion message
 *
 * inbound transfers: need to schedule a buf_in_urb read
 *
 * FIXME: this function needs to be broken up into parts
 */
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
		struct wa_xfer_result *xfer_result)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	unsigned long flags;
	u8 seg_idx;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	unsigned done = 0;
	u8 usb_status;
	unsigned rpipe_ready = 0;

	spin_lock_irqsave(&xfer->lock, flags);
	seg_idx = xfer_result->bTransferSegment & 0x7f;
	if (unlikely(seg_idx >= xfer->segs))
		goto error_bad_seg;
	seg = xfer->seg[seg_idx];
	rpipe = xfer->ep->hcpriv;
	usb_status = xfer_result->bTransferStatus;
	dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
		xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
	if (seg->status == WA_SEG_ABORTED
	    || seg->status == WA_SEG_ERROR)	/* already handled */
		goto segment_aborted;
	if (seg->status == WA_SEG_SUBMITTED)	/* oops, got here */
		seg->status = WA_SEG_PENDING;	/* before wa_seg{_dto}_cb() */
	if (seg->status != WA_SEG_PENDING) {
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
				xfer, seg_idx, seg->status);
		seg->status = WA_SEG_PENDING;	/* workaround/"fix" it */
	}
	if (usb_status & 0x80) {
		seg->result = wa_xfer_status_to_errno(usb_status);
		dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
			xfer, xfer->id, seg->index, usb_status);
		seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
			WA_SEG_ABORTED : WA_SEG_ERROR;
		goto error_complete;
	}
	/* FIXME: we ignore warnings, tally them for stats */
	if (usb_status & 0x40)		/* Warning?... */
		usb_status = 0;		/* ... pass */
	if (usb_pipeisoc(xfer->urb->pipe)) {
		/* set up WA state to read the isoc packet status next. */
		wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
		wa->dti_isoc_xfer_seg = seg_idx;
		wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
	} else if (xfer->is_inbound) {	/* IN data phase: read to buffer */
		seg->status = WA_SEG_DTI_PENDING;
		BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
		/* this should always be 0 before a resubmit. */
		wa->buf_in_urb->num_mapped_sgs = 0;

		if (xfer->is_dma) {
			wa->buf_in_urb->transfer_dma =
				xfer->urb->transfer_dma
				+ (seg_idx * xfer->seg_size);
			wa->buf_in_urb->transfer_flags
				|= URB_NO_TRANSFER_DMA_MAP;
			wa->buf_in_urb->transfer_buffer = NULL;
			wa->buf_in_urb->sg = NULL;
			wa->buf_in_urb->num_sgs = 0;
		} else {
			/* do buffer or SG processing. */
			wa->buf_in_urb->transfer_flags
				&= ~URB_NO_TRANSFER_DMA_MAP;

			if (xfer->urb->transfer_buffer) {
				wa->buf_in_urb->transfer_buffer =
					xfer->urb->transfer_buffer
					+ (seg_idx * xfer->seg_size);
				wa->buf_in_urb->sg = NULL;
				wa->buf_in_urb->num_sgs = 0;
			} else {
				/*
				 * allocate an SG list to store seg_size
				 * bytes and copy the subset of the
				 * xfer->urb->sg that matches the buffer
				 * subset we are about to read.
				 */
				wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
					xfer->urb->sg,
					seg_idx * xfer->seg_size,
					le32_to_cpu(
						xfer_result->dwTransferLength),
					&(wa->buf_in_urb->num_sgs));

				if (!(wa->buf_in_urb->sg)) {
					wa->buf_in_urb->num_sgs = 0;
					goto error_sg_alloc;
				}
				wa->buf_in_urb->transfer_buffer = NULL;
			}
		}
		wa->buf_in_urb->transfer_buffer_length =
			le32_to_cpu(xfer_result->dwTransferLength);
		wa->buf_in_urb->context = seg;
		result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
		if (result < 0)
			goto error_submit_buf_in;
	} else {
		/* OUT data phase, complete it -- */
		seg->status = WA_SEG_DONE;
		seg->result = le32_to_cpu(xfer_result->dwTransferLength);
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return;

error_submit_buf_in:
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
			xfer, seg_idx, result);
	seg->result = result;
	kfree(wa->buf_in_urb->sg);
	wa->buf_in_urb->sg = NULL;
error_sg_alloc:
	__wa_xfer_abort(xfer);
	seg->status = WA_SEG_ERROR;
error_complete:
	xfer->segs_done++;
	rpipe_ready = rpipe_avail_inc(rpipe);
	wa_complete_remaining_xfer_segs(xfer, seg);
	done = __wa_xfer_is_done(xfer);
	/*
	 * queue work item to clear STALL for control endpoints.
	 * Otherwise, let endpoint_reset take care of it.
	 */
	if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
		usb_endpoint_xfer_control(&xfer->ep->desc) &&
		done) {

		dev_info(dev, "Control EP stall.  Queue delayed work.\n");
		spin_lock_irq(&wa->xfer_list_lock);
		/* move xfer from xfer_list to xfer_errored_list. */
		list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
		spin_unlock_irq(&wa->xfer_list_lock);
		spin_unlock_irqrestore(&xfer->lock, flags);
		queue_work(wusbd, &wa->xfer_error_work);
	} else {
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
	return;

error_bad_seg:
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_urb_dequeue(wa, xfer->urb);
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	return;

segment_aborted:
	/* nothing to do, as the aborter did the completion */
	spin_unlock_irqrestore(&xfer->lock, flags);
}
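
/*
 * Layout notes for the two fields decoded above (as this function uses
 * them; believed to follow the WUSB1.0 transfer result format):
 *
 *	bTransferSegment: bit 7 = last-segment flag, bits 6:0 = segment
 *		index, hence the "& 0x7f" mask when computing seg_idx.
 *	bTransferStatus:  bit 7 = error, bit 6 = warning (ignored above),
 *		bits 5:0 = status code fed to wa_xfer_status_to_errno().
 */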
/*
 * Process an isochronous packet status message
 *
 * inbound transfers: need to schedule a buf_in_urb read
 */
static void wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
{
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer_packet_status_hwaiso *packet_status;
	struct wa_xfer_packet_status_len_hwaiso *status_array;
	struct wa_xfer *xfer;
	unsigned long flags;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	unsigned done = 0;
	unsigned rpipe_ready = 0, seg_index;
	int expected_size;

	/* We have a xfer result buffer; check it */
	dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
		urb->actual_length, urb->transfer_buffer);
	packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
	if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
		dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
			packet_status->bPacketType);
		goto error_parse_buffer;
	}
	xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
	if (xfer == NULL) {
		dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
			wa->dti_isoc_xfer_in_progress);
		goto error_parse_buffer;
	}
	spin_lock_irqsave(&xfer->lock, flags);
	if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
		goto error_bad_seg;
	seg = xfer->seg[wa->dti_isoc_xfer_seg];
	rpipe = xfer->ep->hcpriv;
	expected_size = sizeof(*packet_status) +
			(sizeof(packet_status->PacketStatus[0]) *
			seg->isoc_frame_count);
	if (urb->actual_length != expected_size) {
		dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %d needed)\n",
			urb->actual_length, expected_size);
		goto error_bad_seg;
	}
	if (le16_to_cpu(packet_status->wLength) != expected_size) {
		dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
			le16_to_cpu(packet_status->wLength));
		goto error_bad_seg;
	}
	/* copy the isoc packet status and lengths back to the xfer urb. */
	status_array = packet_status->PacketStatus;
	for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
		xfer->urb->iso_frame_desc[seg->index].status =
			wa_xfer_status_to_errno(
			le16_to_cpu(status_array[seg_index].PacketStatus));
		xfer->urb->iso_frame_desc[seg->index].actual_length =
			le16_to_cpu(status_array[seg_index].PacketLength);
	}

	if (!xfer->is_inbound) {
		/* OUT transfer, complete it -- */
		seg->status = WA_SEG_DONE;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	wa_xfer_put(xfer);
	return;

error_bad_seg:
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_put(xfer);
error_parse_buffer:
	return;
}
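
/*
 * Size check example (illustrative; the exact struct sizes here are
 * assumptions, since the code derives them with sizeof()): with a
 * 4-byte wa_xfer_packet_status_hwaiso header and 4-byte status/length
 * entries, a segment covering 8 isoc frames would require
 * expected_size = 4 + (4 * 8) = 36 bytes in the DTI buffer.
 */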
/*
 * Callback for the IN data phase
 *
 * If successful, transition state; otherwise, take a note of the
 * error, mark this segment done and try completion.
 *
 * Note we don't access the transfer until we are sure it hasn't been
 * cancelled (ECONNRESET, ENOENT), which could mean that seg->xfer is
 * already gone.
 */
static void wa_buf_in_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned rpipe_ready;
	unsigned long flags;
	u8 done = 0;

	/* free the sg if it was used. */
	kfree(urb->sg);
	urb->sg = NULL;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
			xfer, seg->index, (size_t)urb->actual_length);
		seg->status = WA_SEG_DONE;
		seg->result = urb->actual_length;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by the one who unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: data in error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		__wa_xfer_abort(xfer);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}
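
/*
 * Note: the kfree(urb->sg) at the top of wa_buf_in_cb() releases the
 * subset scatterlist that wa_xfer_result_chew() may have allocated via
 * wa_xfer_create_subset_sg(); freeing it before the status switch means
 * the memory is reclaimed even when the URB was unlinked.
 */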
/*
 * Handle an incoming transfer result buffer
 *
 * Given a transfer result buffer, it completes the transfer (possibly
 * scheduling a buffer-in read) and then resubmits the DTI URB for a
 * new transfer result read.
 *
 * The xfer_result DTI URB state machine
 *
 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
 *
 * We start in OFF mode, the first xfer_result notification [through
 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
 * read.
 *
 * We receive a buffer -- if it is not a xfer_result, we complain and
 * repost the DTI-URB. If it is a xfer_result, then do the xfer seg
 * request accounting. If it is an IN segment, we move to RBI and post
 * a BUF-IN-URB to read into the right buffer. The BUF-IN-URB callback
 * will repost the DTI-URB and move back to the RXR state. If there was
 * no IN segment, the DTI-URB is reposted directly.
 *
 * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many
 * errors) in the URBs.
 */
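/*
 * A sketch of the state machine described above (OFF has no explicit
 * state value; it simply corresponds to wa->dti_urb == NULL):
 *
 *	OFF --[wa_handle_notif_xfer() posts DTI-URB]-----------> RXR
 *	RXR --[xfer_result for an IN segment: post BUF-IN-URB]-> RBI
 *	RBI --[wa_buf_in_cb() reposts DTI-URB]------------------> RXR
 *	RXR/RBI --[ENOENT, ESHUTDOWN or too many errors]--------> OFF
 */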
static void wa_dti_cb(struct urb *urb)
{
	int result;
	struct wahc *wa = urb->context;
	struct device *dev = &wa->usb_iface->dev;
	u32 xfer_id;
	u8 usb_status;

	BUG_ON(wa->dti_urb != urb);
	switch (wa->dti_urb->status) {
	case 0:
		if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
			struct wa_xfer_result *xfer_result;
			struct wa_xfer *xfer;

			/* We have a xfer result buffer; check it */
			dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
				urb->actual_length, urb->transfer_buffer);
			if (urb->actual_length != sizeof(*xfer_result)) {
				dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
					urb->actual_length,
					sizeof(*xfer_result));
				break;
			}
			xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
			if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
				dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
					xfer_result->hdr.bLength);
				break;
			}
			if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
				dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
					xfer_result->hdr.bNotifyType);
				break;
			}
			usb_status = xfer_result->bTransferStatus & 0x3f;
			if (usb_status == WA_XFER_STATUS_NOT_FOUND)
				/* taken care of already */
				break;
			xfer_id = le32_to_cpu(xfer_result->dwTransferID);
			xfer = wa_xfer_get_by_id(wa, xfer_id);
			if (xfer == NULL) {
				/* FIXME: transaction not found. */
				dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
					xfer_id, usb_status);
				break;
			}
			wa_xfer_result_chew(wa, xfer, xfer_result);
			wa_xfer_put(xfer);
		} else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
			wa_process_iso_packet_status(wa, urb);
		} else {
			dev_err(dev, "DTI Error: unexpected EP state = %d\n",
				wa->dti_state);
		}
		break;
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
		goto out;
	default:
		/* Unknown error */
		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTI: URB max acceptable errors exceeded, resetting device\n");
			wa_reset_all(wa);
			goto out;
		}
		if (printk_ratelimit())
			dev_err(dev, "DTI: URB error %d\n", urb->status);
		break;
	}
	/* Resubmit the DTI URB */
	result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), resetting\n",
			result);
		wa_reset_all(wa);
	}
out:
	return;
}
/*
 * Transfer complete notification
 *
 * Called from the notif.c code. We get a notification on EP2 saying
 * that some endpoint has some transfer result data available. We are
 * about to read it.
 *
 * To speed things up, we always keep a URB reading on the DTI
 * endpoint; we don't really set it up and start it until the first
 * xfer complete notification arrives, which is what we do here.
 *
 * Follow up in wa_dti_cb(), as that's where the whole state
 * machine starts.
 *
 * So here we just initialize the DTI URB for reading transfer result
 * notifications and also the buffer-in URB, for reading buffers. Then
 * we just submit the DTI URB.
 *
 * @wa shall be referenced
 */
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_notif_xfer *notif_xfer;
	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;

	notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
	BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);

	if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
		/* FIXME: hardcoded limitation, adapt */
		dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
		goto error;
	}
	if (wa->dti_urb != NULL)	/* DTI URB already started */
		goto out;

	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->dti_urb == NULL) {
		dev_err(dev, "Can't allocate DTI URB\n");
		goto error_dti_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->dti_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		wa->dti_buf, wa->dti_buf_size,
		wa_dti_cb, wa);

	wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->buf_in_urb == NULL) {
		dev_err(dev, "Can't allocate BUF-IN URB\n");
		goto error_buf_in_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->buf_in_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		NULL, 0, wa_buf_in_cb, wa);
	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), resetting\n",
			result);
		goto error_dti_urb_submit;
	}
out:
	return;

error_dti_urb_submit:
	usb_put_urb(wa->buf_in_urb);
	wa->buf_in_urb = NULL;
error_buf_in_urb_alloc:
	usb_put_urb(wa->dti_urb);
	wa->dti_urb = NULL;
error_dti_urb_alloc:
error:
	wa_reset_all(wa);
}