/*
 * Copyright (C) 2001-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
 *
 * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
 * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
 * buffers needed for the larger number).  We use one QH per endpoint, queue
 * multiple urbs (all three types) per endpoint.  URBs may need several qtds.
 *
 * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
 * interrupts) needs careful scheduling.  Performance improvements can be
 * an ongoing challenge.  That's in "ehci-sched.c".
 *
 * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
 * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
 * (b) special fields in qh entries or (c) split iso entries.  TTs will
 * buffer low/full speed data so the host collects it at high speed.
 */

/*-------------------------------------------------------------------------*/
/* fill a qtd, returning how much of the buffer we were able to queue up */

static int
qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
		  size_t len, int token, int maxpacket)
{
	int	i, count;
	u64	addr = buf;

	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
	qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely (len < count))		/* ... iff needed */
		count = len;
	else {
		buf +=  0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
			qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
					(u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
	qtd->length = count;

	return count;
}
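/*
 * Illustrative example: with a page-aligned 20 KB buffer, qtd_fill() above
 * uses all five hw_buf[] slots (5 x 4 KB) and returns 0x5000.  With an
 * unaligned buffer the first slot covers only the tail of its page, so the
 * same qtd maps closer to 16 KB and the rest must go into another qtd; the
 * final count is trimmed to a multiple of maxpacket so that only a short
 * packet can terminate the transfer.
 */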
/*-------------------------------------------------------------------------*/

static void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	struct ehci_qh_hw *hw = qh->hw;

	/* writes to an active overlay are unsafe */
	BUG_ON(qh->qh_state != QH_STATE_IDLE);

	hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
	hw->hw_alt_next = EHCI_LIST_END(ehci);

	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
	 * ever clear it.
	 */
	if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) {
		unsigned	is_out, epnum;

		is_out = qh->is_out;
		epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
		if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
			hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
			usb_settoggle (qh->dev, epnum, is_out, 1);
		}
	}

	hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}
/* if it weren't for a common silicon quirk (writing the dummy into the qh
 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
 * recovery (including urb dequeue) would need software changes to a QH...
 */
static void
qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd *qtd;

	if (list_empty (&qh->qtd_list))
		qtd = qh->dummy;
	else {
		qtd = list_entry (qh->qtd_list.next,
				struct ehci_qtd, qtd_list);
		/* first qtd may already be partially processed */
		if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current)
			qtd = NULL;
	}

	if (qtd)
		qh_update (ehci, qh, qtd);
}

/*-------------------------------------------------------------------------*/
static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);

static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct ehci_qh		*qh = ep->hcpriv;
	unsigned long		flags;

	spin_lock_irqsave(&ehci->lock, flags);
	qh->clearing_tt = 0;
	if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
			&& ehci->rh_state == EHCI_RH_RUNNING)
		qh_link_async(ehci, qh);
	spin_unlock_irqrestore(&ehci->lock, flags);
}
static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
		struct urb *urb, u32 token)
{
	/* If an async split transaction gets an error or is unlinked,
	 * the TT buffer may be left in an indeterminate state.  We
	 * have to clear the TT buffer.
	 *
	 * Note: this routine is never called for Isochronous transfers.
	 */
	if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
		struct usb_device *tt = urb->dev->tt->hub;

		dev_dbg(&tt->dev,
			"clear tt buffer port %d, a%d ep%d t%08x\n",
			urb->dev->ttport, urb->dev->devnum,
			usb_pipeendpoint(urb->pipe), token);

		if (!ehci_is_TDI(ehci)
				|| urb->dev->tt->hub !=
				   ehci_to_hcd(ehci)->self.root_hub) {
			if (usb_hub_clear_tt_buffer(urb) == 0)
				qh->clearing_tt = 1;
		} else {

			/* REVISIT ARC-derived cores don't clear the root
			 * hub TT buffer in this way...
			 */
		}
	}
}
static int qtd_copy_status (
	struct ehci_hcd *ehci,
	struct urb *urb,
	size_t length,
	u32 token
)
{
	int	status = -EINPROGRESS;

	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely (QTD_PID (token) != 2))
		urb->actual_length += length - QTD_LENGTH (token);

	/* don't modify error codes */
	if (unlikely(urb->unlinked))
		return status;

	/* force cleanup after short read; not always an error */
	if (unlikely (IS_SHORT_READ (token)))
		status = -EREMOTEIO;

	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			status = -EOVERFLOW;
		/* CERR nonzero + halt --> stall */
		} else if (QTD_CERR(token)) {
			status = -EPIPE;

		/* In theory, more than one of the following bits can be set
		 * since they are sticky and the transaction is retried.
		 * Which to test first is rather arbitrary.
		 */
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			status = (QTD_PID (token) == 1) /* IN ? */
				? -ENOSR  /* hc couldn't read data */
				: -ECOMM; /* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad CRC, wrong PID, etc */
			ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
				urb->dev->devpath,
				usb_pipeendpoint(urb->pipe),
				usb_pipein(urb->pipe) ? "in" : "out");
			status = -EPROTO;
		} else {	/* unknown */
			status = -EPROTO;
		}

		ehci_vdbg (ehci,
			"dev%d ep%d%s qtd token %08x --> status %d\n",
			usb_pipedevice (urb->pipe),
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			token, status);
	}

	return status;
}
static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
__releases(ehci->lock)
__acquires(ehci->lock)
{
	if (likely (urb->hcpriv != NULL)) {
		struct ehci_qh	*qh = (struct ehci_qh *) urb->hcpriv;

		/* S-mask in a QH means it's an interrupt urb */
		if ((qh->hw->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {

			/* ... update hc-wide periodic stats (for usbfs) */
			ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
		}
	}

	if (unlikely(urb->unlinked)) {
		COUNT(ehci->stats.unlink);
	} else {
		/* report non-error and short read status as zero */
		if (status == -EINPROGRESS || status == -EREMOTEIO)
			status = 0;
		COUNT(ehci->stats.complete);
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s status %d len %d/%d\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		status,
		urb->actual_length, urb->transfer_buffer_length);
#endif

	/* complete() can reenter this HCD */
	usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	spin_unlock (&ehci->lock);
	usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
	spin_lock (&ehci->lock);
}

static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
/*
 * Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current.  Returns number of completions called,
 * indicating how much "real" work we did.
 */
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd		*last, *end = qh->dummy;
	struct list_head	*entry, *tmp;
	int			last_status;
	int			stopped;
	unsigned		count = 0;
	u8			state;
	struct ehci_qh_hw	*hw = qh->hw;

	if (unlikely (list_empty (&qh->qtd_list)))
		return count;

	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE:  unlinking expects to be done in queue order.
	 *
	 * It's a bug for qh->qh_state to be anything other than
	 * QH_STATE_IDLE, unless our caller is scan_async() or
	 * scan_intr().
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);

 rescan:
	last = NULL;
	last_status = -EINPROGRESS;
	qh->needs_rescan = 0;

	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_safe (entry, tmp, &qh->qtd_list) {
		struct ehci_qtd	*qtd;
		struct urb	*urb;
		u32		token = 0;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;

		/* clean up any state from previous QTD ...*/
		if (last) {
			if (likely (last->urb != urb)) {
				ehci_urb_done(ehci, last->urb, last_status);
				count++;
				last_status = -EINPROGRESS;
			}
			ehci_qtd_free (ehci, last);
			last = NULL;
		}

		/* ignore urbs submitted during completions we reported */
		if (qtd == end)
			break;

		/* hardware copies qtd out of qh overlay */
		rmb ();
		token = hc32_to_cpu(ehci, qtd->hw_token);

		/* always clean up qtds the hc de-activated */
 retry_xacterr:
		if ((token & QTD_STS_ACTIVE) == 0) {

			/* Report Data Buffer Error: non-fatal but useful */
			if (token & QTD_STS_DBE)
				ehci_dbg(ehci,
					"detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
					urb,
					usb_endpoint_num(&urb->ep->desc),
					usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
					urb->transfer_buffer_length,
					qtd,
					qh);

			/* on STALL, error, and short reads this urb must
			 * complete and all its qtds must be recycled.
			 */
			if ((token & QTD_STS_HALT) != 0) {

				/* retry transaction errors until we
				 * reach the software xacterr limit
				 */
				if ((token & QTD_STS_XACT) &&
						QTD_CERR(token) == 0 &&
						++qh->xacterrs < QH_XACTERR_MAX &&
						!urb->unlinked) {
					ehci_dbg(ehci,
	"detected XactErr len %zu/%zu retry %d\n",
	qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);

					/* reset the token in the qtd and the
					 * qh overlay (which still contains
					 * the qtd) so that we pick up from
					 * where we left off
					 */
					token &= ~QTD_STS_HALT;
					token |= QTD_STS_ACTIVE |
							(EHCI_TUNE_CERR << 10);
					qtd->hw_token = cpu_to_hc32(ehci,
							token);
					wmb();
					hw->hw_token = cpu_to_hc32(ehci,
							token);
					goto retry_xacterr;
				}
				stopped = 1;

			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 *
			 * other short reads won't stop the queue, including
			 * control transfers (status stage handles that) or
			 * most other single-qtd reads ... the queue stops if
			 * URB_SHORT_NOT_OK was set so the driver submitting
			 * the urbs could clean it up.
			 */
			} else if (IS_SHORT_READ (token)
					&& !(qtd->hw_alt_next
						& EHCI_LIST_END(ehci))) {
				stopped = 1;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely (!stopped
				&& ehci->rh_state >= EHCI_RH_RUNNING)) {
			break;

		/* scan the whole queue for unlinks whenever it stops */
		} else {
			stopped = 1;

			/* cancel everything if we halt, suspend, etc */
			if (ehci->rh_state < EHCI_RH_RUNNING)
				last_status = -ESHUTDOWN;

			/* this qtd is active; skip it unless a previous qtd
			 * for its urb faulted, or its urb was canceled.
			 */
			else if (last_status == -EINPROGRESS && !urb->unlinked)
				continue;

			/* qh unlinked; token in overlay may be most current */
			if (state == QH_STATE_IDLE
					&& cpu_to_hc32(ehci, qtd->qtd_dma)
						== hw->hw_current) {
				token = hc32_to_cpu(ehci, hw->hw_token);

				/* An unlink may leave an incomplete
				 * async transaction in the TT buffer.
				 * We have to clear it.
				 */
				ehci_clear_tt_buffer(ehci, qh, urb, token);
			}
		}

		/* unless we already know the urb's status, collect qtd status
		 * and update count of bytes transferred.  in common short read
		 * cases with only one data qtd (including control transfers),
		 * queue processing won't halt.  but with two or more qtds (for
		 * example, with a 32 KB transfer), when the first qtd gets a
		 * short read the second must be removed by hand.
		 */
		if (last_status == -EINPROGRESS) {
			last_status = qtd_copy_status(ehci, urb,
					qtd->length, token);
			if (last_status == -EREMOTEIO
					&& (qtd->hw_alt_next
						& EHCI_LIST_END(ehci)))
				last_status = -EINPROGRESS;

			/* As part of low/full-speed endpoint-halt processing
			 * we must clear the TT buffer (11.17.5).
			 */
			if (unlikely(last_status != -EINPROGRESS &&
					last_status != -EREMOTEIO)) {
				/* The TT's in some hubs malfunction when they
				 * receive this request following a STALL (they
				 * stop sending isochronous packets).  Since a
				 * STALL can't leave the TT buffer in a busy
				 * state (if you believe Figures 11-48 - 11-51
				 * in the USB 2.0 spec), we won't clear the TT
				 * buffer in this case.  Strictly speaking this
				 * is a violation of the spec.
				 */
				if (last_status != -EPIPE)
					ehci_clear_tt_buffer(ehci, qh, urb,
							token);
			}
		}

		/* if we're removing something not at the queue head,
		 * patch the hardware queue pointer.
		 */
		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry (qtd->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}

		/* remove qtd; it's recycled after possible urb completion */
		list_del (&qtd->qtd_list);
		last = qtd;

		/* reinit the xacterr counter for the next qtd */
		qh->xacterrs = 0;
	}

	/* last urb's completion might still need calling */
	if (likely (last != NULL)) {
		ehci_urb_done(ehci, last->urb, last_status);
		count++;
		ehci_qtd_free (ehci, last);
	}

	/* Do we need to rescan for URBs dequeued during a giveback? */
	if (unlikely(qh->needs_rescan)) {
		/* If the QH is already unlinked, do the rescan now. */
		if (state == QH_STATE_IDLE)
			goto rescan;

		/* Otherwise we have to wait until the QH is fully unlinked.
		 * Our caller will start an unlink if qh->needs_rescan is
		 * set.  But if an unlink has already started, nothing needs
		 * to be done.
		 */
		if (state != QH_STATE_LINKED)
			qh->needs_rescan = 0;
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 */
	if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) {
		switch (state) {
		case QH_STATE_IDLE:
			qh_refresh(ehci, qh);
			break;
		case QH_STATE_LINKED:
			/* We won't refresh a QH that's linked (after the HC
			 * stopped the queue).  That avoids a race:
			 *  - HC reads first part of QH;
			 *  - CPU updates that first part and the token;
			 *  - HC reads rest of that QH, including token
			 * Result:  HC gets an inconsistent image, and then
			 * DMAs to/from the wrong memory (corrupting it).
			 *
			 * That should be rare for interrupt transfers,
			 * except maybe high bandwidth ...
			 */

			/* Tell the caller to start an unlink */
			qh->needs_rescan = 1;
			break;
		/* otherwise, unlink already started */
		}
	}

	return count;
}
/*-------------------------------------------------------------------------*/

// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
// ... and packet size, for any kind of endpoint descriptor
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
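/*
 * Example: a high bandwidth interrupt endpoint advertising wMaxPacketSize
 * 0x1400 decodes as max_packet() == 0x400 (1024 bytes) with hb_mult() == 3,
 * i.e. up to three 1024-byte transactions per microframe.
 */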
/*
 * reverse of qh_urb_transaction:  free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list
) {
	struct list_head	*entry, *temp;

	list_for_each_safe (entry, temp, qtd_list) {
		struct ehci_qtd	*qtd;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		list_del (&qtd->qtd_list);
		ehci_qtd_free (ehci, qtd);
	}
}
/*
 * create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *
qh_urb_transaction (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*head,
	gfp_t			flags
) {
	struct ehci_qtd		*qtd, *qtd_prev;
	dma_addr_t		buf;
	int			len, this_sg_len, maxpacket;
	int			is_input;
	u32			token;
	int			i;
	struct scatterlist	*sg;

	/*
	 * URBs map to sequences of QTDs:  one logical transaction
	 */
	qtd = ehci_qtd_alloc (ehci, flags);
	if (unlikely (!qtd))
		return NULL;
	list_add_tail (&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */

	len = urb->transfer_buffer_length;
	is_input = usb_pipein (urb->pipe);
	if (usb_pipecontrol (urb->pipe)) {
		/* SETUP pid */
		qtd_fill(ehci, qtd, urb->setup_dma,
				sizeof (struct usb_ctrlrequest),
				token | (2 /* "setup" */ << 8), 8);

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);

		/* for zero length DATA stages, STATUS is always IN */
		if (len == 0)
			token |= (1 /* "in" */ << 8);
	}

	/*
	 * data transfer stage:  buffer setup
	 */
	i = urb->num_mapped_sgs;
	if (len > 0 && i > 0) {
		sg = urb->sg;
		buf = sg_dma_address(sg);

		/* urb->transfer_buffer_length may be smaller than the
		 * size of the scatterlist (or vice versa)
		 */
		this_sg_len = min_t(int, sg_dma_len(sg), len);
	} else {
		sg = NULL;
		buf = urb->transfer_dma;
		this_sg_len = len;
	}

	if (is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		int this_qtd_len;

		this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
				maxpacket);
		this_sg_len -= this_qtd_len;
		len -= this_qtd_len;
		buf += this_qtd_len;

		/*
		 * short reads advance to a "magic" dummy instead of the next
		 * qtd ... that forces the queue to stop, for manual cleanup.
		 * (this will usually be overridden later.)
		 */
		qtd->hw_alt_next = ehci->async->hw->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		if (likely(this_sg_len <= 0)) {
			if (--i <= 0 || len <= 0)
				break;
			sg = sg_next(sg);
			buf = sg_dma_address(sg);
			this_sg_len = min_t(int, sg_dma_len(sg), len);
		}

		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);
	}

	/*
	 * unless the caller requires manual cleanup after short reads,
	 * have the alt_next mechanism keep the queue running after the
	 * last data qtd (the only one, for control and most other cases).
	 */
	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
				|| usb_pipecontrol (urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END(ehci);

	/*
	 * control requests may need a terminating data "status" ack;
	 * other OUT ones may need a terminating short packet
	 * (zero length).
	 */
	if (likely (urb->transfer_buffer_length != 0)) {
		int	one_more = 0;

		if (usb_pipecontrol (urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out"  */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipeout(urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc (ehci, flags);
			if (unlikely (!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
			list_add_tail (&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill(ehci, qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);

	return head;

cleanup:
	qtd_list_free (ehci, urb, head);
	return NULL;
}
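/*
 * Typical use, roughly how the urb enqueue path in ehci-hcd.c drives this
 * file (a sketch, not the authoritative caller): build the qtd chain first,
 * then hand it to submit_async(), which frees the chain itself on failure.
 *
 *	struct list_head qtd_list;
 *
 *	INIT_LIST_HEAD(&qtd_list);
 *	if (!qh_urb_transaction(ehci, urb, &qtd_list, mem_flags))
 *		return -ENOMEM;
 *	return submit_async(ehci, urb, &qtd_list, mem_flags);
 */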
/*-------------------------------------------------------------------------*/

// Would be best to create all qh's from config descriptors,
// when each interface/altsetting is established.  Unlink
// any previous qh and cancel its urbs first; endpoints are
// implicitly reset then (data toggle too).
// That'd mean updating how usbcore talks to HCDs. (2.7?)


/*
 * Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
static struct ehci_qh *
qh_make (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			flags
) {
	struct ehci_qh		*qh = ehci_qh_alloc (ehci, flags);
	u32			info1 = 0, info2 = 0;
	int			is_input, type;
	int			maxp = 0;
	struct usb_tt		*tt = urb->dev->tt;
	struct ehci_qh_hw	*hw;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint (urb->pipe) << 8;
	info1 |= usb_pipedevice (urb->pipe) << 0;

	is_input = usb_pipein (urb->pipe);
	type = usb_pipetype (urb->pipe);
	maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);

	/* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
	 * acts like up to 3KB, but is built from smaller packets.
	 */
	if (max_packet(maxp) > 1024) {
		ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp));
		goto done;
	}

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
				is_input, 0,
				hb_mult(maxp) * max_packet(maxp)));
		qh->start = NO_FRAME;
		qh->stamp = ehci->periodic_stamp;

		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->c_usecs = 0;
			qh->gap_uf = 0;

			qh->period = urb->interval >> 3;
			if (qh->period == 0 && urb->interval != 1) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
				urb->interval = 1;
			} else if (qh->period > ehci->periodic_size) {
				qh->period = ehci->periodic_size;
				urb->interval = qh->period << 3;
			}
		} else {
			int		think_time;

			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
					is_input, 0, maxp) / (125 * 1000);

			/* FIXME this just approximates SPLIT/CSPLIT times */
			if (is_input) {		// SPLIT, gap, CSPLIT+DATA
				qh->c_usecs = qh->usecs + HS_USECS (0);
				qh->usecs = HS_USECS (1);
			} else {		// SPLIT+DATA, gap, CSPLIT
				qh->usecs += HS_USECS (1);
				qh->c_usecs = HS_USECS (0);
			}

			think_time = tt ? tt->think_time : 0;
			qh->tt_usecs = NS_TO_US (think_time +
					usb_calc_bus_time (urb->dev->speed,
					is_input, 0, max_packet (maxp)));
			qh->period = urb->interval;
			if (qh->period > ehci->periodic_size) {
				qh->period = ehci->periodic_size;
				urb->interval = qh->period;
			}
		}
	}

	/* support for tt scheduling, and access to toggles */
	qh->dev = urb->dev;

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= QH_LOW_SPEED;
		/* FALL THROUGH */

	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (EHCI_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= QH_CONTROL_EP;		/* for TT */
			info1 |= QH_TOGGLE_CTL;		/* toggle from qtd */
		}
		info1 |= maxp << 16;

		info2 |= (EHCI_TUNE_MULT_TT << 30);

		/* Some Freescale processors have an erratum in which the
		 * port number in the queue head was 0..N-1 instead of 1..N.
		 */
		if (ehci_has_fsl_portno_bug(ehci))
			info2 |= (urb->dev->ttport-1) << 23;
		else
			info2 |= urb->dev->ttport << 23;

		/* set the address of the TT; for TDI's integrated
		 * root hub tt, leave it zeroed.
		 */
		if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
			info2 |= tt->hub->devnum << 16;

		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */

		break;

	case USB_SPEED_HIGH:		/* no TT involved */
		info1 |= QH_HIGH_SPEED;
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= QH_TOGGLE_CTL;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			/* The USB spec says that high speed bulk endpoints
			 * always use 512 byte maxpacket.  But some device
			 * vendors decided to ignore that, and MSFT is happy
			 * to help them do so.  So now people expect to use
			 * such nonconformant devices with Linux too; sigh.
			 */
			info1 |= max_packet(maxp) << 16;
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= max_packet (maxp) << 16;
			info2 |= hb_mult (maxp) << 30;
		}
		break;
	default:
		ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev,
			urb->dev->speed);
done:
		qh_destroy(ehci, qh);
		return NULL;
	}

	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear, advance to dummy */
	qh->qh_state = QH_STATE_IDLE;
	hw = qh->hw;
	hw->hw_info1 = cpu_to_hc32(ehci, info1);
	hw->hw_info2 = cpu_to_hc32(ehci, info2);
	qh->is_out = !is_input;
	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
	qh_refresh (ehci, qh);
	return qh;
}
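/*
 * Summary of the fields assembled above (per the EHCI QH layout): info1
 * packs the device address in bits 6:0, endpoint number in bits 11:8, EPS
 * in bits 13:12 and maxpacket in bits 26:16, plus the RL/control/toggle
 * flags; info2 carries the high-bandwidth multiplier in bits 31:30 and, for
 * split transactions, the hub address and port in bits 22:16 and 29:23.
 */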
/*-------------------------------------------------------------------------*/

static void enable_async(struct ehci_hcd *ehci)
{
	if (ehci->async_count++)
		return;

	/* Stop waiting to turn off the async schedule */
	ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_ASYNC);

	/* Don't start the schedule until ASS is 0 */
	ehci_poll_ASS(ehci);
}

static void disable_async(struct ehci_hcd *ehci)
{
	if (--ehci->async_count)
		return;

	/* The async schedule and async_unlink list are supposed to be empty */
	WARN_ON(ehci->async->qh_next.qh || ehci->async_unlink);

	/* Don't turn off the schedule until ASS is 1 */
	ehci_poll_ASS(ehci);
}
/* move qh (and its qtds) onto async queue; maybe enable queue.  */

static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	__hc32		dma = QH_NEXT(ehci, qh->qh_dma);
	struct ehci_qh	*head;

	/* Don't link a QH if there's a Clear-TT-Buffer pending */
	if (unlikely(qh->clearing_tt))
		return;

	WARN_ON(qh->qh_state != QH_STATE_IDLE);

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	qh_refresh(ehci, qh);

	/* splice right after start */
	head = ehci->async;
	qh->qh_next = head->qh_next;
	qh->hw->hw_next = head->hw->hw_next;
	wmb ();

	head->qh_next.qh = qh;
	head->hw->hw_next = dma;

	qh->xacterrs = 0;
	qh->qh_state = QH_STATE_LINKED;
	/* qtd completions reported later by interrupt */

	enable_async(ehci);
}
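/*
 * Note: ehci->async is the permanent "head" QH carrying the H-bit, so the
 * async schedule is a circular list that always has at least that one
 * entry; splicing a new QH right after it (above) means the controller
 * reaches it on its next pass without touching any other element.
 */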
/*-------------------------------------------------------------------------*/

/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			epnum,
	void			**ptr
)
{
	struct ehci_qh		*qh = NULL;
	__hc32			qh_addr_mask = cpu_to_hc32(ehci, 0x7f);

	qh = (struct ehci_qh *) *ptr;
	if (unlikely (qh == NULL)) {
		/* can't sleep here, we have ehci->lock... */
		qh = qh_make (ehci, urb, GFP_ATOMIC);
		*ptr = qh;
	}
	if (likely (qh != NULL)) {
		struct ehci_qtd	*qtd;

		if (unlikely (list_empty (qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry (qtd_list->next, struct ehci_qtd,
					qtd_list);

		/* control qh may need patching ... */
		if (unlikely (epnum == 0)) {

			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice (urb->pipe) == 0)
				qh->hw->hw_info1 &= ~qh_addr_mask;
		}

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely (qtd != NULL)) {
			struct ehci_qtd		*dummy;
			dma_addr_t		dma;
			__hc32			token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT(ehci);

			dummy = qh->dummy;

			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;

			list_del (&qtd->qtd_list);
			list_add (&dummy->qtd_list, qtd_list);
			list_splice_tail(qtd_list, &qh->qtd_list);

			ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry (qh->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT(ehci, dma);

			/* let the hc process these next qtds */
			wmb ();
			dummy->hw_token = token;

			urb->hcpriv = qh;
		}
	}
	return qh;
}

/*-------------------------------------------------------------------------*/
static int
submit_async (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	int			epnum;
	unsigned long		flags;
	struct ehci_qh		*qh = NULL;
	int			rc;

	epnum = urb->ep->desc.bEndpointAddress;

#ifdef EHCI_URB_TRACE
	{
		struct ehci_qtd *qtd;
		qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
		ehci_dbg(ehci,
			 "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
			 __func__, urb->dev->devpath, urb,
			 epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
			 urb->transfer_buffer_length,
			 qtd, urb->ep->hcpriv);
	}
#endif

	spin_lock_irqsave (&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		rc = -ESHUTDOWN;
		goto done;
	}
	rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(rc))
		goto done;

	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
	if (unlikely(qh == NULL)) {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
		rc = -ENOMEM;
		goto done;
	}

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely (qh->qh_state == QH_STATE_IDLE))
		qh_link_async(ehci, qh);
 done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (unlikely (qh == NULL))
		qtd_list_free (ehci, urb, qtd_list);
	return rc;
}

/*-------------------------------------------------------------------------*/
static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qh		*prev;

	/* Add to the end of the list of QHs waiting for the next IAAD */
	qh->qh_state = QH_STATE_UNLINK;
	if (ehci->async_unlink)
		ehci->async_unlink_last->unlink_next = qh;
	else
		ehci->async_unlink = qh;
	ehci->async_unlink_last = qh;

	/* Unlink it from the schedule */
	prev = ehci->async;
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;

	prev->hw->hw_next = qh->hw->hw_next;
	prev->qh_next = qh->qh_next;
	if (ehci->qh_scan_next == qh)
		ehci->qh_scan_next = qh->qh_next.qh;
}
static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested)
{
	/*
	 * Do nothing if an IAA cycle is already running or
	 * if one will be started shortly.
	 */
	if (ehci->async_iaa || ehci->async_unlinking)
		return;

	/* Do all the waiting QHs at once */
	ehci->async_iaa = ehci->async_unlink;
	ehci->async_unlink = NULL;

	/* If the controller isn't running, we don't have to wait for it */
	if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
		if (!nested)		/* Avoid recursion */
			end_unlink_async(ehci);

	/* Otherwise start a new IAA cycle */
	} else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) {
		/* Make sure the unlinks are all visible to the hardware */
		wmb();

		ehci_writel(ehci, ehci->command | CMD_IAAD,
				&ehci->regs->command);
		ehci_readl(ehci, &ehci->regs->command);
		ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true);
	}
}
/* the async qh for the qtds being unlinked are now gone from the HC */

static void end_unlink_async(struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;

	if (ehci->has_synopsys_hc_bug)
		ehci_writel(ehci, (u32) ehci->async->qh_dma,
			    &ehci->regs->async_next);

	/* Process the idle QHs */
 restart:
	ehci->async_unlinking = true;
	while (ehci->async_iaa) {
		qh = ehci->async_iaa;
		ehci->async_iaa = qh->unlink_next;
		qh->unlink_next = NULL;

		qh->qh_state = QH_STATE_IDLE;
		qh->qh_next.qh = NULL;

		qh_completions(ehci, qh);
		if (!list_empty(&qh->qtd_list) &&
				ehci->rh_state == EHCI_RH_RUNNING)
			qh_link_async(ehci, qh);
		disable_async(ehci);
	}
	ehci->async_unlinking = false;

	/* Start a new IAA cycle if any QHs are waiting for it */
	if (ehci->async_unlink) {
		start_iaa_cycle(ehci, true);
		if (unlikely(ehci->rh_state < EHCI_RH_RUNNING))
			goto restart;
	}
}
static void unlink_empty_async(struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh, *next;
	bool			stopped = (ehci->rh_state < EHCI_RH_RUNNING);
	bool			check_unlinks_later = false;

	/* Unlink all the async QHs that have been empty for a timer cycle */
	next = ehci->async->qh_next.qh;
	while (next) {
		qh = next;
		next = qh->qh_next.qh;

		if (list_empty(&qh->qtd_list) &&
				qh->qh_state == QH_STATE_LINKED) {
			if (!stopped && qh->unlink_cycle ==
					ehci->async_unlink_cycle)
				check_unlinks_later = true;
			else
				single_unlink_async(ehci, qh);
		}
	}

	/* Start a new IAA cycle if any QHs are waiting for it */
	if (ehci->async_unlink)
		start_iaa_cycle(ehci, false);

	/* QHs that haven't been empty for long enough will be handled later */
	if (check_unlinks_later) {
		ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
		++ehci->async_unlink_cycle;
	}
}
/* makes sure the async qh will become idle */
/* caller must own ehci->lock */

static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	/*
	 * If the QH isn't linked then there's nothing we can do
	 * unless we were called during a giveback, in which case
	 * qh_completions() has to deal with it.
	 */
	if (qh->qh_state != QH_STATE_LINKED) {
		if (qh->qh_state == QH_STATE_COMPLETING)
			qh->needs_rescan = 1;
		return;
	}

	single_unlink_async(ehci, qh);
	start_iaa_cycle(ehci, false);
}
/*-------------------------------------------------------------------------*/

static void scan_async (struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;
	bool			check_unlinks_later = false;

	ehci->qh_scan_next = ehci->async->qh_next.qh;
	while (ehci->qh_scan_next) {
		qh = ehci->qh_scan_next;
		ehci->qh_scan_next = qh->qh_next.qh;
 rescan:
		/* clean any finished work for this qh */
		if (!list_empty(&qh->qtd_list)) {
			int temp;

			/*
			 * Unlinks could happen here; completion reporting
			 * drops the lock.  That's why ehci->qh_scan_next
			 * always holds the next qh to scan; if the next qh
			 * gets unlinked then ehci->qh_scan_next is adjusted
			 * in single_unlink_async().
			 */
			temp = qh_completions(ehci, qh);
			if (qh->needs_rescan) {
				start_unlink_async(ehci, qh);
			} else if (list_empty(&qh->qtd_list)
					&& qh->qh_state == QH_STATE_LINKED) {
				qh->unlink_cycle = ehci->async_unlink_cycle;
				check_unlinks_later = true;
			} else if (temp != 0)
				goto rescan;
		}
	}

	/*
	 * Unlink empty entries, reducing DMA usage as well
	 * as HCD schedule-scanning costs.  Delay for any qh
	 * we just scanned, there's a not-unusual case that it
	 * doesn't stay idle for long.
	 */
	if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING &&
			!(ehci->enabled_hrtimer_events &
				BIT(EHCI_HRTIMER_ASYNC_UNLINKS))) {
		ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
		++ehci->async_unlink_cycle;
	}
}