/*
 * Copyright (C) 2001-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
 *
 * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
 * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
 * buffers needed for the larger number).  We use one QH per endpoint, queue
 * multiple urbs (all three types) per endpoint.  URBs may need several qtds.
 *
 * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
 * interrupts) needs careful scheduling.  Performance improvements can be
 * an ongoing challenge.  That's in "ehci-sched.c".
 *
 * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
 * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
 * (b) special fields in qh entries or (c) split iso entries.  TTs will
 * buffer low/full speed data so the host collects it at high speed.
 */

/*-------------------------------------------------------------------------*/

/* fill a qtd, returning how much of the buffer we were able to queue up */

static int
qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
		size_t len, int token, int maxpacket)
{
	int	i, count;
	u64	addr = buf;

	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
	qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely (len < count))		/* ... iff needed */
		count = len;
	else {
		buf +=  0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
			qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
					(u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
	qtd->length = count;

	return count;
}

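/*
 * Editorial worked example (not from the original source): with a dma
 * buffer at 0x12345abc and len 20480, the first page entry above covers
 * 0x1000 - 0xabc = 0x544 (1348) bytes and each of the other four entries
 * covers a full 0x1000, so this qtd can map at most 1348 + 4 * 4096 =
 * 17732 bytes.  That falls short of len, so count is trimmed back to a
 * maxpacket boundary (17408 for maxpacket 512) and the caller must queue
 * the remainder in further qtds.
 */
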
/*-------------------------------------------------------------------------*/

static void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	struct ehci_qh_hw *hw = qh->hw;

	/* writes to an active overlay are unsafe */
	BUG_ON(qh->qh_state != QH_STATE_IDLE);

	hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
	hw->hw_alt_next = EHCI_LIST_END(ehci);

	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev.  Only usb_clear_halt() will
	 * ever clear it.
	 */
	if (!(hw->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {	/* dtc clear */
		unsigned	is_out, epnum;

		is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
		epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
		if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
			hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
			usb_settoggle (qh->dev, epnum, is_out, 1);
		}
	}

	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
	wmb ();
	hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}

/* if it weren't for a common silicon quirk (writing the dummy into the qh
 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
 * recovery (including urb dequeue) would need software changes to a QH...
 */
static void
qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd *qtd;

	if (list_empty (&qh->qtd_list))
		qtd = qh->dummy;
	else {
		qtd = list_entry (qh->qtd_list.next,
				struct ehci_qtd, qtd_list);
		/* first qtd may already be partially processed */
		if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current)
			qtd = NULL;
	}

	if (qtd)
		qh_update (ehci, qh, qtd);
}

/*-------------------------------------------------------------------------*/

static int qtd_copy_status (
	struct ehci_hcd *ehci,
	struct urb *urb,
	size_t length,
	u32 token
)
{
	int	status = -EINPROGRESS;

	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely (QTD_PID (token) != 2))
		urb->actual_length += length - QTD_LENGTH (token);

	/* don't modify error codes */
	if (unlikely(urb->unlinked))
		return status;

	/* force cleanup after short read; not always an error */
	if (unlikely (IS_SHORT_READ (token)))
		status = -EREMOTEIO;

	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			status = -EOVERFLOW;
		/* CERR nonzero + halt --> stall */
		} else if (QTD_CERR(token)) {
			status = -EPIPE;

		/* In theory, more than one of the following bits can be set
		 * since they are sticky and the transaction is retried.
		 * Which to test first is rather arbitrary.
		 */
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			status = (QTD_PID (token) == 1) /* IN ? */
				? -ENOSR	/* hc couldn't read data */
				: -ECOMM;	/* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad CRC, wrong PID, etc */
			ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
				urb->dev->devpath,
				usb_pipeendpoint(urb->pipe),
				usb_pipein(urb->pipe) ? "in" : "out");
			status = -EPROTO;
		} else {	/* unknown */
			status = -EPROTO;
		}

		ehci_vdbg (ehci,
			"dev%d ep%d%s qtd token %08x --> status %d\n",
			usb_pipedevice (urb->pipe),
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			token, status);

		/* if async CSPLIT failed, try cleaning out the TT buffer */
		if (status != -EPIPE
				&& urb->dev->tt
				&& !usb_pipeint(urb->pipe)
				&& ((token & QTD_STS_MMF) != 0
					|| QTD_CERR(token) == 0)
				&& (!ehci_is_TDI(ehci)
					|| urb->dev->tt->hub !=
					   ehci_to_hcd(ehci)->self.root_hub)) {
#ifdef DEBUG
			struct usb_device *tt = urb->dev->tt->hub;
			dev_dbg (&tt->dev,
				"clear tt buffer port %d, a%d ep%d t%08x\n",
				urb->dev->ttport, urb->dev->devnum,
				usb_pipeendpoint (urb->pipe), token);
#endif /* DEBUG */

			/* REVISIT ARC-derived cores don't clear the root
			 * hub TT buffer in this way...
			 */
			usb_hub_tt_clear_buffer (urb->dev, urb->pipe);
		}
	}

	return status;
}

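/*
 * Editorial recap of the mapping above: a BABBLE halt becomes -EOVERFLOW,
 * a halt with CERR still nonzero reports a stall (-EPIPE), a missed
 * complete-split (MMF) or transaction error (XACT) becomes -EPROTO, and
 * data buffer errors report -ENOSR (HC couldn't read) or -ECOMM (HC
 * couldn't write); several of these bits may be set at once, since the
 * hardware latches them as sticky status while retrying.
 */
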
static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
__releases(ehci->lock)
__acquires(ehci->lock)
{
	if (likely (urb->hcpriv != NULL)) {
		struct ehci_qh	*qh = (struct ehci_qh *) urb->hcpriv;

		/* S-mask in a QH means it's an interrupt urb */
		if ((qh->hw->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {

			/* ... update hc-wide periodic stats (for usbfs) */
			ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
		}
		qh_put (qh);
	}

	if (unlikely(urb->unlinked)) {
		COUNT(ehci->stats.unlink);
	} else {
		/* report non-error and short read status as zero */
		if (status == -EINPROGRESS || status == -EREMOTEIO)
			status = 0;
		COUNT(ehci->stats.complete);
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s status %d len %d/%d\n",
		__FUNCTION__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		status,
		urb->actual_length, urb->transfer_buffer_length);
#endif

	/* complete() can reenter this HCD */
	usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	spin_unlock (&ehci->lock);
	usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
	spin_lock (&ehci->lock);
}

#ifdef EHCI_QTD_CACHE
static int
ehci_qtdc_unlink (struct ehci_hcd *ehci, struct urb *unlink, struct pt_regs *regs)
{
	struct list_head	*entry, *tmp;
	int			ret = -ENOENT;
	unsigned long		flags, flags2;
	int			i;

	spin_lock_irqsave (&ehci->lock, flags);

	for (i = 0; i < NUM_QTD_CACHE; i++) {
		ehci_qtdc_t	*qtdc_this = ehci->qtdc[i];

		/* skip if cache empty or found in previous cache */
		if (unlikely (!qtdc_this || list_empty(&qtdc_this->cache)) || (ret == 0))
			continue;

		list_for_each_safe (entry, tmp, &qtdc_this->cache) {
			struct ehci_qtd	*qtd;
			struct urb	*urb;

			qtd = list_entry (entry, struct ehci_qtd, qtd_list);
			urb = qtd->urb;
			if (likely (urb != unlink))
				continue;

			/* keep the cached hw_next chain intact */
			if (qtd->qtd_list.prev != &qtdc_this->cache) {
				struct ehci_qtd	*last;

				last = list_entry (qtd->qtd_list.prev,
						struct ehci_qtd, qtd_list);
				last->hw_next = qtd->hw_next;
			}
			list_del (&qtd->qtd_list);
			spin_lock_irqsave (&urb->lock, flags2);
			urb->transfer_flags &= ~URB_QTD_CACHED;
			spin_unlock_irqrestore (&urb->lock, flags2);
			ehci_urb_done (ehci, urb, 0);
			ehci_qtd_free (ehci, qtd);
			ret = 0;
		}
	}

	spin_unlock_irqrestore (&ehci->lock, flags);
	return ret;
}
#endif /* EHCI_QTD_CACHE */

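/*
 * Editorial note on this vendor extension: the qtd cache batches qtds
 * for matched endpoints (see the EHCI_QTD_CACHE code in submit_async()
 * below) instead of handing each urb to the HC immediately;
 * ehci_qtdc_unlink() pulls a canceled urb's qtds back out of that
 * per-endpoint cache and patches the software hw_next chain around them.
 */
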
static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);

static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);

/*
 * Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current.  Returns number of completions called,
 * indicating how much "real" work we did.
 */
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd		*last, *end = qh->dummy;
	struct list_head	*entry, *tmp;
	int			last_status;
	int			stopped;
	unsigned		count = 0;
	u8			state;
	const __le32		halt = HALT_BIT(ehci);
	struct ehci_qh_hw	*hw = qh->hw;

	if (unlikely (list_empty (&qh->qtd_list)))
		return count;

	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE:  unlinking expects to be done in queue order.
	 *
	 * It's a bug for qh->qh_state to be anything other than
	 * QH_STATE_IDLE, unless our caller is scan_async() or
	 * scan_periodic().
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);

 rescan:
	last = NULL;
	last_status = -EINPROGRESS;
	qh->needs_rescan = 0;

	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_safe (entry, tmp, &qh->qtd_list) {
		struct ehci_qtd	*qtd;
		struct urb	*urb;
		u32		token = 0;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;

		/* clean up any state from previous QTD ...*/
		if (last) {
			if (likely (last->urb != urb)) {
				ehci_urb_done(ehci, last->urb, last_status);
				count++;
				last_status = -EINPROGRESS;
			}
			ehci_qtd_free (ehci, last);
			last = NULL;
		}

		/* ignore urbs submitted during completions we reported */
		if (qtd == end)
			break;

		/* hardware copies qtd out of qh overlay */
		rmb ();
		token = hc32_to_cpu(ehci, qtd->hw_token);

		/* always clean up qtds the hc de-activated */
 retry_xacterr:
		if ((token & QTD_STS_ACTIVE) == 0) {

			/* on STALL, error, and short reads this urb must
			 * complete and all its qtds must be recycled.
			 */
			if ((token & QTD_STS_HALT) != 0) {

				/* retry transaction errors until we
				 * reach the software xacterr limit
				 */
				if ((token & QTD_STS_XACT) &&
						QTD_CERR(token) == 0 &&
						++qh->xacterrs < QH_XACTERR_MAX &&
						!urb->unlinked) {
					ehci_dbg(ehci,
	"detected XactErr len %zu/%zu retry %d\n",
	qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);

					/* reset the token in the qtd and the
					 * qh overlay (which still contains
					 * the qtd) so that we pick up from
					 * where we left off
					 */
					token &= ~QTD_STS_HALT;
					token |= QTD_STS_ACTIVE |
							(EHCI_TUNE_CERR << 10);
					qtd->hw_token = cpu_to_hc32(ehci,
							token);
					wmb();
					hw->hw_token = cpu_to_hc32(ehci,
							token);
					goto retry_xacterr;
				}
				stopped = 1;

			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 *
			 * other short reads won't stop the queue, including
			 * control transfers (status stage handles that) or
			 * most other single-qtd reads ... the queue stops if
			 * URB_SHORT_NOT_OK was set so the driver submitting
			 * the urbs could clean it up.
			 */
			} else if (IS_SHORT_READ (token)
					&& !(qtd->hw_alt_next
						& EHCI_LIST_END(ehci))) {
				stopped = 1;
				goto halt;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely (!stopped
				&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) {
			break;

		/* scan the whole queue for unlinks whenever it stops */
		} else {
			stopped = 1;

			/* cancel everything if we halt, suspend, etc */
			if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))
				last_status = -ESHUTDOWN;

			/* this qtd is active; skip it unless a previous qtd
			 * for its urb faulted, or its urb was canceled.
			 */
			else if (last_status == -EINPROGRESS && !urb->unlinked)
				continue;

			/* qh unlinked; token in overlay may be most current */
			if (state == QH_STATE_IDLE
					&& cpu_to_hc32(ehci, qtd->qtd_dma)
						== hw->hw_current)
				token = hc32_to_cpu(ehci, hw->hw_token);

			/* force halt for unlinked or blocked qh, so we'll
			 * patch the qh later and so that completions can't
			 * activate it while we "know" it's stopped.
			 */
			if ((halt & hw->hw_token) == 0) {
 halt:
				hw->hw_token |= halt;
				wmb ();
			}
		}

		/* unless we already know the urb's status, collect qtd status
		 * and update count of bytes transferred.  in common short read
		 * cases with only one data qtd (including control transfers),
		 * queue processing won't halt.  but with two or more qtds (for
		 * example, with a 32 KB transfer), when the first qtd gets a
		 * short read the second must be removed by hand.
		 */
		if (last_status == -EINPROGRESS) {
			last_status = qtd_copy_status(ehci, urb,
					qtd->length, token);
			if (last_status == -EREMOTEIO
					&& (qtd->hw_alt_next
						& EHCI_LIST_END(ehci)))
				last_status = -EINPROGRESS;
		}

		/* if we're removing something not at the queue head,
		 * patch the hardware queue pointer.
		 */
		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry (qtd->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}

		/* remove qtd; it's recycled after possible urb completion */
		list_del (&qtd->qtd_list);
		last = qtd;

		/* reinit the xacterr counter for the next qtd */
		qh->xacterrs = 0;
	}

	/* last urb's completion might still need calling */
	if (likely (last != NULL)) {
		ehci_urb_done(ehci, last->urb, last_status);
		count++;
		ehci_qtd_free (ehci, last);
	}

	/* Do we need to rescan for URBs dequeued during a giveback? */
	if (unlikely(qh->needs_rescan)) {
		/* If the QH is already unlinked, do the rescan now. */
		if (state == QH_STATE_IDLE)
			goto rescan;

		/* Otherwise we have to wait until the QH is fully unlinked.
		 * Our caller will start an unlink if qh->needs_rescan is
		 * set.  But if an unlink has already started, nothing needs
		 * to be done.
		 */
		if (state != QH_STATE_LINKED)
			qh->needs_rescan = 0;
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 */
	if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) {
		switch (state) {
		case QH_STATE_IDLE:
			qh_refresh(ehci, qh);
			break;
		case QH_STATE_LINKED:
			/* We won't refresh a QH that's linked (after the HC
			 * stopped the queue).  That avoids a race:
			 *  - HC reads first part of QH;
			 *  - CPU updates that first part and the token;
			 *  - HC reads rest of that QH, including token
			 * Result:  HC gets an inconsistent image, and then
			 * DMAs to/from the wrong memory (corrupting it).
			 *
			 * That should be rare for interrupt transfers,
			 * except maybe high bandwidth ...
			 */

			/* Tell the caller to start an unlink */
			qh->needs_rescan = 1;
			break;
		/* otherwise, unlink already started */
		}
	}

	return count;
}

/*-------------------------------------------------------------------------*/

// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
// ... and packet size, for any kind of endpoint descriptor
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
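
/*
 * Editorial example: a high bandwidth interrupt endpoint advertising
 * wMaxPacketSize 0x1400 encodes two additional transactions in bits
 * 12:11, so hb_mult() yields 3 and max_packet() yields 0x400; such an
 * endpoint can move up to 3 x 1024 bytes per microframe.
 */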

/*
 * reverse of qh_urb_transaction:  free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list
) {
	struct list_head	*entry, *temp;

	list_for_each_safe (entry, temp, qtd_list) {
		struct ehci_qtd	*qtd;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		list_del (&qtd->qtd_list);
		ehci_qtd_free (ehci, qtd);
	}
}

/*
 * create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *
qh_urb_transaction (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*head,
	gfp_t			flags
) {
	struct ehci_qtd		*qtd, *qtd_prev;
	dma_addr_t		buf;
	int			len, maxpacket;
	int			is_input;
	u32			token;

	/*
	 * URBs map to sequences of QTDs:  one logical transaction
	 */
	qtd = ehci_qtd_alloc (ehci, flags);
	if (unlikely (!qtd))
		return NULL;
	list_add_tail (&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */

	len = urb->transfer_buffer_length;
	is_input = usb_pipein (urb->pipe);
	if (usb_pipecontrol (urb->pipe)) {
		/* SETUP pid */
		qtd_fill(ehci, qtd, urb->setup_dma,
				sizeof (struct usb_ctrlrequest),
				token | (2 /* "setup" */ << 8), 8);

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);

		/* for zero length DATA stages, STATUS is always IN */
		if (len == 0)
			token |= (1 /* "in" */ << 8);
	}

	/*
	 * data transfer stage:  buffer setup
	 */
	buf = urb->transfer_dma;

	if (is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		int this_qtd_len;

		this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket);
		len -= this_qtd_len;
		buf += this_qtd_len;

		/*
		 * short reads advance to a "magic" dummy instead of the next
		 * qtd ... that forces the queue to stop, for manual cleanup.
		 * (this will usually be overridden later.)
		 */
		if (is_input)
			qtd->hw_alt_next = ehci->async->hw->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		if (likely (len <= 0))
			break;

		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);
	}

	/*
	 * unless the caller requires manual cleanup after short reads,
	 * have the alt_next mechanism keep the queue running after the
	 * last data qtd (the only one, for control and most other cases).
	 */
	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
				|| usb_pipecontrol (urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END(ehci);

	/*
	 * control requests may need a terminating data "status" ack;
	 * bulk ones may need a terminating short packet (zero length).
	 */
	if (likely (urb->transfer_buffer_length != 0)) {
		int	one_more = 0;

		if (usb_pipecontrol (urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out"  */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipebulk (urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc (ehci, flags);
			if (unlikely (!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
			list_add_tail (&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill(ehci, qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
	return head;

cleanup:
	qtd_list_free (ehci, urb, head);
	return NULL;
}

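/*
 * Editorial sketch of the resulting chain for a control read, where each
 * arrow is a hw_next link to the next qtd's dma address:
 *
 *	SETUP (8 bytes, pid 2) --> DATA IN qtd(s) --> STATUS OUT (DATA1)
 *
 * IN data qtds also aim hw_alt_next at the "magic" dummy noted above, so
 * a short read either stops the queue (URB_SHORT_NOT_OK) or, once
 * hw_alt_next is overwritten with EHCI_LIST_END, lets it keep running.
 */
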
/*-------------------------------------------------------------------------*/

// Would be best to create all qh's from config descriptors,
// when each interface/altsetting is established.  Unlink
// any previous qh and cancel its urbs first; endpoints are
// implicitly reset then (data toggle too).
// That'd mean updating how usbcore talks to HCDs. (2.7?)

/*
 * Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */

static struct ehci_qh *
qh_make (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			flags
) {
	struct ehci_qh		*qh = ehci_qh_alloc (ehci, flags);
	u32			info1 = 0, info2 = 0;
	int			is_input, type;
	int			maxp = 0;
	struct usb_tt		*tt = urb->dev->tt;
	struct ehci_qh_hw	*hw;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint (urb->pipe) << 8;
	info1 |= usb_pipedevice (urb->pipe) << 0;

	is_input = usb_pipein (urb->pipe);
	type = usb_pipetype (urb->pipe);
	maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);

	/* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
	 * acts like up to 3KB, but is built from smaller packets.
	 */
	if (max_packet(maxp) > 1024) {
		ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp));
		goto done;
	}

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
				is_input, 0,
				hb_mult(maxp) * max_packet(maxp)));
		qh->start = NO_FRAME;

		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->c_usecs = 0;
			qh->gap_uf = 0;

			qh->period = urb->interval >> 3;
			if (qh->period == 0 && urb->interval != 1) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
				urb->interval = 1;
			} else if (qh->period > ehci->periodic_size) {
				qh->period = ehci->periodic_size;
				urb->interval = qh->period << 3;
			}
		} else {
			int		think_time;

			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
					is_input, 0, maxp) / (125 * 1000);

			/* FIXME this just approximates SPLIT/CSPLIT times */
			if (is_input) {		// SPLIT, gap, CSPLIT+DATA
				qh->c_usecs = qh->usecs + HS_USECS (0);
				qh->usecs = HS_USECS (1);
			} else {		// SPLIT+DATA, gap, CSPLIT
				qh->usecs += HS_USECS (1);
				qh->c_usecs = HS_USECS (0);
			}

			think_time = tt ? tt->think_time : 0;
			qh->tt_usecs = NS_TO_US (think_time +
					usb_calc_bus_time (urb->dev->speed,
					is_input, 0, max_packet (maxp)));
			qh->period = urb->interval;
			if (qh->period > ehci->periodic_size) {
				qh->period = ehci->periodic_size;
				urb->interval = qh->period;
			}
		}
	}

	/* support for tt scheduling, and access to toggles */
	qh->dev = urb->dev;

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= (1 << 12);	/* EPS "low" */
		/* FALL THROUGH */

	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (EHCI_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= (1 << 27);	/* for TT */
			info1 |= 1 << 14;	/* toggle from qtd */
		}
		info1 |= maxp << 16;

		info2 |= (EHCI_TUNE_MULT_TT << 30);

		/* Some Freescale processors have an erratum in which the
		 * port number in the queue head was 0..N-1 instead of 1..N.
		 */
		if (ehci_has_fsl_portno_bug(ehci))
			info2 |= (urb->dev->ttport-1) << 23;
		else
			info2 |= urb->dev->ttport << 23;

		/* set the address of the TT; for TDI's integrated
		 * root hub tt, leave it zeroed.
		 */
		if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
			info2 |= tt->hub->devnum << 16;

		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */

		break;

	case USB_SPEED_HIGH:		/* no TT involved */
		info1 |= (2 << 12);	/* EPS "high" */
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= 1 << 14;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			/* The USB spec says that high speed bulk endpoints
			 * always use 512 byte maxpacket.  But some device
			 * vendors decided to ignore that, and MSFT is happy
			 * to help them do so.  So now people expect to use
			 * such nonconformant devices with Linux too; sigh.
			 */
			info1 |= max_packet(maxp) << 16;
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= max_packet (maxp) << 16;
			info2 |= hb_mult (maxp) << 30;
		}
		break;

	default:
		dbg ("bogus dev %p speed %d", urb->dev, urb->dev->speed);
done:
		qh_put (qh);
		return NULL;
	}

	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear, advance to dummy */
	qh->qh_state = QH_STATE_IDLE;
	hw = qh->hw;
	hw->hw_info1 = cpu_to_hc32(ehci, info1);
	hw->hw_info2 = cpu_to_hc32(ehci, info2);
	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
	qh_refresh (ehci, qh);
	return qh;
}

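/*
 * Editorial recap of the fields assembled above: hw_info1 packs device
 * address (bits 6:0), endpoint number (11:8), endpoint speed (13:12),
 * the "toggle from qtd" dtc bit (14), maxpacket (26:16), the control
 * endpoint TT flag (27), and NAK reload (31:28); hw_info2 packs the TT
 * hub address (22:16), TT port (29:23), and high bandwidth mult (31:30),
 * leaving s-mask/c-mask for the periodic scheduler.
 */
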
/*-------------------------------------------------------------------------*/

/* move qh (and its qtds) onto async queue; maybe enable queue.  */

static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	__hc32		dma = QH_NEXT(ehci, qh->qh_dma);
	struct ehci_qh	*head;

	WARN_ON(qh->qh_state != QH_STATE_IDLE);

	/* (re)start the async schedule? */
	head = ehci->async;
	timer_action_done (ehci, TIMER_ASYNC_OFF);
	if (!head->qh_next.qh) {
		u32	cmd = ehci_readl(ehci, &ehci->regs->command);

		if (!(cmd & CMD_ASE)) {
			/* in case a clear of CMD_ASE didn't take yet */
			(void)handshake(ehci, &ehci->regs->status,
					STS_ASS, 0, 150);
			cmd |= CMD_ASE | CMD_RUN;
			ehci_writel(ehci, cmd, &ehci->regs->command);
			ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
			/* posted write need not be known to HC yet ... */
		}
	}

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	qh_refresh(ehci, qh);

	/* splice right after start */
	qh->qh_next = head->qh_next;
	qh->hw->hw_next = head->hw->hw_next;
	wmb ();

	head->qh_next.qh = qh;
	head->hw->hw_next = dma;

	qh->xacterrs = 0;
	qh->qh_state = QH_STATE_LINKED;
	/* qtd completions reported later by interrupt */
}

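/*
 * Editorial note: the async schedule is a circular ring of QHs headed by
 * ehci->async (the reclamation-list head, which carries no qtds of its
 * own); splicing right after that head, with the hw_next publish last,
 * lets the HC keep walking the ring while software links a new QH.
 */
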
/*-------------------------------------------------------------------------*/

/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			epnum,
	void			**ptr
)
{
	struct ehci_qh		*qh = NULL;
	__hc32			qh_addr_mask = cpu_to_hc32(ehci, 0x7f);

	qh = (struct ehci_qh *) *ptr;
	if (unlikely (qh == NULL)) {
		/* can't sleep here, we have ehci->lock... */
		qh = qh_make (ehci, urb, GFP_ATOMIC);
		*ptr = qh;
	}

	if (ehci_optimized(ehci, qh) >= 0)
		ehci_err(ehci, "EHCI Fastpath: Attempted non-optimized write to optimized pipe\n");

	if (likely (qh != NULL)) {
		struct ehci_qtd	*qtd;

		if (unlikely (list_empty (qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry (qtd_list->next, struct ehci_qtd,
					qtd_list);

		/* control qh may need patching ... */
		if (unlikely (epnum == 0)) {

			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice (urb->pipe) == 0)
				qh->hw->hw_info1 &= ~qh_addr_mask;
		}

#ifdef EHCI_QTD_CACHE
		{
			struct list_head	*entry;
			struct ehci_qtd		*qtd2;

			list_for_each (entry, qtd_list) {
				qtd2 = list_entry (entry, struct ehci_qtd, qtd_list);
				qtd2->urb->hcpriv = qh_get (qh);
			}
		}
#endif /* EHCI_QTD_CACHE */

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely (qtd != NULL)) {
			struct ehci_qtd		*dummy;
			dma_addr_t		dma;
			__hc32			token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT(ehci);
			wmb ();
			dummy = qh->dummy;

			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;

			list_del (&qtd->qtd_list);
			list_add (&dummy->qtd_list, qtd_list);
			__list_splice (qtd_list, qh->qtd_list.prev);

			ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry (qh->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT(ehci, dma);

			/* let the hc process these next qtds */
			wmb ();
			dummy->hw_token = token;

			urb->hcpriv = qh_get (qh);
		}
	}
	return qh;
}

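/*
 * Editorial recap of the dummy swap above: (1) halt the first new qtd
 * and save its token; (2) copy it into the old dummy, which keeps the
 * dma address the HC's overlay already points at; (3) splice the list,
 * recycling the first qtd as the new dummy at the tail; (4) after a
 * wmb(), store the saved token into the old dummy, which is the single
 * write that lets the HC start on the new work (EHCI spec 4.10.2).
 */
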
/*-------------------------------------------------------------------------*/

static int
submit_async (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	struct ehci_qtd		*qtd;
	int			epnum;
	unsigned long		flags;
	struct ehci_qh		*qh = NULL;
	int			rc;

	qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
	epnum = urb->ep->desc.bEndpointAddress;

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
		__FUNCTION__, urb->dev->devpath, urb,
		epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
		urb->transfer_buffer_length,
		qtd, urb->ep->hcpriv);
#endif

	spin_lock_irqsave (&ehci->lock, flags);

#ifdef EHCI_QTD_CACHE
	{
		ehci_qtdc_t	*qtdc_hit = NULL;

		if (!ehci->qtdc_dev) {
			if (ehci->qtdc_vid && (urb->dev->descriptor.idVendor == ehci->qtdc_vid) &&
					ehci->qtdc_pid && (urb->dev->descriptor.idProduct == ehci->qtdc_pid)) {
				ehci->qtdc_dev = urb->dev;
				printk("QTDC: matched pid %x vid %x dev %p\n",
					urb->dev->descriptor.idProduct, urb->dev->descriptor.idVendor,
					urb->dev);
			}
		}

		if (ehci->qtdc[0] && (urb->dev == ehci->qtdc_dev) && (epnum == ehci->qtdc[0]->ep))
			qtdc_hit = ehci->qtdc[0];
		else if (ehci->qtdc[1] && (urb->dev == ehci->qtdc_dev) &&
				(epnum == ehci->qtdc[1]->ep))
			qtdc_hit = ehci->qtdc[1];

		if (likely (qtdc_hit != NULL)) {
			unsigned long	flags2;

			/* link the hw_next when there are cached qtd's in qtdc_hit */
			if (likely (qtdc_hit->cnt)) {
				struct ehci_qtd	*qtd_prev, *qtd_this;

				qtd_prev = list_entry (qtdc_hit->cache.prev, struct ehci_qtd, qtd_list);
				qtd_this = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
				qtd_prev->hw_next = QTD_NEXT (ehci, qtd_this->qtd_dma);
			}

			if (likely (qtdc_hit->cnt < qtdc_hit->size)) {	/* queue it to the cache and return */
				/* set the urb cached flag */
				spin_lock_irqsave (&urb->lock, flags2);
				urb->transfer_flags |= URB_QTD_CACHED;
				spin_unlock_irqrestore (&urb->lock, flags2);

				list_splice_init (qtd_list, qtdc_hit->cache.prev);
				qtdc_hit->cnt++;
#ifdef EHCI_QTDC_DEBUG
				qtdc_hit->cached_qtd++;
#endif /* EHCI_QTDC_DEBUG */
				QTDC_TRACE(qtdc_hit, ("caching! cnt %d\n", qtdc_hit->cnt));
				mod_timer (&qtdc_hit->watchdog,
					jiffies + qtdc_hit->timeout);
				spin_unlock_irqrestore (&ehci->lock, flags);
				return 0;
			} else {	/* insert the cache list into qtd_list and go on */
				struct list_head	*entry;

				/* clear urb cached flag */
				list_for_each (entry, &qtdc_hit->cache) {
					qtd = list_entry (entry, struct ehci_qtd, qtd_list);

					spin_lock_irqsave (&urb->lock, flags2);
					urb->transfer_flags &= ~URB_QTD_CACHED;
					spin_unlock_irqrestore (&urb->lock, flags2);
				}

				list_splice_init (&qtdc_hit->cache, qtd_list);
#ifdef EHCI_QTDC_DEBUG
				qtdc_hit->release_qtd += qtdc_hit->cnt;
				qtdc_hit->release_cnt++;
#endif /* EHCI_QTDC_DEBUG */
				QTDC_TRACE(qtdc_hit, ("releasing! cnt %d\n", qtdc_hit->cnt));
				qtdc_hit->cnt = 0;
				del_timer_sync (&qtdc_hit->watchdog);
			}
		}
	}
#endif /* EHCI_QTD_CACHE */

	if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
			&ehci_to_hcd(ehci)->flags))) {
		rc = -ESHUTDOWN;
		goto done;
	}
	rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(rc))
		goto done;

	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
	if (unlikely(qh == NULL)) {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
		rc = -ENOMEM;
		goto done;
	}

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely (qh->qh_state == QH_STATE_IDLE))
		qh_link_async (ehci, qh_get (qh));
 done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (unlikely (qh == NULL))
		qtd_list_free (ehci, urb, qtd_list);
	return rc;
}

/*-------------------------------------------------------------------------*/

/* the async qh for the qtds being reclaimed are now unlinked from the HC */

static void end_unlink_async (struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh = ehci->reclaim;
	struct ehci_qh		*next;

	iaa_watchdog_done(ehci);

	// qh->hw_next = cpu_to_hc32(qh->qh_dma);
	qh->qh_state = QH_STATE_IDLE;
	qh->qh_next.qh = NULL;
	qh_put (qh);			// refcount from reclaim

	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
	next = qh->reclaim;
	ehci->reclaim = next;
	qh->reclaim = NULL;

	qh_completions (ehci, qh);

	if (!list_empty (&qh->qtd_list)
			&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
		qh_link_async (ehci, qh);
	else {
		qh_put (qh);		// refcount from async list

		/* it's not free to turn the async schedule on/off; leave it
		 * active but idle for a while once it empties.
		 */
		if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state)
				&& ehci->async->qh_next.qh == NULL)
			timer_action (ehci, TIMER_ASYNC_OFF);
	}

	if (next) {
		ehci->reclaim = NULL;
		start_unlink_async (ehci, next);
	}
}

/* makes sure the async qh will become idle */
/* caller must own ehci->lock */

static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		cmd = ehci_readl(ehci, &ehci->regs->command);
	struct ehci_qh	*prev;

#ifdef DEBUG
	assert_spin_locked(&ehci->lock);
	if (ehci->reclaim
			|| (qh->qh_state != QH_STATE_LINKED
				&& qh->qh_state != QH_STATE_UNLINK_WAIT)
			)
		BUG ();
#endif

	if (ehci_optimized(ehci, qh) >= 0)
		ehci_err(ehci, "EHCI Fastpath: Regular unlink of optimized pipe\n");

	/* stop async schedule right now? */
	if (unlikely (qh == ehci->async)) {
		/* can't get here without STS_ASS set */
		if (ehci_to_hcd(ehci)->state != HC_STATE_HALT
				&& !ehci->reclaim) {
			/* ... and CMD_IAAD clear */
			ehci_writel(ehci, cmd & ~CMD_ASE,
				    &ehci->regs->command);
			wmb ();
			// handshake later, if we need to
			timer_action_done (ehci, TIMER_ASYNC_OFF);
		}
		return;
	}

	qh->qh_state = QH_STATE_UNLINK;
	ehci->reclaim = qh = qh_get (qh);

	prev = ehci->async;
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;

	prev->hw->hw_next = qh->hw->hw_next;
	prev->qh_next = qh->qh_next;
	wmb ();

	/* If the controller isn't running, we don't have to wait for it */
	if (unlikely(!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))) {
		/* if (unlikely (qh->reclaim != 0))
		 *	this will recurse, probably not much
		 */
		end_unlink_async (ehci);
		return;
	}

	cmd |= CMD_IAAD;
	ehci_writel(ehci, cmd, &ehci->regs->command);
	(void)ehci_readl(ehci, &ehci->regs->command);
	iaa_watchdog_start(ehci);
}

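/*
 * Editorial note: after unhooking the QH from the software ring, the
 * code rings the IAA doorbell (CMD_IAAD) and returns; the HC raises
 * STS_IAA once it has dropped any cached copy of the QH, and only then
 * does end_unlink_async() recycle it (iaa_watchdog covers controllers
 * that never answer the doorbell).
 */
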
/*-------------------------------------------------------------------------*/

static void scan_async (struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;
	enum ehci_timer_action	action = TIMER_IO_WATCHDOG;

	ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index);
	timer_action_done (ehci, TIMER_ASYNC_SHRINK);
rescan:
	qh = ehci->async->qh_next.qh;
	if (likely (qh != NULL)) {
		do {
			/* clean any finished work for this qh */
			if (!list_empty (&qh->qtd_list)
					&& qh->stamp != ehci->stamp) {
				int temp;

				/* unlinks could happen here; completion
				 * reporting drops the lock.  rescan using
				 * the latest schedule, but don't rescan
				 * qhs we already finished (no looping).
				 */
				qh = qh_get (qh);
				qh->stamp = ehci->stamp;
				temp = qh_completions (ehci, qh);
				if (qh->needs_rescan)
					unlink_async(ehci, qh);
				qh_put (qh);
				if (temp != 0)
					goto rescan;
			}

			/* unlink idle entries, reducing DMA usage as well
			 * as HCD schedule-scanning costs.  delay for any qh
			 * we just scanned, there's a not-unusual case that it
			 * doesn't stay idle for long.
			 * (plus, avoids some kind of re-activation race.)
			 */
			if (list_empty(&qh->qtd_list)
					&& qh->qh_state == QH_STATE_LINKED) {
				if (!ehci->reclaim
					&& ((ehci->stamp - qh->stamp) & 0x1fff)
						>= (EHCI_SHRINK_FRAMES * 8))
					start_unlink_async(ehci, qh);
				else
					action = TIMER_ASYNC_SHRINK;
			}

			qh = qh->qh_next.qh;
		} while (qh);
	}
	if (action == TIMER_ASYNC_SHRINK)
		timer_action (ehci, TIMER_ASYNC_SHRINK);
}