/*
 * Copyright (C) 2001-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
 *
 * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
 * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
 * buffers needed for the larger number).  We use one QH per endpoint, queue
 * multiple urbs (all three types) per endpoint.  URBs may need several qtds.
 *
 * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
 * interrupts) needs careful scheduling.  Performance improvements can be
 * an ongoing challenge.  That's in "ehci-sched.c".
 *
 * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
 * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
 * (b) special fields in qh entries or (c) split iso entries.  TTs will
 * buffer low/full speed data so the host collects it at high speed.
 */
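/* A sketch of the async schedule these routines maintain (illustrative
 * only, not from the original source):
 *
 *	ehci->async (head QH) -> QH (ep A) -> QH (ep B) -> ... -> back to head
 *	                          |            |
 *	                          qtd -> qtd   qtd -> qtd -> qtd
 *	                          (urb 1)      (urb 2)  (urb 3)
 *
 * The HC refills each QH's overlay area from the qtd at the front of that
 * QH's list; a "dummy" qtd always terminates the list (see qh_append_tds
 * below for why the dummy is never unlinked).
 */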
/*-------------------------------------------------------------------------*/
/* fill a qtd, returning how much of the buffer we were able to queue up */

static int
qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
		size_t len, int token, int maxpacket)
{
	int	i, count;
	u64	addr = buf;

	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
	qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely (len < count))		/* ... iff needed */
		count = len;
	else {
		buf +=  0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
			qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
					(u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
	qtd->length = count;

	return count;
}
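/* Worked example (illustrative, not part of the original file): suppose
 * qtd_fill() is called with a hypothetical unaligned buf = 0x12340ac0,
 * len = 20480 (20 KB) and maxpacket = 512.  Then:
 *
 *	count = 0x1000 - 0xac0 = 0x540	(1344 bytes left in page 0)
 *	pages 1..4 add 4 * 0x1000	(16384 bytes more)
 *	count = 1344 + 16384 = 17728	(still < len, so the trim applies)
 *	count -= 17728 % 512		(drops 320, leaving 17408)
 *
 * so this qtd carries 17408 bytes, ending exactly on a packet boundary,
 * and qh_urb_transaction() must allocate another qtd for the remaining
 * 3072 bytes.
 */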
/*-------------------------------------------------------------------------*/

static void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	/* writes to an active overlay are unsafe */
	BUG_ON(qh->qh_state != QH_STATE_IDLE);

	qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
	qh->hw_alt_next = EHCI_LIST_END(ehci);

	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev.  Only usb_clear_halt() will
	 * ever clear it.
	 */
	if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
		unsigned	is_out, epnum;

		is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
		epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f;
		if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
			qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
			usb_settoggle (qh->dev, epnum, is_out, 1);
		}
	}

	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
	wmb ();
	qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}
/* if it weren't for a common silicon quirk (writing the dummy into the qh
 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
 * recovery (including urb dequeue) would need software changes to a QH...
 */
static void
qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd *qtd;

	if (list_empty (&qh->qtd_list))
		qtd = qh->dummy;
	else {
		qtd = list_entry (qh->qtd_list.next,
				struct ehci_qtd, qtd_list);
		/* first qtd may already be partially processed */
		if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw_current)
			qtd = NULL;
	}

	if (qtd)
		qh_update (ehci, qh, qtd);
}

/*-------------------------------------------------------------------------*/
static int qtd_copy_status (
	struct ehci_hcd	*ehci,
	struct urb	*urb,
	size_t		length,
	u32		token
)
{
	int	status = -EINPROGRESS;

	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely (QTD_PID (token) != 2))
		urb->actual_length += length - QTD_LENGTH (token);

	/* don't modify error codes */
	if (unlikely(urb->unlinked))
		return status;

	/* force cleanup after short read; not always an error */
	if (unlikely (IS_SHORT_READ (token)))
		status = -EREMOTEIO;

	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			status = -EOVERFLOW;
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			status = (QTD_PID (token) == 1) /* IN ? */
				? -ENOSR	/* hc couldn't read data */
				: -ECOMM;	/* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad crc, wrong PID, etc; retried */
			if (QTD_CERR (token))
				status = -EPIPE;
			else {
				ehci_dbg (ehci, "devpath %s ep%d%s 3strikes\n",
					urb->dev->devpath,
					usb_pipeendpoint (urb->pipe),
					usb_pipein (urb->pipe) ? "in" : "out");
				status = -EPROTO;
			}
		/* CERR nonzero + no errors + halt --> stall */
		} else if (QTD_CERR (token))
			status = -EPIPE;
		else	/* unknown */
			status = -EPROTO;

		ehci_vdbg (ehci,
			"dev%d ep%d%s qtd token %08x --> status %d\n",
			usb_pipedevice (urb->pipe),
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			token, status);

		/* if async CSPLIT failed, try cleaning out the TT buffer */
		if (status != -EPIPE
				&& urb->dev->tt
				&& !usb_pipeint(urb->pipe)
				&& ((token & QTD_STS_MMF) != 0
					|| QTD_CERR(token) == 0)
				&& (!ehci_is_TDI(ehci)
					|| urb->dev->tt->hub !=
					   ehci_to_hcd(ehci)->self.root_hub)) {
#ifdef DEBUG
			struct usb_device *tt = urb->dev->tt->hub;
			dev_dbg (&tt->dev,
				"clear tt buffer port %d, a%d ep%d t%08x\n",
				urb->dev->ttport, urb->dev->devnum,
				usb_pipeendpoint (urb->pipe), token);
#endif /* DEBUG */
			/* REVISIT ARC-derived cores don't clear the root
			 * hub TT buffer in this way...
			 */
			usb_hub_tt_clear_buffer (urb->dev, urb->pipe);
		}
	}

	return status;
}
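/* Minimal standalone sketch of the token-to-errno mapping above, using
 * the QTD_STS_*/QTD_PID/QTD_CERR macros from ehci.h.  The helper name is
 * made up for illustration; it is not part of the driver.
 */
#if 0
static int sketch_token_to_errno(u32 token)
{
	if (!(token & QTD_STS_HALT))
		return 0;		/* not halted: no hard fault */
	if (token & QTD_STS_BABBLE)
		return -EOVERFLOW;	/* device sent too much data */
	if (token & QTD_STS_MMF)
		return -EPROTO;		/* missed the complete-split */
	if (token & QTD_STS_DBE)	/* host-side buffer over/underrun */
		return (QTD_PID(token) == 1) ? -ENOSR : -ECOMM;
	if (token & QTD_STS_XACT)	/* CRC/timeout, retried by HC */
		return QTD_CERR(token) ? -EPIPE : -EPROTO;
	return QTD_CERR(token) ? -EPIPE : -EPROTO;	/* stall vs unknown */
}
#endif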
static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
__releases(ehci->lock)
__acquires(ehci->lock)
{
	if (likely (urb->hcpriv != NULL)) {
		struct ehci_qh	*qh = (struct ehci_qh *) urb->hcpriv;

		/* S-mask in a QH means it's an interrupt urb */
		if ((qh->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {

			/* ... update hc-wide periodic stats (for usbfs) */
			ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
		}
		qh_put (qh);
	}

	if (unlikely(urb->unlinked)) {
		COUNT(ehci->stats.unlink);
	} else {
		/* report non-error and short read status as zero */
		if (status == -EINPROGRESS || status == -EREMOTEIO)
			status = 0;
		COUNT(ehci->stats.complete);
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s status %d len %d/%d\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		status,
		urb->actual_length, urb->transfer_buffer_length);
#endif

	/* complete() can reenter this HCD */
	usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	spin_unlock (&ehci->lock);
	usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
	spin_lock (&ehci->lock);
}

static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);

static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
/*
 * Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current.  Returns number of completions called,
 * indicating how much "real" work we did.
 */
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd		*last = NULL, *end = qh->dummy;
	struct list_head	*entry, *tmp;
	int			last_status = -EINPROGRESS;
	int			stopped;
	unsigned		count = 0;
	u8			state;
	__le32			halt = HALT_BIT(ehci);

	if (unlikely (list_empty (&qh->qtd_list)))
		return count;

	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE:  unlinking expects to be done in queue order.
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);

	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_safe (entry, tmp, &qh->qtd_list) {
		struct ehci_qtd	*qtd;
		struct urb	*urb;
		u32		token = 0;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;

		/* clean up any state from previous QTD ...*/
		if (last) {
			if (likely (last->urb != urb)) {
				ehci_urb_done(ehci, last->urb, last_status);
				count++;
				last_status = -EINPROGRESS;
			}
			ehci_qtd_free (ehci, last);
			last = NULL;
		}

		/* ignore urbs submitted during completions we reported */
		if (qtd == end)
			break;

		/* hardware copies qtd out of qh overlay */
		rmb ();
		token = hc32_to_cpu(ehci, qtd->hw_token);

		/* always clean up qtds the hc de-activated */
 retry_xacterr:
		if ((token & QTD_STS_ACTIVE) == 0) {

			/* on STALL, error, and short reads this urb must
			 * complete and all its qtds must be recycled.
			 */
			if ((token & QTD_STS_HALT) != 0) {

				/* retry transaction errors until we
				 * reach the software xacterr limit
				 */
				if ((token & QTD_STS_XACT) &&
						QTD_CERR(token) == 0 &&
						--qh->xacterrs > 0 &&
						!urb->unlinked) {
					ehci_dbg(ehci,
	"detected XactErr len %d/%d retry %d\n",
	qtd->length - QTD_LENGTH(token), qtd->length,
	QH_XACTERR_MAX - qh->xacterrs);

					/* reset the token in the qtd and the
					 * qh overlay (which still contains
					 * the qtd) so that we pick up from
					 * where we left off
					 */
					token &= ~QTD_STS_HALT;
					token |= QTD_STS_ACTIVE |
							(EHCI_TUNE_CERR << 10);
					qtd->hw_token = cpu_to_hc32(ehci,
							token);
					wmb();
					qh->hw_token = cpu_to_hc32(ehci, token);
					goto retry_xacterr;
				}
				stopped = 1;

			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 *
			 * other short reads won't stop the queue, including
			 * control transfers (status stage handles that) or
			 * most other single-qtd reads ... the queue stops if
			 * URB_SHORT_NOT_OK was set so the driver submitting
			 * the urbs could clean it up.
			 */
			} else if (IS_SHORT_READ (token)
					&& !(qtd->hw_alt_next
						& EHCI_LIST_END(ehci))) {
				stopped = 1;
				goto halt;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely (!stopped
				&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) {
			break;

		/* scan the whole queue for unlinks whenever it stops */
		} else {
			stopped = 1;

			/* cancel everything if we halt, suspend, etc */
			if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))
				last_status = -ESHUTDOWN;

			/* this qtd is active; skip it unless a previous qtd
			 * for its urb faulted, or its urb was canceled.
			 */
			else if (last_status == -EINPROGRESS && !urb->unlinked)
				continue;

			/* qh unlinked; token in overlay may be most current */
			if (state == QH_STATE_IDLE
					&& cpu_to_hc32(ehci, qtd->qtd_dma)
						== qh->hw_current)
				token = hc32_to_cpu(ehci, qh->hw_token);

			/* force halt for unlinked or blocked qh, so we'll
			 * patch the qh later and so that completions can't
			 * activate it while we "know" it's stopped.
			 */
			if ((halt & qh->hw_token) == 0) {
 halt:
				qh->hw_token |= halt;
				wmb ();
			}
		}

		/* unless we already know the urb's status, collect qtd status
		 * and update count of bytes transferred.  in common short read
		 * cases with only one data qtd (including control transfers),
		 * queue processing won't halt.  but with two or more qtds (for
		 * example, with a 32 KB transfer), when the first qtd gets a
		 * short read the second must be removed by hand.
		 */
		if (last_status == -EINPROGRESS) {
			last_status = qtd_copy_status(ehci, urb,
					qtd->length, token);
			if (last_status == -EREMOTEIO
					&& (qtd->hw_alt_next
						& EHCI_LIST_END(ehci)))
				last_status = -EINPROGRESS;
		}

		/* if we're removing something not at the queue head,
		 * patch the hardware queue pointer.
		 */
		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry (qtd->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}

		/* remove qtd; it's recycled after possible urb completion */
		list_del (&qtd->qtd_list);
		last = qtd;

		/* reinit the xacterr counter for the next qtd */
		qh->xacterrs = QH_XACTERR_MAX;
	}

	/* last urb's completion might still need calling */
	if (likely (last != NULL)) {
		ehci_urb_done(ehci, last->urb, last_status);
		count++;
		ehci_qtd_free (ehci, last);
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 */
	if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END(ehci)) {
		switch (state) {
		case QH_STATE_IDLE:
			qh_refresh(ehci, qh);
			break;
		case QH_STATE_LINKED:
			/* We won't refresh a QH that's linked (after the HC
			 * stopped the queue).  That avoids a race:
			 *  - HC reads first part of QH;
			 *  - CPU updates that first part and the token;
			 *  - HC reads rest of that QH, including token
			 * Result:  HC gets an inconsistent image, and then
			 * DMAs to/from the wrong memory (corrupting it).
			 *
			 * That should be rare for interrupt transfers,
			 * except maybe high bandwidth ...
			 */
			if ((cpu_to_hc32(ehci, QH_SMASK)
					& qh->hw_info2) != 0) {
				intr_deschedule (ehci, qh);
				(void) qh_schedule (ehci, qh);
			} else
				unlink_async (ehci, qh);
			break;
		/* otherwise, unlink already started */
		}
	}

	return count;
}

/*-------------------------------------------------------------------------*/
// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
// ... and packet size, for any kind of endpoint descriptor
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
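/* Example (illustrative): a high bandwidth interrupt endpoint might
 * report wMaxPacketSize = 0x1400.  Bits 12:11 give the extra transaction
 * count, bits 10:0 the packet size:
 *
 *	hb_mult(0x1400)    = 1 + ((0x1400 >> 11) & 0x03) = 3
 *	max_packet(0x1400) = 0x1400 & 0x07ff = 0x400 = 1024
 *
 * i.e. up to 3 transactions of 1024 bytes per microframe (the "acts like
 * up to 3KB" case noted in qh_make below).
 */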
/*
 * reverse of qh_urb_transaction:  free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list
) {
	struct list_head	*entry, *temp;

	list_for_each_safe (entry, temp, qtd_list) {
		struct ehci_qtd	*qtd;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		list_del (&qtd->qtd_list);
		ehci_qtd_free (ehci, qtd);
	}
}
/*
 * create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *
qh_urb_transaction (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*head,
	gfp_t			flags
) {
	struct ehci_qtd		*qtd, *qtd_prev;
	dma_addr_t		buf;
	int			len, maxpacket;
	int			is_input;
	u32			token;

	/*
	 * URBs map to sequences of QTDs:  one logical transaction
	 */
	qtd = ehci_qtd_alloc (ehci, flags);
	if (unlikely (!qtd))
		return NULL;
	list_add_tail (&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */

	len = urb->transfer_buffer_length;
	is_input = usb_pipein (urb->pipe);
	if (usb_pipecontrol (urb->pipe)) {
		/* SETUP pid */
		qtd_fill(ehci, qtd, urb->setup_dma,
				sizeof (struct usb_ctrlrequest),
				token | (2 /* "setup" */ << 8), 8);

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);

		/* for zero length DATA stages, STATUS is always IN */
		if (len == 0)
			token |= (1 /* "in" */ << 8);
	}

	/*
	 * data transfer stage:  buffer setup
	 */
	buf = urb->transfer_dma;

	if (is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		int this_qtd_len;

		this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket);
		len -= this_qtd_len;
		buf += this_qtd_len;

		/*
		 * short reads advance to a "magic" dummy instead of the next
		 * qtd ... that forces the queue to stop, for manual cleanup.
		 * (this will usually be overridden later.)
		 */
		if (is_input)
			qtd->hw_alt_next = ehci->async->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		if (likely (len <= 0))
			break;

		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);
	}

	/*
	 * unless the caller requires manual cleanup after short reads,
	 * have the alt_next mechanism keep the queue running after the
	 * last data qtd (the only one, for control and most other cases).
	 */
	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
				|| usb_pipecontrol (urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END(ehci);

	/*
	 * control requests may need a terminating data "status" ack;
	 * bulk ones may need a terminating short packet (zero length).
	 */
	if (likely (urb->transfer_buffer_length != 0)) {
		int	one_more = 0;

		if (usb_pipecontrol (urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out"  */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipebulk (urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc (ehci, flags);
			if (unlikely (!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
			list_add_tail (&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill(ehci, qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);

	return head;

cleanup:
	qtd_list_free (ehci, urb, head);
	return NULL;
}
/*-------------------------------------------------------------------------*/

// Would be best to create all qh's from config descriptors,
// when each interface/altsetting is established.  Unlink
// any previous qh and cancel its urbs first; endpoints are
// implicitly reset then (data toggle too).
// That'd mean updating how usbcore talks to HCDs. (2.7?)


/*
 * Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
static struct ehci_qh *
qh_make (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			flags
) {
	struct ehci_qh		*qh = ehci_qh_alloc (ehci, flags);
	u32			info1 = 0, info2 = 0;
	int			is_input, type;
	int			maxp = 0;
	struct usb_tt		*tt = urb->dev->tt;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint (urb->pipe) << 8;
	info1 |= usb_pipedevice (urb->pipe) << 0;

	is_input = usb_pipein (urb->pipe);
	type = usb_pipetype (urb->pipe);
	maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);

	/* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
	 * acts like up to 3KB, but is built from smaller packets.
	 */
	if (max_packet(maxp) > 1024) {
		ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp));
		goto done;
	}

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
				is_input, 0,
				hb_mult(maxp) * max_packet(maxp)));
		qh->start = NO_FRAME;

		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->c_usecs = 0;
			qh->gap_uf = 0;

			qh->period = urb->interval >> 3;
			if (qh->period == 0 && urb->interval != 1) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
				dbg ("intr period %d uframes, NYET!",
						urb->interval);
				goto done;
			}
		} else {
			int		think_time;

			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
					is_input, 0, maxp) / (125 * 1000);

			/* FIXME this just approximates SPLIT/CSPLIT times */
			if (is_input) {		// SPLIT, gap, CSPLIT+DATA
				qh->c_usecs = qh->usecs + HS_USECS (0);
				qh->usecs = HS_USECS (1);
			} else {		// SPLIT+DATA, gap, CSPLIT
				qh->usecs += HS_USECS (1);
				qh->c_usecs = HS_USECS (0);
			}

			think_time = tt ? tt->think_time : 0;
			qh->tt_usecs = NS_TO_US (think_time +
					usb_calc_bus_time (urb->dev->speed,
						is_input, 0, max_packet (maxp)));
			qh->period = urb->interval;
		}
	}

	/* support for tt scheduling, and access to toggles */
	qh->dev = urb->dev;

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= (1 << 12);	/* EPS "low" */
		/* FALL THROUGH */

	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (EHCI_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= (1 << 27);	/* for TT */
			info1 |= 1 << 14;	/* toggle from qtd */
		}
		info1 |= maxp << 16;

		info2 |= (EHCI_TUNE_MULT_TT << 30);

		/* Some Freescale processors have an erratum in which the
		 * port number in the queue head was 0..N-1 instead of 1..N.
		 */
		if (ehci_has_fsl_portno_bug(ehci))
			info2 |= (urb->dev->ttport-1) << 23;
		else
			info2 |= urb->dev->ttport << 23;

		/* set the address of the TT; for TDI's integrated
		 * root hub tt, leave it zeroed.
		 */
		if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
			info2 |= tt->hub->devnum << 16;

		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */

		break;

	case USB_SPEED_HIGH:		/* no TT involved */
		info1 |= (2 << 12);	/* EPS "high" */
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= 1 << 14;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			/* The USB spec says that high speed bulk endpoints
			 * always use 512 byte maxpacket.  But some device
			 * vendors decided to ignore that, and MSFT is happy
			 * to help them do so.  So now people expect to use
			 * such nonconformant devices with Linux too; sigh.
			 */
			info1 |= max_packet(maxp) << 16;
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= max_packet (maxp) << 16;
			info2 |= hb_mult (maxp) << 30;
		}
		break;
	default:
		dbg ("bogus dev %p speed %d", urb->dev, urb->dev->speed);
done:
		qh_put (qh);
		return NULL;
	}

	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear, advance to dummy */
	qh->qh_state = QH_STATE_IDLE;
	qh->hw_info1 = cpu_to_hc32(ehci, info1);
	qh->hw_info2 = cpu_to_hc32(ehci, info2);
	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
	qh_refresh (ehci, qh);
	return qh;
}
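/* Worked example of the interrupt scheduling fields above (illustrative;
 * the bInterval decoding is done by usbcore, not by this file): a high
 * speed interrupt endpoint with bInterval 4 normally gets urb->interval =
 * 2^(4-1) = 8 microframes, so qh->period = 8 >> 3 = 1 frame.  An interval
 * of 4 uframes would give qh->period = 0 with urb->interval != 1 and be
 * rejected ("NYET"), since sub-frame polling is only scheduled for
 * interval 1.  For full/low speed, qh->period is urb->interval in frames,
 * plus the SPLIT/CSPLIT budgeting kept in usecs/c_usecs/gap_uf/tt_usecs.
 */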
/*-------------------------------------------------------------------------*/

/* move qh (and its qtds) onto async queue; maybe enable queue.  */

static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	__hc32		dma = QH_NEXT(ehci, qh->qh_dma);
	struct ehci_qh	*head;

	/* (re)start the async schedule? */
	head = ehci->async;
	timer_action_done (ehci, TIMER_ASYNC_OFF);
	if (!head->qh_next.qh) {
		u32	cmd = ehci_readl(ehci, &ehci->regs->command);

		if (!(cmd & CMD_ASE)) {
			/* in case a clear of CMD_ASE didn't take yet */
			(void)handshake(ehci, &ehci->regs->status,
					STS_ASS, 0, 150);
			cmd |= CMD_ASE | CMD_RUN;
			ehci_writel(ehci, cmd, &ehci->regs->command);
			ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
			/* posted write need not be known to HC yet ... */
		}
	}

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	if (qh->qh_state == QH_STATE_IDLE)
		qh_refresh (ehci, qh);

	/* splice right after start */
	qh->qh_next = head->qh_next;
	qh->hw_next = head->hw_next;
	wmb ();

	head->qh_next.qh = qh;
	head->hw_next = dma;

	qh->xacterrs = QH_XACTERR_MAX;
	qh->qh_state = QH_STATE_LINKED;
	/* qtd completions reported later by interrupt */
}

/*-------------------------------------------------------------------------*/
/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			epnum,
	void			**ptr
)
{
	struct ehci_qh		*qh = NULL;
	__hc32			qh_addr_mask = cpu_to_hc32(ehci, 0x7f);

	qh = (struct ehci_qh *) *ptr;
	if (unlikely (qh == NULL)) {
		/* can't sleep here, we have ehci->lock... */
		qh = qh_make (ehci, urb, GFP_ATOMIC);
		*ptr = qh;
	}
	if (likely (qh != NULL)) {
		struct ehci_qtd	*qtd;

		if (unlikely (list_empty (qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry (qtd_list->next, struct ehci_qtd,
					qtd_list);

		/* control qh may need patching ... */
		if (unlikely (epnum == 0)) {

			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice (urb->pipe) == 0)
				qh->hw_info1 &= ~qh_addr_mask;
		}

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely (qtd != NULL)) {
			struct ehci_qtd		*dummy;
			dma_addr_t		dma;
			__hc32			token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT(ehci);
			wmb ();
			dummy = qh->dummy;

			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;

			list_del (&qtd->qtd_list);
			list_add (&dummy->qtd_list, qtd_list);
			list_splice_tail(qtd_list, &qh->qtd_list);

			ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry (qh->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT(ehci, dma);

			/* let the hc process these next qtds */
			wmb ();
			dummy->hw_token = token;

			urb->hcpriv = qh_get (qh);
		}
	}
	return qh;
}
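/* Timeline of the dummy-qtd swap above (an illustrative summary, not
 * code from the original file): the HC may already hold the old dummy's
 * DMA address in the QH overlay, so that qtd is never unlinked.  Instead:
 *
 *	1. halt the first new qtd, then copy its contents *into* the old
 *	   dummy (preserving the dummy's own DMA address);
 *	2. splice the new qtds onto qh->qtd_list and recycle the first
 *	   qtd's memory as the new dummy at the list end;
 *	3. wmb(), then store the saved token into the old dummy.
 *
 * Step 3 is the only point where the HC can observe an active qtd, so
 * the queue grows without ever racing the hardware (EHCI spec 4.10.2).
 */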
/*-------------------------------------------------------------------------*/
static int
submit_async (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	struct ehci_qtd		*qtd;
	int			epnum;
	unsigned long		flags;
	struct ehci_qh		*qh = NULL;
	int			rc;

	qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
	epnum = urb->ep->desc.bEndpointAddress;

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
		__func__, urb->dev->devpath, urb,
		epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
		urb->transfer_buffer_length,
		qtd, urb->ep->hcpriv);
#endif

	spin_lock_irqsave (&ehci->lock, flags);
	if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
			&ehci_to_hcd(ehci)->flags))) {
		rc = -ESHUTDOWN;
		goto done;
	}
	rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(rc))
		goto done;

	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
	if (unlikely(qh == NULL)) {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
		rc = -ENOMEM;
		goto done;
	}

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely (qh->qh_state == QH_STATE_IDLE))
		qh_link_async (ehci, qh_get (qh));
 done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (unlikely (qh == NULL))
		qtd_list_free (ehci, urb, qtd_list);
	return rc;
}

/*-------------------------------------------------------------------------*/
/* the async qh for the qtds being reclaimed are now unlinked from the HC */

static void end_unlink_async (struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh = ehci->reclaim;
	struct ehci_qh		*next;

	iaa_watchdog_done(ehci);

	// qh->hw_next = cpu_to_hc32(qh->qh_dma);
	qh->qh_state = QH_STATE_IDLE;
	qh->qh_next.qh = NULL;
	qh_put (qh);			// refcount from reclaim

	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
	next = qh->reclaim;
	ehci->reclaim = next;
	qh->reclaim = NULL;

	qh_completions (ehci, qh);

	if (!list_empty (&qh->qtd_list)
			&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
		qh_link_async (ehci, qh);
	else {
		qh_put (qh);		// refcount from async list

		/* it's not free to turn the async schedule on/off; leave it
		 * active but idle for a while once it empties.
		 */
		if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state)
				&& ehci->async->qh_next.qh == NULL)
			timer_action (ehci, TIMER_ASYNC_OFF);
	}

	if (next) {
		ehci->reclaim = NULL;
		start_unlink_async (ehci, next);
	}
}
/* makes sure the async qh will become idle */
/* caller must own ehci->lock */

static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		cmd = ehci_readl(ehci, &ehci->regs->command);
	struct ehci_qh	*prev;

#ifdef DEBUG
	assert_spin_locked(&ehci->lock);
	if (ehci->reclaim
			|| (qh->qh_state != QH_STATE_LINKED
				&& qh->qh_state != QH_STATE_UNLINK_WAIT)
			)
		BUG ();
#endif

	/* stop async schedule right now? */
	if (unlikely (qh == ehci->async)) {
		/* can't get here without STS_ASS set */
		if (ehci_to_hcd(ehci)->state != HC_STATE_HALT
				&& !ehci->reclaim) {
			/* ... and CMD_IAAD clear */
			ehci_writel(ehci, cmd & ~CMD_ASE,
				    &ehci->regs->command);
			wmb ();
			// handshake later, if we need to
			timer_action_done (ehci, TIMER_ASYNC_OFF);
		}
		return;
	}

	qh->qh_state = QH_STATE_UNLINK;
	ehci->reclaim = qh = qh_get (qh);

	prev = ehci->async;
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;

	prev->hw_next = qh->hw_next;
	prev->qh_next = qh->qh_next;
	wmb ();

	/* If the controller isn't running, we don't have to wait for it */
	if (unlikely(!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))) {
		/* if (unlikely (qh->reclaim != 0))
		 *	this will recurse, probably not much
		 */
		end_unlink_async (ehci);
		return;
	}

	cmd |= CMD_IAAD;
	ehci_writel(ehci, cmd, &ehci->regs->command);
	(void)ehci_readl(ehci, &ehci->regs->command);
	iaa_watchdog_start(ehci);
}

/*-------------------------------------------------------------------------*/
static void
scan_async (struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;
	enum ehci_timer_action	action = TIMER_IO_WATCHDOG;

	ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index);
	timer_action_done (ehci, TIMER_ASYNC_SHRINK);
rescan:
	qh = ehci->async->qh_next.qh;
	if (likely (qh != NULL)) {
		do {
			/* clean any finished work for this qh */
			if (!list_empty (&qh->qtd_list)
					&& qh->stamp != ehci->stamp) {
				int temp;

				/* unlinks could happen here; completion
				 * reporting drops the lock.  rescan using
				 * the latest schedule, but don't rescan
				 * qhs we already finished (no looping).
				 */
				qh = qh_get (qh);
				qh->stamp = ehci->stamp;
				temp = qh_completions (ehci, qh);
				qh_put (qh);
				if (temp != 0)
					goto rescan;
			}

			/* unlink idle entries, reducing DMA usage as well
			 * as HCD schedule-scanning costs.  delay for any qh
			 * we just scanned, there's a not-unusual case that it
			 * doesn't stay idle for long.
			 * (plus, avoids some kind of re-activation race.)
			 */
			if (list_empty(&qh->qtd_list)
					&& qh->qh_state == QH_STATE_LINKED) {
				if (!ehci->reclaim
					&& ((ehci->stamp - qh->stamp) & 0x1fff)
						>= (EHCI_SHRINK_FRAMES * 8))
					start_unlink_async(ehci, qh);
				else
					action = TIMER_ASYNC_SHRINK;
			}

			qh = qh->qh_next.qh;
		} while (qh);
	}
	if (action == TIMER_ASYNC_SHRINK)
		timer_action (ehci, TIMER_ASYNC_SHRINK);
}