/*
 * Copyright (c) 2001-2004 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/
/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue be quick.
 */
static int ehci_get_frame(struct usb_hcd *hcd);
/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	case Q_TYPE_FSTN:
		return &periodic->fstn->fstn_next;
	case Q_TYPE_ITD:
		return &periodic->itd->itd_next;
	/* case Q_TYPE_SITD: */
	default:
		return &periodic->sitd->sitd_next;
	}
}
static __hc32 *
shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	/* our ehci_shadow.qh is actually software part */
	case Q_TYPE_QH:
		return &periodic->qh->hw->hw_next;
	/* others are hw parts */
	default:
		return periodic->hw_next;
	}
}
/* caller must hold ehci->lock */
static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow	*prev_p = &ehci->pshadow[frame];
	__hc32			*hw_p = &ehci->periodic[frame];
	union ehci_shadow	here = *prev_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow(ehci, prev_p,
				Q_NEXT_TYPE(ehci, *hw_p));
		hw_p = shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
		here = *prev_p;
	}

	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr)
		return;

	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow(ehci, &here,
			Q_NEXT_TYPE(ehci, *hw_p));

	if (!ehci->use_dummy_qh ||
	    *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
			!= EHCI_LIST_END(ehci))
		*hw_p = *shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
	else
		*hw_p = ehci->dummy->qh_dma;
}
/* how many of the uframe's 125 usecs are allocated? */
static unsigned short
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
{
	__hc32			*hw_p = &ehci->periodic[frame];
	union ehci_shadow	*q = &ehci->pshadow[frame];
	unsigned		usecs = 0;
	struct ehci_qh_hw	*hw;

	while (q->ptr) {
		switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
		case Q_TYPE_QH:
			hw = q->qh->hw;
			/* is it in the S-mask? */
			if (hw->hw_info2 & cpu_to_hc32(ehci, 1 << uframe))
				usecs += q->qh->usecs;
			/* ... or C-mask? */
			if (hw->hw_info2 & cpu_to_hc32(ehci,
					1 << (8 + uframe)))
				usecs += q->qh->c_usecs;
			hw_p = &hw->hw_next;
			q = &q->qh->qh_next;
			break;
		/* case Q_TYPE_FSTN: */
		default:
			/* for "save place" FSTNs, count the relevant INTR
			 * bandwidth from the previous frame
			 */
			if (q->fstn->hw_prev != EHCI_LIST_END(ehci)) {
				ehci_dbg (ehci, "ignoring FSTN cost ...\n");
			}
			hw_p = &q->fstn->hw_next;
			q = &q->fstn->fstn_next;
			break;
		case Q_TYPE_ITD:
			if (q->itd->hw_transaction[uframe])
				usecs += q->itd->stream->usecs;
			hw_p = &q->itd->hw_next;
			q = &q->itd->itd_next;
			break;
		case Q_TYPE_SITD:
			/* is it in the S-mask?  (count SPLIT, DATA) */
			if (q->sitd->hw_uframe & cpu_to_hc32(ehci,
					1 << uframe)) {
				if (q->sitd->hw_fullspeed_ep &
						cpu_to_hc32(ehci, 1<<31))
					usecs += q->sitd->stream->usecs;
				else	/* worst case for OUT start-split */
					usecs += HS_USECS_ISO (188);
			}

			/* ... C-mask?  (count CSPLIT, DATA) */
			if (q->sitd->hw_uframe &
					cpu_to_hc32(ehci, 1 << (8 + uframe))) {
				/* worst case for IN complete-split */
				usecs += q->sitd->stream->c_usecs;
			}

			hw_p = &q->sitd->hw_next;
			q = &q->sitd->sitd_next;
			break;
		}
	}

	if (usecs > ehci->uframe_periodic_max)
		ehci_err (ehci, "uframe %d sched overrun: %d usecs\n",
			frame * 8 + uframe, usecs);
	return usecs;
}
/*-------------------------------------------------------------------------*/
static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
{
	if (!dev1->tt || !dev2->tt)
		return 0;
	if (dev1->tt != dev2->tt)
		return 0;
	return dev1->ttport == dev2->ttport;
}
#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
/* Which uframe does the low/fullspeed transfer start in?
 *
 * The parameter is the mask of ssplits in "H-frame" terms
 * and this returns the transfer start uframe in "B-frame" terms,
 * which allows both to match, e.g. a ssplit in "H-frame" uframe 0
 * will cause a transfer in "B-frame" uframe 0.  "B-frames" lag
 * "H-frames" by 1 uframe.  See the EHCI spec sec 4.5 and figure 4.7.
 */
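/*
 * Editorial example (not part of the original source): for an S-mask of
 * 0x04, i.e. a start-split issued in "H-frame" uframe 2, the helper below
 * computes ffs(0x04) - 1 == 2, so the full/low-speed transfer is counted
 * against "B-frame" uframe 2.
 */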
static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
{
	unsigned char smask = QH_SMASK & hc32_to_cpu(ehci, mask);

	if (!smask) {
		ehci_err(ehci, "invalid empty smask!\n");
		/* uframe 7 can't have bw so this will indicate failure */
		return 7;
	}
	return ffs(smask) - 1;
}
static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
/* carryover low/fullspeed bandwidth that crosses uframe boundries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{
	int i;

	for (i = 0; i < 7; i++) {
		if (max_tt_usecs[i] < tt_usecs[i]) {
			tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
			tt_usecs[i] = max_tt_usecs[i];
		}
	}
}
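/*
 * Editorial sketch with hypothetical numbers (not from the original source):
 * given tt_usecs = { 200, 50, 0, ... } and max_tt_usecs[0] == 125, the loop
 * above leaves 125 usecs in uframe 0 and carries the excess 75 usecs into
 * uframe 1, which then holds 50 + 75 = 125 usecs.
 */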
/* How many of the tt's periodic downstream 1000 usecs are allocated?
 *
 * While this measures the bandwidth in terms of usecs/uframe,
 * the low/fullspeed bus has no notion of uframes, so any particular
 * low/fullspeed transfer can "carry over" from one uframe to the next,
 * since the TT just performs downstream transfers in sequence.
 *
 * For example two separate 100 usec transfers can start in the same uframe,
 * and the second one would "carry over" 75 usecs into the next uframe.
 */
static void
periodic_tt_usecs (
	struct ehci_hcd *ehci,
	struct usb_device *dev,
	unsigned frame,
	unsigned short tt_usecs[8]
)
{
	__hc32			*hw_p = &ehci->periodic[frame];
	union ehci_shadow	*q = &ehci->pshadow[frame];
	unsigned char		uf;

	memset(tt_usecs, 0, 16);

	while (q->ptr) {
		switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
		case Q_TYPE_ITD:
			hw_p = &q->itd->hw_next;
			q = &q->itd->itd_next;
			continue;
		case Q_TYPE_QH:
			if (same_tt(dev, q->qh->dev)) {
				uf = tt_start_uframe(ehci, q->qh->hw->hw_info2);
				tt_usecs[uf] += q->qh->tt_usecs;
			}
			hw_p = &q->qh->hw->hw_next;
			q = &q->qh->qh_next;
			continue;
		case Q_TYPE_SITD:
			if (same_tt(dev, q->sitd->urb->dev)) {
				uf = tt_start_uframe(ehci, q->sitd->hw_uframe);
				tt_usecs[uf] += q->sitd->stream->tt_usecs;
			}
			hw_p = &q->sitd->hw_next;
			q = &q->sitd->sitd_next;
			continue;
		/* case Q_TYPE_FSTN: */
		default:
			ehci_dbg(ehci, "ignoring periodic frame %d FSTN\n",
					frame);
			hw_p = &q->fstn->hw_next;
			q = &q->fstn->fstn_next;
		}
	}

	carryover_tt_bandwidth(tt_usecs);

	if (max_tt_usecs[7] < tt_usecs[7])
		ehci_err(ehci, "frame %d tt sched overrun: %d usecs\n",
			frame, tt_usecs[7] - max_tt_usecs[7]);
}
/*
 * Return true if the device's tt's downstream bus is available for a
 * periodic transfer of the specified length (usecs), starting at the
 * specified frame/uframe.  Note that (as summarized in section 11.19
 * of the usb 2.0 spec) TTs can buffer multiple transactions for each
 * uframe.
 *
 * The uframe parameter is when the fullspeed/lowspeed transfer
 * should be executed in "B-frame" terms, which is the same as the
 * highspeed ssplit's uframe (which is in "H-frame" terms).  For example
 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
 * See the EHCI spec sec 4.5 and fig 4.7.
 *
 * This checks if the full/lowspeed bus, at the specified starting uframe,
 * has the specified bandwidth available, according to rules listed
 * in USB 2.0 spec section 11.18.1 fig 11-60.
 *
 * This does not check if the transfer would exceed the max ssplit
 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
 * since proper scheduling limits ssplits to less than 16 per uframe.
 */
static int tt_available (
	struct ehci_hcd		*ehci,
	unsigned		period,
	struct usb_device	*dev,
	unsigned		frame,
	unsigned		uframe,
	u16			usecs
)
{
	if ((period == 0) || (uframe >= 7))	/* error */
		return 0;

	for (; frame < ehci->periodic_size; frame += period) {
		unsigned short tt_usecs[8];

		periodic_tt_usecs (ehci, dev, frame, tt_usecs);

		ehci_vdbg(ehci, "tt frame %d check %d usecs start uframe %d in"
				" schedule %d/%d/%d/%d/%d/%d/%d/%d\n",
			frame, usecs, uframe,
			tt_usecs[0], tt_usecs[1], tt_usecs[2], tt_usecs[3],
			tt_usecs[4], tt_usecs[5], tt_usecs[6], tt_usecs[7]);

		if (max_tt_usecs[uframe] <= tt_usecs[uframe]) {
			ehci_vdbg(ehci, "frame %d uframe %d fully scheduled\n",
				frame, uframe);
			return 0;
		}

		/* special case for isoc transfers larger than 125us:
		 * the first and each subsequent fully used uframe
		 * must be empty, so as to not illegally delay
		 * already scheduled transactions
		 */
		if (usecs > 125) {
			int ufs = (usecs / 125);
			int i;

			for (i = uframe; i < (uframe + ufs) && i < 8; i++)
				if (0 < tt_usecs[i]) {
					ehci_vdbg(ehci,
						"multi-uframe xfer can't fit "
						"in frame %d uframe %d\n",
						frame, i);
					return 0;
				}
		}

		tt_usecs[uframe] += usecs;

		carryover_tt_bandwidth(tt_usecs);

		/* fail if the carryover pushed bw past the last uframe's limit */
		if (max_tt_usecs[7] < tt_usecs[7]) {
			ehci_vdbg(ehci,
				"tt unavailable usecs %d frame %d uframe %d\n",
				usecs, frame, uframe);
			return 0;
		}
	}

	return 1;
}
/* return true iff the device's transaction translator is available
 * for a periodic transfer starting at the specified frame, using
 * all the uframes in the mask.
 */
static int tt_no_collision (
	struct ehci_hcd		*ehci,
	unsigned		period,
	struct usb_device	*dev,
	unsigned		frame,
	u32			uf_mask
)
{
	if (period == 0)	/* error */
		return 0;

	/* note bandwidth wastage: split never follows csplit
	 * (different dev or endpoint) until the next uframe.
	 * calling convention doesn't make that distinction.
	 */
	for (; frame < ehci->periodic_size; frame += period) {
		union ehci_shadow	here;
		__hc32			type;
		struct ehci_qh_hw	*hw;

		here = ehci->pshadow[frame];
		type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]);
		while (here.ptr) {
			switch (hc32_to_cpu(ehci, type)) {
			case Q_TYPE_ITD:
				type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
				here = here.itd->itd_next;
				continue;
			case Q_TYPE_QH:
				hw = here.qh->hw;
				if (same_tt (dev, here.qh->dev)) {
					u32	mask;

					mask = hc32_to_cpu(ehci,
							hw->hw_info2);
					/* "knows" no gap is needed */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE(ehci, hw->hw_next);
				here = here.qh->qh_next;
				continue;
			case Q_TYPE_SITD:
				if (same_tt (dev, here.sitd->urb->dev)) {
					u16	mask;

					mask = hc32_to_cpu(ehci, here.sitd
								->hw_uframe);
					/* FIXME assumes no gap for IN! */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
				here = here.sitd->sitd_next;
				continue;
			/* case Q_TYPE_FSTN: */
			default:
				ehci_dbg (ehci,
					"periodic frame %d bogus type %d\n",
					frame, type);
			}

			/* collision or error */
			return 0;
		}
	}

	/* no collision */
	return 1;
}
#endif	/* CONFIG_USB_EHCI_TT_NEWSCHED */

/*-------------------------------------------------------------------------*/
static void enable_periodic(struct ehci_hcd *ehci)
{
	if (ehci->periodic_count++)
		return;

	/* Stop waiting to turn off the periodic schedule */
	ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);

	/* Don't start the schedule until PSS is 0 */
	ehci_poll_PSS(ehci);
	turn_on_io_watchdog(ehci);
}
static void disable_periodic(struct ehci_hcd *ehci)
{
	if (--ehci->periodic_count)
		return;

	/* Don't turn off the schedule until PSS is 1 */
	ehci_poll_PSS(ehci);
}

/*-------------------------------------------------------------------------*/
/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; ehci 0.96+)
 */
static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	i;
	unsigned	period = qh->period;

	dev_dbg (&qh->dev->dev,
		"link qh%d-%04x/%p start %d [%d/%d us]\n",
		period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
			& (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);

	/* high bandwidth, or otherwise every microframe */
	if (period == 0)
		period = 1;

	for (i = qh->start; i < ehci->periodic_size; i += period) {
		union ehci_shadow	*prev = &ehci->pshadow[i];
		__hc32			*hw_p = &ehci->periodic[i];
		union ehci_shadow	here = *prev;
		__hc32			type = 0;

		/* skip the iso nodes at list head */
		while (here.ptr) {
			type = Q_NEXT_TYPE(ehci, *hw_p);
			if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
				break;
			prev = periodic_next_shadow(ehci, prev, type);
			hw_p = shadow_next_periodic(ehci, &here, type);
			here = *prev;
		}

		/* sorting each branch by period (slow-->fast)
		 * enables sharing interior tree nodes
		 */
		while (here.ptr && qh != here.qh) {
			if (qh->period > here.qh->period)
				break;
			prev = &here.qh->qh_next;
			hw_p = &here.qh->hw->hw_next;
			here = *prev;
		}
		/* link in this qh, unless some earlier pass did that */
		if (qh != here.qh) {
			qh->qh_next = here;
			if (here.qh)
				qh->hw->hw_next = *hw_p;
			wmb ();
			prev->qh = qh;
			*hw_p = QH_NEXT (ehci, qh->qh_dma);
		}
	}
	qh->qh_state = QH_STATE_LINKED;

	/* update per-qh bandwidth for usbfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	list_add(&qh->intr_node, &ehci->intr_qh_list);

	/* maybe enable periodic schedule processing */
	enable_periodic(ehci);
}
static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	i;
	unsigned	period;

	/*
	 * If qh is for a low/full-speed device, simply unlinking it
	 * could interfere with an ongoing split transaction.  To unlink
	 * it safely would require setting the QH_INACTIVATE bit and
	 * waiting at least one frame, as described in EHCI 4.12.2.5.
	 *
	 * We won't bother with any of this.  Instead, we assume that the
	 * only reason for unlinking an interrupt QH while the current URB
	 * is still active is to dequeue all the URBs (flush the whole
	 * endpoint queue).
	 *
	 * If rebalancing the periodic schedule is ever implemented, this
	 * approach will no longer be valid.
	 */

	/* high bandwidth, or otherwise part of every microframe */
	if ((period = qh->period) == 0)
		period = 1;

	for (i = qh->start; i < ehci->periodic_size; i += period)
		periodic_unlink (ehci, i, qh);

	/* update per-qh bandwidth for usbfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	dev_dbg (&qh->dev->dev,
		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
		qh->period,
		hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);

	/* qh->qh_next still "live" to HC */
	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = NULL;

	if (ehci->qh_scan_next == qh)
		ehci->qh_scan_next = list_entry(qh->intr_node.next,
				struct ehci_qh, intr_node);
	list_del(&qh->intr_node);
}
static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	/* If the QH isn't linked then there's nothing we can do. */
	if (qh->qh_state != QH_STATE_LINKED)
		return;

	qh_unlink_periodic (ehci, qh);

	/* Make sure the unlinks are visible before starting the timer */
	wmb();

	/*
	 * The EHCI spec doesn't say how long it takes the controller to
	 * stop accessing an unlinked interrupt QH.  The timer delay is
	 * 9 uframes; presumably that will be long enough.
	 */
	qh->unlink_cycle = ehci->intr_unlink_cycle;

	/* New entries go at the end of the intr_unlink list */
	list_add_tail(&qh->unlink_node, &ehci->intr_unlink);

	if (ehci->intr_unlinking)
		;	/* Avoid recursive calls */
	else if (ehci->rh_state < EHCI_RH_RUNNING)
		ehci_handle_intr_unlinks(ehci);
	else if (ehci->intr_unlink.next == &qh->unlink_node) {
		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
		++ehci->intr_unlink_cycle;
	}
}
static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qh_hw	*hw = qh->hw;
	int			rc;

	qh->qh_state = QH_STATE_IDLE;
	hw->hw_next = EHCI_LIST_END(ehci);

	if (!list_empty(&qh->qtd_list))
		qh_completions(ehci, qh);

	/* reschedule QH iff another request is queued */
	if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
		rc = qh_schedule(ehci, qh);
		if (rc == 0) {
			qh_refresh(ehci, qh);
			qh_link_periodic(ehci, qh);
		}

		/* An error here likely indicates handshake failure
		 * or no space left in the schedule.  Neither fault
		 * should happen often ...
		 *
		 * FIXME kill the now-dysfunctional queued urbs
		 */
		else {
			ehci_err(ehci, "can't reschedule qh %p, err %d\n",
					qh, rc);
		}
	}

	/* maybe turn off periodic schedule */
	disable_periodic(ehci);
}
/*-------------------------------------------------------------------------*/

static int check_period (
	struct ehci_hcd *ehci,
	unsigned	frame,
	unsigned	uframe,
	unsigned	period,
	unsigned	usecs
) {
	int		claimed;

	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/* convert "usecs we need" to "max already claimed" */
	usecs = ehci->uframe_periodic_max - usecs;

	/* we "know" 2 and 4 uframe intervals were rejected; so
	 * for period 0, check _every_ microframe in the schedule.
	 */
	if (unlikely (period == 0)) {
		do {
			for (uframe = 0; uframe < 7; uframe++) {
				claimed = periodic_usecs (ehci, frame, uframe);
				if (claimed > usecs)
					return 0;
			}
		} while ((frame += 1) < ehci->periodic_size);

	/* just check the specified uframe, at that period */
	} else {
		do {
			claimed = periodic_usecs (ehci, frame, uframe);
			if (claimed > usecs)
				return 0;
		} while ((frame += period) < ehci->periodic_size);
	}

	return 1;
}
static int check_intr_schedule (
	struct ehci_hcd		*ehci,
	unsigned		frame,
	unsigned		uframe,
	const struct ehci_qh	*qh,
	__hc32			*c_maskp
)
{
	int		retval = -ENOSPC;
	u8		mask = 0;

	if (qh->c_usecs && uframe >= 6)		/* FSTN territory? */
		goto done;

	if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
		goto done;

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
	if (tt_available (ehci, qh->period, qh->dev, frame, uframe,
				qh->tt_usecs)) {
		unsigned i;

		/* TODO : this may need FSTN for SSPLIT in uframe 5. */
		for (i = uframe+1; i < 8 && i < uframe+4; i++)
			if (!check_period (ehci, frame, i,
						qh->period, qh->c_usecs))
				goto done;
			else
				mask |= 1 << i;

		retval = 0;
		*c_maskp = cpu_to_hc32(ehci, mask << 8);
	}
#else
	/* Make sure this tt's buffer is also available for CSPLITs.
	 * We pessimize a bit; probably the typical full speed case
	 * doesn't need the second CSPLIT.
	 *
	 * NOTE:  both SPLIT and CSPLIT could be checked in just
	 * one smart pass...
	 */
	mask = 0x03 << (uframe + qh->gap_uf);
	*c_maskp = cpu_to_hc32(ehci, mask << 8);

	mask |= 1 << uframe;
	if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) {
		if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
					qh->period, qh->c_usecs))
			goto done;
		if (!check_period (ehci, frame, uframe + qh->gap_uf,
					qh->period, qh->c_usecs))
			goto done;
		retval = 0;
	}
#endif
done:
	return retval;
}
/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 */
static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		status;
	unsigned	uframe;
	__hc32		c_mask;
	unsigned	frame;	/* 0..(qh->period - 1), or NO_FRAME */
	struct ehci_qh_hw	*hw = qh->hw;

	hw->hw_next = EHCI_LIST_END(ehci);
	frame = qh->start;

	/* reuse the previous schedule slots, if we can */
	if (frame < qh->period) {
		uframe = ffs(hc32_to_cpup(ehci, &hw->hw_info2) & QH_SMASK);
		status = check_intr_schedule (ehci, frame, --uframe,
				qh, &c_mask);
	} else {
		uframe = 0;
		c_mask = 0;
		status = -ENOSPC;
	}

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	if (status) {
		/* "normal" case, uframing flexible except with splits */
		if (qh->period) {
			int		i;

			for (i = qh->period; status && i > 0; --i) {
				frame = ++ehci->random_frame % qh->period;
				for (uframe = 0; uframe < 8; uframe++) {
					status = check_intr_schedule (ehci,
							frame, uframe, qh,
							&c_mask);
					if (status == 0)
						break;
				}
			}

		/* qh->period == 0 means every uframe */
		} else {
			frame = 0;
			status = check_intr_schedule (ehci, 0, 0, qh, &c_mask);
		}
		if (status)
			goto done;
		qh->start = frame;

		/* reset S-frame and (maybe) C-frame masks */
		hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
		hw->hw_info2 |= qh->period
			? cpu_to_hc32(ehci, 1 << uframe)
			: cpu_to_hc32(ehci, QH_SMASK);
		hw->hw_info2 |= c_mask;
	} else
		ehci_dbg (ehci, "reused qh %p schedule\n", qh);

done:
	return status;
}
static int intr_submit (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	unsigned		epnum;
	unsigned long		flags;
	struct ehci_qh		*qh;
	int			status;
	struct list_head	empty;

	/* get endpoint and transfer/schedule data */
	epnum = urb->ep->desc.bEndpointAddress;

	spin_lock_irqsave (&ehci->lock, flags);

	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD (&empty);
	qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
	if (qh->qh_state == QH_STATE_IDLE) {
		if ((status = qh_schedule (ehci, qh)) != 0)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);

	/* stuff into the periodic schedule */
	if (qh->qh_state == QH_STATE_IDLE) {
		qh_refresh(ehci, qh);
		qh_link_periodic(ehci, qh);
	}

	/* ... update usbfs periodic stats */
	ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;

done:
	if (unlikely(status))
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
done_not_linked:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (status)
		qtd_list_free (ehci, urb, qtd_list);

	return status;
}
static void scan_intr(struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;

	list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
			intr_node) {

		/* clean any finished work for this qh */
		if (!list_empty(&qh->qtd_list)) {
			int temp;

			/*
			 * Unlinks could happen here; completion reporting
			 * drops the lock.  That's why ehci->qh_scan_next
			 * always holds the next qh to scan; if the next qh
			 * gets unlinked then ehci->qh_scan_next is adjusted
			 * in qh_unlink_periodic().
			 */
			temp = qh_completions(ehci, qh);
			if (unlikely(temp || (list_empty(&qh->qtd_list) &&
					qh->qh_state == QH_STATE_LINKED)))
				start_unlink_intr(ehci, qh);
		}
	}
}
/*-------------------------------------------------------------------------*/

/* ehci_iso_stream ops work with both ITD and SITD */

static struct ehci_iso_stream *
iso_stream_alloc (gfp_t mem_flags)
{
	struct ehci_iso_stream *stream;

	stream = kzalloc(sizeof *stream, mem_flags);
	if (likely (stream != NULL)) {
		INIT_LIST_HEAD(&stream->td_list);
		INIT_LIST_HEAD(&stream->free_list);
		stream->next_uframe = -1;
	}
	return stream;
}
static void
iso_stream_init (
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	struct usb_device	*dev,
	int			pipe,
	unsigned		interval
)
{
	static const u8 smask_out[] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };

	u32			buf1;
	unsigned		epnum, maxp;
	int			is_input;
	long			bandwidth;

	/*
	 * this might be a "high bandwidth" highspeed endpoint,
	 * as encoded in the ep descriptor's wMaxPacket field
	 */
	epnum = usb_pipeendpoint (pipe);
	is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
	maxp = usb_maxpacket(dev, pipe, !is_input);
	if (is_input)
		buf1 = (1 << 11);
	else
		buf1 = 0;

	/* knows about ITD vs SITD */
	if (dev->speed == USB_SPEED_HIGH) {
		unsigned multi = hb_mult(maxp);

		stream->highspeed = 1;

		maxp = max_packet(maxp);
		buf1 |= maxp;
		maxp *= multi;

		stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
		stream->buf1 = cpu_to_hc32(ehci, buf1);
		stream->buf2 = cpu_to_hc32(ehci, multi);

		/* usbfs wants to report the average usecs per frame tied up
		 * when transfers on this endpoint are scheduled ...
		 */
		stream->usecs = HS_USECS_ISO (maxp);
		bandwidth = stream->usecs * 8;
		bandwidth /= interval;

	} else {
		u32		addr;
		int		think_time;
		int		hs_transfers;

		addr = dev->ttport << 24;
		if (!ehci_is_TDI(ehci)
				|| (dev->tt->hub !=
					ehci_to_hcd(ehci)->self.root_hub))
			addr |= dev->tt->hub->devnum << 16;
		addr |= epnum << 8;
		addr |= dev->devnum;
		stream->usecs = HS_USECS_ISO (maxp);
		think_time = dev->tt ? dev->tt->think_time : 0;
		stream->tt_usecs = NS_TO_US (think_time + usb_calc_bus_time (
				dev->speed, is_input, 1, maxp));
		hs_transfers = max (1u, (maxp + 187) / 188);
		if (is_input) {
			u32	tmp;

			addr |= 1 << 7;
			stream->c_usecs = stream->usecs;
			stream->usecs = HS_USECS_ISO (1);
			stream->raw_mask = 1;

			/* c-mask as specified in USB 2.0 11.18.4 3.c */
			tmp = (1 << (hs_transfers + 2)) - 1;
			stream->raw_mask |= tmp << (8 + 2);
		} else
			stream->raw_mask = smask_out[hs_transfers - 1];
		bandwidth = stream->usecs + stream->c_usecs;
		bandwidth /= interval << 3;

		/* stream->splits gets created from raw_mask later */
		stream->address = cpu_to_hc32(ehci, addr);
	}
	stream->bandwidth = bandwidth;

	stream->bEndpointAddress = is_input | epnum;
	stream->interval = interval;
	stream->maxp = maxp;
}
static struct ehci_iso_stream *
iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
{
	unsigned		epnum;
	struct ehci_iso_stream	*stream;
	struct usb_host_endpoint *ep;
	unsigned long		flags;

	epnum = usb_pipeendpoint (urb->pipe);
	if (usb_pipein(urb->pipe))
		ep = urb->dev->ep_in[epnum];
	else
		ep = urb->dev->ep_out[epnum];

	spin_lock_irqsave (&ehci->lock, flags);
	stream = ep->hcpriv;

	if (unlikely (stream == NULL)) {
		stream = iso_stream_alloc(GFP_ATOMIC);
		if (likely (stream != NULL)) {
			ep->hcpriv = stream;
			iso_stream_init(ehci, stream, urb->dev, urb->pipe,
					urb->interval);
		}

	/* if dev->ep [epnum] is a QH, hw is set */
	} else if (unlikely (stream->hw != NULL)) {
		ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
			urb->dev->devpath, epnum,
			usb_pipein(urb->pipe) ? "in" : "out");
		stream = NULL;
	}

	spin_unlock_irqrestore (&ehci->lock, flags);
	return stream;
}
/*-------------------------------------------------------------------------*/

/* ehci_iso_sched ops can be ITD-only or SITD-only */

static struct ehci_iso_sched *
iso_sched_alloc (unsigned packets, gfp_t mem_flags)
{
	struct ehci_iso_sched	*iso_sched;
	int			size = sizeof *iso_sched;

	size += packets * sizeof (struct ehci_iso_packet);
	iso_sched = kzalloc(size, mem_flags);
	if (likely (iso_sched != NULL)) {
		INIT_LIST_HEAD (&iso_sched->td_list);
	}
	return iso_sched;
}
static inline void
itd_sched_init(
	struct ehci_hcd		*ehci,
	struct ehci_iso_sched	*iso_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	unsigned	i;
	dma_addr_t	dma = urb->transfer_dma;

	/* how many uframes are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->interval;

	/* figure out per-uframe itd fields that we'll need later
	 * when we fit new itds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet	*uframe = &iso_sched->packet[i];
		unsigned		length;
		dma_addr_t		buf;
		u32			trans;

		length = urb->iso_frame_desc[i].length;
		buf = dma + urb->iso_frame_desc[i].offset;

		trans = EHCI_ISOC_ACTIVE;
		trans |= buf & 0x0fff;
		if (unlikely (((i + 1) == urb->number_of_packets))
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= EHCI_ITD_IOC;
		trans |= length << 16;
		uframe->transaction = cpu_to_hc32(ehci, trans);

		/* might need to cross a buffer page within a uframe */
		uframe->bufp = (buf & ~(u64)0x0fff);
		buf += length;
		if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
			uframe->cross = 1;
	}
}
static void
iso_sched_free (
	struct ehci_iso_stream	*stream,
	struct ehci_iso_sched	*iso_sched
)
{
	if (!iso_sched)
		return;
	// caller must hold ehci->lock!
	list_splice (&iso_sched->td_list, &stream->free_list);
	kfree (iso_sched);
}
static int
itd_urb_transaction (
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			mem_flags
)
{
	struct ehci_itd		*itd;
	dma_addr_t		itd_dma;
	int			i;
	unsigned		num_itds;
	struct ehci_iso_sched	*sched;
	unsigned long		flags;

	sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
	if (unlikely (sched == NULL))
		return -ENOMEM;

	itd_sched_init(ehci, sched, stream, urb);

	if (urb->interval < 8)
		num_itds = 1 + (sched->span + 7) / 8;
	else
		num_itds = urb->number_of_packets;

	/* allocate/init ITDs */
	spin_lock_irqsave (&ehci->lock, flags);
	for (i = 0; i < num_itds; i++) {

		/*
		 * Use iTDs from the free list, but not iTDs that may
		 * still be in use by the hardware.
		 */
		if (likely(!list_empty(&stream->free_list))) {
			itd = list_first_entry(&stream->free_list,
					struct ehci_itd, itd_list);
			if (itd->frame == ehci->now_frame)
				goto alloc_itd;
			list_del (&itd->itd_list);
			itd_dma = itd->itd_dma;
		} else {
 alloc_itd:
			spin_unlock_irqrestore (&ehci->lock, flags);
			itd = dma_pool_alloc (ehci->itd_pool, mem_flags,
					&itd_dma);
			spin_lock_irqsave (&ehci->lock, flags);
			if (!itd) {
				iso_sched_free(stream, sched);
				spin_unlock_irqrestore(&ehci->lock, flags);
				return -ENOMEM;
			}
		}

		memset (itd, 0, sizeof *itd);
		itd->itd_dma = itd_dma;
		itd->frame = 9999;		/* an invalid value */
		list_add (&itd->itd_list, &sched->td_list);
	}
	spin_unlock_irqrestore (&ehci->lock, flags);

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = sched;
	urb->error_count = 0;
	return 0;
}
/*-------------------------------------------------------------------------*/

static inline int
itd_slot_ok (
	struct ehci_hcd		*ehci,
	u32			mod,
	u32			uframe,
	u8			usecs,
	u32			period
)
{
	uframe %= period;
	do {
		/* can't commit more than uframe_periodic_max usec */
		if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
				> (ehci->uframe_periodic_max - usecs))
			return 0;

		/* we know urb->interval is 2^N uframes */
		uframe += period;
	} while (uframe < mod);
	return 1;
}
static inline int
sitd_slot_ok (
	struct ehci_hcd		*ehci,
	u32			mod,
	struct ehci_iso_stream	*stream,
	u32			uframe,
	struct ehci_iso_sched	*sched,
	u32			period_uframes
)
{
	u32			mask, tmp;
	u32			frame, uf;

	mask = stream->raw_mask << (uframe & 7);

	/* for IN, don't wrap CSPLIT into the next frame */
	if (mask & ~0xffff)
		return 0;

	/* check bandwidth */
	uframe %= period_uframes;
	frame = uframe >> 3;

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
	/* The tt's fullspeed bus bandwidth must be available.
	 * tt_available scheduling guarantees 10+% for control/bulk.
	 */
	uf = uframe & 7;
	if (!tt_available(ehci, period_uframes >> 3,
			stream->udev, frame, uf, stream->tt_usecs))
		return 0;
#else
	/* tt must be idle for start(s), any gap, and csplit.
	 * assume scheduling slop leaves 10+% for control/bulk.
	 */
	if (!tt_no_collision(ehci, period_uframes >> 3,
			stream->udev, frame, mask))
		return 0;
#endif

	/* this multi-pass logic is simple, but performance may
	 * suffer when the schedule data isn't cached.
	 */
	do {
		u32		max_used;

		frame = uframe >> 3;
		uf = uframe & 7;

		/* check starts (OUT uses more than one) */
		max_used = ehci->uframe_periodic_max - stream->usecs;
		for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) {
			if (periodic_usecs (ehci, frame, uf) > max_used)
				return 0;
		}

		/* for IN, check CSPLIT */
		if (stream->c_usecs) {
			uf = uframe & 7;
			max_used = ehci->uframe_periodic_max - stream->c_usecs;
			do {
				tmp = 1 << uf;
				tmp <<= 8;
				if ((stream->raw_mask & tmp) == 0)
					continue;
				if (periodic_usecs (ehci, frame, uf)
						> max_used)
					return 0;
			} while (++uf < 8);
		}

		/* we know urb->interval is 2^N uframes */
		uframe += period_uframes;
	} while (uframe < mod);

	stream->splits = cpu_to_hc32(ehci, stream->raw_mask << (uframe & 7));
	return 1;
}
/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.)  That limits the size
 * transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
 */

#define SCHEDULING_DELAY	40	/* microframes */
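/*
 * Editorial note (not in the original source): each microframe is 125 usecs,
 * so SCHEDULING_DELAY corresponds to 40 * 125 us = 5 msec of slack between
 * "now" and the first newly scheduled transaction.
 */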
static int
iso_stream_schedule (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct ehci_iso_stream	*stream
)
{
	u32			now, base, next, start, period, span;
	int			status;
	unsigned		mod = ehci->periodic_size << 3;
	struct ehci_iso_sched	*sched = urb->hcpriv;

	period = urb->interval;
	span = sched->span;
	if (!stream->highspeed) {
		period <<= 3;
		span <<= 3;
	}

	now = ehci_read_frame_index(ehci) & (mod - 1);

	/* Typical case: reuse current schedule, stream is still active.
	 * Hopefully there are no gaps from the host falling behind
	 * (irq delays etc).  If there are, the behavior depends on
	 * whether URB_ISO_ASAP is set.
	 */
	if (likely (!list_empty (&stream->td_list))) {

		/* Take the isochronous scheduling threshold into account */
		if (ehci->i_thresh)
			next = now + ehci->i_thresh;	/* uframe cache */
		else
			next = (now + 2 + 7) & ~0x07;	/* full frame cache */

		/*
		 * Use ehci->last_iso_frame as the base.  There can't be any
		 * TDs scheduled for earlier than that.
		 */
		base = ehci->last_iso_frame << 3;
		next = (next - base) & (mod - 1);
		start = (stream->next_uframe - base) & (mod - 1);

		/* Is the schedule already full? */
		if (unlikely(start < period)) {
			ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n",
					urb, stream->next_uframe, base,
					period, mod);
			status = -ENOSPC;
			goto fail;
		}

		/* Behind the scheduling threshold? */
		if (unlikely(start < next)) {

			/* USB_ISO_ASAP: Round up to the first available slot */
			if (urb->transfer_flags & URB_ISO_ASAP)
				start += (next - start + period - 1) & -period;

			/*
			 * Not ASAP: Use the next slot in the stream.  If
			 * the entire URB falls before the threshold, fail.
			 */
			else if (start + span - period < next) {
				ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n",
						urb, start + base,
						span - period, next + base);
				status = -EXDEV;
				goto fail;
			}
		}

		start += base;
	}

	/* need to schedule; when's the next (u)frame we could start?
	 * this is bigger than ehci->i_thresh allows; scheduling itself
	 * isn't free, the delay should handle reasonably slow cpus.  it
	 * can also help high bandwidth if the dma and irq loads don't
	 * jump until after the queue is primed.
	 */
	else {
		int done = 0;

		base = now & ~0x07;
		start = base + SCHEDULING_DELAY;

		/* find a uframe slot with enough bandwidth.
		 * Early uframes are more precious because full-speed
		 * iso IN transfers can't use late uframes,
		 * and therefore they should be allocated last.
		 */
		next = start;
		start += period;
		do {
			start--;
			/* check schedule: enough space? */
			if (stream->highspeed) {
				if (itd_slot_ok(ehci, mod, start,
						stream->usecs, period))
					done = 1;
			} else {
				if ((start % 8) >= 6)
					continue;
				if (sitd_slot_ok(ehci, mod, stream,
						start, sched, period))
					done = 1;
			}
		} while (start > next && !done);

		/* no room in the schedule */
		if (!done) {
			ehci_dbg(ehci, "iso sched full %p", urb);
			status = -ENOSPC;
			goto fail;
		}
	}

	/* Tried to schedule too far into the future? */
	if (unlikely(start - base + span - period >= mod)) {
		ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
				urb, start - base, span - period, mod);
		status = -EFBIG;
		goto fail;
	}

	stream->next_uframe = start & (mod - 1);

	/* report high speed start in uframes; full speed, in frames */
	urb->start_frame = stream->next_uframe;
	if (!stream->highspeed)
		urb->start_frame >>= 3;

	/* Make sure scan_isoc() sees these */
	if (ehci->isoc_count == 0)
		ehci->last_iso_frame = now >> 3;
	return 0;

 fail:
	iso_sched_free(stream, sched);
	return status;
}
/*-------------------------------------------------------------------------*/

static inline void
itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
		struct ehci_itd *itd)
{
	int i;

	/* it's been recently zeroed */
	itd->hw_next = EHCI_LIST_END(ehci);
	itd->hw_bufp[0] = stream->buf0;
	itd->hw_bufp[1] = stream->buf1;
	itd->hw_bufp[2] = stream->buf2;

	for (i = 0; i < 8; i++)
		itd->index[i] = -1;

	/* All other fields are filled when scheduling */
}
static inline void
itd_patch(
	struct ehci_hcd		*ehci,
	struct ehci_itd		*itd,
	struct ehci_iso_sched	*iso_sched,
	unsigned		index,
	u16			uframe
)
{
	struct ehci_iso_packet	*uf = &iso_sched->packet[index];
	unsigned		pg = itd->pg;

	// BUG_ON (pg == 6 && uf->cross);

	uframe &= 0x07;
	itd->index[uframe] = index;

	itd->hw_transaction[uframe] = uf->transaction;
	itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
	itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
	itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));

	/* iso_frame_desc[].offset must be strictly increasing */
	if (unlikely (uf->cross)) {
		u64	bufp = uf->bufp + 4096;

		itd->pg = ++pg;
		itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
		itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
	}
}
static inline void
itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
	union ehci_shadow	*prev = &ehci->pshadow[frame];
	__hc32			*hw_p = &ehci->periodic[frame];
	union ehci_shadow	here = *prev;
	__hc32			type = 0;

	/* skip any iso nodes which might belong to previous microframes */
	while (here.ptr) {
		type = Q_NEXT_TYPE(ehci, *hw_p);
		if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
			break;
		prev = periodic_next_shadow(ehci, prev, type);
		hw_p = shadow_next_periodic(ehci, &here, type);
		here = *prev;
	}

	itd->itd_next = here;
	itd->hw_next = *hw_p;
	prev->itd = itd;
	itd->frame = frame;
	wmb ();
	*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
}
/* fit urb's itds into the selected schedule slot; activate as needed */
static void itd_link_urb(
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		mod,
	struct ehci_iso_stream	*stream
)
{
	int			packet;
	unsigned		next_uframe, uframe, frame;
	struct ehci_iso_sched	*iso_sched = urb->hcpriv;
	struct ehci_itd		*itd;

	next_uframe = stream->next_uframe & (mod - 1);

	if (unlikely (list_empty(&stream->td_list))) {
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;
		ehci_vdbg (ehci,
			"schedule devp %s ep%d%s-iso period %d start %d.%d\n",
			urb->dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
			urb->interval,
			next_uframe >> 3, next_uframe & 0x7);
	}

	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_disable();
	}

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill iTDs uframe by uframe */
	for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
		if (itd == NULL) {
			/* ASSERT:  we have all necessary itds */
			// BUG_ON (list_empty (&iso_sched->td_list));

			/* ASSERT:  no itds for this endpoint in this uframe */

			itd = list_entry (iso_sched->td_list.next,
					struct ehci_itd, itd_list);
			list_move_tail (&itd->itd_list, &stream->td_list);
			itd->stream = stream;
			itd->urb = urb;
			itd_init (ehci, stream, itd);
		}

		uframe = next_uframe & 0x07;
		frame = next_uframe >> 3;

		itd_patch(ehci, itd, iso_sched, packet, uframe);

		next_uframe += stream->interval;
		next_uframe &= mod - 1;
		packet++;

		/* link completed itds into the schedule */
		if (((next_uframe >> 3) != frame)
				|| packet == urb->number_of_packets) {
			itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
			itd = NULL;
		}
	}
	stream->next_uframe = next_uframe;

	/* don't need that schedule data any more */
	iso_sched_free (stream, iso_sched);
	urb->hcpriv = stream;

	enable_periodic(ehci);
}
#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)

/* Process and recycle a completed ITD.  Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly.  That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs.  It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
{
	struct urb				*urb = itd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;
	unsigned				uframe;
	int					urb_index = -1;
	struct ehci_iso_stream			*stream = itd->stream;
	struct usb_device			*dev;
	bool					retval = false;

	/* for each uframe with a packet */
	for (uframe = 0; uframe < 8; uframe++) {
		if (likely (itd->index[uframe] == -1))
			continue;
		urb_index = itd->index[uframe];
		desc = &urb->iso_frame_desc[urb_index];

		t = hc32_to_cpup(ehci, &itd->hw_transaction[uframe]);
		itd->hw_transaction[uframe] = 0;

		/* report transfer status */
		if (unlikely (t & ISO_ERRS)) {
			if (t & EHCI_ISOC_BUF_ERR)
				desc->status = usb_pipein (urb->pipe)
					? -ENOSR	/* hc couldn't read */
					: -ECOMM;	/* hc couldn't write */
			else if (t & EHCI_ISOC_BABBLE)
				desc->status = -EOVERFLOW;
			else /* (t & EHCI_ISOC_XACTERR) */
				desc->status = -EPROTO;

			/* HC need not update length with this error */
			if (!(t & EHCI_ISOC_BABBLE)) {
				desc->actual_length = EHCI_ITD_LENGTH(t);
				urb->actual_length += desc->actual_length;
			}
		} else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
			desc->status = 0;
			desc->actual_length = EHCI_ITD_LENGTH(t);
			urb->actual_length += desc->actual_length;
		} else {
			/* URB was too late */
		}
	}

	/* handle completion now? */
	if (likely ((urb_index + 1) != urb->number_of_packets))
		goto done;

	/* ASSERT: it's really the last itd for this urb
	list_for_each_entry (itd, &stream->td_list, itd_list)
		BUG_ON (itd->urb == urb);
	 */

	/* give urb back to the driver; completion often (re)submits */
	dev = urb->dev;
	ehci_urb_done(ehci, urb, 0);
	retval = true;
	urb = NULL;

	disable_periodic(ehci);

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_enable();
	}

	if (unlikely(list_is_singular(&stream->td_list))) {
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;
		ehci_vdbg (ehci,
			"deschedule devp %s ep%d%s-iso\n",
			dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
	}

done:
	/* Add to the end of the free list for later reuse */
	list_move_tail(&itd->itd_list, &stream->free_list);

	/* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
	if (list_empty(&stream->td_list)) {
		list_splice_tail_init(&stream->free_list,
				&ehci->cached_itd_list);
		start_free_itds(ehci);
	}

	return retval;
}
/*-------------------------------------------------------------------------*/

static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
	gfp_t mem_flags)
{
	int			status = -EINVAL;
	unsigned long		flags;
	struct ehci_iso_stream	*stream;

	/* Get iso_stream head */
	stream = iso_stream_find (ehci, urb);
	if (unlikely (stream == NULL)) {
		ehci_dbg (ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (unlikely (urb->interval != stream->interval)) {
		ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
			stream->interval, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length,
		urb->number_of_packets, urb->interval,
		stream);
#endif

	/* allocate ITDs w/o locking anything */
	status = itd_urb_transaction (stream, ehci, urb, mem_flags);
	if (unlikely (status < 0)) {
		ehci_dbg (ehci, "can't init itds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave (&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;
	status = iso_stream_schedule(ehci, urb, stream);
	if (likely (status == 0))
		itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
	else
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
 done_not_linked:
	spin_unlock_irqrestore (&ehci->lock, flags);
 done:
	return status;
}
/*-------------------------------------------------------------------------*/

/*
 * "Split ISO TDs" ... used for USB 1.1 devices going through the
 * TTs in USB 2.0 hubs.  These need microframe scheduling.
 */
static inline void
sitd_sched_init(
	struct ehci_hcd		*ehci,
	struct ehci_iso_sched	*iso_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	unsigned	i;
	dma_addr_t	dma = urb->transfer_dma;

	/* how many frames are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->interval;

	/* figure out per-frame sitd fields that we'll need later
	 * when we fit new sitds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet	*packet = &iso_sched->packet[i];
		unsigned		length;
		dma_addr_t		buf;
		u32			trans;

		length = urb->iso_frame_desc[i].length & 0x03ff;
		buf = dma + urb->iso_frame_desc[i].offset;

		trans = SITD_STS_ACTIVE;
		if (((i + 1) == urb->number_of_packets)
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= SITD_IOC;
		trans |= length << 16;
		packet->transaction = cpu_to_hc32(ehci, trans);

		/* might need to cross a buffer page within a td */
		packet->bufp = buf;
		packet->buf1 = (buf + length) & ~0x0fff;
		if (packet->buf1 != (buf & ~(u64)0x0fff))
			packet->cross = 1;

		/* OUT uses multiple start-splits */
		if (stream->bEndpointAddress & USB_DIR_IN)
			continue;
		length = (length + 187) / 188;
		if (length > 1) /* BEGIN vs ALL */
			length |= 1 << 3;
		packet->buf1 |= length;
	}
}
static int
sitd_urb_transaction (
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			mem_flags
)
{
	struct ehci_sitd	*sitd;
	dma_addr_t		sitd_dma;
	int			i;
	struct ehci_iso_sched	*iso_sched;
	unsigned long		flags;

	iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
	if (iso_sched == NULL)
		return -ENOMEM;

	sitd_sched_init(ehci, iso_sched, stream, urb);

	/* allocate/init sITDs */
	spin_lock_irqsave (&ehci->lock, flags);
	for (i = 0; i < urb->number_of_packets; i++) {

		/* NOTE:  for now, we don't try to handle wraparound cases
		 * for IN (using sitd->hw_backpointer, like a FSTN), which
		 * means we never need two sitds for full speed packets.
		 */

		/*
		 * Use siTDs from the free list, but not siTDs that may
		 * still be in use by the hardware.
		 */
		if (likely(!list_empty(&stream->free_list))) {
			sitd = list_first_entry(&stream->free_list,
					struct ehci_sitd, sitd_list);
			if (sitd->frame == ehci->now_frame)
				goto alloc_sitd;
			list_del (&sitd->sitd_list);
			sitd_dma = sitd->sitd_dma;
		} else {
 alloc_sitd:
			spin_unlock_irqrestore (&ehci->lock, flags);
			sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags,
					&sitd_dma);
			spin_lock_irqsave (&ehci->lock, flags);
			if (!sitd) {
				iso_sched_free(stream, iso_sched);
				spin_unlock_irqrestore(&ehci->lock, flags);
				return -ENOMEM;
			}
		}

		memset (sitd, 0, sizeof *sitd);
		sitd->sitd_dma = sitd_dma;
		sitd->frame = 9999;		/* an invalid value */
		list_add (&sitd->sitd_list, &iso_sched->td_list);
	}

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = iso_sched;
	urb->error_count = 0;

	spin_unlock_irqrestore (&ehci->lock, flags);
	return 0;
}
/*-------------------------------------------------------------------------*/

static inline void
sitd_patch(
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	struct ehci_sitd	*sitd,
	struct ehci_iso_sched	*iso_sched,
	unsigned		index
)
{
	struct ehci_iso_packet	*uf = &iso_sched->packet[index];
	u64			bufp = uf->bufp;

	sitd->hw_next = EHCI_LIST_END(ehci);
	sitd->hw_fullspeed_ep = stream->address;
	sitd->hw_uframe = stream->splits;
	sitd->hw_results = uf->transaction;
	sitd->hw_backpointer = EHCI_LIST_END(ehci);

	sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
	sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);

	sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
	if (uf->cross)
		bufp += 4096;
	sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
	sitd->index = index;
}
static inline void
sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{
	/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
	sitd->sitd_next = ehci->pshadow[frame];
	sitd->hw_next = ehci->periodic[frame];
	ehci->pshadow[frame].sitd = sitd;
	sitd->frame = frame;
	wmb ();
	ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
}
/* fit urb's sitds into the selected schedule slot; activate as needed */
static void sitd_link_urb(
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		mod,
	struct ehci_iso_stream	*stream
)
{
	int			packet;
	unsigned		next_uframe;
	struct ehci_iso_sched	*sched = urb->hcpriv;
	struct ehci_sitd	*sitd;

	next_uframe = stream->next_uframe;

	if (list_empty(&stream->td_list)) {
		/* usbfs ignores TT bandwidth */
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;
		ehci_vdbg (ehci,
			"sched devp %s ep%d%s-iso [%d] %dms/%04x\n",
			urb->dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
			(next_uframe >> 3) & (ehci->periodic_size - 1),
			stream->interval, hc32_to_cpu(ehci, stream->splits));
	}

	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_disable();
	}

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill sITDs frame by frame */
	for (packet = 0, sitd = NULL;
			packet < urb->number_of_packets;
			packet++) {

		/* ASSERT:  we have all necessary sitds */
		BUG_ON (list_empty (&sched->td_list));

		/* ASSERT:  no itds for this endpoint in this frame */

		sitd = list_entry (sched->td_list.next,
				struct ehci_sitd, sitd_list);
		list_move_tail (&sitd->sitd_list, &stream->td_list);
		sitd->stream = stream;

		sitd_patch(ehci, stream, sitd, sched, packet);
		sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
				sitd);

		next_uframe += stream->interval << 3;
	}
	stream->next_uframe = next_uframe & (mod - 1);

	/* don't need that schedule data any more */
	iso_sched_free (stream, sched);
	urb->hcpriv = stream;

	enable_periodic(ehci);
}
/*-------------------------------------------------------------------------*/

#define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
			| SITD_STS_XACT | SITD_STS_MMF)

/* Process and recycle a completed SITD.  Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly.  That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs.  It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
{
	struct urb				*urb = sitd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;
	int					urb_index = -1;
	struct ehci_iso_stream			*stream = sitd->stream;
	struct usb_device			*dev;
	bool					retval = false;

	urb_index = sitd->index;
	desc = &urb->iso_frame_desc[urb_index];
	t = hc32_to_cpup(ehci, &sitd->hw_results);

	/* report transfer status */
	if (unlikely(t & SITD_ERRS)) {
		if (t & SITD_STS_DBE)
			desc->status = usb_pipein (urb->pipe)
				? -ENOSR	/* hc couldn't read */
				: -ECOMM;	/* hc couldn't write */
		else if (t & SITD_STS_BABBLE)
			desc->status = -EOVERFLOW;
		else /* XACT, MMF, etc */
			desc->status = -EPROTO;
	} else if (unlikely(t & SITD_STS_ACTIVE)) {
		/* URB was too late */
	} else {
		desc->status = 0;
		desc->actual_length = desc->length - SITD_LENGTH(t);
		urb->actual_length += desc->actual_length;
	}

	/* handle completion now? */
	if ((urb_index + 1) != urb->number_of_packets)
		goto done;

	/* ASSERT: it's really the last sitd for this urb
	list_for_each_entry (sitd, &stream->td_list, sitd_list)
		BUG_ON (sitd->urb == urb);
	 */

	/* give urb back to the driver; completion often (re)submits */
	dev = urb->dev;
	ehci_urb_done(ehci, urb, 0);
	retval = true;
	urb = NULL;

	disable_periodic(ehci);

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_enable();
	}

	if (list_is_singular(&stream->td_list)) {
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;
		ehci_vdbg (ehci,
			"deschedule devp %s ep%d%s-iso\n",
			dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
	}

done:
	/* Add to the end of the free list for later reuse */
	list_move_tail(&sitd->sitd_list, &stream->free_list);

	/* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
	if (list_empty(&stream->td_list)) {
		list_splice_tail_init(&stream->free_list,
				&ehci->cached_sitd_list);
		start_free_itds(ehci);
	}

	return retval;
}
static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
	gfp_t mem_flags)
{
	int			status = -EINVAL;
	unsigned long		flags;
	struct ehci_iso_stream	*stream;

	/* Get iso_stream head */
	stream = iso_stream_find (ehci, urb);
	if (stream == NULL) {
		ehci_dbg (ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (urb->interval != stream->interval) {
		ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
			stream->interval, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"submit %p dev%s ep%d%s-iso len %d\n",
		urb, urb->dev->devpath,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length);
#endif

	/* allocate SITDs */
	status = sitd_urb_transaction (stream, ehci, urb, mem_flags);
	if (status < 0) {
		ehci_dbg (ehci, "can't init sitds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave (&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;
	status = iso_stream_schedule(ehci, urb, stream);
	if (status == 0)
		sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
	else
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
 done_not_linked:
	spin_unlock_irqrestore (&ehci->lock, flags);
 done:
	return status;
}
/*-------------------------------------------------------------------------*/

static void scan_isoc(struct ehci_hcd *ehci)
{
	unsigned	uf, now_frame, frame;
	unsigned	fmask = ehci->periodic_size - 1;
	bool		modified, live;

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible:  cache-friendly.
	 */
	if (ehci->rh_state >= EHCI_RH_RUNNING) {
		uf = ehci_read_frame_index(ehci);
		now_frame = (uf >> 3) & fmask;
		live = true;
	} else {
		now_frame = (ehci->last_iso_frame - 1) & fmask;
		live = false;
	}
	ehci->now_frame = now_frame;

	frame = ehci->last_iso_frame;
	for (;;) {
		union ehci_shadow	q, *q_p;
		__hc32			type, *hw_p;

restart:
		/* scan each element in frame's queue for completions */
		q_p = &ehci->pshadow[frame];
		hw_p = &ehci->periodic[frame];
		q.ptr = q_p->ptr;
		type = Q_NEXT_TYPE(ehci, *hw_p);
		modified = false;

		while (q.ptr != NULL) {
			switch (hc32_to_cpu(ehci, type)) {
			case Q_TYPE_ITD:
				/* If this ITD is still active, leave it for
				 * later processing ... check the next entry.
				 * No need to check for activity unless the
				 * frame is current.
				 */
				if (frame == now_frame && live) {
					rmb();
					for (uf = 0; uf < 8; uf++) {
						if (q.itd->hw_transaction[uf] &
								ITD_ACTIVE(ehci))
							break;
					}
					if (uf < 8) {
						q_p = &q.itd->itd_next;
						hw_p = &q.itd->hw_next;
						type = Q_NEXT_TYPE(ehci,
								q.itd->hw_next);
						q = *q_p;
						break;
					}
				}

				/* Take finished ITDs out of the schedule
				 * and process them:  recycle, maybe report
				 * URB completion.  HC won't cache the
				 * pointer for much longer, if at all.
				 */
				*q_p = q.itd->itd_next;
				if (!ehci->use_dummy_qh ||
						q.itd->hw_next != EHCI_LIST_END(ehci))
					*hw_p = q.itd->hw_next;
				else
					*hw_p = ehci->dummy->qh_dma;
				type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
				wmb();
				modified = itd_complete (ehci, q.itd);
				q = *q_p;
				break;
			case Q_TYPE_SITD:
				/* If this SITD is still active, leave it for
				 * later processing ... check the next entry.
				 * No need to check for activity unless the
				 * frame is current.
				 */
				if (((frame == now_frame) ||
						(((frame + 1) & fmask) == now_frame))
					&& live
					&& (q.sitd->hw_results &
						SITD_ACTIVE(ehci))) {

					q_p = &q.sitd->sitd_next;
					hw_p = &q.sitd->hw_next;
					type = Q_NEXT_TYPE(ehci,
							q.sitd->hw_next);
					q = *q_p;
					break;
				}

				/* Take finished SITDs out of the schedule
				 * and process them:  recycle, maybe report
				 * URB completion.
				 */
				*q_p = q.sitd->sitd_next;
				if (!ehci->use_dummy_qh ||
						q.sitd->hw_next != EHCI_LIST_END(ehci))
					*hw_p = q.sitd->hw_next;
				else
					*hw_p = ehci->dummy->qh_dma;
				type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
				wmb();
				modified = sitd_complete (ehci, q.sitd);
				q = *q_p;
				break;
			default:
				ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
					type, frame, q.ptr);
				/* FALL THROUGH */
			case Q_TYPE_QH:
			case Q_TYPE_FSTN:
				/* End of the iTDs and siTDs */
				q.ptr = NULL;
				break;
			}

			/* assume completion callbacks modify the queue */
			if (unlikely(modified && ehci->isoc_count > 0))
				goto restart;
		}

		/* Stop when we have reached the current frame */
		if (frame == now_frame)
			break;

		/* The last frame may still have active siTDs */
		ehci->last_iso_frame = frame;
		frame = (frame + 1) & fmask;
	}
}