Merge with Linux 2.5.74.
[linux-2.6/linux-mips.git] / drivers / usb / host / uhci-hcd.c
blob17f98168bf014f6cdd12fb060c38acb34be98b72
1 /*
2 * Universal Host Controller Interface driver for USB.
4 * Maintainer: Johannes Erdfelt <johannes@erdfelt.com>
6 * (C) Copyright 1999 Linus Torvalds
7 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
8 * (C) Copyright 1999 Randy Dunlap
9 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
10 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
11 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
12 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
13 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
14 * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
15 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
17 * Intel documents this fairly well, and as far as I know there
18 * are no royalties or anything like that, but even so there are
19 * people who decided that they want to do the same thing in a
20 * completely different way.
22 * WARNING! The USB documentation is downright evil. Most of it
23 * is just crap, written by a committee. You're better off ignoring
24 * most of it, the important stuff is:
25 * - the low-level protocol (fairly simple but lots of small details)
26 * - working around the horridness of the rest
29 #include <linux/config.h>
30 #include <linux/module.h>
31 #include <linux/pci.h>
32 #include <linux/kernel.h>
33 #include <linux/init.h>
34 #include <linux/delay.h>
35 #include <linux/ioport.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/smp_lock.h>
39 #include <linux/errno.h>
40 #include <linux/unistd.h>
41 #include <linux/interrupt.h>
42 #include <linux/spinlock.h>
43 #include <linux/proc_fs.h>
44 #ifdef CONFIG_USB_DEBUG
45 #define DEBUG
46 #else
47 #undef DEBUG
48 #endif
49 #include <linux/usb.h>
51 #include <asm/uaccess.h>
52 #include <asm/io.h>
53 #include <asm/irq.h>
54 #include <asm/system.h>
56 #include "../core/hcd.h"
57 #include "uhci-hcd.h"
59 #include <linux/pm.h>
/*
 * Version Information
 */
#define DRIVER_VERSION "v2.1"
#define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber"
#define DRIVER_DESC "USB Universal Host Controller Interface driver"

/*
 * debug = 0, no debugging messages
 * debug = 1, dump failed URB's except for stalls
 * debug = 2, dump all failed URB's (including stalls)
 *            show all queues in /proc/driver/uhci/[pci_addr]
 * debug = 3, show all TD's in URB's when dumping
 */
#ifdef DEBUG
static int debug = 1;
#else
static int debug = 0;
#endif
MODULE_PARM(debug, "i");
MODULE_PARM_DESC(debug, "Debug level");

/* Scratch buffer used to dump failed URB chains (debug only) */
static char *errbuf;
#define ERRBUF_LEN    (PAGE_SIZE * 8)

/* Root hub and debug dump helpers are compiled into this file */
#include "uhci-hub.c"
#include "uhci-debug.c"

/* Slab cache for per-URB private state */
static kmem_cache_t *uhci_up_cachep;	/* urb_priv */

/* Forward declarations (defined later in this file) */
static int uhci_get_current_frame_number(struct uhci_hcd *uhci);
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb);

static void hc_state_transitions(struct uhci_hcd *uhci);

/* If a transfer is still active after this much time, turn off FSBR */
#define IDLE_TIMEOUT	(HZ / 20)	/* 50 ms */
#define FSBR_DELAY	(HZ / 20)	/* 50 ms */

/* When we timeout an idle transfer for FSBR, we'll switch it over to */
/* depth first traversal. We'll do it in groups of this number of TD's */
/* to make sure it doesn't hog all of the bandwidth */
#define DEPTH_INTERVAL	5
/*
 * Request an interrupt at the end of the next frame by setting the IOC
 * bit on the terminating TD.
 *
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort
 */
static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	unsigned long flags;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
 * Cancel a pending end-of-frame interrupt request by clearing the IOC
 * bit on the terminating TD (counterpart of uhci_set_next_interrupt).
 */
static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	unsigned long flags;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
131 static inline void uhci_add_complete(struct uhci_hcd *uhci, struct urb *urb)
133 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
134 unsigned long flags;
136 spin_lock_irqsave(&uhci->complete_list_lock, flags);
137 list_add_tail(&urbp->complete_list, &uhci->complete_list);
138 spin_unlock_irqrestore(&uhci->complete_list_lock, flags);
141 static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci, struct usb_device *dev)
143 dma_addr_t dma_handle;
144 struct uhci_td *td;
146 td = pci_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
147 if (!td)
148 return NULL;
150 td->dma_handle = dma_handle;
152 td->link = UHCI_PTR_TERM;
153 td->buffer = 0;
155 td->frame = -1;
156 td->dev = dev;
158 INIT_LIST_HEAD(&td->list);
159 INIT_LIST_HEAD(&td->fl_list);
161 usb_get_dev(dev);
163 return td;
/*
 * Fill in a TD's hardware-visible fields (status, token, buffer),
 * byteswapping to little-endian as the controller requires.  The link
 * pointer is deliberately left untouched.
 */
static inline void uhci_fill_td(struct uhci_td *td, __u32 status,
		__u32 token, __u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}
/*
 * We insert Isochronous URB's directly into the frame list at the beginning
 */
static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
{
	unsigned long flags;

	framenum %= UHCI_NUMFRAMES;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->fl->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		/* Append behind the last TD already on this frame.  The
		 * mb() guarantees the new TD's link is valid before the
		 * HC can reach it through ltd->link. */
		ftd = uhci->fl->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		mb();
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		/* Frame slot was empty: the new TD becomes the head */
		td->link = uhci->fl->frame[framenum];
		mb();
		uhci->fl->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->fl->frame_cpu[framenum] = td;
	}

	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
 * Unlink a TD from the frame list (and its per-frame sibling chain).
 * Safe to call on a TD that was never inserted.
 */
static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	unsigned long flags;

	/* If it's not inserted, don't remove it */
	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	if (td->frame == -1 && list_empty(&td->fl_list))
		goto out;

	if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
		/* TD is the head of its frame's chain */
		if (list_empty(&td->fl_list)) {
			/* Only TD in the frame: empty the slot */
			uhci->fl->frame[td->frame] = td->link;
			uhci->fl->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			/* Promote the next TD to frame head */
			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->fl->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->fl->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		/* Mid-chain: splice our predecessor past us */
		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	mb();
	td->link = UHCI_PTR_TERM;

	list_del_init(&td->fl_list);
	td->frame = -1;

out:
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
 * Inserts a td into qh list at the top.
 *
 * The QH's element pointer is set to the first TD and each TD is
 * chained to the next with the given traversal bit (breadth/depth).
 */
static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, u32 breadth)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td, *ptd;

	if (list_empty(&urbp->td_list))
		return;

	head = &urbp->td_list;
	tmp = head->next;

	/* Ordering isn't important here yet since the QH hasn't been */
	/* inserted into the schedule yet */
	td = list_entry(tmp, struct uhci_td, list);

	/* Add the first TD to the QH element pointer */
	qh->element = cpu_to_le32(td->dma_handle) | breadth;

	ptd = td;

	/* Then link the rest of the TD's */
	tmp = tmp->next;
	while (tmp != head) {
		td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		ptd->link = cpu_to_le32(td->dma_handle) | breadth;

		ptd = td;
	}

	/* Terminate the chain after the last TD */
	ptd->link = UHCI_PTR_TERM;
}
/*
 * Return a TD to the DMA pool, dropping the device reference taken at
 * allocation.  Complains (debug builds) if the TD is still linked.
 */
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dbg("td %p is still in list!", td);
	if (!list_empty(&td->fl_list))
		dbg("td %p is still in fl_list!", td);

	if (td->dev)
		usb_put_dev(td->dev);

	pci_pool_free(uhci->td_pool, td, td->dma_handle);
}
/*
 * Allocate a queue head from the QH DMA pool; both element and link
 * pointers start out terminated.  Takes a reference on @dev (dropped
 * in uhci_free_qh).  Returns NULL on allocation failure.
 */
static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, struct usb_device *dev)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = pci_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	qh->dev = dev;
	qh->urbp = NULL;

	INIT_LIST_HEAD(&qh->list);
	INIT_LIST_HEAD(&qh->remove_list);

	usb_get_dev(dev);

	return qh;
}
/*
 * Return a QH to the DMA pool, dropping the device reference taken at
 * allocation.  Complains (debug builds) if the QH is still linked.
 */
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (!list_empty(&qh->list))
		dbg("qh %p list not empty!", qh);
	if (!list_empty(&qh->remove_list))
		dbg("qh %p still in remove_list!", qh);

	if (qh->dev)
		usb_put_dev(qh->dev);

	pci_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
/*
 * Append this urb's qh after the last qh in skelqh->list
 * MUST be called with uhci->frame_list_lock acquired
 *
 * Note that urb_priv.queue_list doesn't have a separate queue head;
 * it's a ring with every element "live".
 */
static void _uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct list_head *tmp;
	struct uhci_qh *lqh;

	/* Grab the last QH */
	lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);

	/*
	 * Patch this endpoint's URB's QHs to point to the next skelqh:
	 *    skelqh --> ... lqh --> newqh --> next skelqh
	 * Do this first, so the HC always sees the right QH after this one.
	 */
	list_for_each (tmp, &urbp->queue_list) {
		struct urb_priv *turbp =
			list_entry(tmp, struct urb_priv, queue_list);

		turbp->qh->link = lqh->link;
	}
	urbp->qh->link = lqh->link;
	wmb();				/* Ordering is important */

	/*
	 * Patch QHs for previous endpoint's queued URBs?  HC goes
	 * here next, not to the next skelqh it now points to.
	 *
	 *    lqh --> td ... --> qh ... --> td --> qh ... --> td
	 *     |                 |                 |
	 *     v                 v                 v
	 *     +<----------------+-----------------+
	 *     |
	 *     v
	 *    newqh --> td ... --> td
	 *     |
	 *     v
	 *    ...
	 *
	 * The HC could see (and use!) any of these as we write them.
	 */
	if (lqh->urbp) {
		list_for_each (tmp, &lqh->urbp->queue_list) {
			struct urb_priv *turbp =
				list_entry(tmp, struct urb_priv, queue_list);

			turbp->qh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
		}
	}
	lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;

	list_add_tail(&urbp->qh->list, &skelqh->list);
}
/* Lock-taking wrapper around _uhci_insert_qh() */
static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
	unsigned long flags;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	_uhci_insert_qh(uhci, skelqh, urb);
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
 * Start removal of QH from schedule; it finishes next frame.
 * TDs should be unlinked before this is called.
 */
static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	unsigned long flags;
	struct uhci_qh *pqh;

	if (!qh)
		return;

	qh->urbp = NULL;

	/*
	 * Only go through the hoops if it's actually linked in
	 * Queued QHs are removed in uhci_delete_queued_urb,
	 * since (for queued URBs) the pqh is pointed to the next
	 * QH in the queue, not the next endpoint's QH.
	 */
	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	if (!list_empty(&qh->list)) {
		pqh = list_entry(qh->list.prev, struct uhci_qh, list);

		/* If the previous QH has queued URBs, patch every QH in
		 * that queue past us as well */
		if (pqh->urbp) {
			struct list_head *head, *tmp;

			head = &pqh->urbp->queue_list;
			tmp = head->next;
			while (head != tmp) {
				struct urb_priv *turbp =
					list_entry(tmp, struct urb_priv, queue_list);

				tmp = tmp->next;

				turbp->qh->link = qh->link;
			}
		}

		pqh->link = qh->link;
		mb();
		/* Leave qh->link in case the HC is on the QH now, it will */
		/* continue the rest of the schedule */
		qh->element = UHCI_PTR_TERM;

		list_del_init(&qh->list);
	}
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);

	spin_lock_irqsave(&uhci->qh_remove_list_lock, flags);

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the QH */
	if (list_empty(&uhci->qh_remove_list))
		uhci_set_next_interrupt(uhci);

	list_add(&qh->remove_list, &uhci->qh_remove_list);

	spin_unlock_irqrestore(&uhci->qh_remove_list_lock, flags);
}
465 static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
467 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
468 struct list_head *head, *tmp;
470 head = &urbp->td_list;
471 tmp = head->next;
472 while (head != tmp) {
473 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
475 tmp = tmp->next;
477 if (toggle)
478 td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
479 else
480 td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);
483 toggle ^= 1;
486 return toggle;
/* This function will append one URB's QH to another URB's QH. This is for */
/* queuing interrupt, control or bulk transfers */
static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
{
	struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
	struct list_head *tmp;
	struct uhci_td *lltd;
	unsigned long flags;

	eurbp = eurb->hcpriv;
	urbp = urb->hcpriv;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	/* Find the first URB in the queue */
	if (eurbp->queued) {
		struct list_head *head = &eurbp->queue_list;

		/* eurb is not the queue head itself: walk the ring until
		 * we hit the entry that isn't marked queued (the head) */
		tmp = head->next;
		while (tmp != head) {
			struct urb_priv *turbp =
				list_entry(tmp, struct urb_priv, queue_list);

			if (!turbp->queued)
				break;

			tmp = tmp->next;
		}
	} else
		tmp = &eurbp->queue_list;

	furbp = list_entry(tmp, struct urb_priv, queue_list);
	lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);

	/* Last TD of the last URB currently in the queue */
	lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);

	/* Continue the toggle sequence from where the queue left off */
	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe),
		uhci_fixup_toggle(urb, uhci_toggle(td_token(lltd)) ^ 1));

	/* All qh's in the queue need to link to the next queue */
	urbp->qh->link = eurbp->qh->link;

	mb();			/* Make sure we flush everything */

	/* Hook the queue's last TD to the new URB's QH */
	lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;

	list_add_tail(&urbp->queue_list, &furbp->queue_list);

	urbp->queued = 1;

	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
 * Remove one URB from a queue of URBs sharing an endpoint, rethreading
 * the data toggles and QH/TD links of the URBs that remain behind it.
 */
static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp, *nurbp;
	struct list_head *head, *tmp;
	struct urb_priv *purbp;
	struct uhci_td *pltd;
	unsigned int toggle;
	unsigned long flags;

	urbp = urb->hcpriv;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	/* Not part of a queue: nothing to rethread */
	if (list_empty(&urbp->queue_list))
		goto out;

	nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);

	/* Fix up the toggle for the next URB's */
	if (!urbp->queued)
		/* We just set the toggle in uhci_unlink_generic */
		toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
	else {
		/* If we're in the middle of the queue, grab the toggle */
		/* from the TD previous to us */
		purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
				queue_list);

		pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);

		toggle = uhci_toggle(td_token(pltd)) ^ 1;
	}

	/* Propagate the toggle through every URB still queued behind us */
	head = &urbp->queue_list;
	tmp = head->next;
	while (head != tmp) {
		struct urb_priv *turbp;

		turbp = list_entry(tmp, struct urb_priv, queue_list);

		tmp = tmp->next;

		if (!turbp->queued)
			break;

		toggle = uhci_fixup_toggle(turbp->urb, toggle);
	}

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
		usb_pipeout(urb->pipe), toggle);

	if (!urbp->queued) {
		/* We were the queue head: promote the next URB */
		struct uhci_qh *pqh;

		nurbp->queued = 0;

		/*
		 * Fixup the previous QH's queue to link to the new head
		 * of this queue.
		 */
		pqh = list_entry(urbp->qh->list.prev, struct uhci_qh, list);

		if (pqh->urbp) {
			struct list_head *head, *tmp;

			head = &pqh->urbp->queue_list;
			tmp = head->next;
			while (head != tmp) {
				struct urb_priv *turbp =
					list_entry(tmp, struct urb_priv, queue_list);

				tmp = tmp->next;

				turbp->qh->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
			}
		}

		pqh->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;

		list_add_tail(&nurbp->qh->list, &urbp->qh->list);
		list_del_init(&urbp->qh->list);
	} else {
		/* We're somewhere in the middle (or end). A bit trickier */
		/* than the head scenario */
		purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
				queue_list);

		pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
		if (nurbp->queued)
			pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
		else
			/* The next URB happens to be the beginning, so */
			/* we're the last, end the chain */
			pltd->link = UHCI_PTR_TERM;
	}

	list_del_init(&urbp->queue_list);

out:
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
 * Allocate and zero the per-URB private bookkeeping structure, link it
 * onto the controller-wide URB list, and attach it to urb->hcpriv.
 * Returns NULL on allocation failure.
 */
static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
	if (!urbp) {
		err("uhci_alloc_urb_priv: couldn't allocate memory for urb_priv\n");
		return NULL;
	}

	memset((void *)urbp, 0, sizeof(*urbp));

	/* Timestamps used by the FSBR idle-timeout logic */
	urbp->inserttime = jiffies;
	urbp->fsbrtime = jiffies;
	urbp->urb = urb;
	urbp->dev = urb->dev;

	INIT_LIST_HEAD(&urbp->td_list);
	INIT_LIST_HEAD(&urbp->queue_list);
	INIT_LIST_HEAD(&urbp->complete_list);
	INIT_LIST_HEAD(&urbp->urb_list);

	list_add_tail(&urbp->urb_list, &uhci->urb_list);

	urb->hcpriv = urbp;

	return urbp;
}
674 * MUST be called with urb->lock acquired
676 static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
678 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
680 td->urb = urb;
682 list_add_tail(&td->list, &urbp->td_list);
686 * MUST be called with urb->lock acquired
688 static void uhci_remove_td_from_urb(struct uhci_td *td)
690 if (list_empty(&td->list))
691 return;
693 list_del_init(&td->list);
695 td->urb = NULL;
/*
 * Tear down a URB's private state: free every TD still attached, then
 * release the urb_priv itself.
 *
 * MUST be called with urb->lock acquired
 */
static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *head, *tmp;
	struct urb_priv *urbp;

	urbp = (struct urb_priv *)urb->hcpriv;
	if (!urbp)
		return;

	/* These should have been emptied before we got here */
	if (!list_empty(&urbp->urb_list))
		warn("uhci_destroy_urb_priv: urb %p still on uhci->urb_list or uhci->remove_list", urb);

	if (!list_empty(&urbp->complete_list))
		warn("uhci_destroy_urb_priv: urb %p still on uhci->complete_list", urb);

	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		uhci_remove_td_from_urb(td);
		uhci_remove_td(uhci, td);
		uhci_free_td(uhci, td);
	}

	urb->hcpriv = NULL;
	kmem_cache_free(uhci_up_cachep, urbp);
}
/*
 * Mark this URB as using Full Speed Bandwidth Reclamation and, if it is
 * the first FSBR user (with no pending cool-off), loop the terminating
 * QH back to the high-speed control queue to enable reclamation.
 */
static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	unsigned long flags;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
		urbp->fsbr = 1;
		if (!uhci->fsbr++ && !uhci->fsbrtimeout)
			uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_hs_control_qh->dma_handle) | UHCI_PTR_QH;
	}

	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
 * Drop this URB's FSBR usage.  When the last user goes away we don't
 * unlink immediately; instead a timeout is armed so the loop is torn
 * down later (avoids thrashing when transfers come in bursts).
 */
static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	unsigned long flags;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
		urbp->fsbr = 0;
		if (!--uhci->fsbr)
			uhci->fsbrtimeout = jiffies + FSBR_DELAY;
	}

	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
765 * Map status to standard result codes
767 * <status> is (td->status & 0xFE0000) [a.k.a. uhci_status_bits(td->status)]
768 * <dir_out> is True for output TDs and False for input TDs.
770 static int uhci_map_status(int status, int dir_out)
772 if (!status)
773 return 0;
774 if (status & TD_CTRL_BITSTUFF) /* Bitstuff error */
775 return -EPROTO;
776 if (status & TD_CTRL_CRCTIMEO) { /* CRC/Timeout */
777 if (dir_out)
778 return -ETIMEDOUT;
779 else
780 return -EILSEQ;
782 if (status & TD_CTRL_NAK) /* NAK */
783 return -ETIMEDOUT;
784 if (status & TD_CTRL_BABBLE) /* Babble */
785 return -EOVERFLOW;
786 if (status & TD_CTRL_DBUFERR) /* Buffer error */
787 return -ENOSR;
788 if (status & TD_CTRL_STALLED) /* Stalled */
789 return -EPIPE;
790 if (status & TD_CTRL_ACTIVE) /* Active */
791 return 0;
793 return -EINVAL;
/*
 * Control transfers
 *
 * Builds the SETUP TD, zero or more DATA TDs, and the final STATUS TD,
 * hangs them on a fresh QH and links it into the (low- or high-speed)
 * control skeleton queue.  Returns -EINPROGRESS on success.
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;
	struct uhci_qh *qh, *skelqh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors */
	status = TD_CTRL_ACTIVE | uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request
	 */
	td = uhci_alloc_td(uhci, urb->dev);
	if (!td)
		return -ENOMEM;

	/* Setup packets are always 8 bytes, hence explen(7) */
	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status, destination | uhci_explen(7),
		urb->setup_dma);

	/*
	 * If direction is "send", change the frame from SETUP (0x2D)
	 * to OUT (0xE1). Else change it from SETUP to IN (0x69).
	 */
	destination ^= (USB_PID_SETUP ^ usb_packetid(urb->pipe));

	if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TD's
	 */
	while (len > 0) {
		int pktsze = len;

		if (pktsze > maxsze)
			pktsze = maxsze;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
			data);

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci, urb->dev);
	if (!td)
		return -ENOMEM;

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;

	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	status &= ~TD_CTRL_SPD;

	/* Interrupt when the status stage completes */
	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status | TD_CTRL_IOC,
		destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);

	qh = uhci_alloc_qh(uhci, urb->dev);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	/* Low speed transfers get a different queue, and won't hog the bus */
	if (urb->dev->speed == USB_SPEED_LOW)
		skelqh = uhci->skel_ls_control_qh;
	else {
		skelqh = uhci->skel_hs_control_qh;
		uhci_inc_fsbr(uhci, urb);
	}

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
/*
 * If control was short, then end status packet wasn't sent, so this
 * reorganize s so it's sent to finish the transfer.  The original QH is
 * removed from the skel and discarded; all TDs except the last (status)
 * are deleted; the last (status) TD is put on a new QH which is reinserted
 * into the skel.  Since the last TD and urb_priv are reused, the TD->link
 * and urb_priv maintain any queued QHs.
 */
static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	urbp->short_control_packet = 1;

	/* Create a new QH to avoid pointer overwriting problems */
	uhci_remove_qh(uhci, urbp->qh);

	/* Delete all of the TD's except for the status TD at the end */
	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head && tmp->next != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		uhci_remove_td_from_urb(td);
		uhci_remove_td(uhci, td);
		uhci_free_td(uhci, td);
	}

	urbp->qh = uhci_alloc_qh(uhci, urb->dev);
	if (!urbp->qh) {
		err("unable to allocate new QH for control retrigger");
		return -ENOMEM;
	}

	urbp->qh->urbp = urbp;

	/* One TD, who cares about Breadth first? */
	uhci_insert_tds_in_qh(urbp->qh, urb, UHCI_PTR_DEPTH);

	/* Low speed transfers get a different queue */
	if (urb->dev->speed == USB_SPEED_LOW)
		uhci_insert_qh(uhci, uhci->skel_ls_control_qh, urb);
	else
		uhci_insert_qh(uhci, uhci->skel_hs_control_qh, urb);

	return -EINPROGRESS;
}
/*
 * Examine a control URB's TD chain: check the SETUP, DATA and STATUS
 * phases in order.  Returns -EINPROGRESS while any phase is still
 * active, 0 on success, or a mapped error code on failure.
 */
static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status;
	int ret = 0;

	if (list_empty(&urbp->td_list))
		return -EINVAL;

	head = &urbp->td_list;

	if (urbp->short_control_packet) {
		/* Transfer was retriggered; only the status TD remains */
		tmp = head->prev;
		goto status_phase;
	}

	tmp = head->next;
	td = list_entry(tmp, struct uhci_td, list);

	/* The first TD is the SETUP phase, check the status, but skip */
	/* the count */
	status = uhci_status_bits(td_status(td));
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	urb->actual_length = 0;

	/* The rest of the TD's (but the last) are data */
	tmp = tmp->next;
	while (tmp != head && tmp->next != head) {
		td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		status = uhci_status_bits(td_status(td));
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(td_status(td));

		if (status)
			goto td_error;

		/* Check to see if we received a short packet */
		if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			}

			/* Short read: skip to the status stage */
			if (uhci_packetid(td_token(td)) == USB_PID_IN)
				return usb_control_retrigger_status(uhci, urb);
			else
				return 0;
		}
	}

status_phase:
	td = list_entry(tmp, struct uhci_td, list);

	/* Control status phase */
	status = td_status(td);

#ifdef I_HAVE_BUGGY_APC_BACKUPS
	/* APC BackUPS Pro kludge */
	/* It tries to send all of the descriptor instead of the amount */
	/* we requested */
	if (status & TD_CTRL_IOC &&	/* IOC is masked out by uhci_status_bits */
	    status & TD_CTRL_ACTIVE &&
	    status & TD_CTRL_NAK)
		return 0;
#endif

	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (uhci_status_bits(status))
		goto td_error;

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dbg("uhci_result_control() failed with status %x", status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}

	return ret;
}
/*
 * Common submit for bulk and interrupt
 *
 * Builds the DATA TD chain (honoring URB_ZERO_PACKET for OUT pipes),
 * sets IOC on the last TD, allocates the QH and links it into @skelqh.
 * Returns -EINPROGRESS on success.
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
{
	struct uhci_td *td;
	struct uhci_qh *qh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	dma_addr_t data = urb->transfer_dma;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TD's
	 */
	do {	/* Allow zero length packets */
		int pktsze = len;

		if (pktsze > maxsze)
			pktsze = maxsze;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		/* NB: len is decremented by maxsze (not pktsze) so it goes
		 * negative unless the length is an exact multiple; the
		 * URB_ZERO_PACKET test below relies on this */
		data += pktsze;
		len -= maxsze;

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
	    !len && urb->transfer_buffer_length) {
		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	}

	/* Set the flag on the last packet */
	td->status |= cpu_to_le32(TD_CTRL_IOC);

	qh = uhci_alloc_qh(uhci, urb->dev);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	/* Always breadth first */
	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
/*
 * Common result for bulk and interrupt
 *
 * Walks the TD chain accumulating urb->actual_length.  Returns
 * -EINPROGRESS while TDs remain active, 0 on success (including an
 * acceptable short read), or a mapped error code on failure.
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status = 0;
	int ret = 0;

	urb->actual_length = 0;

	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		status = uhci_status_bits(td_status(td));
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(td_status(td));

		if (status)
			goto td_error;

		/* A short packet terminates the transfer */
		if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			} else
				return 0;
		}
	}

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));
	if (ret == -EPIPE)
		/* endpoint has stalled - mark it halted */
		usb_endpoint_halt(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)));

err:
	/*
	 * Enable this chunk of code if you want to see some more debugging.
	 * But be careful, it has the tendancy to starve out khubd and prevent
	 * disconnects from happening successfully if you have a slow debug
	 * log interface (like a serial console.
	 */
#if 0
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dbg("uhci_result_common() failed with status %x", status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}
#endif
	return ret;
}
1233 static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1235 int ret;
1237 /* Can't have low speed bulk transfers */
1238 if (urb->dev->speed == USB_SPEED_LOW)
1239 return -EINVAL;
1241 ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
1242 if (ret == -EINPROGRESS)
1243 uhci_inc_fsbr(uhci, urb);
1245 return ret;
1248 static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1250 /* USB 1.1 interrupt transfers only involve one packet per interval;
1251 * that's the uhci_submit_common() "breadth first" policy. Drivers
1252 * can submit urbs of any length, but longer ones might need many
1253 * intervals to complete.
1255 return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
1259 * Bulk and interrupt use common result
1261 #define uhci_result_bulk uhci_result_common
1262 #define uhci_result_interrupt uhci_result_common
1265 * Isochronous transfers
1267 static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
1269 struct urb *last_urb = NULL;
1270 struct list_head *tmp, *head;
1271 int ret = 0;
1273 head = &uhci->urb_list;
1274 tmp = head->next;
1275 while (tmp != head) {
1276 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1277 struct urb *u = up->urb;
1279 tmp = tmp->next;
1281 /* look for pending URB's with identical pipe handle */
1282 if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
1283 (u->status == -EINPROGRESS) && (u != urb)) {
1284 if (!last_urb)
1285 *start = u->start_frame;
1286 last_urb = u;
1290 if (last_urb) {
1291 *end = (last_urb->start_frame + last_urb->number_of_packets *
1292 last_urb->interval) & (UHCI_NUMFRAMES-1);
1293 ret = 0;
1294 } else
1295 ret = -1; /* no previous urb found */
1297 return ret;
1300 static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
1302 int limits;
1303 unsigned int start = 0, end = 0;
1305 if (urb->number_of_packets > 900) /* 900? Why? */
1306 return -EFBIG;
1308 limits = isochronous_find_limits(uhci, urb, &start, &end);
1310 if (urb->transfer_flags & URB_ISO_ASAP) {
1311 if (limits) {
1312 int curframe;
1314 curframe = uhci_get_current_frame_number(uhci) % UHCI_NUMFRAMES;
1315 urb->start_frame = (curframe + 10) % UHCI_NUMFRAMES;
1316 } else
1317 urb->start_frame = end;
1318 } else {
1319 urb->start_frame %= UHCI_NUMFRAMES;
1320 /* FIXME: Sanity check */
1323 return 0;
1327 * Isochronous transfers
1329 static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1331 struct uhci_td *td;
1332 int i, ret, frame;
1333 int status, destination;
1335 status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
1336 destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
1338 ret = isochronous_find_start(uhci, urb);
1339 if (ret)
1340 return ret;
1342 frame = urb->start_frame;
1343 for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
1344 if (!urb->iso_frame_desc[i].length)
1345 continue;
1347 td = uhci_alloc_td(uhci, urb->dev);
1348 if (!td)
1349 return -ENOMEM;
1351 uhci_add_td_to_urb(urb, td);
1352 uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
1353 urb->transfer_dma + urb->iso_frame_desc[i].offset);
1355 if (i + 1 >= urb->number_of_packets)
1356 td->status |= cpu_to_le32(TD_CTRL_IOC);
1358 uhci_insert_td_frame_list(uhci, td, frame);
1361 return -EINPROGRESS;
1364 static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1366 struct list_head *tmp, *head;
1367 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1368 int status;
1369 int i, ret = 0;
1371 urb->actual_length = 0;
1373 i = 0;
1374 head = &urbp->td_list;
1375 tmp = head->next;
1376 while (tmp != head) {
1377 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
1378 int actlength;
1380 tmp = tmp->next;
1382 if (td_status(td) & TD_CTRL_ACTIVE)
1383 return -EINPROGRESS;
1385 actlength = uhci_actual_length(td_status(td));
1386 urb->iso_frame_desc[i].actual_length = actlength;
1387 urb->actual_length += actlength;
1389 status = uhci_map_status(uhci_status_bits(td_status(td)), usb_pipeout(urb->pipe));
1390 urb->iso_frame_desc[i].status = status;
1391 if (status) {
1392 urb->error_count++;
1393 ret = status;
1396 i++;
1399 return ret;
1403 * MUST be called with uhci->urb_list_lock acquired
1405 static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
1407 struct list_head *tmp, *head;
1409 /* We don't match Isoc transfers since they are special */
1410 if (usb_pipeisoc(urb->pipe))
1411 return NULL;
1413 head = &uhci->urb_list;
1414 tmp = head->next;
1415 while (tmp != head) {
1416 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1417 struct urb *u = up->urb;
1419 tmp = tmp->next;
1421 if (u->dev == urb->dev && u->status == -EINPROGRESS) {
1422 /* For control, ignore the direction */
1423 if (usb_pipecontrol(urb->pipe) &&
1424 (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
1425 return u;
1426 else if (u->pipe == urb->pipe)
1427 return u;
1431 return NULL;
1434 static int uhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, int mem_flags)
1436 int ret = -EINVAL;
1437 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1438 unsigned long flags;
1439 struct urb *eurb;
1440 int bustime;
1442 spin_lock_irqsave(&uhci->urb_list_lock, flags);
1444 eurb = uhci_find_urb_ep(uhci, urb);
1446 if (!uhci_alloc_urb_priv(uhci, urb)) {
1447 spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
1448 return -ENOMEM;
1451 switch (usb_pipetype(urb->pipe)) {
1452 case PIPE_CONTROL:
1453 ret = uhci_submit_control(uhci, urb, eurb);
1454 break;
1455 case PIPE_INTERRUPT:
1456 if (!eurb) {
1457 bustime = usb_check_bandwidth(urb->dev, urb);
1458 if (bustime < 0)
1459 ret = bustime;
1460 else {
1461 ret = uhci_submit_interrupt(uhci, urb, eurb);
1462 if (ret == -EINPROGRESS)
1463 usb_claim_bandwidth(urb->dev, urb, bustime, 0);
1465 } else { /* inherit from parent */
1466 urb->bandwidth = eurb->bandwidth;
1467 ret = uhci_submit_interrupt(uhci, urb, eurb);
1469 break;
1470 case PIPE_BULK:
1471 ret = uhci_submit_bulk(uhci, urb, eurb);
1472 break;
1473 case PIPE_ISOCHRONOUS:
1474 bustime = usb_check_bandwidth(urb->dev, urb);
1475 if (bustime < 0) {
1476 ret = bustime;
1477 break;
1480 ret = uhci_submit_isochronous(uhci, urb);
1481 if (ret == -EINPROGRESS)
1482 usb_claim_bandwidth(urb->dev, urb, bustime, 1);
1483 break;
1486 if (ret != -EINPROGRESS) {
1487 /* Submit failed, so delete it from the urb_list */
1488 struct urb_priv *urbp = urb->hcpriv;
1490 list_del_init(&urbp->urb_list);
1491 spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
1492 uhci_destroy_urb_priv (uhci, urb);
1494 return ret;
1497 spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
1499 return 0;
1503 * Return the result of a transfer
1505 * MUST be called with urb_list_lock acquired
1507 static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
1509 int ret = -EINVAL;
1510 unsigned long flags;
1511 struct urb_priv *urbp;
1513 spin_lock_irqsave(&urb->lock, flags);
1515 urbp = (struct urb_priv *)urb->hcpriv;
1517 if (urb->status != -EINPROGRESS) {
1518 info("uhci_transfer_result: called for URB %p not in flight?", urb);
1519 goto out;
1522 switch (usb_pipetype(urb->pipe)) {
1523 case PIPE_CONTROL:
1524 ret = uhci_result_control(uhci, urb);
1525 break;
1526 case PIPE_INTERRUPT:
1527 ret = uhci_result_interrupt(uhci, urb);
1528 break;
1529 case PIPE_BULK:
1530 ret = uhci_result_bulk(uhci, urb);
1531 break;
1532 case PIPE_ISOCHRONOUS:
1533 ret = uhci_result_isochronous(uhci, urb);
1534 break;
1537 urbp->status = ret;
1539 if (ret == -EINPROGRESS)
1540 goto out;
1542 switch (usb_pipetype(urb->pipe)) {
1543 case PIPE_CONTROL:
1544 case PIPE_BULK:
1545 case PIPE_ISOCHRONOUS:
1546 /* Release bandwidth for Interrupt or Isoc. transfers */
1547 /* Spinlock needed ? */
1548 if (urb->bandwidth)
1549 usb_release_bandwidth(urb->dev, urb, 1);
1550 uhci_unlink_generic(uhci, urb);
1551 break;
1552 case PIPE_INTERRUPT:
1553 /* Release bandwidth for Interrupt or Isoc. transfers */
1554 /* Make sure we don't release if we have a queued URB */
1555 spin_lock(&uhci->frame_list_lock);
1556 /* Spinlock needed ? */
1557 if (list_empty(&urbp->queue_list) && urb->bandwidth)
1558 usb_release_bandwidth(urb->dev, urb, 0);
1559 else
1560 /* bandwidth was passed on to queued URB, */
1561 /* so don't let usb_unlink_urb() release it */
1562 urb->bandwidth = 0;
1563 spin_unlock(&uhci->frame_list_lock);
1564 uhci_unlink_generic(uhci, urb);
1565 break;
1566 default:
1567 info("uhci_transfer_result: unknown pipe type %d for urb %p\n",
1568 usb_pipetype(urb->pipe), urb);
1571 /* Remove it from uhci->urb_list */
1572 list_del_init(&urbp->urb_list);
1574 uhci_add_complete(uhci, urb);
1576 out:
1577 spin_unlock_irqrestore(&urb->lock, flags);
1581 * MUST be called with urb->lock acquired
1583 static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
1585 struct list_head *head, *tmp;
1586 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1587 int prevactive = 1;
1589 /* We can get called when urbp allocation fails, so check */
1590 if (!urbp)
1591 return;
1593 uhci_dec_fsbr(uhci, urb); /* Safe since it checks */
1596 * Now we need to find out what the last successful toggle was
1597 * so we can update the local data toggle for the next transfer
1599 * There's 3 way's the last successful completed TD is found:
1601 * 1) The TD is NOT active and the actual length < expected length
1602 * 2) The TD is NOT active and it's the last TD in the chain
1603 * 3) The TD is active and the previous TD is NOT active
1605 * Control and Isochronous ignore the toggle, so this is safe
1606 * for all types
1608 head = &urbp->td_list;
1609 tmp = head->next;
1610 while (tmp != head) {
1611 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
1613 tmp = tmp->next;
1615 if (!(td_status(td) & TD_CTRL_ACTIVE) &&
1616 (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td)) ||
1617 tmp == head))
1618 usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
1619 uhci_packetout(td_token(td)),
1620 uhci_toggle(td_token(td)) ^ 1);
1621 else if ((td_status(td) & TD_CTRL_ACTIVE) && !prevactive)
1622 usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
1623 uhci_packetout(td_token(td)),
1624 uhci_toggle(td_token(td)));
1626 prevactive = td_status(td) & TD_CTRL_ACTIVE;
1629 uhci_delete_queued_urb(uhci, urb);
1631 /* The interrupt loop will reclaim the QH's */
1632 uhci_remove_qh(uhci, urbp->qh);
1633 urbp->qh = NULL;
1636 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
1638 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1639 unsigned long flags;
1640 struct urb_priv *urbp = urb->hcpriv;
1642 /* If this is an interrupt URB that is being killed in urb->complete, */
1643 /* then just set its status and return */
1644 if (!urbp) {
1645 urb->status = -ECONNRESET;
1646 return 0;
1649 spin_lock_irqsave(&uhci->urb_list_lock, flags);
1651 list_del_init(&urbp->urb_list);
1653 uhci_unlink_generic(uhci, urb);
1655 spin_lock(&uhci->urb_remove_list_lock);
1657 /* If we're the first, set the next interrupt bit */
1658 if (list_empty(&uhci->urb_remove_list))
1659 uhci_set_next_interrupt(uhci);
1660 list_add(&urbp->urb_list, &uhci->urb_remove_list);
1662 spin_unlock(&uhci->urb_remove_list_lock);
1663 spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
1664 return 0;
1667 static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
1669 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1670 struct list_head *head, *tmp;
1671 int count = 0;
1673 uhci_dec_fsbr(uhci, urb);
1675 urbp->fsbr_timeout = 1;
1678 * Ideally we would want to fix qh->element as well, but it's
1679 * read/write by the HC, so that can introduce a race. It's not
1680 * really worth the hassle
1683 head = &urbp->td_list;
1684 tmp = head->next;
1685 while (tmp != head) {
1686 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
1688 tmp = tmp->next;
1691 * Make sure we don't do the last one (since it'll have the
1692 * TERM bit set) as well as we skip every so many TD's to
1693 * make sure it doesn't hog the bandwidth
1695 if (tmp != head && (count % DEPTH_INTERVAL) == (DEPTH_INTERVAL - 1))
1696 td->link |= UHCI_PTR_DEPTH;
1698 count++;
1701 return 0;
1705 * uhci_get_current_frame_number()
1707 * returns the current frame number for a USB bus/controller.
1709 static int uhci_get_current_frame_number(struct uhci_hcd *uhci)
1711 return inw(uhci->io_addr + USBFRNUM);
1714 static int init_stall_timer(struct usb_hcd *hcd);
1716 static void stall_callback(unsigned long ptr)
1718 struct usb_hcd *hcd = (struct usb_hcd *)ptr;
1719 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1720 struct list_head list, *tmp, *head;
1721 unsigned long flags;
1723 INIT_LIST_HEAD(&list);
1725 spin_lock_irqsave(&uhci->urb_list_lock, flags);
1726 head = &uhci->urb_list;
1727 tmp = head->next;
1728 while (tmp != head) {
1729 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1730 struct urb *u = up->urb;
1732 tmp = tmp->next;
1734 spin_lock(&u->lock);
1736 /* Check if the FSBR timed out */
1737 if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
1738 uhci_fsbr_timeout(uhci, u);
1740 /* Check if the URB timed out */
1741 if (u->timeout && time_after_eq(jiffies, up->inserttime + u->timeout))
1742 list_move_tail(&up->urb_list, &list);
1744 spin_unlock(&u->lock);
1746 spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
1748 head = &list;
1749 tmp = head->next;
1750 while (tmp != head) {
1751 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1752 struct urb *u = up->urb;
1754 tmp = tmp->next;
1756 uhci_urb_dequeue(hcd, u);
1759 /* Really disable FSBR */
1760 if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
1761 uhci->fsbrtimeout = 0;
1762 uhci->skel_term_qh->link = UHCI_PTR_TERM;
1765 /* Poll for and perform state transitions */
1766 hc_state_transitions(uhci);
1768 init_stall_timer(hcd);
1771 static int init_stall_timer(struct usb_hcd *hcd)
1773 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1775 init_timer(&uhci->stall_timer);
1776 uhci->stall_timer.function = stall_callback;
1777 uhci->stall_timer.data = (unsigned long)hcd;
1778 uhci->stall_timer.expires = jiffies + (HZ / 10);
1779 add_timer(&uhci->stall_timer);
1781 return 0;
1784 static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
1786 struct list_head *tmp, *head;
1787 unsigned long flags;
1789 spin_lock_irqsave(&uhci->qh_remove_list_lock, flags);
1790 head = &uhci->qh_remove_list;
1791 tmp = head->next;
1792 while (tmp != head) {
1793 struct uhci_qh *qh = list_entry(tmp, struct uhci_qh, remove_list);
1795 tmp = tmp->next;
1797 list_del_init(&qh->remove_list);
1799 uhci_free_qh(uhci, qh);
1801 spin_unlock_irqrestore(&uhci->qh_remove_list_lock, flags);
1804 static void uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
1806 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1807 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1808 int status;
1809 unsigned long flags;
1811 spin_lock_irqsave(&urb->lock, flags);
1812 status = urbp->status;
1813 uhci_destroy_urb_priv(uhci, urb);
1815 if (urb->status != -ENOENT && urb->status != -ECONNRESET)
1816 urb->status = status;
1817 spin_unlock_irqrestore(&urb->lock, flags);
1819 usb_hcd_giveback_urb(hcd, urb, regs);
1822 static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs)
1824 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1825 struct list_head *tmp, *head;
1826 unsigned long flags;
1828 spin_lock_irqsave(&uhci->complete_list_lock, flags);
1829 head = &uhci->complete_list;
1830 tmp = head->next;
1831 while (tmp != head) {
1832 struct urb_priv *urbp = list_entry(tmp, struct urb_priv, complete_list);
1833 struct urb *urb = urbp->urb;
1835 list_del_init(&urbp->complete_list);
1836 spin_unlock_irqrestore(&uhci->complete_list_lock, flags);
1838 uhci_finish_urb(hcd, urb, regs);
1840 spin_lock_irqsave(&uhci->complete_list_lock, flags);
1841 head = &uhci->complete_list;
1842 tmp = head->next;
1844 spin_unlock_irqrestore(&uhci->complete_list_lock, flags);
1847 static void uhci_remove_pending_qhs(struct uhci_hcd *uhci)
1849 struct list_head *tmp, *head;
1850 unsigned long flags;
1852 spin_lock_irqsave(&uhci->urb_remove_list_lock, flags);
1853 head = &uhci->urb_remove_list;
1854 tmp = head->next;
1855 while (tmp != head) {
1856 struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
1857 struct urb *urb = urbp->urb;
1859 tmp = tmp->next;
1861 list_del_init(&urbp->urb_list);
1863 urbp->status = urb->status = -ECONNRESET;
1865 uhci_add_complete(uhci, urb);
1867 spin_unlock_irqrestore(&uhci->urb_remove_list_lock, flags);
1870 static void uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs)
1872 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1873 unsigned int io_addr = uhci->io_addr;
1874 unsigned short status;
1875 struct list_head *tmp, *head;
1878 * Read the interrupt status, and write it back to clear the
1879 * interrupt cause
1881 status = inw(io_addr + USBSTS);
1882 if (!status) /* shared interrupt, not mine */
1883 return;
1884 outw(status, io_addr + USBSTS); /* Clear it */
1886 if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
1887 if (status & USBSTS_HSE)
1888 err("%x: host system error, PCI problems?", io_addr);
1889 if (status & USBSTS_HCPE)
1890 err("%x: host controller process error. something bad happened", io_addr);
1891 if ((status & USBSTS_HCH) && uhci->state > 0) {
1892 err("%x: host controller halted. very bad", io_addr);
1893 /* FIXME: Reset the controller, fix the offending TD */
1897 if (status & USBSTS_RD)
1898 uhci->resume_detect = 1;
1900 uhci_free_pending_qhs(uhci);
1902 uhci_remove_pending_qhs(uhci);
1904 uhci_clear_next_interrupt(uhci);
1906 /* Walk the list of pending URB's to see which ones completed */
1907 spin_lock(&uhci->urb_list_lock);
1908 head = &uhci->urb_list;
1909 tmp = head->next;
1910 while (tmp != head) {
1911 struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
1912 struct urb *urb = urbp->urb;
1914 tmp = tmp->next;
1916 /* Checks the status and does all of the magic necessary */
1917 uhci_transfer_result(uhci, urb);
1919 spin_unlock(&uhci->urb_list_lock);
1921 uhci_finish_completion(hcd, regs);
1924 static void reset_hc(struct uhci_hcd *uhci)
1926 unsigned int io_addr = uhci->io_addr;
1928 /* Global reset for 50ms */
1929 uhci->state = UHCI_RESET;
1930 outw(USBCMD_GRESET, io_addr + USBCMD);
1931 set_current_state(TASK_UNINTERRUPTIBLE);
1932 schedule_timeout((HZ*50+999) / 1000);
1933 set_current_state(TASK_RUNNING);
1934 outw(0, io_addr + USBCMD);
1936 /* Another 10ms delay */
1937 set_current_state(TASK_UNINTERRUPTIBLE);
1938 schedule_timeout((HZ*10+999) / 1000);
1939 set_current_state(TASK_RUNNING);
1940 uhci->resume_detect = 0;
1943 static void suspend_hc(struct uhci_hcd *uhci)
1945 unsigned int io_addr = uhci->io_addr;
1947 dbg("%x: suspend_hc", io_addr);
1948 uhci->state = UHCI_SUSPENDED;
1949 uhci->resume_detect = 0;
1950 outw(USBCMD_EGSM, io_addr + USBCMD);
1953 static void wakeup_hc(struct uhci_hcd *uhci)
1955 unsigned int io_addr = uhci->io_addr;
1957 switch (uhci->state) {
1958 case UHCI_SUSPENDED: /* Start the resume */
1959 dbg("%x: wakeup_hc", io_addr);
1961 /* Global resume for >= 20ms */
1962 outw(USBCMD_FGR | USBCMD_EGSM, io_addr + USBCMD);
1963 uhci->state = UHCI_RESUMING_1;
1964 uhci->state_end = jiffies + (20*HZ+999) / 1000;
1965 break;
1967 case UHCI_RESUMING_1: /* End global resume */
1968 uhci->state = UHCI_RESUMING_2;
1969 outw(0, io_addr + USBCMD);
1970 /* Falls through */
1972 case UHCI_RESUMING_2: /* Wait for EOP to be sent */
1973 if (inw(io_addr + USBCMD) & USBCMD_FGR)
1974 break;
1976 /* Run for at least 1 second, and
1977 * mark it configured with a 64-byte max packet */
1978 uhci->state = UHCI_RUNNING_GRACE;
1979 uhci->state_end = jiffies + HZ;
1980 outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP,
1981 io_addr + USBCMD);
1982 break;
1984 case UHCI_RUNNING_GRACE: /* Now allowed to suspend */
1985 uhci->state = UHCI_RUNNING;
1986 break;
1988 default:
1989 break;
1993 static int ports_active(struct uhci_hcd *uhci)
1995 unsigned int io_addr = uhci->io_addr;
1996 int connection = 0;
1997 int i;
1999 for (i = 0; i < uhci->rh_numports; i++)
2000 connection |= (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_CCS);
2002 return connection;
2005 static int suspend_allowed(struct uhci_hcd *uhci)
2007 unsigned int io_addr = uhci->io_addr;
2008 int i;
2010 if (!uhci->hcd.pdev ||
2011 uhci->hcd.pdev->vendor != PCI_VENDOR_ID_INTEL ||
2012 uhci->hcd.pdev->device != PCI_DEVICE_ID_INTEL_82371AB_2)
2013 return 1;
2015 /* This is a 82371AB/EB/MB USB controller which has a bug that
2016 * causes false resume indications if any port has an
2017 * over current condition. To prevent problems, we will not
2018 * allow a global suspend if any ports are OC.
2020 * Some motherboards using the 82371AB/EB/MB (but not the USB portion)
2021 * appear to hardwire the over current inputs active to disable
2022 * the USB ports.
2025 /* check for over current condition on any port */
2026 for (i = 0; i < uhci->rh_numports; i++) {
2027 if (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_OC)
2028 return 0;
2031 return 1;
2034 static void hc_state_transitions(struct uhci_hcd *uhci)
2036 switch (uhci->state) {
2037 case UHCI_RUNNING:
2039 /* global suspend if nothing connected for 1 second */
2040 if (!ports_active(uhci) && suspend_allowed(uhci)) {
2041 uhci->state = UHCI_SUSPENDING_GRACE;
2042 uhci->state_end = jiffies + HZ;
2044 break;
2046 case UHCI_SUSPENDING_GRACE:
2047 if (ports_active(uhci))
2048 uhci->state = UHCI_RUNNING;
2049 else if (time_after_eq(jiffies, uhci->state_end))
2050 suspend_hc(uhci);
2051 break;
2053 case UHCI_SUSPENDED:
2055 /* wakeup if requested by a device */
2056 if (uhci->resume_detect)
2057 wakeup_hc(uhci);
2058 break;
2060 case UHCI_RESUMING_1:
2061 case UHCI_RESUMING_2:
2062 case UHCI_RUNNING_GRACE:
2063 if (time_after_eq(jiffies, uhci->state_end))
2064 wakeup_hc(uhci);
2065 break;
2067 default:
2068 break;
2072 static void start_hc(struct uhci_hcd *uhci)
2074 unsigned int io_addr = uhci->io_addr;
2075 int timeout = 1000;
2078 * Reset the HC - this will force us to get a
2079 * new notification of any already connected
2080 * ports due to the virtual disconnect that it
2081 * implies.
2083 outw(USBCMD_HCRESET, io_addr + USBCMD);
2084 while (inw(io_addr + USBCMD) & USBCMD_HCRESET) {
2085 if (!--timeout) {
2086 printk(KERN_ERR "uhci: USBCMD_HCRESET timed out!\n");
2087 break;
2091 /* Turn on all interrupts */
2092 outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
2093 io_addr + USBINTR);
2095 /* Start at frame 0 */
2096 outw(0, io_addr + USBFRNUM);
2097 outl(uhci->fl->dma_handle, io_addr + USBFLBASEADD);
2099 /* Run and mark it configured with a 64-byte max packet */
2100 uhci->state = UHCI_RUNNING_GRACE;
2101 uhci->state_end = jiffies + HZ;
2102 outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);
2104 uhci->hcd.state = USB_STATE_READY;
2108 * De-allocate all resources..
2110 static void release_uhci(struct uhci_hcd *uhci)
2112 int i;
2114 for (i = 0; i < UHCI_NUM_SKELQH; i++)
2115 if (uhci->skelqh[i]) {
2116 uhci_free_qh(uhci, uhci->skelqh[i]);
2117 uhci->skelqh[i] = NULL;
2120 if (uhci->term_td) {
2121 uhci_free_td(uhci, uhci->term_td);
2122 uhci->term_td = NULL;
2125 if (uhci->qh_pool) {
2126 pci_pool_destroy(uhci->qh_pool);
2127 uhci->qh_pool = NULL;
2130 if (uhci->td_pool) {
2131 pci_pool_destroy(uhci->td_pool);
2132 uhci->td_pool = NULL;
2135 if (uhci->fl) {
2136 pci_free_consistent(uhci->hcd.pdev, sizeof(*uhci->fl), uhci->fl, uhci->fl->dma_handle);
2137 uhci->fl = NULL;
2140 #ifdef CONFIG_PROC_FS
2141 if (uhci->proc_entry) {
2142 remove_proc_entry(uhci->hcd.self.bus_name, uhci_proc_root);
2143 uhci->proc_entry = NULL;
2145 #endif
2149 * Allocate a frame list, and then setup the skeleton
2151 * The hardware doesn't really know any difference
2152 * in the queues, but the order does matter for the
2153 * protocols higher up. The order is:
2155 * - any isochronous events handled before any
2156 * of the queues. We don't do that here, because
2157 * we'll create the actual TD entries on demand.
2158 * - The first queue is the interrupt queue.
2159 * - The second queue is the control queue, split into low and high speed
2160 * - The third queue is bulk queue.
2161 * - The fourth queue is the bandwidth reclamation queue, which loops back
2162 * to the high speed control queue.
2164 static int __devinit uhci_start(struct usb_hcd *hcd)
2166 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2167 int retval = -EBUSY;
2168 int i, port;
2169 unsigned io_size;
2170 dma_addr_t dma_handle;
2171 struct usb_device *udev;
2172 #ifdef CONFIG_PROC_FS
2173 struct proc_dir_entry *ent;
2174 #endif
2176 uhci->io_addr = (unsigned long) hcd->regs;
2177 io_size = pci_resource_len(hcd->pdev, hcd->region);
2179 #ifdef CONFIG_PROC_FS
2180 ent = create_proc_entry(hcd->self.bus_name, S_IFREG|S_IRUGO|S_IWUSR, uhci_proc_root);
2181 if (!ent) {
2182 err("couldn't create uhci proc entry");
2183 retval = -ENOMEM;
2184 goto err_create_proc_entry;
2187 ent->data = uhci;
2188 ent->proc_fops = &uhci_proc_operations;
2189 ent->size = 0;
2190 uhci->proc_entry = ent;
2191 #endif
2193 /* Reset here so we don't get any interrupts from an old setup */
2194 /* or broken setup */
2195 reset_hc(uhci);
2197 uhci->fsbr = 0;
2198 uhci->fsbrtimeout = 0;
2200 spin_lock_init(&uhci->qh_remove_list_lock);
2201 INIT_LIST_HEAD(&uhci->qh_remove_list);
2203 spin_lock_init(&uhci->urb_remove_list_lock);
2204 INIT_LIST_HEAD(&uhci->urb_remove_list);
2206 spin_lock_init(&uhci->urb_list_lock);
2207 INIT_LIST_HEAD(&uhci->urb_list);
2209 spin_lock_init(&uhci->complete_list_lock);
2210 INIT_LIST_HEAD(&uhci->complete_list);
2212 spin_lock_init(&uhci->frame_list_lock);
2214 uhci->fl = pci_alloc_consistent(hcd->pdev, sizeof(*uhci->fl), &dma_handle);
2215 if (!uhci->fl) {
2216 err("unable to allocate consistent memory for frame list");
2217 goto err_alloc_fl;
2220 memset((void *)uhci->fl, 0, sizeof(*uhci->fl));
2222 uhci->fl->dma_handle = dma_handle;
2224 uhci->td_pool = pci_pool_create("uhci_td", hcd->pdev,
2225 sizeof(struct uhci_td), 16, 0);
2226 if (!uhci->td_pool) {
2227 err("unable to create td pci_pool");
2228 goto err_create_td_pool;
2231 uhci->qh_pool = pci_pool_create("uhci_qh", hcd->pdev,
2232 sizeof(struct uhci_qh), 16, 0);
2233 if (!uhci->qh_pool) {
2234 err("unable to create qh pci_pool");
2235 goto err_create_qh_pool;
2238 /* Initialize the root hub */
2240 /* UHCI specs says devices must have 2 ports, but goes on to say */
2241 /* they may have more but give no way to determine how many they */
2242 /* have. However, according to the UHCI spec, Bit 7 is always set */
2243 /* to 1. So we try to use this to our advantage */
2244 for (port = 0; port < (io_size - 0x10) / 2; port++) {
2245 unsigned int portstatus;
2247 portstatus = inw(uhci->io_addr + 0x10 + (port * 2));
2248 if (!(portstatus & 0x0080))
2249 break;
2251 if (debug)
2252 info("detected %d ports", port);
2254 /* This is experimental so anything less than 2 or greater than 8 is */
2255 /* something weird and we'll ignore it */
2256 if (port < 2 || port > 8) {
2257 info("port count misdetected? forcing to 2 ports");
2258 port = 2;
2261 uhci->rh_numports = port;
2263 hcd->self.root_hub = udev = usb_alloc_dev(NULL, &hcd->self);
2264 if (!udev) {
2265 err("unable to allocate root hub");
2266 goto err_alloc_root_hub;
2269 uhci->term_td = uhci_alloc_td(uhci, udev);
2270 if (!uhci->term_td) {
2271 err("unable to allocate terminating TD");
2272 goto err_alloc_term_td;
2275 for (i = 0; i < UHCI_NUM_SKELQH; i++) {
2276 uhci->skelqh[i] = uhci_alloc_qh(uhci, udev);
2277 if (!uhci->skelqh[i]) {
2278 err("unable to allocate QH %d", i);
2279 goto err_alloc_skelqh;
2284 * 8 Interrupt queues; link int2 to int1, int4 to int2, etc
2285 * then link int1 to control and control to bulk
2287 uhci->skel_int128_qh->link = cpu_to_le32(uhci->skel_int64_qh->dma_handle) | UHCI_PTR_QH;
2288 uhci->skel_int64_qh->link = cpu_to_le32(uhci->skel_int32_qh->dma_handle) | UHCI_PTR_QH;
2289 uhci->skel_int32_qh->link = cpu_to_le32(uhci->skel_int16_qh->dma_handle) | UHCI_PTR_QH;
2290 uhci->skel_int16_qh->link = cpu_to_le32(uhci->skel_int8_qh->dma_handle) | UHCI_PTR_QH;
2291 uhci->skel_int8_qh->link = cpu_to_le32(uhci->skel_int4_qh->dma_handle) | UHCI_PTR_QH;
2292 uhci->skel_int4_qh->link = cpu_to_le32(uhci->skel_int2_qh->dma_handle) | UHCI_PTR_QH;
2293 uhci->skel_int2_qh->link = cpu_to_le32(uhci->skel_int1_qh->dma_handle) | UHCI_PTR_QH;
2294 uhci->skel_int1_qh->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH;
2296 uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_hs_control_qh->dma_handle) | UHCI_PTR_QH;
2297 uhci->skel_hs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH;
2298 uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH;
2300 /* This dummy TD is to work around a bug in Intel PIIX controllers */
2301 uhci_fill_td(uhci->term_td, 0, (UHCI_NULL_DATA_SIZE << 21) |
2302 (0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
2303 uhci->term_td->link = cpu_to_le32(uhci->term_td->dma_handle);
2305 uhci->skel_term_qh->link = UHCI_PTR_TERM;
2306 uhci->skel_term_qh->element = cpu_to_le32(uhci->term_td->dma_handle);
2309 * Fill the frame list: make all entries point to
2310 * the proper interrupt queue.
2312 * This is probably silly, but it's a simple way to
2313 * scatter the interrupt queues in a way that gives
2314 * us a reasonable dynamic range for irq latencies.
2316 for (i = 0; i < UHCI_NUMFRAMES; i++) {
2317 int irq = 0;
2319 if (i & 1) {
2320 irq++;
2321 if (i & 2) {
2322 irq++;
2323 if (i & 4) {
2324 irq++;
2325 if (i & 8) {
2326 irq++;
2327 if (i & 16) {
2328 irq++;
2329 if (i & 32) {
2330 irq++;
2331 if (i & 64)
2332 irq++;
2340 /* Only place we don't use the frame list routines */
2341 uhci->fl->frame[i] = cpu_to_le32(uhci->skelqh[7 - irq]->dma_handle);
2344 start_hc(uhci);
2346 init_stall_timer(hcd);
2348 /* disable legacy emulation */
2349 pci_write_config_word(hcd->pdev, USBLEGSUP, USBLEGSUP_DEFAULT);
2351 usb_connect(udev);
2352 udev->speed = USB_SPEED_FULL;
2354 if (usb_register_root_hub(udev, &hcd->pdev->dev) != 0) {
2355 err("unable to start root hub");
2356 retval = -ENOMEM;
2357 goto err_start_root_hub;
2360 return 0;
2363 * error exits:
2365 err_start_root_hub:
2366 reset_hc(uhci);
2368 del_timer_sync(&uhci->stall_timer);
2370 err_alloc_skelqh:
2371 for (i = 0; i < UHCI_NUM_SKELQH; i++)
2372 if (uhci->skelqh[i]) {
2373 uhci_free_qh(uhci, uhci->skelqh[i]);
2374 uhci->skelqh[i] = NULL;
2377 uhci_free_td(uhci, uhci->term_td);
2378 uhci->term_td = NULL;
2380 err_alloc_term_td:
2381 usb_put_dev(udev);
2382 hcd->self.root_hub = NULL;
2384 err_alloc_root_hub:
2385 pci_pool_destroy(uhci->qh_pool);
2386 uhci->qh_pool = NULL;
2388 err_create_qh_pool:
2389 pci_pool_destroy(uhci->td_pool);
2390 uhci->td_pool = NULL;
2392 err_create_td_pool:
2393 pci_free_consistent(hcd->pdev, sizeof(*uhci->fl), uhci->fl, uhci->fl->dma_handle);
2394 uhci->fl = NULL;
2396 err_alloc_fl:
2397 #ifdef CONFIG_PROC_FS
2398 remove_proc_entry(hcd->self.bus_name, uhci_proc_root);
2399 uhci->proc_entry = NULL;
2401 err_create_proc_entry:
2402 #endif
2404 return retval;
/*
 * Shut the controller down and release all driver resources.
 *
 * Called by the HCD core after the root hub has been disconnected, so
 * no new URBs can arrive while we tear things down.
 */
static void uhci_stop(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	/* Stop the watchdog that monitors stalled transfers */
	del_timer_sync(&uhci->stall_timer);

	/*
	 * At this point, we're guaranteed that no new connects can be made
	 * to this bus since there are no more parents
	 */
	uhci_free_pending_qhs(uhci);
	uhci_remove_pending_qhs(uhci);

	reset_hc(uhci);

	/* The removals above may have queued more QHs for freeing;
	 * flush them now that the controller is halted. */
	uhci_free_pending_qhs(uhci);

	release_uhci(uhci);
}
2427 #ifdef CONFIG_PM
2428 static int uhci_suspend(struct usb_hcd *hcd, u32 state)
2430 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2432 /* Don't try to suspend broken motherboards, reset instead */
2433 if (suspend_allowed(uhci))
2434 suspend_hc(uhci);
2435 else
2436 reset_hc(uhci);
2437 return 0;
/*
 * Bring the controller back from a power-management suspend.
 *
 * If the hardware was cleanly suspended we only flag resume detection
 * and let the normal machinery restart it; otherwise (it was reset at
 * suspend time) we perform a full reset + start sequence.
 */
static int uhci_resume(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	/* Bus mastering may have been disabled across the suspend */
	pci_set_master(uhci->hcd.pdev);

	if (uhci->state == UHCI_SUSPENDED)
		uhci->resume_detect = 1;
	else {
		reset_hc(uhci);
		start_hc(uhci);
	}
	uhci->hcd.state = USB_STATE_READY;
	return 0;
}
#endif
2457 static struct usb_hcd *uhci_hcd_alloc(void)
2459 struct uhci_hcd *uhci;
2461 uhci = (struct uhci_hcd *)kmalloc(sizeof(*uhci), GFP_KERNEL);
2462 if (!uhci)
2463 return NULL;
2465 memset(uhci, 0, sizeof(*uhci));
2466 return &uhci->hcd;
/*
 * Release the storage for a host controller previously obtained from
 * uhci_hcd_alloc().  The usb_hcd is embedded in the uhci_hcd, so
 * freeing the container frees both.
 */
static void uhci_hcd_free(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	kfree(uhci);
}
/*
 * hc_driver glue: report the controller's current frame number to the
 * USB core.
 */
static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	return uhci_get_current_frame_number(uhci);
}
/* Driver name shared by the hc_driver description and the pci_driver */
static const char hcd_name[] = "uhci-hcd";

/*
 * Method table handed to the generic HCD layer; it dispatches all
 * controller operations through these entry points.
 */
static const struct hc_driver uhci_driver = {
	.description =		hcd_name,

	/* Generic hardware linkage */
	.irq =			uhci_irq,
	.flags =		HCD_USB11,

	/* Basic lifecycle operations */
	.start =		uhci_start,
#ifdef CONFIG_PM
	.suspend =		uhci_suspend,
	.resume =		uhci_resume,
#endif
	.stop =			uhci_stop,

	.hcd_alloc =		uhci_hcd_alloc,
	.hcd_free =		uhci_hcd_free,

	.urb_enqueue =		uhci_urb_enqueue,
	.urb_dequeue =		uhci_urb_dequeue,

	.get_frame_number =	uhci_hcd_get_frame_number,

	.hub_status_data =	uhci_hub_status_data,
	.hub_control =		uhci_hub_control,
};
/*
 * PCI match table: bind to every device in the USB-UHCI programming
 * interface class, regardless of vendor/device IDs.
 */
static const struct pci_device_id __devinitdata uhci_pci_ids[] = { {

	/* handle any USB UHCI controller */
	.class = 	((PCI_CLASS_SERIAL_USB << 8) | 0x00),
	.class_mask = 	~0,
	.driver_data =	(unsigned long) &uhci_driver,

	/* no matter who makes it */
	.vendor =	PCI_ANY_ID,
	.device =	PCI_ANY_ID,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,

	}, { /* end: all zeroes */ }
};

/* Export the table so module tools can auto-load us for matching hardware */
MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
/*
 * PCI glue: probe/remove (and PM hooks) are delegated to the generic
 * usb_hcd_pci_* helpers, which in turn call into uhci_driver above.
 */
static struct pci_driver uhci_pci_driver = {
	/* cast drops const: the old pci_driver .name field is plain char * */
	.name =		(char *)hcd_name,
	.id_table =	uhci_pci_ids,

	.probe =	usb_hcd_pci_probe,
	.remove =	usb_hcd_pci_remove,

#ifdef CONFIG_PM
	.suspend =	usb_hcd_pci_suspend,
	.resume =	usb_hcd_pci_resume,
#endif	/* PM */
};
2539 static int __init uhci_hcd_init(void)
2541 int retval = -ENOMEM;
2543 info(DRIVER_DESC " " DRIVER_VERSION);
2545 if (usb_disabled())
2546 return -ENODEV;
2548 if (debug) {
2549 errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
2550 if (!errbuf)
2551 goto errbuf_failed;
2554 #ifdef CONFIG_PROC_FS
2555 uhci_proc_root = create_proc_entry("driver/uhci", S_IFDIR, 0);
2556 if (!uhci_proc_root)
2557 goto proc_failed;
2558 #endif
2560 uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
2561 sizeof(struct urb_priv), 0, 0, NULL, NULL);
2562 if (!uhci_up_cachep)
2563 goto up_failed;
2565 retval = pci_module_init(&uhci_pci_driver);
2566 if (retval)
2567 goto init_failed;
2569 return 0;
2571 init_failed:
2572 if (kmem_cache_destroy(uhci_up_cachep))
2573 printk(KERN_INFO "uhci: not all urb_priv's were freed\n");
2575 up_failed:
2577 #ifdef CONFIG_PROC_FS
2578 remove_proc_entry("driver/uhci", 0);
2580 proc_failed:
2581 #endif
2582 if (errbuf)
2583 kfree(errbuf);
2585 errbuf_failed:
2587 return retval;
2590 static void __exit uhci_hcd_cleanup(void)
2592 pci_unregister_driver(&uhci_pci_driver);
2594 if (kmem_cache_destroy(uhci_up_cachep))
2595 printk(KERN_INFO "uhci: not all urb_priv's were freed\n");
2597 #ifdef CONFIG_PROC_FS
2598 remove_proc_entry("driver/uhci", 0);
2599 #endif
2601 if (errbuf)
2602 kfree(errbuf);
/* Hook module load/unload into the init/cleanup routines above */
module_init(uhci_hcd_init);
module_exit(uhci_hcd_cleanup);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");