tomato.git: release/src-rt/linux/linux-2.6/drivers/usb/host/ehci-hcd.c
1 /*
2  * Copyright (c) 2000-2004 by David Brownell
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License as published by the
6  * Free Software Foundation; either version 2 of the License, or (at your
7  * option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11  * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17  */
19 #include <linux/module.h>
20 #include <linux/pci.h>
21 #include <linux/dmapool.h>
22 #include <linux/kernel.h>
23 #include <linux/delay.h>
24 #include <linux/ioport.h>
25 #include <linux/sched.h>
26 #include <linux/slab.h>
27 #include <linux/vmalloc.h>
28 #include <linux/errno.h>
29 #include <linux/init.h>
30 #include <linux/timer.h>
31 #include <linux/ktime.h>
32 #include <linux/list.h>
33 #include <linux/interrupt.h>
34 #include <linux/reboot.h>
35 #include <linux/usb.h>
36 #include <linux/moduleparam.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/debugfs.h>
40 #include "../core/hcd.h"
42 #include <asm/byteorder.h>
43 #include <asm/io.h>
44 #include <asm/irq.h>
45 #include <asm/system.h>
46 #include <asm/unaligned.h>
48 /*-------------------------------------------------------------------------*/
50 /*
51  * EHCI hc_driver implementation ... experimental, incomplete.
52  * Based on the final 1.0 register interface specification.
53  *
54  * USB 2.0 shows up in upcoming www.pcmcia.org technology.
55  * First was PCMCIA, like ISA; then CardBus, which is PCI.
56  * Next comes "CardBay", using USB 2.0 signals.
57  *
58  * Contains additional contributions by Brad Hards, Rory Bolt, and others.
59  * Special thanks to Intel and VIA for providing host controllers to
60  * test this driver on, and Cypress (including In-System Design) for
61  * providing early devices for those host controllers to talk to!
62  */
64 #define DRIVER_AUTHOR "David Brownell"
65 #define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
67 static const char       hcd_name [] = "ehci_hcd";
70 #undef VERBOSE_DEBUG
71 #undef EHCI_URB_TRACE
73 #ifdef DEBUG
74 #define EHCI_STATS
75 #endif
77 /* magic numbers that can affect system performance */
78 #define EHCI_TUNE_CERR          3       /* 0-3 qtd retries; 0 == don't stop */
79 #define EHCI_TUNE_RL_HS         4       /* nak throttle; see 4.9 */
80 #define EHCI_TUNE_RL_TT         0
81 #define EHCI_TUNE_MULT_HS       1       /* 1-3 transactions/uframe; 4.10.3 */
82 #define EHCI_TUNE_MULT_TT       1
83 /*
84  * Some drivers think it's safe to schedule isochronous transfers more than
85  * 256 ms into the future (partly as a result of an old bug in the scheduling
86  * code).  In an attempt to avoid trouble, we will use a minimum scheduling
87  * length of 512 frames instead of 256.
88  */
89 #define EHCI_TUNE_FLS           1       /* (medium) 512-frame schedule */
91 #define EHCI_IAA_MSECS          10              /* arbitrary */
92 #define EHCI_IO_JIFFIES         (HZ/10)         /* io watchdog > irq_thresh */
93 #define EHCI_ASYNC_JIFFIES      (HZ/20)         /* async idle timeout */
94 #define EHCI_SHRINK_FRAMES      5               /* async qh unlink delay */
96 /* Initial IRQ latency:  faster than hw default */
97 static int log2_irq_thresh = 0;         // 0 to 6
98 module_param (log2_irq_thresh, int, S_IRUGO);
99 MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
101 /* initial park setting:  slower than hw default */
102 static unsigned park = 0;
103 module_param (park, uint, S_IRUGO);
104 MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
106 /* for flakey hardware, ignore overcurrent indicators */
107 static int ignore_oc = 0;
108 module_param (ignore_oc, bool, S_IRUGO);
109 MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
111 #define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
113 /*-------------------------------------------------------------------------*/
115 #include "ehci.h"
116 #include "ehci-dbg.c"
118 /*-------------------------------------------------------------------------*/
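119 /* Request a deferred watchdog action; a pending async-shrink/async-off timeout is never replaced by the (longer) I/O watchdog request. */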
120 static void
121 timer_action(struct ehci_hcd *ehci, enum ehci_timer_action action)
122 {
123         /* Don't override timeouts which shrink or (later) disable
124          * the async ring; just the I/O watchdog.  Note that if a
125          * SHRINK were pending, OFF would never be requested.
126          */
127         if (timer_pending(&ehci->watchdog)
128                         && ((BIT(TIMER_ASYNC_SHRINK) | BIT(TIMER_ASYNC_OFF))
129                                 & ehci->actions))
130                 return;
132         if (!test_and_set_bit(action, &ehci->actions)) {
133                 unsigned long t;
135                 switch (action) {
136                 case TIMER_IO_WATCHDOG:
137                         if (!ehci->need_io_watchdog)
138                                 return;
139                         t = EHCI_IO_JIFFIES;
140                         break;
141                 case TIMER_ASYNC_OFF:
142                         t = EHCI_ASYNC_JIFFIES;
143                         break;
144                 /* case TIMER_ASYNC_SHRINK: */
145                 default:
146                         /* add a jiffie since we synch against the
147                          * 8 KHz uframe counter.
148                          */
149                         t = DIV_ROUND_UP(EHCI_SHRINK_FRAMES * HZ, 1000) + 1;
150                         break;
151                 }
152                 mod_timer(&ehci->watchdog, t + jiffies);
153         }
154 }
156 /*-------------------------------------------------------------------------*/
158 /*
159  * handshake - spin reading hc until handshake completes or fails
160  * @ptr: address of hc register to be read
161  * @mask: bits to look at in result of read
162  * @done: value of those bits when handshake succeeds
163  * @usec: timeout in microseconds
165  * Returns negative errno, or zero on success
167  * Success happens when the "mask" bits have the specified value (hardware
168  * handshake done).  There are two failure modes:  "usec" have passed (major
169  * hardware flakeout), or the register reads as all-ones (hardware removed).
171  * That last failure should only happen in cases like physical cardbus eject
172  * before driver shutdown. But it also seems to be caused by bugs in cardbus
173  * bridge shutdown:  shutting down the bridge before the devices using it.
174  */
175 static int handshake (struct ehci_hcd *ehci, void __iomem *ptr,
176                       u32 mask, u32 done, int usec)
177 {
178         u32     result;
180         do {
181                 result = ehci_readl(ehci, ptr);
182                 if (result == ~(u32)0)          /* card removed */
183                         return -ENODEV;
184                 result &= mask;
185                 if (result == done)
186                         return 0;
187                 udelay (1);
188                 usec--;
189         } while (usec > 0);
190         return -ETIMEDOUT;
191 }
193 /* force HC to halt state from unknown (EHCI spec section 2.3) */
194 static int ehci_halt (struct ehci_hcd *ehci)
195 {
196         u32     temp = ehci_readl(ehci, &ehci->regs->status);
198         /* disable any irqs left enabled by previous code */
199         ehci_writel(ehci, 0, &ehci->regs->intr_enable);
201         if ((temp & STS_HALT) != 0)
202                 return 0;
204         temp = ehci_readl(ehci, &ehci->regs->command);
205         temp &= ~CMD_RUN;
206         ehci_writel(ehci, temp, &ehci->regs->command);
207         return handshake (ehci, &ehci->regs->status,
208                           STS_HALT, STS_HALT, 16 * 125);
209 }
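210 /* Like handshake(), but on failure force the controller to halt, mark the hcd state as halted, and log the failed handshake. */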
211 static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr,
212                                        u32 mask, u32 done, int usec)
213 {
214         int error;
216         error = handshake(ehci, ptr, mask, done, usec);
217         if (error) {
218                 ehci_halt(ehci);
219                 ehci_to_hcd(ehci)->state = HC_STATE_HALT;
220                 ehci_err(ehci, "force halt; handshake %p %08x %08x -> %d\n",
221                         ptr, mask, done, error);
222         }
224         return error;
225 }
227 /* put TDI/ARC silicon into EHCI mode */
228 static void tdi_reset (struct ehci_hcd *ehci)
229 {
230         u32 __iomem     *reg_ptr;
231         u32             tmp;
233         reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + USBMODE);
234         tmp = ehci_readl(ehci, reg_ptr);
235         tmp |= USBMODE_CM_HC;
236         /* The default byte access to MMR space is LE after
237          * controller reset. Set the required endian mode
238          * for transfer buffers to match the host microprocessor
239          */
240         if (ehci_big_endian_mmio(ehci))
241                 tmp |= USBMODE_BE;
242         ehci_writel(ehci, tmp, reg_ptr);
243 }
245 /* reset a non-running (STS_HALT == 1) controller */
246 static int ehci_reset (struct ehci_hcd *ehci)
247 {
248         int     retval;
249         u32     command = ehci_readl(ehci, &ehci->regs->command);
251         command |= CMD_RESET;
252         dbg_cmd (ehci, "reset", command);
253         ehci_writel(ehci, command, &ehci->regs->command);
254         ehci_to_hcd(ehci)->state = HC_STATE_HALT;
255         ehci->next_statechange = jiffies;
256         retval = handshake (ehci, &ehci->regs->command,
257                             CMD_RESET, 0, 250 * 1000);
259         if (retval)
260                 return retval;
262         if (ehci_is_TDI(ehci))
263                 tdi_reset (ehci);
265         return retval;
266 }
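267 /* Vendor "EHCI Fastpath" helper: return the index (0-2) of @qh in the hardware-accelerated pipe table, or -1 if this qh is not an optimized pipe. */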
268 static int ehci_optimized(struct ehci_hcd *ehci, struct ehci_qh *qh)
269 {
270         int p;
271         for (p = 0; p < 3; ++p)
272                 if (ehci->ehci_pipes[p] == qh)
273                         return p;
274         return -1;
275 }
277 /* idle the controller (from running) */
278 static void ehci_quiesce (struct ehci_hcd *ehci)
279 {
280         u32     temp;
282 #ifdef DEBUG
283         if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
284                 BUG ();
285 #endif
287         /* wait for any schedule enables/disables to take effect */
288         temp = ehci_readl(ehci, &ehci->regs->command) << 10;
289         temp &= STS_ASS | STS_PSS;
290         if (handshake_on_error_set_halt(ehci, &ehci->regs->status,
291                                         STS_ASS | STS_PSS, temp, 16 * 125))
292                 return;
294         /* then disable anything that's still active */
295         temp = ehci_readl(ehci, &ehci->regs->command);
296         temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
297         ehci_writel(ehci, temp, &ehci->regs->command);
299         /* hardware can take 16 microframes to turn off ... */
300         handshake_on_error_set_halt(ehci, &ehci->regs->status,
301                                     STS_ASS | STS_PSS, 0, 16 * 125);
302 }
304 /*-------------------------------------------------------------------------*/
306 static void end_unlink_async(struct ehci_hcd *ehci);
307 static void ehci_work(struct ehci_hcd *ehci);
309 #include "ehci-hub.c"
310 #include "ehci-mem.c"
311 #include "ehci-q.c"
312 #include "ehci-sched.c"
314 /*-------------------------------------------------------------------------*/
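315 /* Module parameters for the vendor qtd-cache ("Fastpath") extension: each of the two caches is bound to a device VID/PID and endpoint, holds up to the given number of qtds, and is flushed after the given timeout. */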
316 static int qtdc_pid = 0;
317 module_param (qtdc_pid, int, S_IRUGO);
318 MODULE_PARM_DESC (qtdc_pid, "qtd cache device pid");
320 static int qtdc_vid = 0;
321 module_param (qtdc_vid, int, S_IRUGO);
322 MODULE_PARM_DESC (qtdc_vid, "qtd cache device vid");
324 static int qtdc0_ep = -1;       /* turn off qtd cache by default */
325 module_param (qtdc0_ep, int, S_IRUGO);
326 MODULE_PARM_DESC (qtdc0_ep, "qtd cache 0 endpoint");
328 static int qtdc0_sz = 0;        /* turn off qtd cache by default */
329 module_param (qtdc0_sz, int, S_IRUGO);
330 MODULE_PARM_DESC (qtdc0_sz, "qtd cache 0 size (# of qtd's)");
332 static int qtdc0_to = 1;
333 module_param (qtdc0_to, int, S_IRUGO);
334 MODULE_PARM_DESC (qtdc0_to, "qtd cache 0 timeout (ms)");
336 static int qtdc1_ep = -1;       /* turn off qtd cache by default */
337 module_param (qtdc1_ep, int, S_IRUGO);
338 MODULE_PARM_DESC (qtdc1_ep, "qtd cache 1 endpoint");
340 static int qtdc1_sz = 0;        /* turn off qtd cache by default */
341 module_param (qtdc1_sz, int, S_IRUGO);
342 MODULE_PARM_DESC (qtdc1_sz, "qtd cache 1 size (# of qtd's)");
344 static int qtdc1_to = 1;
345 module_param (qtdc1_to, int, S_IRUGO);
346 MODULE_PARM_DESC (qtdc1_to, "qtd cache 1 timeout (ms)");
348 static int qtdc0_ml = QTDC_MSG_ERR;
349 module_param (qtdc0_ml, int, S_IRUGO);
350 MODULE_PARM_DESC (qtdc0_ml, "qtd cache 0 msglevel");
352 static int qtdc1_ml = QTDC_MSG_ERR;
353 module_param (qtdc1_ml, int, S_IRUGO);
354 MODULE_PARM_DESC (qtdc1_ml, "qtd cache 1 msglevel");
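355 /* When EHCI_QTD_CACHE is enabled, qtds for the configured device are batched in a per-endpoint cache instead of being queued one at a time; the watchdog below releases anything still cached once the cache timeout expires. */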
356 #ifdef EHCI_QTD_CACHE
357 static void ehci_qtdc_watchdog (unsigned long param)
358 {
359         ehci_qtdc_t             *qtdc = (ehci_qtdc_t*) param;
360         struct ehci_hcd         *ehci = (struct ehci_hcd *)(qtdc->ehci);
361         unsigned long           flags, flags2;
362         struct ehci_qtd         *qtd;
363         struct urb              *urb = NULL;
364         //struct hcd_dev                *dev;
365         int                     epnum;
366         struct usb_host_endpoint        *ep;
367         struct list_head        *entry;
368         struct ehci_qh          *qh = 0;
370         spin_lock_irqsave (&ehci->lock, flags);
372         if (unlikely (qtdc->cnt <= 0))
373                 goto done;
375 #ifdef EHCI_QTDC_DEBUG
376         if (unlikely (list_empty(&qtdc->cache)))
377                 QTDC_ERR(qtdc, ("cnt %d but cache empty\n", qtdc->cnt));
378 #endif  /* EHCI_QTDC_DEBUG */
380         QTDC_TRACE(qtdc, ("watchdog release! cnt %d\n", qtdc->cnt));
381         list_for_each (entry, &qtdc->cache) {
382                 qtd = list_entry (entry, struct ehci_qtd, qtd_list);
383                 urb = qtd->urb;
384                 spin_lock_irqsave (&urb->lock, flags2);
385                 urb->transfer_flags &= ~URB_QTD_CACHED;
386                 spin_unlock_irqrestore (&urb->lock, flags2);
387         }
388 //      dev = (struct hcd_dev *)urb->dev->hcpriv;
389         epnum = usb_pipeendpoint (urb->pipe);
390         if (usb_pipeout(urb->pipe)) {
391                 WARN_ON(usb_pipein(urb->pipe));
392                 ep = urb->dev->ep_out[epnum];
393         } else {
394                 WARN_ON(usb_pipeout(urb->pipe));
395                 ep = urb->dev->ep_in[epnum];
396         }
397         if (usb_pipein (urb->pipe) && !usb_pipecontrol (urb->pipe))
398                 epnum |= 0x10;
400 #ifdef EHCI_QTDC_DEBUG
401         qtdc->timeout_qtd += qtdc->cnt;
402         if (qtdc->cnt > qtdc->timeout_qtd_max)
403                 qtdc->timeout_qtd_max = qtdc->cnt;
404         qtdc->timeout_cnt++;
405         if ((jiffies - qtdc->last_printed) > (10 * HZ)) {
406                 QTDC_STATS(qtdc, ("cached_qtd %lu\nrelease_qtd %lu release_cnt %lu\n"
407                                 "timeout_qtd %lu  timeout_qtd_max %lu timeout_cnt %lu avg_timeout_qtd %lu\n",
408                                 qtdc->cached_qtd, qtdc->release_qtd, qtdc->release_cnt,
409                                 qtdc->timeout_qtd, qtdc->timeout_qtd_max, qtdc->timeout_cnt,
410                                 (qtdc->timeout_qtd / qtdc->timeout_cnt)));
411                 qtdc->last_printed = jiffies;
412         }
413 #endif  /* EHCI_QTDC_DEBUG */
414         qtdc->cnt = 0;
416         qh = qh_append_tds (ehci, urb, &qtdc->cache, epnum, &ep->hcpriv);
417         /* Control/bulk operations through TTs don't need scheduling,
418          * the HC and TT handle it when the TT has a buffer ready.
419          */
420         if (likely (qh != 0)) {
421                 if (likely (qh->qh_state == QH_STATE_IDLE))
422                         qh_link_async (ehci, qh_get (qh));
423         }
425         /* clean up qtd cache */
426         INIT_LIST_HEAD(&qtdc->cache);
428 done:
429         spin_unlock_irqrestore (&ehci->lock, flags);
430         return;
431 }
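432 /* Allocate and set up one qtd cache: validate the module parameters, record the target VID/PID on the hcd, and initialise the release watchdog. Returns NULL (cache disabled) if any parameter is invalid or allocation fails. */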
433 ehci_qtdc_t *ehci_qtdc_init(struct ehci_hcd *ehci, int vid, int pid, int num, int ep, int size, int timeout, unsigned int msglevel)
434 {
435         ehci_qtdc_t     *qtdc;
436         
437         if (pid == 0 || vid == 0) {
438                 ehci_err (ehci, "pid %x vid %x not valid\n", pid, vid);
439                 return NULL;
440         }
442         if (num >= NUM_QTD_CACHE) {
443                 ehci_err (ehci, "qtdc %d exceeding limit %d\n", num, NUM_QTD_CACHE);
444                 return NULL;
445         }
447         if ((ep < 0) || (ep > 0x1f)) {
448                 ehci_err (ehci, "qtdc %d disabled: invalid ep 0x%x\n", num, ep);
449                 return NULL;
450         }
452         if (size <= 0) {
453                 ehci_err (ehci, "qtdc %d disabled: invalid size %d\n", num, size);
454                 return NULL;
455         }
457         if (timeout <= 0) {
458                 ehci_err (ehci, "qtdc %d disabled: invalid timeout %d\n", num, timeout);
459                 return NULL;
460         }
462         qtdc = kmalloc(sizeof(ehci_qtdc_t), GFP_KERNEL);
463         if (!qtdc) {
464                 ehci_err (ehci, "qtdc %d disabled: alloc failed\n", num);
465                 return NULL;
466         }
468         memset(qtdc, 0, sizeof(ehci_qtdc_t));
470         ehci->qtdc_vid = vid;
471         ehci->qtdc_pid = pid;
472         qtdc->ehci = (void*)ehci;
473         qtdc->num = num;
474         qtdc->ep = ep;
475         qtdc->size = size;
476         qtdc->timeout = (timeout * HZ) / 1000;  /* in ms */
477 #ifdef EHCI_QTDC_DEBUG
478         qtdc->msglevel = msglevel;
479 #endif  /* EHCI_QTDC_DEBUG */
481         INIT_LIST_HEAD(&qtdc->cache); 
483         init_timer (&qtdc->watchdog);
484         qtdc->watchdog.function = ehci_qtdc_watchdog;
485         qtdc->watchdog.data = (unsigned long) qtdc;
487         ehci_info (ehci, "qtdc %d enabled: vid %x pid %x ep 0x%x size %d timeout %d\n",
488                 num, vid, pid, ep, size, timeout);
490         return qtdc;
491 }
493 void ehci_qtdc_deinit(ehci_qtdc_t* qtdc)
494 {
495         if (list_empty(&qtdc->cache))
496                 BUG();
497         kfree(qtdc);
498 }
499 #endif  /* EHCI_QTD_CACHE */
504 /*-------------------------------------------------------------------------*/
506 static void ehci_iaa_watchdog(unsigned long param)
507 {
508         struct ehci_hcd         *ehci = (struct ehci_hcd *) param;
509         unsigned long           flags;
511         spin_lock_irqsave (&ehci->lock, flags);
513         /* Lost IAA irqs wedge things badly; seen first with a vt8235.
514          * So we need this watchdog, but must protect it against both
515          * (a) SMP races against real IAA firing and retriggering, and
516          * (b) clean HC shutdown, when IAA watchdog was pending.
517          */
518         if (ehci->reclaim
519                         && !timer_pending(&ehci->iaa_watchdog)
520                         && HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
521                 u32 cmd, status;
523                 /* If we get here, IAA is *REALLY* late.  It's barely
524                  * conceivable that the system is so busy that CMD_IAAD
525                  * is still legitimately set, so let's be sure it's
526                  * clear before we read STS_IAA.  (The HC should clear
527                  * CMD_IAAD when it sets STS_IAA.)
528                  */
529                 cmd = ehci_readl(ehci, &ehci->regs->command);
530                 if (cmd & CMD_IAAD)
531                         ehci_writel(ehci, cmd & ~CMD_IAAD,
532                                         &ehci->regs->command);
534                 /* If IAA is set here it either legitimately triggered
535                  * before we cleared IAAD above (but _way_ late, so we'll
536                  * still count it as lost) ... or a silicon erratum:
537                  * - VIA seems to set IAA without triggering the IRQ;
538                  * - IAAD potentially cleared without setting IAA.
539                  */
540                 status = ehci_readl(ehci, &ehci->regs->status);
541                 if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
542                         COUNT (ehci->stats.lost_iaa);
543                         ehci_writel(ehci, STS_IAA, &ehci->regs->status);
544                 }
546                 ehci_vdbg(ehci, "IAA watchdog: status %x cmd %x\n",
547                                 status, cmd);
548                 end_unlink_async(ehci);
549         }
551         spin_unlock_irqrestore(&ehci->lock, flags);
552 }
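553 /* I/O watchdog: shut the async schedule off once it has idled, and run ehci_work() so the driver makes progress even if IRQs are lost. */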
554 static void ehci_watchdog(unsigned long param)
555 {
556         struct ehci_hcd         *ehci = (struct ehci_hcd *) param;
557         unsigned long           flags;
559         spin_lock_irqsave(&ehci->lock, flags);
561         /* stop async processing after it's idled a bit */
562         if (test_bit (TIMER_ASYNC_OFF, &ehci->actions))
563                 start_unlink_async (ehci, ehci->async);
565         /* ehci could run by timer, without IRQs ... */
566         ehci_work (ehci);
568         spin_unlock_irqrestore (&ehci->lock, flags);
569 }
571 /* On some systems, leaving remote wakeup enabled prevents system shutdown.
572  * The firmware seems to think that powering off is a wakeup event!
573  * This routine turns off remote wakeup and everything else, on all ports.
574  */
575 static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
576 {
577         int     port = HCS_N_PORTS(ehci->hcs_params);
579         while (port--)
580                 ehci_writel(ehci, PORT_RWC_BITS,
581                                 &ehci->regs->port_status[port]);
582 }
584 /*
585  * Halt HC, turn off all ports, and let the BIOS use the companion controllers.
586  * Should be called with ehci->lock held.
587  */
588 static void ehci_silence_controller(struct ehci_hcd *ehci)
589 {
590         ehci_halt(ehci);
591         ehci_turn_off_all_ports(ehci);
593         /* make BIOS/etc use companion controller during reboot */
594         ehci_writel(ehci, 0, &ehci->regs->configured_flag);
596         /* unblock posted writes */
597         ehci_readl(ehci, &ehci->regs->configured_flag);
598 }
600 /* ehci_shutdown kick in for silicon on any bus (not just pci, etc).
601  * This forcibly disables dma and IRQs, helping kexec and other cases
602  * where the next system software may expect clean state.
603  */
604 static void ehci_shutdown(struct usb_hcd *hcd)
605 {
606         struct ehci_hcd *ehci = hcd_to_ehci(hcd);
608         del_timer_sync(&ehci->watchdog);
609         del_timer_sync(&ehci->iaa_watchdog);
611         spin_lock_irq(&ehci->lock);
612         ehci_silence_controller(ehci);
613         spin_unlock_irq(&ehci->lock);
614 }
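615 /* Power all root-hub ports up or down, when the controller supports per-port power control (HCS_PPC). */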
616 static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
617 {
618         unsigned port;
620         if (!HCS_PPC (ehci->hcs_params))
621                 return;
623         ehci_dbg (ehci, "...power%s ports...\n", is_on ? "up" : "down");
624         for (port = HCS_N_PORTS (ehci->hcs_params); port > 0; )
625                 (void) ehci_hub_control(ehci_to_hcd(ehci),
626                                 is_on ? SetPortFeature : ClearPortFeature,
627                                 USB_PORT_FEAT_POWER,
628                                 port--, NULL, 0);
629         /* Flush those writes */
630         ehci_readl(ehci, &ehci->regs->command);
631         msleep(20);
632 }
634 /*-------------------------------------------------------------------------*/
636 /*
637  * ehci_work is called from some interrupts, timers, and so on.
638  * it calls driver completion functions, after dropping ehci->lock.
639  */
640 static void ehci_work (struct ehci_hcd *ehci)
641 {
642         timer_action_done (ehci, TIMER_IO_WATCHDOG);
644         /* another CPU may drop ehci->lock during a schedule scan while
645          * it reports urb completions.  this flag guards against bogus
646          * attempts at re-entrant schedule scanning.
647          */
648         if (ehci->scanning)
649                 return;
650         ehci->scanning = 1;
651         scan_async (ehci);
652         if (ehci->next_uframe != -1)
653                 scan_periodic (ehci);
654         ehci->scanning = 0;
656         /* the IO watchdog guards against hardware or driver bugs that
657          * misplace IRQs, and should let us run completely without IRQs.
658          * such lossage has been observed on both VT6202 and VT8235.
659          */
660         if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state) &&
661                         (ehci->async->qh_next.ptr != NULL ||
662                          ehci->periodic_sched != 0))
663                 timer_action (ehci, TIMER_IO_WATCHDOG);
664 }
666 /*
667  * Called when the ehci_hcd module is removed.
668  */
669 static void ehci_stop (struct usb_hcd *hcd)
670 {
671         struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
673         ehci_dbg (ehci, "stop\n");
675         /* no more interrupts ... */
676         del_timer_sync (&ehci->watchdog);
677         del_timer_sync(&ehci->iaa_watchdog);
678 #ifdef EHCI_QTD_CACHE
679         del_timer_sync (&ehci->qtdc_watchdog);
680         ehci_qtdc_deinit (ehci->qtdc[0]);
681         ehci_qtdc_deinit (ehci->qtdc[1]);
682 #endif /* EHCI_QTD_CACHE */
683         spin_lock_irq(&ehci->lock);
684         if (HC_IS_RUNNING (hcd->state))
685                 ehci_quiesce (ehci);
687         ehci_silence_controller(ehci);
688         ehci_reset (ehci);
689         spin_unlock_irq(&ehci->lock);
691         remove_companion_file(ehci);
692         remove_debug_files (ehci);
694         /* root hub is shut down separately (first, when possible) */
695         spin_lock_irq (&ehci->lock);
696         if (ehci->async)
697                 ehci_work (ehci);
698         spin_unlock_irq (&ehci->lock);
699         ehci_mem_cleanup (ehci);
701 #ifdef  EHCI_STATS
702         ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
703                 ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
704                 ehci->stats.lost_iaa);
705         ehci_dbg (ehci, "complete %ld unlink %ld\n",
706                 ehci->stats.complete, ehci->stats.unlink);
707 #endif
709         dbg_status (ehci, "ehci_stop completed",
710                     ehci_readl(ehci, &ehci->regs->status));
711 }
713 /* one-time init, only for memory state */
714 static int ehci_init(struct usb_hcd *hcd)
715 {
716         struct ehci_hcd         *ehci = hcd_to_ehci(hcd);
717         u32                     temp;
718         int                     retval;
719         u32                     hcc_params;
720         struct ehci_qh_hw       *hw;
722         ehci_info(ehci, "EHCI Fastpath: New EHCI driver starting\n");
724         spin_lock_init(&ehci->lock);
726         /*
727          * keep io watchdog by default, those good HCDs could turn off it later
728          */
729         ehci->need_io_watchdog = 1;
730         init_timer(&ehci->watchdog);
731         ehci->watchdog.function = ehci_watchdog;
732         ehci->watchdog.data = (unsigned long) ehci;
734 #ifdef EHCI_QTD_CACHE
735         ehci->qtdc[0] = ehci_qtdc_init(ehci, qtdc_vid, qtdc_pid, 0, qtdc0_ep, qtdc0_sz, qtdc0_to, qtdc0_ml);
736         ehci->qtdc[1] = ehci_qtdc_init(ehci, qtdc_vid, qtdc_pid, 1, qtdc1_ep, qtdc1_sz, qtdc1_to, qtdc1_ml);
737 #endif  /* EHCI_QTD_CACHE */
741         init_timer(&ehci->iaa_watchdog);
742         ehci->iaa_watchdog.function = ehci_iaa_watchdog;
743         ehci->iaa_watchdog.data = (unsigned long) ehci;
745         /*
746          * hw default: 1K periodic list heads, one per frame.
747          * periodic_size can shrink by USBCMD update if hcc_params allows.
748          */
749         ehci->periodic_size = DEFAULT_I_TDPS;
750         INIT_LIST_HEAD(&ehci->cached_itd_list);
751         INIT_LIST_HEAD(&ehci->cached_sitd_list);
752         if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
753                 return retval;
755         /* controllers may cache some of the periodic schedule ... */
756         hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
757         if (HCC_ISOC_CACHE(hcc_params))         // full frame cache
758                 ehci->i_thresh = 2 + 8;
759         else                                    // N microframes cached
760                 ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
762         ehci->reclaim = NULL;
763         ehci->next_uframe = -1;
764         ehci->clock_frame = -1;
766         /*
767          * dedicate a qh for the async ring head, since we couldn't unlink
768          * a 'real' qh without stopping the async schedule [4.8].  use it
769          * as the 'reclamation list head' too.
770          * its dummy is used in hw_alt_next of many tds, to prevent the qh
771          * from automatically advancing to the next td after short reads.
772          */
773         ehci->async->qh_next.qh = NULL;
774         hw = ehci->async->hw;
775         hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
776         hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
777         hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
778         hw->hw_qtd_next = EHCI_LIST_END(ehci);
779         ehci->async->qh_state = QH_STATE_LINKED;
780         hw->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);
782         /* clear interrupt enables, set irq latency */
783         if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
784                 log2_irq_thresh = 0;
785         temp = 1 << (16 + log2_irq_thresh);
786         if (HCC_CANPARK(hcc_params)) {
787                 /* HW default park == 3, on hardware that supports it (like
788                  * NVidia and ALI silicon), maximizes throughput on the async
789                  * schedule by avoiding QH fetches between transfers.
790                  *
791                  * With fast usb storage devices and NForce2, "park" seems to
792                  * make problems:  throughput reduction (!), data errors...
793                  */
794                 if (park) {
795                         park = min(park, (unsigned) 3);
796                         temp |= CMD_PARK;
797                         temp |= park << 8;
798                 }
799                 ehci_dbg(ehci, "park %d\n", park);
800         }
801         if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
802                 /* periodic schedule size can be smaller than default */
803                 temp &= ~(3 << 2);
804                 temp |= (EHCI_TUNE_FLS << 2);
805                 switch (EHCI_TUNE_FLS) {
806                 case 0: ehci->periodic_size = 1024; break;
807                 case 1: ehci->periodic_size = 512; break;
808                 case 2: ehci->periodic_size = 256; break;
809                 default:        BUG();
810                 }
811         }
812         ehci->command = temp;
814         return 0;
815 }
817 /* start HC running; it's halted, ehci_init() has been run (once) */
818 static int ehci_run (struct usb_hcd *hcd)
819 {
820         struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
821         int                     retval;
822         u32                     temp;
823         u32                     hcc_params;
825         hcd->uses_new_polling = 1;
826         hcd->poll_rh = 0;
828         /* EHCI spec section 4.1 */
829         if ((retval = ehci_reset(ehci)) != 0) {
830                 ehci_mem_cleanup(ehci);
831                 return retval;
832         }
833         ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
834         ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
836         /*
837          * hcc_params controls whether ehci->regs->segment must (!!!)
838          * be used; it constrains QH/ITD/SITD and QTD locations.
839          * pci_pool consistent memory always uses segment zero.
840          * streaming mappings for I/O buffers, like pci_map_single(),
841          * can return segments above 4GB, if the device allows.
842          *
843          * NOTE:  the dma mask is visible through dma_supported(), so
844          * drivers can pass this info along ... like NETIF_F_HIGHDMA,
845          * Scsi_Host.highmem_io, and so forth.  It's readonly to all
846          * host side drivers though.
847          */
848         hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
849         if (HCC_64BIT_ADDR(hcc_params)) {
850                 ehci_writel(ehci, 0, &ehci->regs->segment);
851 #if 0
852 // this is deeply broken on almost all architectures
853                 if (!dma_set_mask(hcd->self.controller, DMA_64BIT_MASK))
854                         ehci_info(ehci, "enabled 64bit DMA\n");
855 #endif
856         }
859         // Philips, Intel, and maybe others need CMD_RUN before the
860         // root hub will detect new devices (why?); NEC doesn't
861         ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
862         ehci->command |= CMD_RUN;
863         ehci_writel(ehci, ehci->command, &ehci->regs->command);
864         dbg_cmd (ehci, "init", ehci->command);
866         /*
867          * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
868          * are explicitly handed to companion controller(s), so no TT is
869          * involved with the root hub.  (Except where one is integrated,
870          * and there's no companion controller unless maybe for USB OTG.)
871          *
872          * Turning on the CF flag will transfer ownership of all ports
873          * from the companions to the EHCI controller.  If any of the
874          * companions are in the middle of a port reset at the time, it
875          * could cause trouble.  Write-locking ehci_cf_port_reset_rwsem
876          * guarantees that no resets are in progress.  After we set CF,
877          * a short delay lets the hardware catch up; new resets shouldn't
878          * be started before the port switching actions could complete.
879          */
880         down_write(&ehci_cf_port_reset_rwsem);
881         hcd->state = HC_STATE_RUNNING;
882         ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
883         ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
884         msleep(5);
885         up_write(&ehci_cf_port_reset_rwsem);
886         ehci->last_periodic_enable = ktime_get_real();
888         temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
889         ehci_info (ehci,
890                 "USB %x.%x started, EHCI %x.%02x%s\n",
891                 ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
892                 temp >> 8, temp & 0xff,
893                 ignore_oc ? ", overcurrent ignored" : "");
895         ehci_writel(ehci, INTR_MASK,
896                     &ehci->regs->intr_enable); /* Turn On Interrupts */
898         /* GRR this is run-once init(), being done every time the HC starts.
899          * So long as they're part of class devices, we can't do it init()
900          * since the class device isn't created that early.
901          */
902         create_debug_files(ehci);
903         create_companion_file(ehci);
905         return 0;
906 }
908 /*-------------------------------------------------------------------------*/
910 static irqreturn_t ehci_irq (struct usb_hcd *hcd)
911 {
912         struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
913         u32                     status, masked_status, pcd_status = 0, cmd;
914         int                     bh;
916         spin_lock (&ehci->lock);
918         status = ehci_readl(ehci, &ehci->regs->status);
920         /* e.g. cardbus physical eject */
921         if (status == ~(u32) 0) {
922                 ehci_dbg (ehci, "device removed\n");
923                 goto dead;
924         }
926         masked_status = status & INTR_MASK;
927         if (!masked_status) {           /* irq sharing? */
928                 spin_unlock(&ehci->lock);
929                 return IRQ_NONE;
930         }
932         /* clear (just) interrupts */
933         ehci_writel(ehci, masked_status, &ehci->regs->status);
934         cmd = ehci_readl(ehci, &ehci->regs->command);
935         bh = 0;
937 #ifdef  VERBOSE_DEBUG
938         /* unrequested/ignored: Frame List Rollover */
939         dbg_status (ehci, "irq", status);
940 #endif
942         /* INT, ERR, and IAA interrupt rates can be throttled */
944         /* normal [4.15.1.2] or error [4.15.1.1] completion */
945         if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
946                 if (likely ((status & STS_ERR) == 0))
947                         COUNT (ehci->stats.normal);
948                 else
949                         COUNT (ehci->stats.error);
950                 bh = 1;
951         }
953         /* complete the unlinking of some qh [4.15.2.3] */
954         if (status & STS_IAA) {
955                 /* guard against (alleged) silicon errata */
956                 if (cmd & CMD_IAAD) {
957                         ehci_writel(ehci, cmd & ~CMD_IAAD,
958                                         &ehci->regs->command);
959                         ehci_dbg(ehci, "IAA with IAAD still set?\n");
960                 }
961                 if (ehci->reclaim) {
962                         COUNT(ehci->stats.reclaim);
963                         end_unlink_async(ehci);
964                 } else
965                         ehci_dbg(ehci, "IAA with nothing to reclaim?\n");
966         }
968         /* remote wakeup [4.3.1] */
969         if (status & STS_PCD) {
970                 unsigned        i = HCS_N_PORTS (ehci->hcs_params);
972                 /* kick root hub later */
973                 pcd_status = status;
975                 /* resume root hub? */
976                 if (!(cmd & CMD_RUN))
977                         usb_hcd_resume_root_hub(hcd);
979                 while (i--) {
980                         int pstatus = ehci_readl(ehci,
981                                                  &ehci->regs->port_status [i]);
983                         if (pstatus & PORT_OWNER)
984                                 continue;
985                         if (!(test_bit(i, &ehci->suspended_ports) &&
986                                         ((pstatus & PORT_RESUME) ||
987                                                 !(pstatus & PORT_SUSPEND)) &&
988                                         (pstatus & PORT_PE) &&
989                                         ehci->reset_done[i] == 0))
990                                 continue;
992                         /* start 20 msec resume signaling from this port,
993                          * and make khubd collect PORT_STAT_C_SUSPEND to
994                          * stop that signaling.  Use 5 ms extra for safety,
995                          * like usb_port_resume() does.
996                          */
997                         ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
998                         ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
999                         mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
1000                 }
1001         }
1003         /* PCI errors [4.15.2.4] */
1004         if (unlikely ((status & STS_FATAL) != 0)) {
1005                 ehci_err(ehci, "fatal error\n");
1006                 dbg_cmd(ehci, "fatal", cmd);
1007                 dbg_status(ehci, "fatal", status);
1008                 ehci_halt(ehci);
1009 dead:
1010                 ehci_reset(ehci);
1011                 ehci_writel(ehci, 0, &ehci->regs->configured_flag);
1012                 /* generic layer kills/unlinks all urbs, then
1013                  * uses ehci_stop to clean up the rest
1014                  */
1015                 bh = 1;
1016         }
1018         if (bh)
1019                 ehci_work (ehci);
1020         spin_unlock (&ehci->lock);
1021         if (pcd_status)
1022                 usb_hcd_poll_rh_status(hcd);
1023         return IRQ_HANDLED;
1024 }
1026 /*-------------------------------------------------------------------------*/
1028 /*
1029  * non-error returns are a promise to giveback() the urb later
1030  * we drop ownership so next owner (or urb unlink) can get it
1032  * urb + dev is in hcd.self.controller.urb_list
1033  * we're queueing TDs onto software and hardware lists
1035  * hcd-specific init for hcpriv hasn't been done yet
1037  * NOTE:  control, bulk, and interrupt share the same code to append TDs
1038  * to a (possibly active) QH, and the same QH scanning code.
1039  */
1040 static int ehci_urb_enqueue (
1041         struct usb_hcd  *hcd,
1042         struct urb      *urb,
1043         gfp_t           mem_flags
1044 ) {
1045         struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
1046         struct list_head        qtd_list;
1048         INIT_LIST_HEAD (&qtd_list);
1050         switch (usb_pipetype (urb->pipe)) {
1051         case PIPE_CONTROL:
1052                 /* qh_completions() code doesn't handle all the fault cases
1053                  * in multi-TD control transfers.  Even 1KB is rare anyway.
1054                  */
1055                 if (urb->transfer_buffer_length > (16 * 1024))
1056                         return -EMSGSIZE;
1057                 /* FALLTHROUGH */
1058         /* case PIPE_BULK: */
1059         default:
1060                 if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
1061                         return -ENOMEM;
1062                 return submit_async(ehci, urb, &qtd_list, mem_flags);
1064         case PIPE_INTERRUPT:
1065                 if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
1066                         return -ENOMEM;
1067                 return intr_submit(ehci, urb, &qtd_list, mem_flags);
1069         case PIPE_ISOCHRONOUS:
1070                 if (urb->dev->speed == USB_SPEED_HIGH)
1071                         return itd_submit (ehci, urb, mem_flags);
1072                 else
1073                         return sitd_submit (ehci, urb, mem_flags);
1074         }
1075 }
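1076 /* Start unlinking a qh from the async schedule; if an IAA cycle is already in progress the qh is chained onto the reclaim list and handled later. */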
1077 static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
1078 {
1079         /* failfast */
1080         if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state) && ehci->reclaim)
1081                 end_unlink_async(ehci);
1083         /* If the QH isn't linked then there's nothing we can do
1084          * unless we were called during a giveback, in which case
1085          * qh_completions() has to deal with it.
1086          */
1087         if (qh->qh_state != QH_STATE_LINKED) {
1088                 if (qh->qh_state == QH_STATE_COMPLETING)
1089                         qh->needs_rescan = 1;
1090                 return;
1091         }
1093         /* defer till later if busy */
1094         if (ehci->reclaim) {
1095                 struct ehci_qh          *last;
1097                 for (last = ehci->reclaim;
1098                                 last->reclaim;
1099                                 last = last->reclaim)
1100                         continue;
1101                 qh->qh_state = QH_STATE_UNLINK_WAIT;
1102                 last->reclaim = qh;
1104         /* start IAA cycle */
1105         } else
1106                 start_unlink_async (ehci, qh);
1107 }
1109 /* remove from hardware lists
1110  * completions normally happen asynchronously
1111  */
1113 static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1114 {
1115         struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
1116         struct ehci_qh          *qh;
1117         unsigned long           flags;
1118         int                     rc;
1120         spin_lock_irqsave (&ehci->lock, flags);
1121         rc = usb_hcd_check_unlink_urb(hcd, urb, status);
1122         if (rc)
1123                 goto done;
1125         switch (usb_pipetype (urb->pipe)) {
1126         // case PIPE_CONTROL:
1127         // case PIPE_BULK:
1128         default:
1129 #ifdef EHCI_QTD_CACHE
1130                 if (urb->transfer_flags & URB_QTD_CACHED) {
1131                         if (ehci_qtdc_unlink(ehci, urb, NULL))
1132                                 err ("%s: can't dequeue urb %p from qtdc", __FUNCTION__, urb);
1133                         break;
1134                 }
1135 #endif /* EHCI_QTD_CACHE */
1137                 qh = (struct ehci_qh *) urb->hcpriv;
1138                 if (!qh)
1139                         break;
1140                 switch (qh->qh_state) {
1141                 case QH_STATE_LINKED:
1142                 case QH_STATE_COMPLETING:
1143                         unlink_async(ehci, qh);
1144                         break;
1145                 case QH_STATE_UNLINK:
1146                 case QH_STATE_UNLINK_WAIT:
1147                         /* already started */
1148                         break;
1149                 case QH_STATE_IDLE:
1150                         WARN_ON(1);
1151                         break;
1152                 }
1153                 break;
1155         case PIPE_INTERRUPT:
1156                 qh = (struct ehci_qh *) urb->hcpriv;
1157                 if (!qh)
1158                         break;
1159                 switch (qh->qh_state) {
1160                 case QH_STATE_LINKED:
1161                 case QH_STATE_COMPLETING:
1162                         intr_deschedule (ehci, qh);
1163                         break;
1164                 case QH_STATE_IDLE:
1165                         qh_completions (ehci, qh);
1166                         break;
1167                 default:
1168                         ehci_dbg (ehci, "bogus qh %p state %d\n",
1169                                         qh, qh->qh_state);
1170                         goto done;
1171                 }
1172                 break;
1174         case PIPE_ISOCHRONOUS:
1175                 // itd or sitd ...
1177                 // wait till next completion, do it then.
1178                 // completion irqs can wait up to 1024 msec,
1179                 break;
1180         }
1181 done:
1182         spin_unlock_irqrestore (&ehci->lock, flags);
1183         return rc;
1184 }
1186 /*-------------------------------------------------------------------------*/
1188 // bulk qh holds the data toggle
1190 static void
1191 ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
1192 {
1193         struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
1194         unsigned long           flags;
1195         struct ehci_qh          *qh, *tmp;
1197         /* ASSERT:  any requests/urbs are being unlinked */
1198         /* ASSERT:  nobody can be submitting urbs for this any more */
1200 rescan:
1201         spin_lock_irqsave (&ehci->lock, flags);
1202         qh = ep->hcpriv;
1203         if (!qh)
1204                 goto done;
1206         /* endpoints can be iso streams.  for now, we don't
1207          * accelerate iso completions ... so spin a while.
1208          */
1209         if (qh->hw == NULL) {
1210                 ehci_vdbg (ehci, "iso delay\n");
1211                 goto idle_timeout;
1212         }
1214         if (!HC_IS_RUNNING (hcd->state))
1215                 qh->qh_state = QH_STATE_IDLE;
1216         switch (qh->qh_state) {
1217         case QH_STATE_LINKED:
1218         case QH_STATE_COMPLETING:
1219                 for (tmp = ehci->async->qh_next.qh;
1220                                 tmp && tmp != qh;
1221                                 tmp = tmp->qh_next.qh)
1222                         continue;
1223                 /* periodic qh self-unlinks on empty, and a COMPLETING qh
1224                  * may already be unlinked.
1225                  */
1226                 if (tmp)
1227                         unlink_async(ehci, qh);
1228                 /* FALL THROUGH */
1229         case QH_STATE_UNLINK:           /* wait for hw to finish? */
1230         case QH_STATE_UNLINK_WAIT:
1231 idle_timeout:
1232                 spin_unlock_irqrestore (&ehci->lock, flags);
1233                 schedule_timeout_uninterruptible(1);
1234                 goto rescan;
1235         case QH_STATE_IDLE:             /* fully unlinked */
1236                 if (list_empty (&qh->qtd_list)) {
1237                         qh_put (qh);
1238                         break;
1239                 }
1240                 /* else FALL THROUGH */
1241         default:
1242                 /* caller was supposed to have unlinked any requests;
1243                  * that's not our job.  just leak this memory.
1244                  */
1245                 ehci_err (ehci, "qh %p (#%02x) state %d%s\n",
1246                         qh, ep->desc.bEndpointAddress, qh->qh_state,
1247                         list_empty (&qh->qtd_list) ? "" : "(has tds)");
1248                 break;
1249         }
1250         ep->hcpriv = NULL;
1251 done:
1252         spin_unlock_irqrestore (&ehci->lock, flags);
1253         return;
1254 }
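1255 /* Current frame number: frame_index counts microframes, so shift right by 3, then wrap at the periodic schedule size. */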
1256 static int ehci_get_frame (struct usb_hcd *hcd)
1257 {
1258         struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
1259         return (ehci_readl(ehci, &ehci->regs->frame_index) >> 3) %
1260                 ehci->periodic_size;
1261 }
1263 /*-------------------------------------------------------------------------*/
1265 MODULE_DESCRIPTION(DRIVER_DESC);
1266 MODULE_AUTHOR (DRIVER_AUTHOR);
1267 MODULE_LICENSE ("GPL");
1269 #ifdef CONFIG_PCI
1270 #include "ehci-pci.c"
1271 #define PCI_DRIVER              ehci_pci_driver
1272 #endif
1274 #ifdef CONFIG_USB_EHCI_FSL
1275 #include "ehci-fsl.c"
1276 #define PLATFORM_DRIVER         ehci_fsl_driver
1277 #endif
1279 #ifdef CONFIG_SOC_AU1200
1280 #include "ehci-au1xxx.c"
1281 #define PLATFORM_DRIVER         ehci_hcd_au1xxx_driver
1282 #endif
1284 #ifdef CONFIG_PPC_PS3
1285 #include "ehci-ps3.c"
1286 #define PS3_SYSTEM_BUS_DRIVER   ps3_ehci_driver
1287 #endif
1289 #ifdef CONFIG_440EPX
1290 #include "ehci-ppc-soc.c"
1291 #define PLATFORM_DRIVER         ehci_ppc_soc_driver
1292 #endif
1294 #if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
1295     !defined(PS3_SYSTEM_BUS_DRIVER)
1296 #error "missing bus glue for ehci-hcd"
1297 #endif
1299 static int __init ehci_hcd_init(void)
1300 {
1301         int retval = 0;
1303         if (usb_disabled())
1304                 return -ENODEV;
1306         printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
1307         set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
1308         if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
1309                         test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
1310                 printk(KERN_WARNING "Warning! ehci_hcd should always be loaded"
1311                                 " before uhci_hcd and ohci_hcd, not after\n");
1313         pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd sitd %Zd\n",
1314                  hcd_name,
1315                  sizeof(struct ehci_qh), sizeof(struct ehci_qtd),
1316                  sizeof(struct ehci_itd), sizeof(struct ehci_sitd));
1318 #ifdef DEBUG
1319         ehci_debug_root = debugfs_create_dir("ehci", NULL);
1320         if (!ehci_debug_root) {
1321                 retval = -ENOENT;
1322                 goto err_debug;
1323         }
1324 #endif
1326 #ifdef PLATFORM_DRIVER
1327         retval = platform_driver_register(&PLATFORM_DRIVER);
1328         if (retval < 0) {
1329 #ifdef DEBUG
1330                 debugfs_remove(ehci_debug_root);
1331                 ehci_debug_root = NULL;
1332 #endif
1333                 return retval;
1334         }
1335 #endif
1337 #ifdef PCI_DRIVER
1338         retval = pci_register_driver(&PCI_DRIVER);
1339         if (retval < 0) {
1340 #ifdef DEBUG
1341                 debugfs_remove(ehci_debug_root);
1342                 ehci_debug_root = NULL;
1343 #endif
1344 #ifdef PLATFORM_DRIVER
1345                 platform_driver_unregister(&PLATFORM_DRIVER);
1346 #endif
1347                 return retval;
1348         }
1349 #endif
1351 #ifdef PS3_SYSTEM_BUS_DRIVER
1352         retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
1353         if (retval < 0) {
1354 #ifdef DEBUG
1355                 debugfs_remove(ehci_debug_root);
1356                 ehci_debug_root = NULL;
1357 #endif
1358 #ifdef PLATFORM_DRIVER
1359                 platform_driver_unregister(&PLATFORM_DRIVER);
1360 #endif
1361 #ifdef PCI_DRIVER
1362                 pci_unregister_driver(&PCI_DRIVER);
1363 #endif
1364 err_debug:
1365         clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
1366                 return retval;
1367         }
1368 #endif
1370         return retval;
1371 }
1372 module_init(ehci_hcd_init);
1374 static void __exit ehci_hcd_cleanup(void)
1375 {
1376 #ifdef PLATFORM_DRIVER
1377         platform_driver_unregister(&PLATFORM_DRIVER);
1378 #endif
1379 #ifdef PCI_DRIVER
1380         pci_unregister_driver(&PCI_DRIVER);
1381 #endif
1382 #ifdef PS3_SYSTEM_BUS_DRIVER
1383         ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
1384 #endif
1385 #ifdef DEBUG
1386         debugfs_remove(ehci_debug_root);
1387 #endif
1388         clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
1389 }
1390 module_exit(ehci_hcd_cleanup);