/*
 * Copyright (c) 2000-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
19 #include <linux/module.h>
20 #include <linux/pci.h>
21 #include <linux/dmapool.h>
22 #include <linux/kernel.h>
23 #include <linux/delay.h>
24 #include <linux/ioport.h>
25 #include <linux/sched.h>
26 #include <linux/slab.h>
27 #include <linux/vmalloc.h>
28 #include <linux/errno.h>
29 #include <linux/init.h>
30 #include <linux/timer.h>
31 #include <linux/ktime.h>
32 #include <linux/list.h>
33 #include <linux/interrupt.h>
34 #include <linux/reboot.h>
35 #include <linux/usb.h>
36 #include <linux/moduleparam.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/debugfs.h>
40 #include "../core/hcd.h"
42 #include <asm/byteorder.h>
45 #include <asm/system.h>
46 #include <asm/unaligned.h>
48 /*-------------------------------------------------------------------------*/
/*
 * EHCI hc_driver implementation ... experimental, incomplete.
 * Based on the final 1.0 register interface specification.
 *
 * USB 2.0 shows up in upcoming www.pcmcia.org technology.
 * First was PCMCIA, like ISA; then CardBus, which is PCI.
 * Next comes "CardBay", using USB 2.0 signals.
 *
 * Contains additional contributions by Brad Hards, Rory Bolt, and others.
 * Special thanks to Intel and VIA for providing host controllers to
 * test this driver on, and Cypress (including In-System Design) for
 * providing early devices for those host controllers to talk to!
 */
64 #define DRIVER_AUTHOR "David Brownell"
65 #define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
/* Driver name reported to the USB core and used in log messages. */
static const char	hcd_name [] = "ehci_hcd";
/* magic numbers that can affect system performance */
#define	EHCI_TUNE_CERR		3	/* 0-3 qtd retries; 0 == don't stop */
#define	EHCI_TUNE_RL_HS		4	/* nak throttle; see 4.9 */
#define	EHCI_TUNE_RL_TT		0	/* no nak throttling through a TT */
#define	EHCI_TUNE_MULT_HS	1	/* 1-3 transactions/uframe; 4.10.3 */
#define	EHCI_TUNE_MULT_TT	1	/* single transaction per uframe via TT */

/*
 * Some drivers think it's safe to schedule isochronous transfers more than
 * 256 ms into the future (partly as a result of an old bug in the scheduling
 * code).  In an attempt to avoid trouble, we will use a minimum scheduling
 * length of 512 frames instead of 256.
 */
#define	EHCI_TUNE_FLS		1	/* (medium) 512-frame schedule */

/* watchdog/unlink timing knobs, in jiffies or frames */
#define EHCI_IAA_MSECS		10		/* arbitrary */
#define EHCI_IO_JIFFIES		(HZ/10)		/* io watchdog > irq_thresh */
#define EHCI_ASYNC_JIFFIES	(HZ/20)		/* async idle timeout */
#define EHCI_SHRINK_FRAMES	5	/* async qh unlink delay */
/* Initial IRQ latency:  faster than hw default.
 * Read-only via sysfs (S_IRUGO); range-checked in ehci_init() — values
 * outside 0..6 fall back to the default (see the log2_irq_thresh test there).
 */
static int log2_irq_thresh = 0;		// 0 to 6
module_param (log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
/* initial park setting:  slower than hw default.
 * Clamped to at most 3 in ehci_init() when the controller advertises
 * park-mode support (HCC_CANPARK).
 */
static unsigned park = 0;
module_param (park, uint, S_IRUGO);
MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
106 /* for flakey hardware, ignore overcurrent indicators */
107 static int ignore_oc
= 0;
108 module_param (ignore_oc
, bool, S_IRUGO
);
109 MODULE_PARM_DESC (ignore_oc
, "ignore bogus hardware overcurrent indications");
/* Interrupt sources enabled at run time and serviced by ehci_irq(). */
#define	INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
113 /*-------------------------------------------------------------------------*/
116 #include "ehci-dbg.c"
118 /*-------------------------------------------------------------------------*/
121 timer_action(struct ehci_hcd
*ehci
, enum ehci_timer_action action
)
123 /* Don't override timeouts which shrink or (later) disable
124 * the async ring; just the I/O watchdog. Note that if a
125 * SHRINK were pending, OFF would never be requested.
127 if (timer_pending(&ehci
->watchdog
)
128 && ((BIT(TIMER_ASYNC_SHRINK
) | BIT(TIMER_ASYNC_OFF
))
132 if (!test_and_set_bit(action
, &ehci
->actions
)) {
136 case TIMER_IO_WATCHDOG
:
137 if (!ehci
->need_io_watchdog
)
141 case TIMER_ASYNC_OFF
:
142 t
= EHCI_ASYNC_JIFFIES
;
144 /* case TIMER_ASYNC_SHRINK: */
146 /* add a jiffie since we synch against the
147 * 8 KHz uframe counter.
149 t
= DIV_ROUND_UP(EHCI_SHRINK_FRAMES
* HZ
, 1000) + 1;
152 mod_timer(&ehci
->watchdog
, t
+ jiffies
);
156 /*-------------------------------------------------------------------------*/
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes:  "usec" have passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 *
 * That last failure should only happen in cases like physical cardbus eject
 * before driver shutdown.  But it also seems to be caused by bugs in cardbus
 * bridge shutdown:  shutting down the bridge before the devices using it.
 */
175 static int handshake (struct ehci_hcd
*ehci
, void __iomem
*ptr
,
176 u32 mask
, u32 done
, int usec
)
181 result
= ehci_readl(ehci
, ptr
);
182 if (result
== ~(u32
)0) /* card removed */
193 /* force HC to halt state from unknown (EHCI spec section 2.3) */
194 static int ehci_halt (struct ehci_hcd
*ehci
)
196 u32 temp
= ehci_readl(ehci
, &ehci
->regs
->status
);
198 /* disable any irqs left enabled by previous code */
199 ehci_writel(ehci
, 0, &ehci
->regs
->intr_enable
);
201 if ((temp
& STS_HALT
) != 0)
204 temp
= ehci_readl(ehci
, &ehci
->regs
->command
);
206 ehci_writel(ehci
, temp
, &ehci
->regs
->command
);
207 return handshake (ehci
, &ehci
->regs
->status
,
208 STS_HALT
, STS_HALT
, 16 * 125);
211 static int handshake_on_error_set_halt(struct ehci_hcd
*ehci
, void __iomem
*ptr
,
212 u32 mask
, u32 done
, int usec
)
216 error
= handshake(ehci
, ptr
, mask
, done
, usec
);
219 ehci_to_hcd(ehci
)->state
= HC_STATE_HALT
;
220 ehci_err(ehci
, "force halt; handshake %p %08x %08x -> %d\n",
221 ptr
, mask
, done
, error
);
227 /* put TDI/ARC silicon into EHCI mode */
228 static void tdi_reset (struct ehci_hcd
*ehci
)
230 u32 __iomem
*reg_ptr
;
233 reg_ptr
= (u32 __iomem
*)(((u8 __iomem
*)ehci
->regs
) + USBMODE
);
234 tmp
= ehci_readl(ehci
, reg_ptr
);
235 tmp
|= USBMODE_CM_HC
;
236 /* The default byte access to MMR space is LE after
237 * controller reset. Set the required endian mode
238 * for transfer buffers to match the host microprocessor
240 if (ehci_big_endian_mmio(ehci
))
242 ehci_writel(ehci
, tmp
, reg_ptr
);
245 /* reset a non-running (STS_HALT == 1) controller */
246 static int ehci_reset (struct ehci_hcd
*ehci
)
249 u32 command
= ehci_readl(ehci
, &ehci
->regs
->command
);
251 command
|= CMD_RESET
;
252 dbg_cmd (ehci
, "reset", command
);
253 ehci_writel(ehci
, command
, &ehci
->regs
->command
);
254 ehci_to_hcd(ehci
)->state
= HC_STATE_HALT
;
255 ehci
->next_statechange
= jiffies
;
256 retval
= handshake (ehci
, &ehci
->regs
->command
,
257 CMD_RESET
, 0, 250 * 1000);
262 if (ehci_is_TDI(ehci
))
268 static int ehci_optimized(struct ehci_hcd
*ehci
, struct ehci_qh
*qh
)
271 for (p
= 0; p
< 3; ++p
)
272 if (ehci
->ehci_pipes
[p
] == qh
)
277 /* idle the controller (from running) */
278 static void ehci_quiesce (struct ehci_hcd
*ehci
)
283 if (!HC_IS_RUNNING (ehci_to_hcd(ehci
)->state
))
287 /* wait for any schedule enables/disables to take effect */
288 temp
= ehci_readl(ehci
, &ehci
->regs
->command
) << 10;
289 temp
&= STS_ASS
| STS_PSS
;
290 if (handshake_on_error_set_halt(ehci
, &ehci
->regs
->status
,
291 STS_ASS
| STS_PSS
, temp
, 16 * 125))
294 /* then disable anything that's still active */
295 temp
= ehci_readl(ehci
, &ehci
->regs
->command
);
296 temp
&= ~(CMD_ASE
| CMD_IAAD
| CMD_PSE
);
297 ehci_writel(ehci
, temp
, &ehci
->regs
->command
);
299 /* hardware can take 16 microframes to turn off ... */
300 handshake_on_error_set_halt(ehci
, &ehci
->regs
->status
,
301 STS_ASS
| STS_PSS
, 0, 16 * 125);
304 /*-------------------------------------------------------------------------*/
306 static void end_unlink_async(struct ehci_hcd
*ehci
);
307 static void ehci_work(struct ehci_hcd
*ehci
);
309 #include "ehci-hub.c"
310 #include "ehci-mem.c"
312 #include "ehci-sched.c"
314 /*-------------------------------------------------------------------------*/
/* USB product/vendor ID of the device whose endpoints may use the qtd
 * cache; ehci_qtdc_init() rejects a 0 pid or vid, so the default leaves
 * the cache disabled.
 */
static int qtdc_pid = 0;
module_param (qtdc_pid, int, S_IRUGO);
MODULE_PARM_DESC (qtdc_pid, "qtd cache device pid");

static int qtdc_vid = 0;
module_param (qtdc_vid, int, S_IRUGO);
MODULE_PARM_DESC (qtdc_vid, "qtd cache device vid");

/* qtd cache 0: endpoint address, cache depth, and timeout (ms) */
static int qtdc0_ep = -1;	/* turn off qtd cache by default */
module_param (qtdc0_ep, int, S_IRUGO);
MODULE_PARM_DESC (qtdc0_ep, "qtd cache 0 endpoint");

static int qtdc0_sz = 0;	/* turn off qtd cache by default */
module_param (qtdc0_sz, int, S_IRUGO);
MODULE_PARM_DESC (qtdc0_sz, "qtd cache 0 size (# of qtd's)");

static int qtdc0_to = 1;	/* milliseconds; converted to jiffies in ehci_qtdc_init() */
module_param (qtdc0_to, int, S_IRUGO);
MODULE_PARM_DESC (qtdc0_to, "qtd cache 0 timeout (ms)");

/* qtd cache 1: endpoint address, cache depth, and timeout (ms) */
static int qtdc1_ep = -1;	/* turn off qtd cache by default */
module_param (qtdc1_ep, int, S_IRUGO);
MODULE_PARM_DESC (qtdc1_ep, "qtd cache 1 endpoint");

static int qtdc1_sz = 0;	/* turn off qtd cache by default */
module_param (qtdc1_sz, int, S_IRUGO);
MODULE_PARM_DESC (qtdc1_sz, "qtd cache 1 size (# of qtd's)");

static int qtdc1_to = 1;	/* milliseconds; converted to jiffies in ehci_qtdc_init() */
module_param (qtdc1_to, int, S_IRUGO);
MODULE_PARM_DESC (qtdc1_to, "qtd cache 1 timeout (ms)");

/* per-cache message verbosity; only meaningful with EHCI_QTDC_DEBUG */
static int qtdc0_ml = QTDC_MSG_ERR;
module_param (qtdc0_ml, int, S_IRUGO);
MODULE_PARM_DESC (qtdc0_ml, "qtd cache 0 msglevel");

static int qtdc1_ml = QTDC_MSG_ERR;
module_param (qtdc1_ml, int, S_IRUGO);
MODULE_PARM_DESC (qtdc1_ml, "qtd cache 1 msglevel");
356 #ifdef EHCI_QTD_CACHE
357 static void ehci_qtdc_watchdog (unsigned long param
)
359 ehci_qtdc_t
*qtdc
= (ehci_qtdc_t
*) param
;
360 struct ehci_hcd
*ehci
= (struct ehci_hcd
*)(qtdc
->ehci
);
361 unsigned long flags
, flags2
;
362 struct ehci_qtd
*qtd
;
363 struct urb
*urb
= NULL
;
364 //struct hcd_dev *dev;
366 struct usb_host_endpoint
*ep
;
367 struct list_head
*entry
;
368 struct ehci_qh
*qh
= 0;
370 spin_lock_irqsave (&ehci
->lock
, flags
);
372 if (unlikely (qtdc
->cnt
<= 0))
375 #ifdef EHCI_QTDC_DEBUG
376 if (unlikely (list_empty(&qtdc
->cache
)))
377 QTDC_ERR(qtdc
, ("cnt %d but cache empty\n", qtdc
->cnt
));
378 #endif /* EHCI_QTDC_DEBUG */
380 QTDC_TRACE(qtdc
, ("watchdog release! cnt %d\n", qtdc
->cnt
));
381 list_for_each (entry
, &qtdc
->cache
) {
382 qtd
= list_entry (entry
, struct ehci_qtd
, qtd_list
);
384 spin_lock_irqsave (&urb
->lock
, flags2
);
385 urb
->transfer_flags
&= ~URB_QTD_CACHED
;
386 spin_unlock_irqrestore (&urb
->lock
, flags2
);
388 // dev = (struct hcd_dev *)urb->dev->hcpriv;
389 epnum
= usb_pipeendpoint (urb
->pipe
);
390 if (usb_pipeout(urb
->pipe
)) {
391 WARN_ON(usb_pipein(urb
->pipe
));
392 ep
= urb
->dev
->ep_out
[epnum
];
394 WARN_ON(usb_pipeout(urb
->pipe
));
395 ep
= urb
->dev
->ep_in
[epnum
];
397 if (usb_pipein (urb
->pipe
) && !usb_pipecontrol (urb
->pipe
))
400 #ifdef EHCI_QTDC_DEBUG
401 qtdc
->timeout_qtd
+= qtdc
->cnt
;
402 if (qtdc
->cnt
> qtdc
->timeout_qtd_max
)
403 qtdc
->timeout_qtd_max
= qtdc
->cnt
;
405 if ((jiffies
- qtdc
->last_printed
) > (10 * HZ
)) {
406 QTDC_STATS(qtdc
, ("cached_qtd %lu\nrelease_qtd %lu release_cnt %lu\n"
407 "timeout_qtd %lu timeout_qtd_max %lu timeout_cnt %lu avg_timeout_qtd %lu\n",
408 qtdc
->cached_qtd
, qtdc
->release_qtd
, qtdc
->release_cnt
,
409 qtdc
->timeout_qtd
, qtdc
->timeout_qtd_max
, qtdc
->timeout_cnt
,
410 (qtdc
->timeout_qtd
/ qtdc
->timeout_cnt
)));
411 qtdc
->last_printed
= jiffies
;
413 #endif /* EHCI_QTDC_DEBUG */
416 qh
= qh_append_tds (ehci
, urb
, &qtdc
->cache
, epnum
, &ep
->hcpriv
);
417 /* Control/bulk operations through TTs don't need scheduling,
418 * the HC and TT handle it when the TT has a buffer ready.
420 if (likely (qh
!= 0)) {
421 if (likely (qh
->qh_state
== QH_STATE_IDLE
))
422 qh_link_async (ehci
, qh_get (qh
));
425 /* clean up qtd cache */
426 INIT_LIST_HEAD(&qtdc
->cache
);
429 spin_unlock_irqrestore (&ehci
->lock
, flags
);
433 ehci_qtdc_t
*ehci_qtdc_init(struct ehci_hcd
*ehci
, int vid
, int pid
, int num
, int ep
, int size
, int timeout
, unsigned int msglevel
)
437 if (pid
== 0 || vid
== 0) {
438 ehci_err (ehci
, "pid %x vid %x not valid\n", pid
, vid
);
442 if (num
>= NUM_QTD_CACHE
) {
443 ehci_err (ehci
, "qtdc %d exceeding limit %d\n", num
, NUM_QTD_CACHE
);
447 if ((ep
< 0) || (ep
> 0x1f)) {
448 ehci_err (ehci
, "qtdc %d disabled: invalid ep 0x%x\n", num
, ep
);
453 ehci_err (ehci
, "qtdc %d disabled: invalid size %d\n", num
, size
);
458 ehci_err (ehci
, "qtdc %d disabled: invalid timeout %d\n", num
, timeout
);
462 qtdc
= kmalloc(sizeof(ehci_qtdc_t
), GFP_KERNEL
);
464 ehci_err (ehci
, "qtdc %d disabled: alloc failed\n", num
);
468 memset(qtdc
, 0, sizeof(ehci_qtdc_t
));
470 ehci
->qtdc_vid
= vid
;
471 ehci
->qtdc_pid
= pid
;
472 qtdc
->ehci
= (void*)ehci
;
476 qtdc
->timeout
= (timeout
* HZ
) / 1000; /* in ms */
477 #ifdef EHCI_QTDC_DEBUG
478 qtdc
->msglevel
= msglevel
;
479 #endif /* EHCI_QTDC_DEBUG */
481 INIT_LIST_HEAD(&qtdc
->cache
);
483 init_timer (&qtdc
->watchdog
);
484 qtdc
->watchdog
.function
= ehci_qtdc_watchdog
;
485 qtdc
->watchdog
.data
= (unsigned long) qtdc
;
487 ehci_info (ehci
, "qtdc %d enabled: vid %x pid %x ep 0x%x size %d timeout %d\n",
488 num
, vid
, pid
, ep
, size
, timeout
);
493 void ehci_qtdc_deinit(ehci_qtdc_t
* qtdc
)
495 if (list_empty(&qtdc
->cache
))
499 #endif /* EHCI_QTD_CACHE */
504 /*-------------------------------------------------------------------------*/
506 static void ehci_iaa_watchdog(unsigned long param
)
508 struct ehci_hcd
*ehci
= (struct ehci_hcd
*) param
;
511 spin_lock_irqsave (&ehci
->lock
, flags
);
513 /* Lost IAA irqs wedge things badly; seen first with a vt8235.
514 * So we need this watchdog, but must protect it against both
515 * (a) SMP races against real IAA firing and retriggering, and
516 * (b) clean HC shutdown, when IAA watchdog was pending.
519 && !timer_pending(&ehci
->iaa_watchdog
)
520 && HC_IS_RUNNING(ehci_to_hcd(ehci
)->state
)) {
523 /* If we get here, IAA is *REALLY* late. It's barely
524 * conceivable that the system is so busy that CMD_IAAD
525 * is still legitimately set, so let's be sure it's
526 * clear before we read STS_IAA. (The HC should clear
527 * CMD_IAAD when it sets STS_IAA.)
529 cmd
= ehci_readl(ehci
, &ehci
->regs
->command
);
531 ehci_writel(ehci
, cmd
& ~CMD_IAAD
,
532 &ehci
->regs
->command
);
534 /* If IAA is set here it either legitimately triggered
535 * before we cleared IAAD above (but _way_ late, so we'll
536 * still count it as lost) ... or a silicon erratum:
537 * - VIA seems to set IAA without triggering the IRQ;
538 * - IAAD potentially cleared without setting IAA.
540 status
= ehci_readl(ehci
, &ehci
->regs
->status
);
541 if ((status
& STS_IAA
) || !(cmd
& CMD_IAAD
)) {
542 COUNT (ehci
->stats
.lost_iaa
);
543 ehci_writel(ehci
, STS_IAA
, &ehci
->regs
->status
);
546 ehci_vdbg(ehci
, "IAA watchdog: status %x cmd %x\n",
548 end_unlink_async(ehci
);
551 spin_unlock_irqrestore(&ehci
->lock
, flags
);
554 static void ehci_watchdog(unsigned long param
)
556 struct ehci_hcd
*ehci
= (struct ehci_hcd
*) param
;
559 spin_lock_irqsave(&ehci
->lock
, flags
);
561 /* stop async processing after it's idled a bit */
562 if (test_bit (TIMER_ASYNC_OFF
, &ehci
->actions
))
563 start_unlink_async (ehci
, ehci
->async
);
565 /* ehci could run by timer, without IRQs ... */
568 spin_unlock_irqrestore (&ehci
->lock
, flags
);
571 /* On some systems, leaving remote wakeup enabled prevents system shutdown.
572 * The firmware seems to think that powering off is a wakeup event!
573 * This routine turns off remote wakeup and everything else, on all ports.
575 static void ehci_turn_off_all_ports(struct ehci_hcd
*ehci
)
577 int port
= HCS_N_PORTS(ehci
->hcs_params
);
580 ehci_writel(ehci
, PORT_RWC_BITS
,
581 &ehci
->regs
->port_status
[port
]);
585 * Halt HC, turn off all ports, and let the BIOS use the companion controllers.
586 * Should be called with ehci->lock held.
588 static void ehci_silence_controller(struct ehci_hcd
*ehci
)
591 ehci_turn_off_all_ports(ehci
);
593 /* make BIOS/etc use companion controller during reboot */
594 ehci_writel(ehci
, 0, &ehci
->regs
->configured_flag
);
596 /* unblock posted writes */
597 ehci_readl(ehci
, &ehci
->regs
->configured_flag
);
600 /* ehci_shutdown kick in for silicon on any bus (not just pci, etc).
601 * This forcibly disables dma and IRQs, helping kexec and other cases
602 * where the next system software may expect clean state.
604 static void ehci_shutdown(struct usb_hcd
*hcd
)
606 struct ehci_hcd
*ehci
= hcd_to_ehci(hcd
);
608 del_timer_sync(&ehci
->watchdog
);
609 del_timer_sync(&ehci
->iaa_watchdog
);
611 spin_lock_irq(&ehci
->lock
);
612 ehci_silence_controller(ehci
);
613 spin_unlock_irq(&ehci
->lock
);
616 static void ehci_port_power (struct ehci_hcd
*ehci
, int is_on
)
620 if (!HCS_PPC (ehci
->hcs_params
))
623 ehci_dbg (ehci
, "...power%s ports...\n", is_on
? "up" : "down");
624 for (port
= HCS_N_PORTS (ehci
->hcs_params
); port
> 0; )
625 (void) ehci_hub_control(ehci_to_hcd(ehci
),
626 is_on
? SetPortFeature
: ClearPortFeature
,
629 /* Flush those writes */
630 ehci_readl(ehci
, &ehci
->regs
->command
);
634 /*-------------------------------------------------------------------------*/
637 * ehci_work is called from some interrupts, timers, and so on.
638 * it calls driver completion functions, after dropping ehci->lock.
640 static void ehci_work (struct ehci_hcd
*ehci
)
642 timer_action_done (ehci
, TIMER_IO_WATCHDOG
);
644 /* another CPU may drop ehci->lock during a schedule scan while
645 * it reports urb completions. this flag guards against bogus
646 * attempts at re-entrant schedule scanning.
652 if (ehci
->next_uframe
!= -1)
653 scan_periodic (ehci
);
656 /* the IO watchdog guards against hardware or driver bugs that
657 * misplace IRQs, and should let us run completely without IRQs.
658 * such lossage has been observed on both VT6202 and VT8235.
660 if (HC_IS_RUNNING (ehci_to_hcd(ehci
)->state
) &&
661 (ehci
->async
->qh_next
.ptr
!= NULL
||
662 ehci
->periodic_sched
!= 0))
663 timer_action (ehci
, TIMER_IO_WATCHDOG
);
667 * Called when the ehci_hcd module is removed.
669 static void ehci_stop (struct usb_hcd
*hcd
)
671 struct ehci_hcd
*ehci
= hcd_to_ehci (hcd
);
673 ehci_dbg (ehci
, "stop\n");
675 /* no more interrupts ... */
676 del_timer_sync (&ehci
->watchdog
);
677 del_timer_sync(&ehci
->iaa_watchdog
);
678 #ifdef EHCI_QTD_CACHE
679 del_timer_sync (&ehci
->qtdc_watchdog
);
680 ehci_qtdc_deinit (ehci
->qtdc
[0]);
681 ehci_qtdc_deinit (ehci
->qtdc
[1]);
682 #endif /* EHCI_QTD_CACHE */
683 spin_lock_irq(&ehci
->lock
);
684 if (HC_IS_RUNNING (hcd
->state
))
687 ehci_silence_controller(ehci
);
689 spin_unlock_irq(&ehci
->lock
);
691 remove_companion_file(ehci
);
692 remove_debug_files (ehci
);
694 /* root hub is shut down separately (first, when possible) */
695 spin_lock_irq (&ehci
->lock
);
698 spin_unlock_irq (&ehci
->lock
);
699 ehci_mem_cleanup (ehci
);
702 ehci_dbg (ehci
, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
703 ehci
->stats
.normal
, ehci
->stats
.error
, ehci
->stats
.reclaim
,
704 ehci
->stats
.lost_iaa
);
705 ehci_dbg (ehci
, "complete %ld unlink %ld\n",
706 ehci
->stats
.complete
, ehci
->stats
.unlink
);
709 dbg_status (ehci
, "ehci_stop completed",
710 ehci_readl(ehci
, &ehci
->regs
->status
));
713 /* one-time init, only for memory state */
714 static int ehci_init(struct usb_hcd
*hcd
)
716 struct ehci_hcd
*ehci
= hcd_to_ehci(hcd
);
720 struct ehci_qh_hw
*hw
;
722 ehci_info(ehci
, "EHCI Fastpath: New EHCI driver starting\n");
724 spin_lock_init(&ehci
->lock
);
727 * keep io watchdog by default, those good HCDs could turn off it later
729 ehci
->need_io_watchdog
= 1;
730 init_timer(&ehci
->watchdog
);
731 ehci
->watchdog
.function
= ehci_watchdog
;
732 ehci
->watchdog
.data
= (unsigned long) ehci
;
734 #ifdef EHCI_QTD_CACHE
735 ehci
->qtdc
[0] = ehci_qtdc_init(ehci
, qtdc_vid
, qtdc_pid
, 0, qtdc0_ep
, qtdc0_sz
, qtdc0_to
, qtdc0_ml
);
736 ehci
->qtdc
[1] = ehci_qtdc_init(ehci
, qtdc_vid
, qtdc_pid
, 1, qtdc1_ep
, qtdc1_sz
, qtdc1_to
, qtdc1_ml
);
737 #endif /* EHCI_QTD_CACHE */
741 init_timer(&ehci
->iaa_watchdog
);
742 ehci
->iaa_watchdog
.function
= ehci_iaa_watchdog
;
743 ehci
->iaa_watchdog
.data
= (unsigned long) ehci
;
746 * hw default: 1K periodic list heads, one per frame.
747 * periodic_size can shrink by USBCMD update if hcc_params allows.
749 ehci
->periodic_size
= DEFAULT_I_TDPS
;
750 INIT_LIST_HEAD(&ehci
->cached_itd_list
);
751 INIT_LIST_HEAD(&ehci
->cached_sitd_list
);
752 if ((retval
= ehci_mem_init(ehci
, GFP_KERNEL
)) < 0)
755 /* controllers may cache some of the periodic schedule ... */
756 hcc_params
= ehci_readl(ehci
, &ehci
->caps
->hcc_params
);
757 if (HCC_ISOC_CACHE(hcc_params
)) // full frame cache
758 ehci
->i_thresh
= 2 + 8;
759 else // N microframes cached
760 ehci
->i_thresh
= 2 + HCC_ISOC_THRES(hcc_params
);
762 ehci
->reclaim
= NULL
;
763 ehci
->next_uframe
= -1;
764 ehci
->clock_frame
= -1;
767 * dedicate a qh for the async ring head, since we couldn't unlink
768 * a 'real' qh without stopping the async schedule [4.8]. use it
769 * as the 'reclamation list head' too.
770 * its dummy is used in hw_alt_next of many tds, to prevent the qh
771 * from automatically advancing to the next td after short reads.
773 ehci
->async
->qh_next
.qh
= NULL
;
774 hw
= ehci
->async
->hw
;
775 hw
->hw_next
= QH_NEXT(ehci
, ehci
->async
->qh_dma
);
776 hw
->hw_info1
= cpu_to_hc32(ehci
, QH_HEAD
);
777 hw
->hw_token
= cpu_to_hc32(ehci
, QTD_STS_HALT
);
778 hw
->hw_qtd_next
= EHCI_LIST_END(ehci
);
779 ehci
->async
->qh_state
= QH_STATE_LINKED
;
780 hw
->hw_alt_next
= QTD_NEXT(ehci
, ehci
->async
->dummy
->qtd_dma
);
782 /* clear interrupt enables, set irq latency */
783 if (log2_irq_thresh
< 0 || log2_irq_thresh
> 6)
785 temp
= 1 << (16 + log2_irq_thresh
);
786 if (HCC_CANPARK(hcc_params
)) {
787 /* HW default park == 3, on hardware that supports it (like
788 * NVidia and ALI silicon), maximizes throughput on the async
789 * schedule by avoiding QH fetches between transfers.
791 * With fast usb storage devices and NForce2, "park" seems to
792 * make problems: throughput reduction (!), data errors...
795 park
= min(park
, (unsigned) 3);
799 ehci_dbg(ehci
, "park %d\n", park
);
801 if (HCC_PGM_FRAMELISTLEN(hcc_params
)) {
802 /* periodic schedule size can be smaller than default */
804 temp
|= (EHCI_TUNE_FLS
<< 2);
805 switch (EHCI_TUNE_FLS
) {
806 case 0: ehci
->periodic_size
= 1024; break;
807 case 1: ehci
->periodic_size
= 512; break;
808 case 2: ehci
->periodic_size
= 256; break;
812 ehci
->command
= temp
;
817 /* start HC running; it's halted, ehci_init() has been run (once) */
818 static int ehci_run (struct usb_hcd
*hcd
)
820 struct ehci_hcd
*ehci
= hcd_to_ehci (hcd
);
825 hcd
->uses_new_polling
= 1;
828 /* EHCI spec section 4.1 */
829 if ((retval
= ehci_reset(ehci
)) != 0) {
830 ehci_mem_cleanup(ehci
);
833 ehci_writel(ehci
, ehci
->periodic_dma
, &ehci
->regs
->frame_list
);
834 ehci_writel(ehci
, (u32
)ehci
->async
->qh_dma
, &ehci
->regs
->async_next
);
837 * hcc_params controls whether ehci->regs->segment must (!!!)
838 * be used; it constrains QH/ITD/SITD and QTD locations.
839 * pci_pool consistent memory always uses segment zero.
840 * streaming mappings for I/O buffers, like pci_map_single(),
841 * can return segments above 4GB, if the device allows.
843 * NOTE: the dma mask is visible through dma_supported(), so
844 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
845 * Scsi_Host.highmem_io, and so forth. It's readonly to all
846 * host side drivers though.
848 hcc_params
= ehci_readl(ehci
, &ehci
->caps
->hcc_params
);
849 if (HCC_64BIT_ADDR(hcc_params
)) {
850 ehci_writel(ehci
, 0, &ehci
->regs
->segment
);
852 // this is deeply broken on almost all architectures
853 if (!dma_set_mask(hcd
->self
.controller
, DMA_64BIT_MASK
))
854 ehci_info(ehci
, "enabled 64bit DMA\n");
859 // Philips, Intel, and maybe others need CMD_RUN before the
860 // root hub will detect new devices (why?); NEC doesn't
861 ehci
->command
&= ~(CMD_LRESET
|CMD_IAAD
|CMD_PSE
|CMD_ASE
|CMD_RESET
);
862 ehci
->command
|= CMD_RUN
;
863 ehci_writel(ehci
, ehci
->command
, &ehci
->regs
->command
);
864 dbg_cmd (ehci
, "init", ehci
->command
);
867 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
868 * are explicitly handed to companion controller(s), so no TT is
869 * involved with the root hub. (Except where one is integrated,
870 * and there's no companion controller unless maybe for USB OTG.)
872 * Turning on the CF flag will transfer ownership of all ports
873 * from the companions to the EHCI controller. If any of the
874 * companions are in the middle of a port reset at the time, it
875 * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
876 * guarantees that no resets are in progress. After we set CF,
877 * a short delay lets the hardware catch up; new resets shouldn't
878 * be started before the port switching actions could complete.
880 down_write(&ehci_cf_port_reset_rwsem
);
881 hcd
->state
= HC_STATE_RUNNING
;
882 ehci_writel(ehci
, FLAG_CF
, &ehci
->regs
->configured_flag
);
883 ehci_readl(ehci
, &ehci
->regs
->command
); /* unblock posted writes */
885 up_write(&ehci_cf_port_reset_rwsem
);
886 ehci
->last_periodic_enable
= ktime_get_real();
888 temp
= HC_VERSION(ehci_readl(ehci
, &ehci
->caps
->hc_capbase
));
890 "USB %x.%x started, EHCI %x.%02x%s\n",
891 ((ehci
->sbrn
& 0xf0)>>4), (ehci
->sbrn
& 0x0f),
892 temp
>> 8, temp
& 0xff,
893 ignore_oc
? ", overcurrent ignored" : "");
895 ehci_writel(ehci
, INTR_MASK
,
896 &ehci
->regs
->intr_enable
); /* Turn On Interrupts */
898 /* GRR this is run-once init(), being done every time the HC starts.
899 * So long as they're part of class devices, we can't do it init()
900 * since the class device isn't created that early.
902 create_debug_files(ehci
);
903 create_companion_file(ehci
);
908 /*-------------------------------------------------------------------------*/
910 static irqreturn_t
ehci_irq (struct usb_hcd
*hcd
)
912 struct ehci_hcd
*ehci
= hcd_to_ehci (hcd
);
913 u32 status
, masked_status
, pcd_status
= 0, cmd
;
916 spin_lock (&ehci
->lock
);
918 status
= ehci_readl(ehci
, &ehci
->regs
->status
);
920 /* e.g. cardbus physical eject */
921 if (status
== ~(u32
) 0) {
922 ehci_dbg (ehci
, "device removed\n");
926 masked_status
= status
& INTR_MASK
;
927 if (!masked_status
) { /* irq sharing? */
928 spin_unlock(&ehci
->lock
);
932 /* clear (just) interrupts */
933 ehci_writel(ehci
, masked_status
, &ehci
->regs
->status
);
934 cmd
= ehci_readl(ehci
, &ehci
->regs
->command
);
938 /* unrequested/ignored: Frame List Rollover */
939 dbg_status (ehci
, "irq", status
);
942 /* INT, ERR, and IAA interrupt rates can be throttled */
944 /* normal [4.15.1.2] or error [4.15.1.1] completion */
945 if (likely ((status
& (STS_INT
|STS_ERR
)) != 0)) {
946 if (likely ((status
& STS_ERR
) == 0))
947 COUNT (ehci
->stats
.normal
);
949 COUNT (ehci
->stats
.error
);
953 /* complete the unlinking of some qh [4.15.2.3] */
954 if (status
& STS_IAA
) {
955 /* guard against (alleged) silicon errata */
956 if (cmd
& CMD_IAAD
) {
957 ehci_writel(ehci
, cmd
& ~CMD_IAAD
,
958 &ehci
->regs
->command
);
959 ehci_dbg(ehci
, "IAA with IAAD still set?\n");
962 COUNT(ehci
->stats
.reclaim
);
963 end_unlink_async(ehci
);
965 ehci_dbg(ehci
, "IAA with nothing to reclaim?\n");
968 /* remote wakeup [4.3.1] */
969 if (status
& STS_PCD
) {
970 unsigned i
= HCS_N_PORTS (ehci
->hcs_params
);
972 /* kick root hub later */
975 /* resume root hub? */
976 if (!(cmd
& CMD_RUN
))
977 usb_hcd_resume_root_hub(hcd
);
980 int pstatus
= ehci_readl(ehci
,
981 &ehci
->regs
->port_status
[i
]);
983 if (pstatus
& PORT_OWNER
)
985 if (!(test_bit(i
, &ehci
->suspended_ports
) &&
986 ((pstatus
& PORT_RESUME
) ||
987 !(pstatus
& PORT_SUSPEND
)) &&
988 (pstatus
& PORT_PE
) &&
989 ehci
->reset_done
[i
] == 0))
992 /* start 20 msec resume signaling from this port,
993 * and make khubd collect PORT_STAT_C_SUSPEND to
994 * stop that signaling. Use 5 ms extra for safety,
995 * like usb_port_resume() does.
997 ehci
->reset_done
[i
] = jiffies
+ msecs_to_jiffies(25);
998 ehci_dbg (ehci
, "port %d remote wakeup\n", i
+ 1);
999 mod_timer(&hcd
->rh_timer
, ehci
->reset_done
[i
]);
1003 /* PCI errors [4.15.2.4] */
1004 if (unlikely ((status
& STS_FATAL
) != 0)) {
1005 ehci_err(ehci
, "fatal error\n");
1006 dbg_cmd(ehci
, "fatal", cmd
);
1007 dbg_status(ehci
, "fatal", status
);
1011 ehci_writel(ehci
, 0, &ehci
->regs
->configured_flag
);
1012 /* generic layer kills/unlinks all urbs, then
1013 * uses ehci_stop to clean up the rest
1020 spin_unlock (&ehci
->lock
);
1022 usb_hcd_poll_rh_status(hcd
);
1026 /*-------------------------------------------------------------------------*/
1029 * non-error returns are a promise to giveback() the urb later
1030 * we drop ownership so next owner (or urb unlink) can get it
1032 * urb + dev is in hcd.self.controller.urb_list
1033 * we're queueing TDs onto software and hardware lists
1035 * hcd-specific init for hcpriv hasn't been done yet
1037 * NOTE: control, bulk, and interrupt share the same code to append TDs
1038 * to a (possibly active) QH, and the same QH scanning code.
1040 static int ehci_urb_enqueue (
1041 struct usb_hcd
*hcd
,
1045 struct ehci_hcd
*ehci
= hcd_to_ehci (hcd
);
1046 struct list_head qtd_list
;
1048 INIT_LIST_HEAD (&qtd_list
);
1050 switch (usb_pipetype (urb
->pipe
)) {
1052 /* qh_completions() code doesn't handle all the fault cases
1053 * in multi-TD control transfers. Even 1KB is rare anyway.
1055 if (urb
->transfer_buffer_length
> (16 * 1024))
1058 /* case PIPE_BULK: */
1060 if (!qh_urb_transaction (ehci
, urb
, &qtd_list
, mem_flags
))
1062 return submit_async(ehci
, urb
, &qtd_list
, mem_flags
);
1064 case PIPE_INTERRUPT
:
1065 if (!qh_urb_transaction (ehci
, urb
, &qtd_list
, mem_flags
))
1067 return intr_submit(ehci
, urb
, &qtd_list
, mem_flags
);
1069 case PIPE_ISOCHRONOUS
:
1070 if (urb
->dev
->speed
== USB_SPEED_HIGH
)
1071 return itd_submit (ehci
, urb
, mem_flags
);
1073 return sitd_submit (ehci
, urb
, mem_flags
);
1077 static void unlink_async (struct ehci_hcd
*ehci
, struct ehci_qh
*qh
)
1080 if (!HC_IS_RUNNING(ehci_to_hcd(ehci
)->state
) && ehci
->reclaim
)
1081 end_unlink_async(ehci
);
1083 /* If the QH isn't linked then there's nothing we can do
1084 * unless we were called during a giveback, in which case
1085 * qh_completions() has to deal with it.
1087 if (qh
->qh_state
!= QH_STATE_LINKED
) {
1088 if (qh
->qh_state
== QH_STATE_COMPLETING
)
1089 qh
->needs_rescan
= 1;
1093 /* defer till later if busy */
1094 if (ehci
->reclaim
) {
1095 struct ehci_qh
*last
;
1097 for (last
= ehci
->reclaim
;
1099 last
= last
->reclaim
)
1101 qh
->qh_state
= QH_STATE_UNLINK_WAIT
;
1104 /* start IAA cycle */
1106 start_unlink_async (ehci
, qh
);
1109 /* remove from hardware lists
1110 * completions normally happen asynchronously
1113 static int ehci_urb_dequeue(struct usb_hcd
*hcd
, struct urb
*urb
, int status
)
1115 struct ehci_hcd
*ehci
= hcd_to_ehci (hcd
);
1117 unsigned long flags
;
1120 spin_lock_irqsave (&ehci
->lock
, flags
);
1121 rc
= usb_hcd_check_unlink_urb(hcd
, urb
, status
);
1125 switch (usb_pipetype (urb
->pipe
)) {
1126 // case PIPE_CONTROL:
1129 #ifdef EHCI_QTD_CACHE
1130 if (urb
->transfer_flags
& URB_QTD_CACHED
) {
1131 if (ehci_qtdc_unlink(ehci
, urb
, NULL
))
1132 err ("%s: can't dequeue urb %p from qtdc", __FUNCTION__
, urb
);
1135 #endif /* EHCI_QTD_CACHE */
1137 qh
= (struct ehci_qh
*) urb
->hcpriv
;
1140 switch (qh
->qh_state
) {
1141 case QH_STATE_LINKED
:
1142 case QH_STATE_COMPLETING
:
1143 unlink_async(ehci
, qh
);
1145 case QH_STATE_UNLINK
:
1146 case QH_STATE_UNLINK_WAIT
:
1147 /* already started */
1155 case PIPE_INTERRUPT
:
1156 qh
= (struct ehci_qh
*) urb
->hcpriv
;
1159 switch (qh
->qh_state
) {
1160 case QH_STATE_LINKED
:
1161 case QH_STATE_COMPLETING
:
1162 intr_deschedule (ehci
, qh
);
1165 qh_completions (ehci
, qh
);
1168 ehci_dbg (ehci
, "bogus qh %p state %d\n",
1174 case PIPE_ISOCHRONOUS
:
1177 // wait till next completion, do it then.
1178 // completion irqs can wait up to 1024 msec,
1182 spin_unlock_irqrestore (&ehci
->lock
, flags
);
1186 /*-------------------------------------------------------------------------*/
1188 // bulk qh holds the data toggle
1191 ehci_endpoint_disable (struct usb_hcd
*hcd
, struct usb_host_endpoint
*ep
)
1193 struct ehci_hcd
*ehci
= hcd_to_ehci (hcd
);
1194 unsigned long flags
;
1195 struct ehci_qh
*qh
, *tmp
;
1197 /* ASSERT: any requests/urbs are being unlinked */
1198 /* ASSERT: nobody can be submitting urbs for this any more */
1201 spin_lock_irqsave (&ehci
->lock
, flags
);
1206 /* endpoints can be iso streams. for now, we don't
1207 * accelerate iso completions ... so spin a while.
1209 if (qh
->hw
== NULL
) {
1210 ehci_vdbg (ehci
, "iso delay\n");
1214 if (!HC_IS_RUNNING (hcd
->state
))
1215 qh
->qh_state
= QH_STATE_IDLE
;
1216 switch (qh
->qh_state
) {
1217 case QH_STATE_LINKED
:
1218 case QH_STATE_COMPLETING
:
1219 for (tmp
= ehci
->async
->qh_next
.qh
;
1221 tmp
= tmp
->qh_next
.qh
)
1223 /* periodic qh self-unlinks on empty, and a COMPLETING qh
1224 * may already be unlinked.
1227 unlink_async(ehci
, qh
);
1229 case QH_STATE_UNLINK
: /* wait for hw to finish? */
1230 case QH_STATE_UNLINK_WAIT
:
1232 spin_unlock_irqrestore (&ehci
->lock
, flags
);
1233 schedule_timeout_uninterruptible(1);
1235 case QH_STATE_IDLE
: /* fully unlinked */
1236 if (list_empty (&qh
->qtd_list
)) {
1240 /* else FALL THROUGH */
1242 /* caller was supposed to have unlinked any requests;
1243 * that's not our job. just leak this memory.
1245 ehci_err (ehci
, "qh %p (#%02x) state %d%s\n",
1246 qh
, ep
->desc
.bEndpointAddress
, qh
->qh_state
,
1247 list_empty (&qh
->qtd_list
) ? "" : "(has tds)");
1252 spin_unlock_irqrestore (&ehci
->lock
, flags
);
1256 static int ehci_get_frame (struct usb_hcd
*hcd
)
1258 struct ehci_hcd
*ehci
= hcd_to_ehci (hcd
);
1259 return (ehci_readl(ehci
, &ehci
->regs
->frame_index
) >> 3) %
1260 ehci
->periodic_size
;
1263 /*-------------------------------------------------------------------------*/
1265 MODULE_DESCRIPTION(DRIVER_DESC
);
1266 MODULE_AUTHOR (DRIVER_AUTHOR
);
1267 MODULE_LICENSE ("GPL");
1270 #include "ehci-pci.c"
1271 #define PCI_DRIVER ehci_pci_driver
1274 #ifdef CONFIG_USB_EHCI_FSL
1275 #include "ehci-fsl.c"
1276 #define PLATFORM_DRIVER ehci_fsl_driver
1279 #ifdef CONFIG_SOC_AU1200
1280 #include "ehci-au1xxx.c"
1281 #define PLATFORM_DRIVER ehci_hcd_au1xxx_driver
1284 #ifdef CONFIG_PPC_PS3
1285 #include "ehci-ps3.c"
1286 #define PS3_SYSTEM_BUS_DRIVER ps3_ehci_driver
1289 #ifdef CONFIG_440EPX
1290 #include "ehci-ppc-soc.c"
1291 #define PLATFORM_DRIVER ehci_ppc_soc_driver
1294 #if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
1295 !defined(PS3_SYSTEM_BUS_DRIVER)
1296 #error "missing bus glue for ehci-hcd"
1299 static int __init
ehci_hcd_init(void)
1306 printk(KERN_INFO
"%s: " DRIVER_DESC
"\n", hcd_name
);
1307 set_bit(USB_EHCI_LOADED
, &usb_hcds_loaded
);
1308 if (test_bit(USB_UHCI_LOADED
, &usb_hcds_loaded
) ||
1309 test_bit(USB_OHCI_LOADED
, &usb_hcds_loaded
))
1310 printk(KERN_WARNING
"Warning! ehci_hcd should always be loaded"
1311 " before uhci_hcd and ohci_hcd, not after\n");
1313 pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd sitd %Zd\n",
1315 sizeof(struct ehci_qh
), sizeof(struct ehci_qtd
),
1316 sizeof(struct ehci_itd
), sizeof(struct ehci_sitd
));
1319 ehci_debug_root
= debugfs_create_dir("ehci", NULL
);
1320 if (!ehci_debug_root
) {
1326 #ifdef PLATFORM_DRIVER
1327 retval
= platform_driver_register(&PLATFORM_DRIVER
);
1330 debugfs_remove(ehci_debug_root
);
1331 ehci_debug_root
= NULL
;
1338 retval
= pci_register_driver(&PCI_DRIVER
);
1341 debugfs_remove(ehci_debug_root
);
1342 ehci_debug_root
= NULL
;
1344 #ifdef PLATFORM_DRIVER
1345 platform_driver_unregister(&PLATFORM_DRIVER
);
1351 #ifdef PS3_SYSTEM_BUS_DRIVER
1352 retval
= ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER
);
1355 debugfs_remove(ehci_debug_root
);
1356 ehci_debug_root
= NULL
;
1358 #ifdef PLATFORM_DRIVER
1359 platform_driver_unregister(&PLATFORM_DRIVER
);
1362 pci_unregister_driver(&PCI_DRIVER
);
1365 clear_bit(USB_EHCI_LOADED
, &usb_hcds_loaded
);
1372 module_init(ehci_hcd_init
);
1374 static void __exit
ehci_hcd_cleanup(void)
1376 #ifdef PLATFORM_DRIVER
1377 platform_driver_unregister(&PLATFORM_DRIVER
);
1380 pci_unregister_driver(&PCI_DRIVER
);
1382 #ifdef PS3_SYSTEM_BUS_DRIVER
1383 ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER
);
1386 debugfs_remove(ehci_debug_root
);
1388 clear_bit(USB_EHCI_LOADED
, &usb_hcds_loaded
);
1390 module_exit(ehci_hcd_cleanup
);