/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/irq.h>
#include <linux/module.h>

#include "xhci.h"
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes:  "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32	result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
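/*
 * Example (a sketch of typical usage, mirroring xhci_halt() below): wait up
 * to XHCI_MAX_HALT_USEC microseconds for the HCHalted status bit to be set:
 *
 *	handshake(xhci, &xhci->op_regs->status,
 *			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 */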
/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 microframes of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	xhci_dbg(xhci, "// Halt the HC\n");
	/* Disable all interrupts from the host controller */
	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);

	return handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
}
/*
 * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	BUG_ON((state & STS_HALT) == 0);

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	/* XXX: Why does EHCI set this here?  Shouldn't other code do this? */
	xhci_to_hcd(xhci)->state = HC_STATE_HALT;

	return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
}
/*
 * Stop the HC from processing the endpoint queues.
 */
static void xhci_quiesce(struct xhci_hcd *xhci)
{
	/*
	 * Queues are per endpoint, so we need to disable an endpoint or slot.
	 *
	 * To disable a slot, we need to insert a disable slot command on the
	 * command ring and ring the doorbell.  This will also free any internal
	 * resources associated with the slot (which might not be what we want).
	 *
	 * A Release Endpoint command sounds better - doesn't free internal HC
	 * memory, but removes the endpoints from the schedule and releases the
	 * bandwidth, disables the doorbells, and clears the endpoint enable
	 * flag.  Usually used prior to a set interface command.
	 *
	 * TODO: Implement after command ring code is done.
	 */
	BUG_ON(!HC_IS_RUNNING(xhci_to_hcd(xhci)->state));
	xhci_dbg(xhci, "Finished quiescing -- code not written yet\n");
}
/* Set up MSI-X table for entry 0 (may claim other entries later) */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	xhci->msix_count = 0;
	/* XXX: did I do this right?  ixgbe does kcalloc for more than one */
	xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}
	xhci->msix_entries[0].entry = 0;

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	/*
	 * Pass the xhci pointer value as the request_irq "cookie".
	 * If more irqs are added, this will need to be unique for each one.
	 */
	ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
			"xHCI", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
		goto disable_msix;
	}
	xhci_dbg(xhci, "Finished setting up MSI-X\n");
	return 0;

disable_msix:
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}
/* XXX: code duplication; can xhci_setup_msix call this? */
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (!xhci->msix_entries)
		return;

	free_irq(xhci->msix_entries[0].vector, xhci);
	pci_disable_msix(pdev);
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
}
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}
/*
 * Called in interrupt context when there might be work
 * queued on the event ring
 *
 * xhci->lock must be held by caller.
 */
static void xhci_work(struct xhci_hcd *xhci)
{
	u32 temp;

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp |= STS_EINT;
	xhci_writel(xhci, temp, &xhci->op_regs->status);
	/* FIXME when MSI-X is supported and there are multiple vectors */
	/* Clear the MSI-X event interrupt status */

	/* Acknowledge the interrupt */
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
	/* Flush posted writes */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);

	/* FIXME this should be a delayed service routine that clears the EHB */

	/* Clear the event handler busy flag; the event ring should be empty. */
	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
	xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]);
	/* Flush posted writes -- FIXME is this necessary? */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);
}
/*-------------------------------------------------------------------------*/

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 temp, temp2;

	spin_lock(&xhci->lock);
	/* Check if the xHC generated the interrupt, or the irq is shared */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;
	}

	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
		xhci_to_hcd(xhci)->state = HC_STATE_HALT;
		spin_unlock(&xhci->lock);
		return -ESHUTDOWN;
	}

	xhci_work(xhci);
	spin_unlock(&xhci->lock);

	return IRQ_HANDLED;
}
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
void event_ring_work(unsigned long arg)
{
	unsigned long flags;
	u32 temp;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
	temp &= ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			if (xhci->devs[i]->ep_rings[j]) {
				xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
				xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg);
			}
		}
	}

	if (xhci->noops_submitted != NUM_TEST_NOOPS)
		if (setup_one_noop(xhci))
			ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	void (*doorbell)(struct xhci_hcd *) = NULL;

	hcd->uses_new_polling = 1;

	xhci_dbg(xhci, "xhci_run\n");
#if 0	/* FIXME: MSI not setup yet */
	/* Do this at the very last minute */
	ret = xhci_setup_msix(xhci);
	if (!ret)
		return ret;

	return -ENOSYS;
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	hcd->state = HC_STATE_RUNNING;
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter 0x%x"
			" by writing 0x%x to irq_pending\n",
			(unsigned int) xhci->ir_set,
			(unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	if (NUM_TEST_NOOPS > 0)
		doorbell = setup_one_noop(xhci);

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]);
	xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp);
	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
	temp &= ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);
	/* Flush PCI posted writes */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	xhci_dbg(xhci, "// @%x = 0x%x\n",
			(unsigned int) &xhci->op_regs->command, temp);
	if (doorbell)
		(*doorbell)(xhci);

	xhci_dbg(xhci, "Finished xhci_run\n");
	return 0;
}
/*
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	if (HC_IS_RUNNING(hcd->state))
		xhci_quiesce(xhci);
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

#if 0	/* No MSI yet */
	xhci_cleanup_msix(xhci);
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}
/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

#if 0	/* No MSI yet */
	xhci_cleanup_msix(xhci);
#endif

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}
/*-------------------------------------------------------------------------*/

/*
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
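/*
 * Worked example (added for illustration): bulk IN endpoint 0x81 has
 * epnum = 1 and direction = IN, so index = (1 * 2) + 1 - 1 = 2; bulk OUT
 * endpoint 0x02 has epnum = 2 and direction = OUT, so
 * index = (2 * 2) + 0 - 1 = 3.  The default control endpoint 0 maps to
 * index 0.
 */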
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}
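/*
 * Worked example (added for illustration): endpoint 0x81 has endpoint index 2
 * (see above), so its flag is 1 << (2 + 1) = 0x8, i.e. bit 3 of the add/drop
 * flags in the input control context.
 */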
/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
static inline unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}
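/*
 * Continuing the example above (added for illustration): with
 * added_ctxs = 0b1000, fls() returns 4 and this helper returns 3, which the
 * add/drop endpoint code below feeds to LAST_CTX() when it updates the number
 * of valid entries in the slot context.
 */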
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, const char *func) {
	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}
	if (!udev->slot_id) {
		printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
				func);
		return -EINVAL;
	}
	return 1;
}
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	spin_lock_irqsave(&xhci->lock, flags);
	if (!xhci->devs || !xhci->devs[slot_id]) {
		if (!in_interrupt())
			dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
		ret = -EINVAL;
		goto exit;
	}
	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}
	if (usb_endpoint_xfer_control(&urb->ep->desc))
		ret = queue_ctrl_tx(xhci, mem_flags, urb, slot_id, ep_index);
	else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
		ret = queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
	else
		ret = -EINVAL;
exit:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret;
	struct xhci_hcd *xhci;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;

	xhci_dbg(xhci, "Cancel URB 0x%x\n", (unsigned int) urb);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
	td = (struct xhci_td *) urb->hcpriv;

	ep_ring->cancels_pending++;
	list_add_tail(&td->cancelled_td_list, &ep_ring->cancelled_td_list);
	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (ep_ring->cancels_pending == 1) {
		queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
		ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned long flags;
	struct xhci_hcd *xhci;
	struct xhci_device_control *in_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "%s called for udev %#x\n", __func__, (unsigned int) udev);

	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
			in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %#x\n",
				__func__, (unsigned int) ep);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}

	in_ctx->drop_flags |= drop_flag;
	new_drop_flags = in_ctx->drop_flags;

	in_ctx->add_flags &= ~drop_flag;
	new_add_flags = in_ctx->add_flags;

	last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
		in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
		in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = in_ctx->slot.dev_info;

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned long flags;
	struct xhci_hcd *xhci;
	struct xhci_device_control *in_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %#x\n",
				__func__, (unsigned int) ep);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}

	if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id], udev, ep) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	in_ctx->add_flags |= added_ctxs;
	new_add_flags = in_ctx->add_flags;

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = in_ctx->drop_flags;

	/* Update the last valid endpoint context, if we just added one past */
	if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
		in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
		in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = in_ctx->slot.dev_info;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
{
	struct xhci_ep_ctx *ep_ctx;
	int i;

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	virt_dev->in_ctx->drop_flags = 0;
	virt_dev->in_ctx->add_flags = 0;
	virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
	/* Endpoint 0 is always valid */
	virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1);
	for (i = 1; i < 31; ++i) {
		ep_ctx = &virt_dev->in_ctx->ep[i];
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
	}
}
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	int timeleft;
	unsigned long flags;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);

	spin_lock_irqsave(&xhci->lock, flags);
	if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}
	xhci_dbg(xhci, "%s called for udev %#x\n", __func__, (unsigned int) udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	virt_dev->in_ctx->add_flags |= SLOT_FLAG;
	virt_dev->in_ctx->add_flags &= ~EP0_FLAG;
	virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG;
	virt_dev->in_ctx->drop_flags &= ~EP0_FLAG;
	xhci_dbg(xhci, "New Input Control Context:\n");
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));

	ret = queue_configure_endpoint(xhci, virt_dev->in_ctx_dma, udev->slot_id);
	if (ret < 0) {
		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}
	ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
	timeleft = wait_for_completion_interruptible_timeout(
			&virt_dev->cmd_completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for configure endpoint command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the configure endpoint command */
		return -ETIME;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	switch (virt_dev->cmd_status) {
	case COMP_ENOMEM:
		dev_warn(&udev->dev, "Not enough host controller resources "
				"for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth "
				"for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		ret = -EINVAL;
		break;
	}
	if (ret) {
		/* Callee should call reset_bandwidth() */
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma,
			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));

	xhci_zero_in_ctx(virt_dev);
	/* Free any old rings */
	for (i = 1; i < 31; ++i) {
		if (virt_dev->new_ep_rings[i]) {
			xhci_ring_free(xhci, virt_dev->ep_rings[i]);
			virt_dev->ep_rings[i] = virt_dev->new_ep_rings[i];
			virt_dev->new_ep_rings[i] = NULL;
		}
	}

	spin_unlock_irqrestore(&xhci->lock, flags);

	return ret;
}
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	unsigned long flags;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	spin_lock_irqsave(&xhci->lock, flags);
	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	xhci_dbg(xhci, "%s called for udev %#x\n", __func__, (unsigned int) udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for added endpoints */
	for (i = 0; i < 31; ++i) {
		if (virt_dev->new_ep_rings[i]) {
			xhci_ring_free(xhci, virt_dev->new_ep_rings[i]);
			virt_dev->new_ep_rings[i] = NULL;
		}
	}
	xhci_zero_in_ctx(virt_dev);
	spin_unlock_irqrestore(&xhci->lock, flags);
}
/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;

	if (udev->slot_id == 0)
		return;

	spin_lock_irqsave(&xhci->lock, flags);
	if (queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return;
	}
	ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	/*
	 * Event command completion handler will free any data structures
	 * associated with the slot
	 */
}
/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int timeleft;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return 0;
	}
	ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* XXX: how much time for xHC slot assignment? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the enable slot request */
		return 0;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (!xhci->slot_id) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}
	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
		/* Disable slot, if we can do it without mem alloc */
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		if (!queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
			ring_cmd_db(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}
	udev->slot_id = xhci->slot_id;
	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
	spin_unlock_irqrestore(&xhci->lock, flags);
	return 1;
}
/*
 * Issue an Address Device command (which will issue a SetAddress request to
 * the device).
 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
 * we should only issue and wait on one address command at the same time.
 *
 * We add one to the device address issued by the hardware because the USB core
 * uses address 1 for the root hubs (even though they're not really devices).
 */
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	unsigned long flags;
	int timeleft;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 temp;

	if (!udev->slot_id) {
		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
		return -EINVAL;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];

	/* If this is a Set Address to an unconfigured device, setup ep 0 */
	if (!udev->config)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, assume the core has the device configured how it wants */

	ret = queue_address_device(xhci, virt_dev->in_ctx_dma, udev->slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return ret;
	}
	ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting the
	 * command on a timeout.
	 */
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the address device command */
		return -ETIME;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	switch (virt_dev->cmd_status) {
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
				udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to set address.\n");
		ret = -EPROTO;
		break;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful Address Device command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		ret = -EINVAL;
		break;
	}
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]);
	xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp);
	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]);
	xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%08x = %#08x\n",
			udev->slot_id,
			(unsigned int) &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%08x = %#08x\n",
			udev->slot_id,
			(unsigned int) &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1],
			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]);
	xhci_dbg(xhci, "Output Context DMA address = %#08x\n",
			virt_dev->out_ctx_dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
	/* Zero the input context control for later use */
	virt_dev->in_ctx->add_flags = 0;
	virt_dev->in_ctx->drop_flags = 0;
	/* Mirror flags in the output context for future ep enable/disable */
	virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
	virt_dev->out_ctx->drop_flags = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
	/* XXX Meh, not sure if anyone else but choose_address uses this. */
	set_bit(udev->devnum, udev->bus->devmap.devicemap);

	return 0;
}
int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size.  Why? */
	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}
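/*
 * Note (added for clarity): MFINDEX counts 125 microsecond microframes, so
 * shifting right by three (dividing by eight) converts it into the 1 ms frame
 * number that the USB core's get_frame_number interface expects.
 */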
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
static int __init xhci_hcd_init(void)
{
	int retval;

	retval = xhci_register_pci();
	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering PCI driver.");
		return retval;
	}
	return 0;
}
module_init(xhci_hcd_init);
static void __exit xhci_hcd_cleanup(void)
{
	xhci_unregister_pci();
}
module_exit(xhci_hcd_cleanup);