/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
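/*
 * Illustrative usage (a sketch, mirroring xhci_halt() below): to wait for
 * the controller to report halted, poll the STS_HALT bit of the status
 * register until it is set:
 *
 *	handshake(xhci, &xhci->op_regs->status,
 *			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 */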
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}
/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 microframes of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);
	return handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
}
/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	return ret;
}
/*
 * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	/* XXX: Why does EHCI set this here?  Shouldn't other code do this? */
	xhci_to_hcd(xhci)->state = HC_STATE_HALT;

	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 250 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}
/* Set up MSI-X table for entry 0 (may claim other entries later) */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	xhci->msix_count = 0;
	/* XXX: did I do this right?  ixgbe does kcalloc for more than one */
	xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}
	xhci->msix_entries[0].entry = 0;

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	/*
	 * Pass the xhci pointer value as the request_irq "cookie".
	 * If more irqs are added, this will need to be unique for each one.
	 */
	ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
			"xHCI", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
		goto disable_msix;
	}
	xhci_dbg(xhci, "Finished setting up MSI-X\n");
	return 0;

disable_msix:
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}
/* XXX: code duplication; can xhci_setup_msix call this? */
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (!xhci->msix_entries)
		return;

	free_irq(xhci->msix_entries[0].vector, xhci);
	pci_disable_msix(pdev);
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
}
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}
/*
 * Called in interrupt context when there might be work
 * queued on the event ring
 *
 * xhci->lock must be held by caller.
 */
static void xhci_work(struct xhci_hcd *xhci)
{
	u32 temp;
	u64 temp_64;

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp |= STS_EINT;
	xhci_writel(xhci, temp, &xhci->op_regs->status);
	/* FIXME when MSI-X is supported and there are multiple vectors */
	/* Clear the MSI-X event interrupt status */

	/* Acknowledge the interrupt */
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	temp |= 0x3;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
	/* Flush posted writes */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);

	if (xhci->xhc_state & XHCI_STATE_DYING)
		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
				"Shouldn't IRQs be disabled?\n");
	else
		/* FIXME this should be a delayed service routine
		 * that clears the EHB.
		 */
		xhci_handle_event(xhci);

	/* Clear the event handler busy flag (RW1C); the event ring should be empty. */
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
	/* Flush posted writes -- FIXME is this necessary? */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);
}
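/*
 * Note: the STS_EINT bit of the status register and the interrupt
 * pending (IP) bit of the interrupter's irq_pending (IMAN) register are
 * both "write 1 to clear" (RW1C), which is why the values read above are
 * written straight back: the set bits acknowledge the interrupt rather
 * than re-asserting it.
 */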
/*-------------------------------------------------------------------------*/

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 temp, temp2;
	union xhci_trb *trb;

	spin_lock(&xhci->lock);
	trb = xhci->event_ring->dequeue;
	/* Check if the xHC generated the interrupt, or the irq is shared */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	if (temp == 0xffffffff && temp2 == 0xffffffff)
		goto hw_died;

	if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;
	}
	xhci_dbg(xhci, "op reg status = %08x\n", temp);
	xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
	xhci_dbg(xhci, "Event ring dequeue ptr:\n");
	xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
			(unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
			lower_32_bits(trb->link.segment_ptr),
			upper_32_bits(trb->link.segment_ptr),
			(unsigned int) trb->link.intr_target,
			(unsigned int) trb->link.control);

	if (temp & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
hw_died:
		xhci_to_hcd(xhci)->state = HC_STATE_HALT;
		spin_unlock(&xhci->lock);
		return -ESHUTDOWN;
	}

	xhci_work(xhci);
	spin_unlock(&xhci->lock);

	return IRQ_HANDLED;
}
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
		}
	}

	if (xhci->noops_submitted != NUM_TEST_NOOPS)
		if (xhci_setup_one_noop(xhci))
			xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	void (*doorbell)(struct xhci_hcd *) = NULL;

	hcd->uses_new_polling = 1;

	xhci_dbg(xhci, "xhci_run\n");
#if 0	/* FIXME: MSI not setup yet */
	/* Do this at the very last minute */
	ret = xhci_setup_msix(xhci);
	if (!ret)
		return ret;

	return -ENOSYS;
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	hcd->state = HC_STATE_RUNNING;
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	if (NUM_TEST_NOOPS > 0)
		doorbell = xhci_setup_one_noop(xhci);
	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}

	xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
	if (doorbell)
		(*doorbell)(xhci);
	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run\n");
	return 0;
}
/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

#if 0	/* No MSI yet */
	xhci_cleanup_msix(xhci);
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}
/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

#if 0
	xhci_cleanup_msix(xhci);
#endif

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}
/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 *
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
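/*
 * Worked example (illustrative): bulk ep 0x81 (ep 1 IN) yields
 * index = (1 * 2) + 1 - 1 = 2; bulk ep 0x02 (ep 2 OUT) yields
 * index = (2 * 2) + 0 - 1 = 3; the default control endpoint 0 yields
 * index = (0 * 2) = 0.
 */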
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}
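/*
 * Continuing the example above: endpoint index 2 maps to control context
 * flag 1 << (2 + 1) = 0x8, and index 3 maps to 1 << 4 = 0x10; bit 0 is
 * reserved for the slot context and bit 1 for endpoint 0.
 */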
/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, const char *func) {
	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}
	if (!udev->slot_id) {
		printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
				func);
		return -EINVAL;
	}
	return 1;
}
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);
/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2);
	max_packet_size = urb->dev->ep0.desc.wMaxPacketSize;
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size);

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = EP0_FLAG;
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = SLOT_FLAG;
	}
	return ret;
}
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!xhci->devs || !xhci->devs[slot_id]) {
		if (!in_interrupt())
			dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
		ret = -EINVAL;
		goto exit;
	}
	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}
	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0)
				return ret;
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		ret = -EINVAL;
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return -ESHUTDOWN;
}
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret;
	u32 temp;
	struct xhci_hcd *xhci;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		td = (struct xhci_td *) urb->hcpriv;

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
		kfree(td);
		return ret;
	}
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	xhci_dbg(xhci, "Cancel URB %p\n", urb);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	xhci_dbg(xhci, "Endpoint ring:\n");
	xhci_debug_ring(xhci, ep_ring);
	td = (struct xhci_td *) urb->hcpriv;

	list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);

	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
			ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= drop_flag;
	new_drop_flags = ctrl_ctx->drop_flags;

	ctrl_ctx->add_flags &= ~drop_flag;
	new_add_flags = ctrl_ctx->add_flags;

	last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= ~LAST_CTX_MASK;
		slot_ctx->dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = slot_ctx->dev_info;

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
				udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= added_ctxs;
	new_add_flags = ctrl_ctx->add_flags;

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = ctrl_ctx->drop_flags;

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= ~LAST_CTX_MASK;
		slot_ctx->dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = slot_ctx->dev_info;

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= ~LAST_CTX_MASK;
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= LAST_CTX(1);
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, int *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_ENOMEM:
		dev_warn(&udev->dev, "Not enough host controller resources "
				"for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth "
				"for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
				"add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, int *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_EINVAL:
		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
				"context command.\n");
		ret = -EINVAL;
		break;
	case COMP_EBADSLT:
		dev_warn(&udev->dev, "WARN: slot not enabled for "
				"evaluate context command.\n");
	case COMP_CTX_STATE:
		dev_warn(&udev->dev, "WARN: invalid context state for "
				"evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful evaluate context command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}
/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct xhci_command *command,
		bool ctx_change, bool must_succeed)
{
	int ret;
	int timeleft;
	unsigned long flags;
	struct xhci_container_ctx *in_ctx;
	struct completion *cmd_completion;
	int *cmd_status;
	struct xhci_virt_device *virt_dev;

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];
	if (command) {
		in_ctx = command->in_ctx;
		cmd_completion = command->completion;
		cmd_status = &command->status;
		command->command_trb = xhci->cmd_ring->enqueue;
		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
	} else {
		in_ctx = virt_dev->in_ctx;
		cmd_completion = &virt_dev->cmd_completion;
		cmd_status = &virt_dev->cmd_status;
	}
	init_completion(cmd_completion);

	if (!ctx_change)
		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	else
		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
				udev->slot_id);
	if (ret < 0) {
		if (command)
			list_del(&command->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
		return -ENOMEM;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
	timeleft = wait_for_completion_interruptible_timeout(
			cmd_completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for %s command\n",
				timeleft == 0 ? "Timeout" : "Signal",
				ctx_change == 0 ?
					"configure endpoint" :
					"evaluate context");
		/* FIXME cancel the configure endpoint command */
		return -ETIME;
	}

	if (!ctx_change)
		return xhci_configure_endpoint_result(xhci, udev, cmd_status);
	return xhci_evaluate_context_result(xhci, udev, cmd_status);
}
/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);

	if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags |= SLOT_FLAG;
	ctrl_ctx->add_flags &= ~EP0_FLAG;
	ctrl_ctx->drop_flags &= ~SLOT_FLAG;
	ctrl_ctx->drop_flags &= ~EP0_FLAG;
	xhci_dbg(xhci, "New Input Control Context:\n");
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

	ret = xhci_configure_endpoint(xhci, udev, NULL,
			false, false);
	if (ret) {
		/* Callee should call reset_bandwidth() */
		return ret;
	}

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

	xhci_zero_in_ctx(xhci, virt_dev);
	/* Install new rings and free or cache any old rings */
	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].new_ring)
			continue;
		/* Only cache or free the old ring if it exists.
		 * It may not if this is the first add of an endpoint.
		 */
		if (virt_dev->eps[i].ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
		}
		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
		virt_dev->eps[i].new_ring = NULL;
	}

	return ret;
}
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return;
	}
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for added endpoints */
	for (i = 0; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
			virt_dev->eps[i].new_ring = NULL;
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
}
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		u32 add_flags, u32 drop_flags)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ctrl_ctx->add_flags = add_flags;
	ctrl_ctx->drop_flags = drop_flags;
	xhci_slot_copy(xhci, in_ctx, out_ctx);
	ctrl_ctx->add_flags |= SLOT_FLAG;

	xhci_dbg(xhci, "Input Context:\n");
	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	dma_addr_t addr;

	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ep_index);
	in_ctx = xhci->devs[slot_id]->in_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
			deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit config ep after "
				"reset ep command\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
				deq_state->new_deq_seg,
				deq_state->new_deq_ptr);
		return;
	}
	ep_ctx->deq = addr | deq_state->new_cycle_state;

	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
}
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
		struct usb_device *udev, unsigned int ep_index)
{
	struct xhci_dequeue_state deq_state;
	struct xhci_virt_ep *ep;

	xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
	xhci_find_new_dequeue_state(xhci, udev->slot_id,
			ep_index, ep->stopped_stream, ep->stopped_td,
			&deq_state);

	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg(xhci, "Queueing new dequeue state\n");
		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, ep->stopped_stream, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and the
		 * reset endpoint completion!
		 * XXX: No idea how this hardware will react when stream rings
		 * are enabled.
		 */
		xhci_dbg(xhci, "Setting up input context for "
				"configure endpoint command\n");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}
/* Deal with stalled endpoints.  The core should have sent the control message
 * to clear the halt condition.  However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */
void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int ep_index;
	unsigned long flags;
	int ret;
	struct xhci_virt_ep *virt_ep;

	xhci = hcd_to_xhci(hcd);
	udev = (struct usb_device *) ep->hcpriv;
	/* Called with a root hub endpoint (or an endpoint that wasn't added
	 * with xhci_add_endpoint()
	 */
	if (!ep->hcpriv)
		return;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	if (!virt_ep->stopped_td) {
		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
				ep->desc.bEndpointAddress);
		return;
	}
	if (usb_endpoint_xfer_control(&ep->desc)) {
		xhci_dbg(xhci, "Control endpoint stall already handled.\n");
		return;
	}

	xhci_dbg(xhci, "Queueing reset endpoint command\n");
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
	/*
	 * Can't change the ring dequeue pointer until it's transitioned to the
	 * stopped state, which is only upon a successful reset endpoint
	 * command.  Better hope that last command worked!
	 */
	if (!ret) {
		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
		kfree(virt_ep->stopped_td);
		xhci_ring_cmd_db(xhci);
	}
	virt_ep->stopped_td = NULL;
	virt_ep->stopped_trb = NULL;
	virt_ep->stopped_stream = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (ret)
		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		unsigned int slot_id)
{
	int ret;
	unsigned int ep_index;
	unsigned int ep_state;

	if (!ep)
		return -EINVAL;
	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, __func__);
	if (ret <= 0)
		return -EINVAL;
	if (ep->ss_ep_comp.bmAttributes == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
				" descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
	if (ep_state & EP_HAS_STREAMS ||
			ep_state & EP_GETTING_STREAMS) {
		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
				"already has streams set up.\n",
				ep->desc.bEndpointAddress);
		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
				"dynamic stream context array reallocation.\n");
		return -EINVAL;
	}
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
				"endpoint 0x%x; URBs are pending.\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}
	return 0;
}
static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
		unsigned int *num_streams, unsigned int *num_stream_ctxs)
{
	unsigned int max_streams;

	/* The stream context array size must be a power of two */
	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
	/*
	 * Find out how many primary stream array entries the host controller
	 * supports.  Later we may use secondary stream arrays (similar to 2nd
	 * level page entries), but that's an optional feature for xHCI host
	 * controllers.  xHCs must support at least 4 stream IDs.
	 */
	max_streams = HCC_MAX_PSA(xhci->hcc_params);
	if (*num_stream_ctxs > max_streams) {
		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
				max_streams);
		*num_stream_ctxs = max_streams;
		*num_streams = max_streams;
	}
}
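/*
 * Worked example (illustrative): a request for 9 stream IDs needs
 * roundup_pow_of_two(9) = 16 stream context entries; if HCC_MAX_PSA only
 * allows 8 entries, both the context count and the usable stream IDs are
 * clamped to 8.
 */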
/* Returns an error code if one of the endpoints already has streams.
 * This does not change any data structures, it only checks and gathers
 * information.
 */
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)
{
	unsigned int max_streams;
	unsigned int endpoint_flag;
	int i;
	int ret;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;

		max_streams = USB_SS_MAX_STREAMS(
				eps[i]->ss_ep_comp.bmAttributes);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams+1;
		}

		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
		*changed_ep_bitmask |= endpoint_flag;
	}
	return 0;
}
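/*
 * Worked example (illustrative): for bulk eps 0x81 (endpoint index 2) and
 * 0x02 (endpoint index 3), the changed endpoint bitmask comes out as
 * (1 << 3) | (1 << 4) = 0x18.
 */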
static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int ep_state;
	int i;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])
		return 0;

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		/* Are streams already being freed for the endpoint? */
		if (ep_state & EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are being disabled already.",
					eps[i]->desc.bEndpointAddress);
			return 0;
		}
		/* Are there actually any streams to free? */
		if (!(ep_state & EP_HAS_STREAMS) &&
				!(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are already disabled!",
					eps[i]->desc.bEndpointAddress);
			xhci_warn(xhci, "WARN xhci_free_streams() called "
					"with non-streams endpoint\n");
			return 0;
		}
		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
	}
	return changed_ep_bitmask;
}
/*
 * The USB device drivers use this function (through the HCD interface in USB
 * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
 * coordinate mass storage command queueing across multiple endpoints (basically
 * a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all endpoints.
 *
 * Don't allow the call to succeed if one endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Drivers may get fewer stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * stream IDs.
 */
int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int num_streams, gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	unsigned int ep_index;
	unsigned int num_stream_ctxs;
	unsigned long flags;
	u32 changed_ep_bitmask = 0;

	if (!eps)
		return -EINVAL;

	/* Add one to the number of streams requested to account for
	 * stream 0 that is reserved for xHCI usage.
	 */
	num_streams += 1;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
			num_streams);

	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	/* Check to make sure all endpoints are not already configured for
	 * streams.  While we're at it, find the maximum number of streams that
	 * all the endpoints will support and check for duplicate endpoints.
	 */
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
			num_eps, &num_streams, &changed_ep_bitmask);
	if (ret < 0) {
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	if (num_streams <= 1) {
		xhci_warn(xhci, "WARN: endpoints can't handle "
				"more than one stream.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}
	vdev = xhci->devs[udev->slot_id];
	/* Mark each endpoint as being in transition, so
	 * xhci_urb_enqueue() will reject all URBs.
	 */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Setup internal data structures and allocate HW data structures for
	 * streams (but don't install the HW structures in the input context
	 * until we're sure all memory allocation succeeded).
	 */
	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
			num_stream_ctxs, num_streams);

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
				num_stream_ctxs,
				num_streams, mem_flags);
		if (!vdev->eps[ep_index].stream_info)
			goto cleanup;
		/* Set maxPstreams in endpoint context and update deq ptr to
		 * point to stream context array.  FIXME
		 */
	}

	/* Set up the input context for a configure endpoint command. */
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);

		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
				vdev->eps[ep_index].stream_info);
	}
	/* Tell the HW to drop its old copy of the endpoint context info
	 * and add the updated copy from the input context.
	 */
	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);

	/* Issue and wait for the configure endpoint command */
	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
			false, false);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the old ring intact and free our internal streams data
	 * structure.
	 */
	if (ret < 0)
		goto cleanup;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
				udev->slot_id, ep_index);
		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
	}
	xhci_free_command(xhci, config_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Subtract 1 for stream 0, which drivers can't use */
	return num_streams - 1;

cleanup:
	/* If it didn't work, free the streams! */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
		xhci_endpoint_zero(xhci, vdev, eps[i]);
	}
	xhci_free_command(xhci, config_cmd);
	return -ENOMEM;
}
1803 /* Transition the endpoint from using streams to being a "normal" endpoint
1806 * Modify the endpoint context state, submit a configure endpoint command,
1807 * and free all endpoint rings for streams if that completes successfully.
1809 int xhci_free_streams(struct usb_hcd
*hcd
, struct usb_device
*udev
,
1810 struct usb_host_endpoint
**eps
, unsigned int num_eps
,
1814 struct xhci_hcd
*xhci
;
1815 struct xhci_virt_device
*vdev
;
1816 struct xhci_command
*command
;
1817 unsigned int ep_index
;
1818 unsigned long flags
;
1819 u32 changed_ep_bitmask
;
1821 xhci
= hcd_to_xhci(hcd
);
1822 vdev
= xhci
->devs
[udev
->slot_id
];
1824 /* Set up a configure endpoint command to remove the streams rings */
1825 spin_lock_irqsave(&xhci
->lock
, flags
);
1826 changed_ep_bitmask
= xhci_calculate_no_streams_bitmask(xhci
,
1827 udev
, eps
, num_eps
);
1828 if (changed_ep_bitmask
== 0) {
1829 spin_unlock_irqrestore(&xhci
->lock
, flags
);

	/* Use the xhci_command structure from the first endpoint.  We may have
	 * allocated too many, but the driver may call xhci_free_streams() for
	 * each endpoint it grouped into one call to xhci_alloc_streams().
	 */
	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
	command = vdev->eps[ep_index].stream_info->free_streams_command;
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
			EP_GETTING_NO_STREAMS;

		xhci_endpoint_copy(xhci, command->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
				&vdev->eps[ep_index]);
	}
	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Issue and wait for the configure endpoint command,
	 * which must succeed.
	 */
	ret = xhci_configure_endpoint(xhci, udev, command,
			false, true);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the streams rings intact.
	 */
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}

/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the default
 * control endpoint.  The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration.  If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish.  Remove all structures
 * associated with the endpoints that were disabled.  Clear the input device
 * structure?  Cache the rings?  Reset the control endpoint 0 max packet size?
 */
int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	int ret, i;
	unsigned long flags;
	struct xhci_hcd *xhci;
	unsigned int slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_command *reset_device_cmd;
	int timeleft;
	int last_freed_endpoint;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	slot_id = udev->slot_id;
	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
		xhci_dbg(xhci, "%s called with invalid slot ID %u\n",
				__func__, slot_id);
		return -EINVAL;
	}

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway.  Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */
	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}

	/* Attempt to submit the Reset Device command to the command ring */
	spin_lock_irqsave(&xhci->lock, flags);
	reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
	ret = xhci_queue_reset_device(xhci, slot_id);
	if (ret) {
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the Reset Device command to finish */
	timeleft = wait_for_completion_interruptible_timeout(
			reset_device_cmd->completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for reset device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		spin_lock_irqsave(&xhci->lock, flags);
		/* The timeout might have raced with the event ring handler, so
		 * only delete from the list if the item isn't poisoned.
		 */
		if (reset_device_cmd->cmd_list.next != LIST_POISON1)
			list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		ret = -ETIME;
		goto command_cleanup;
	}

	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
	 * unless we tried to reset a slot ID that wasn't enabled,
	 * or the device wasn't in the addressed or configured state.
	 */
	ret = reset_device_cmd->status;
	switch (ret) {
	case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
	case COMP_CTX_STATE: /* 0.96 completion code for same thing */
		xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
				slot_id,
				xhci_get_slot_state(xhci, virt_dev->out_ctx));
		xhci_info(xhci, "Not freeing device rings.\n");
		/* Don't treat this as an error.  May change my mind later. */
		ret = 0;
		goto command_cleanup;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful reset device command.\n");
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, ret))
			break;
		xhci_warn(xhci, "Unknown completion code %u for "
				"reset device command.\n", ret);
		ret = -EINVAL;
		goto command_cleanup;
	}

	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
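	/* virt_dev->eps[] index 0 is the default control endpoint, which the
	 * Reset Device command leaves enabled; indices 1-30 cover every other
	 * endpoint.
	 */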
	last_freed_endpoint = 1;
	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].ring)
			continue;
		xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
		last_freed_endpoint = i;
	}
	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
	ret = 0;

command_cleanup:
	xhci_free_command(xhci, reset_device_cmd);
	return ret;
}

/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *virt_dev;
	unsigned long flags;
	u32 state;
	int i;

	if (udev->slot_id == 0)
		return;
	virt_dev = xhci->devs[udev->slot_id];
	if (!virt_dev)
		return;

	/* Stop any wayward timer functions (which may grab the lock) */
	for (i = 0; i < 31; ++i) {
		virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
	}

	spin_lock_irqsave(&xhci->lock, flags);
	/* Don't disable the slot if the host controller is dead. */
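	/* A register read of all ones means the host has been hot-removed;
	 * XHCI_STATE_DYING means a fatal error already shut it down.  Either
	 * way, only free the driver's bookkeeping and skip the hardware.
	 */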
	state = xhci_readl(xhci, &xhci->op_regs->status);
	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci_free_virt_device(xhci, udev->slot_id);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	/*
	 * Event command completion handler will free any data structures
	 * associated with the slot.  XXX Can free sleep?
	 */
}

/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int timeleft;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
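	/* Enable Slot is queued with slot ID 0: the xHC picks the slot and
	 * reports it in the command completion event, which the event-ring
	 * handler saves in xhci->slot_id before completing xhci->addr_dev
	 * (waited on below).
	 */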
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* XXX: how much time for xHC slot assignment? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the enable slot request */
		return 0;
	}

	if (!xhci->slot_id) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		return 0;
	}
	/* xhci_alloc_virt_device() does not touch rings; no need to lock */
	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
		/* Disable slot, if we can do it without mem alloc */
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		spin_lock_irqsave(&xhci->lock, flags);
		if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
			xhci_ring_cmd_db(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}
	udev->slot_id = xhci->slot_id;
	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
	return 1;
}

/*
 * Issue an Address Device command (which will issue a SetAddress request to
 * the device).
 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
 * we should only issue and wait on one address command at the same time.
 *
 * We add one to the device address issued by the hardware because the USB core
 * uses address 1 for the root hubs (even though they're not really devices).
 */
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	unsigned long flags;
	int timeleft;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;

	if (!udev->slot_id) {
		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
		return -EINVAL;
	}

	virt_dev = xhci->devs[udev->slot_id];

	/* If this is a Set Address to an unconfigured device, setup ep 0 */
	if (!udev->config)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, assume the core has the device configured how it wants */
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
					udev->slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting the
	 * command on a timeout."
	 */
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the address device command */
		return -ETIME;
	}
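
	/* cmd_status holds the completion code copied from the command
	 * completion event by the event-ring handler before it completed
	 * xhci->addr_dev above.
	 */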
	switch (virt_dev->cmd_status) {
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
				udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to set address.\n");
		ret = -EPROTO;
		break;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful Address Device command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
		ret = -EINVAL;
		break;
	}
	if (ret)
		return ret;
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
			udev->slot_id,
			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
			(unsigned long long)
				xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
			(unsigned long long)virt_dev->out_ctx->dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
	/* Zero the input context control for later use */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
	/* XXX Meh, not sure if anyone else but choose_address uses this. */
	set_bit(udev->devnum, udev->bus->devmap.devicemap);

	return 0;
}

/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	ctrl_ctx->add_flags |= SLOT_FLAG;
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= DEV_HUB;
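	/* DEV_MTT marks a multi-TT hub: one transaction translator per
	 * downstream port rather than a single TT shared by the whole hub.
	 */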
	if (tt->multi)
		slot_ctx->dev_info |= DEV_MTT;
	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild);
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 */
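		/* tt->think_time is in ns (one FS bit time is ~83.3 ns, so
		 * 8 bit times ~= 666 ns); the USB core stores it as a multiple
		 * of 666 ns, so the divide-and-decrement below maps
		 * 666/1332/1998/2664 ns onto the 0-3 encoding above.
		 */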
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		slot_ctx->tt_info |= TT_THINK_TIME(think_time);
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}

int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size.  Why? */
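	/* MFINDEX counts 125 us microframes, so shifting right by three
	 * (dividing by 8) yields the 1 ms frame number the USB core expects.
	 */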
	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	int retval;

	retval = xhci_register_pci();
	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering PCI driver.\n");
		return retval;
	}

	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
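	/* Each check reads as (number of 32-bit words) * 32 bits / 8 bits
	 * per byte, i.e. the expected register-layout size in bytes.
	 */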
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
	return 0;
}
module_init(xhci_hcd_init);

static void __exit xhci_hcd_cleanup(void)
{
	xhci_unregister_pci();
}
module_exit(xhci_hcd_cleanup);