/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}
92 * Force HC into halt state.
94 * Disable any IRQs and clear the run/stop bit.
95 * HC will complete any current and actively pipelined transactions, and
96 * should halt within 16 microframes of the run/stop bit being cleared.
97 * Read HC Halted bit in the status register to see when the HC is finished.
98 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
100 int xhci_halt(struct xhci_hcd
*xhci
)
102 xhci_dbg(xhci
, "// Halt the HC\n");
105 return handshake(xhci
, &xhci
->op_regs
->status
,
106 STS_HALT
, STS_HALT
, XHCI_MAX_HALT_USEC
);
/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	return ret;
}
/*
 * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	/* XXX: Why does EHCI set this here?  Shouldn't other code do this? */
	xhci_to_hcd(xhci)->state = HC_STATE_HALT;

	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 250 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}
static irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
{
	set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);

	return xhci_irq(hcd);
}
/*
 * Free all IRQs that have been requested.
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq >= 0)
		return;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			if (xhci->msix_entries[i].vector)
				free_irq(xhci->msix_entries[i].vector,
						xhci_to_hcd(xhci));
	} else if (pdev->irq >= 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));
}
/*
 * Set up MSI.
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_err(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
			0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
	}

	return ret;
}
/*
 * Set up MSI-X.
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported.
	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
	 *   with the max number of interrupters based on the xhci HCSPARAMS1.
	 * - num_online_cpus: one MSI-X vector per online CPU core, plus one
	 *   additional vector to ensure an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
			GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				(irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	return ret;

disable_msix:
	xhci_err(xhci, "disable MSI-X interrupt\n");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}
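/*
 * Worked example, not from the original source: on a 4-core machine whose
 * controller reports HCS_MAX_INTRS of 8 in HCSPARAMS1, the code above
 * requests min(4 + 1, 8) = 5 MSI-X vectors; if the controller only
 * advertised 3 interrupters, the count would be clamped to 3.
 */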
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}
}
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}
/*
 * Called in interrupt context when there might be work
 * queued on the event ring
 *
 * xhci->lock must be held by caller.
 */
static void xhci_work(struct xhci_hcd *xhci)
{
	u32 temp;
	u64 temp_64;

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp |= STS_EINT;
	xhci_writel(xhci, temp, &xhci->op_regs->status);
	/* FIXME when MSI-X is supported and there are multiple vectors */
	/* Clear the MSI-X event interrupt status */

	/* Acknowledge the interrupt */
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	temp |= 0x3;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
	/* Flush posted writes */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);

	if (xhci->xhc_state & XHCI_STATE_DYING)
		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
				"Shouldn't IRQs be disabled?\n");
	else
		/* FIXME this should be a delayed service routine
		 * that clears the EHB.
		 */
		xhci_handle_event(xhci);

	/* Clear the event handler busy flag (RW1C); the event ring should be empty. */
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
	/* Flush posted writes -- FIXME is this necessary? */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);
}
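/*
 * Note on the "write 1 to clear" (RW1C) semantics used above, added for
 * illustration: writing the value just read back to the status register
 * clears exactly the event bits that were set, e.g. if a read returns
 * STS_EINT set, writing that same value back acknowledges the interrupt
 * without disturbing the other status bits.
 */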
/*-------------------------------------------------------------------------*/

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 temp, temp2;
	union xhci_trb *trb;

	spin_lock(&xhci->lock);
	trb = xhci->event_ring->dequeue;
	/* Check if the xHC generated the interrupt, or the irq is shared */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	if (temp == 0xffffffff && temp2 == 0xffffffff)
		goto hw_died;

	if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;
	}
	xhci_dbg(xhci, "op reg status = %08x\n", temp);
	xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
	xhci_dbg(xhci, "Event ring dequeue ptr:\n");
	xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
			(unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
			lower_32_bits(trb->link.segment_ptr),
			upper_32_bits(trb->link.segment_ptr),
			(unsigned int) trb->link.intr_target,
			(unsigned int) trb->link.control);

	if (temp & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
hw_died:
		xhci_to_hcd(xhci)->state = HC_STATE_HALT;
		spin_unlock(&xhci->lock);
		return -ESHUTDOWN;
	}

	xhci_work(xhci);
	spin_unlock(&xhci->lock);

	return IRQ_HANDLED;
}
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j)
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
	}

	if (xhci->noops_submitted != NUM_TEST_NOOPS)
		if (xhci_setup_one_noop(xhci))
			xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	u32 ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	void (*doorbell)(struct xhci_hcd *) = NULL;

	hcd->uses_new_polling = 1;

	xhci_dbg(xhci, "xhci_run\n");
	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = -1;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (ret) {
		/* fall back to legacy interrupt */
		ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
				hcd->irq_descr, hcd);
		if (ret) {
			xhci_err(xhci, "request interrupt %d failed\n",
					pdev->irq);
			return ret;
		}
		hcd->irq = pdev->irq;
	}

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	hcd->state = HC_STATE_RUNNING;
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	if (NUM_TEST_NOOPS > 0)
		doorbell = xhci_setup_one_noop(xhci);
	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}

	if (doorbell)
		(*doorbell)(xhci);
	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run\n");
	return 0;
}
/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	xhci_reset(xhci);
	xhci_cleanup_msix(xhci);
	spin_unlock_irq(&xhci->lock);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}
/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	xhci_cleanup_msix(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}
/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 *
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}
/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid endpoint,
 * we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}
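/*
 * Worked example tying the three helpers above together, added for
 * illustration: for ep 0x81 (endpoint number 1, IN),
 * xhci_get_endpoint_index() returns (1 * 2) + 1 - 1 = 2, so
 * xhci_get_endpoint_flag() returns 1 << (2 + 1) = 0b1000, and
 * xhci_last_valid_endpoint(0b1000) returns fls(0b1000) - 1 = 3.
 * The matching OUT endpoint 0x01 would get index (1 * 2) + 0 - 1 = 1.
 */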
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, const char *func) {
	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}
	if (!udev->slot_id) {
		printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
				func);
		return -EINVAL;
	}
	return 1;
}
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);
/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2);
	max_packet_size = urb->dev->ep0.desc.wMaxPacketSize;
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size);

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = EP0_FLAG;
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = SLOT_FLAG;
	}
	return ret;
}
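/*
 * Concrete scenario, added for illustration: a full-speed device often
 * reports bMaxPacketSize0 = 64 in byte 7 of its device descriptor, while
 * enumeration may have begun with the minimum of 8.  Once the USB core
 * updates udev->ep0, the hardware's copy (still 8) no longer matches, and
 * the evaluate context command issued above brings the xHC back in sync.
 */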
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!xhci->devs || !xhci->devs[slot_id]) {
		if (!in_interrupt())
			dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
		ret = -EINVAL;
		goto exit;
	}
	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}
	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0)
				return ret;
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		ret = -EINVAL;
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return -ESHUTDOWN;
}
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are a few cases to handle:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret;
	u32 temp;
	struct xhci_hcd *xhci;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		td = (struct xhci_td *) urb->hcpriv;

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
		kfree(td);
		return ret;
	}
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	xhci_dbg(xhci, "Cancel URB %p\n", urb);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	xhci_dbg(xhci, "Endpoint ring:\n");
	xhci_debug_ring(xhci, ep_ring);
	td = (struct xhci_td *) urb->hcpriv;

	list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);

	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
			ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= drop_flag;
	new_drop_flags = ctrl_ctx->drop_flags;

	ctrl_ctx->add_flags &= ~drop_flag;
	new_add_flags = ctrl_ctx->add_flags;

	last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= ~LAST_CTX_MASK;
		slot_ctx->dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = slot_ctx->dev_info;

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
				udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= added_ctxs;
	new_add_flags = ctrl_ctx->add_flags;

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = ctrl_ctx->drop_flags;

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= ~LAST_CTX_MASK;
		slot_ctx->dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = slot_ctx->dev_info;

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= ~LAST_CTX_MASK;
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= LAST_CTX(1);
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, int *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_ENOMEM:
		dev_warn(&udev->dev, "Not enough host controller resources "
				"for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth "
				"for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
				"add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, int *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_EINVAL:
		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
				"context command.\n");
		ret = -EINVAL;
		break;
	case COMP_EBADSLT:
		dev_warn(&udev->dev, "WARN: slot not enabled for "
				"evaluate context command.\n");
	case COMP_CTX_STATE:
		dev_warn(&udev->dev, "WARN: invalid context state for "
				"evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful evaluate context command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}
/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct xhci_command *command,
		bool ctx_change, bool must_succeed)
{
	int ret;
	int timeleft;
	unsigned long flags;
	struct xhci_container_ctx *in_ctx;
	struct completion *cmd_completion;
	int *cmd_status;
	struct xhci_virt_device *virt_dev;

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];
	if (command) {
		in_ctx = command->in_ctx;
		cmd_completion = command->completion;
		cmd_status = &command->status;
		command->command_trb = xhci->cmd_ring->enqueue;
		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
	} else {
		in_ctx = virt_dev->in_ctx;
		cmd_completion = &virt_dev->cmd_completion;
		cmd_status = &virt_dev->cmd_status;
	}
	init_completion(cmd_completion);

	if (!ctx_change)
		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	else
		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
				udev->slot_id);
	if (ret < 0) {
		if (command)
			list_del(&command->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
		return -ENOMEM;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
	timeleft = wait_for_completion_interruptible_timeout(
			cmd_completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for %s command\n",
				timeleft == 0 ? "Timeout" : "Signal",
				ctx_change == 0 ?
					"configure endpoint" :
					"evaluate context");
		/* FIXME cancel the configure endpoint command */
		return -ETIME;
	}

	if (!ctx_change)
		return xhci_configure_endpoint_result(xhci, udev, cmd_status);
	return xhci_evaluate_context_result(xhci, udev, cmd_status);
}
/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device	*virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);

	if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags |= SLOT_FLAG;
	ctrl_ctx->add_flags &= ~EP0_FLAG;
	ctrl_ctx->drop_flags &= ~SLOT_FLAG;
	ctrl_ctx->drop_flags &= ~EP0_FLAG;
	xhci_dbg(xhci, "New Input Control Context:\n");
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

	ret = xhci_configure_endpoint(xhci, udev, NULL,
			false, false);
	if (ret) {
		/* Callee should call reset_bandwidth() */
		return ret;
	}

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

	xhci_zero_in_ctx(xhci, virt_dev);
	/* Install new rings and free or cache any old rings */
	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].new_ring)
			continue;
		/* Only cache or free the old ring if it exists.
		 * It may not if this is the first add of an endpoint.
		 */
		if (virt_dev->eps[i].ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
		}
		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
		virt_dev->eps[i].new_ring = NULL;
	}

	return ret;
}
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device	*virt_dev;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return;
	}
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for added endpoints */
	for (i = 0; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
			virt_dev->eps[i].new_ring = NULL;
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
}
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		u32 add_flags, u32 drop_flags)
{
	struct xhci_input_control_ctx *ctrl_ctx;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ctrl_ctx->add_flags = add_flags;
	ctrl_ctx->drop_flags = drop_flags;
	xhci_slot_copy(xhci, in_ctx, out_ctx);
	ctrl_ctx->add_flags |= SLOT_FLAG;

	xhci_dbg(xhci, "Input Context:\n");
	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	dma_addr_t addr;

	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ep_index);
	in_ctx = xhci->devs[slot_id]->in_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
			deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit config ep after "
				"reset ep command\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
				deq_state->new_deq_seg,
				deq_state->new_deq_ptr);
		return;
	}
	ep_ctx->deq = addr | deq_state->new_cycle_state;

	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
}
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
		struct usb_device *udev, unsigned int ep_index)
{
	struct xhci_dequeue_state deq_state;
	struct xhci_virt_ep *ep;

	xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
	xhci_find_new_dequeue_state(xhci, udev->slot_id,
			ep_index, ep->stopped_stream, ep->stopped_td,
			&deq_state);

	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg(xhci, "Queueing new dequeue state\n");
		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, ep->stopped_stream, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and the
		 * reset endpoint completion!
		 * XXX: No idea how this hardware will react when stream rings
		 * are enabled.
		 */
		xhci_dbg(xhci, "Setting up input context for "
				"configure endpoint command\n");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}
/* Deal with stalled endpoints.  The core should have sent the control message
 * to clear the halt condition.  However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */
void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int ep_index;
	unsigned long flags;
	int ret;
	struct xhci_virt_ep *virt_ep;

	xhci = hcd_to_xhci(hcd);
	udev = (struct usb_device *) ep->hcpriv;
	/* Called with a root hub endpoint (or an endpoint that wasn't added
	 * with xhci_add_endpoint())
	 */
	if (!ep->hcpriv)
		return;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	if (!virt_ep->stopped_td) {
		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
				ep->desc.bEndpointAddress);
		return;
	}
	if (usb_endpoint_xfer_control(&ep->desc)) {
		xhci_dbg(xhci, "Control endpoint stall already handled.\n");
		return;
	}

	xhci_dbg(xhci, "Queueing reset endpoint command\n");
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
	/*
	 * Can't change the ring dequeue pointer until it's transitioned to the
	 * stopped state, which is only upon a successful reset endpoint
	 * command.  Better hope that last command worked!
	 */
	if (!ret) {
		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
		kfree(virt_ep->stopped_td);
		xhci_ring_cmd_db(xhci);
	}
	virt_ep->stopped_td = NULL;
	virt_ep->stopped_trb = NULL;
	virt_ep->stopped_stream = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (ret)
		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		unsigned int slot_id)
{
	int ret;
	unsigned int ep_index;
	unsigned int ep_state;

	if (!ep)
		return -EINVAL;
	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, __func__);
	if (ret <= 0)
		return -EINVAL;
	if (ep->ss_ep_comp.bmAttributes == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
				" descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
	if (ep_state & EP_HAS_STREAMS ||
			ep_state & EP_GETTING_STREAMS) {
		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
				"already has streams set up.\n",
				ep->desc.bEndpointAddress);
		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
				"dynamic stream context array reallocation.\n");
		return -EINVAL;
	}
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
				"endpoint 0x%x; URBs are pending.\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}
	return 0;
}
static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
		unsigned int *num_streams, unsigned int *num_stream_ctxs)
{
	unsigned int max_streams;

	/* The stream context array size must be a power of two */
	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
	/*
	 * Find out how many primary stream array entries the host controller
	 * supports.  Later we may use secondary stream arrays (similar to 2nd
	 * level page entries), but that's an optional feature for xHCI host
	 * controllers.  xHCs must support at least 4 stream IDs.
	 */
	max_streams = HCC_MAX_PSA(xhci->hcc_params);
	if (*num_stream_ctxs > max_streams) {
		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
				max_streams);
		*num_stream_ctxs = max_streams;
		*num_streams = max_streams;
	}
}
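/*
 * Worked example, not from the original source: a driver asking for 6
 * stream IDs (including stream 0) gets *num_stream_ctxs rounded up to 8;
 * if HCC_MAX_PSA() reports that the controller only supports 4 primary
 * stream array entries, both values are clamped down to 4.
 */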
/* Returns an error code if one of the endpoints already has streams.
 * This does not change any data structures, it only checks and gathers
 * information.
 */
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)
{
	unsigned int max_streams;
	unsigned int endpoint_flag;
	int i;
	int ret;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;

		max_streams = USB_SS_MAX_STREAMS(
				eps[i]->ss_ep_comp.bmAttributes);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams+1;
		}

		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
		*changed_ep_bitmask |= endpoint_flag;
	}
	return 0;
}
static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int ep_state;
	int i;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])
		return 0;

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		/* Are streams already being freed for the endpoint? */
		if (ep_state & EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are being disabled already.",
					eps[i]->desc.bEndpointAddress);
			return 0;
		}
		/* Are there actually any streams to free? */
		if (!(ep_state & EP_HAS_STREAMS) &&
				!(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are already disabled!",
					eps[i]->desc.bEndpointAddress);
			xhci_warn(xhci, "WARN xhci_free_streams() called "
					"with non-streams endpoint\n");
			return 0;
		}

		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
	}
	return changed_ep_bitmask;
}
/*
 * The USB device drivers use this function (through the HCD interface in USB
 * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
 * coordinate mass storage command queueing across multiple endpoints (basically
 * a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all endpoints.
 *
 * Don't allow the call to succeed if one endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Drivers may get less stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * stream IDs.
 */
int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int num_streams, gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	unsigned int ep_index;
	unsigned int num_stream_ctxs;
	unsigned long flags;
	u32 changed_ep_bitmask = 0;

	if (!eps)
		return -EINVAL;

	/* Add one to the number of streams requested to account for
	 * stream 0 that is reserved for xHCI usage.
	 */
	num_streams += 1;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
			num_streams);

	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	/* Check to make sure all endpoints are not already configured for
	 * streams.  While we're at it, find the maximum number of streams that
	 * all the endpoints will support and check for duplicate endpoints.
	 */
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
			num_eps, &num_streams, &changed_ep_bitmask);
	if (ret < 0) {
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	if (num_streams <= 1) {
		xhci_warn(xhci, "WARN: endpoints can't handle "
				"more than one stream.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}
	vdev = xhci->devs[udev->slot_id];
	/* Mark each endpoint as being in transition, so
	 * xhci_urb_enqueue() will reject all URBs.
	 */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Setup internal data structures and allocate HW data structures for
	 * streams (but don't install the HW structures in the input context
	 * until we're sure all memory allocation succeeded).
	 */
	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
			num_stream_ctxs, num_streams);

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
				num_stream_ctxs,
				num_streams, mem_flags);
		if (!vdev->eps[ep_index].stream_info)
			goto cleanup;
		/* Set maxPstreams in endpoint context and update deq ptr to
		 * point to stream context array.  FIXME
		 */
	}

	/* Set up the input context for a configure endpoint command. */
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);

		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
				vdev->eps[ep_index].stream_info);
	}
	/* Tell the HW to drop its old copy of the endpoint context info
	 * and add the updated copy from the input context.
	 */
	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);

	/* Issue and wait for the configure endpoint command */
	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
			false, false);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the old ring intact and free our internal streams data
	 * structure.
	 */
	if (ret < 0)
		goto cleanup;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
				udev->slot_id, ep_index);
		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
	}
	xhci_free_command(xhci, config_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Subtract 1 for stream 0, which drivers can't use */
	return num_streams - 1;

cleanup:
	/* If it didn't work, free the streams! */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal endpoint ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
		xhci_endpoint_zero(xhci, vdev, eps[i]);
	}
	xhci_free_command(xhci, config_cmd);
	return -ENOMEM;
}
/* Transition the endpoint from using streams to being a "normal" endpoint
 * without streams.
 *
 * Modify the endpoint context state, submit a configure endpoint command,
 * and free all endpoint rings for streams if that completes successfully.
 */
int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *command;
	unsigned int ep_index;
	unsigned long flags;
	u32 changed_ep_bitmask;

	xhci = hcd_to_xhci(hcd);
	vdev = xhci->devs[udev->slot_id];

	/* Set up a configure endpoint command to remove the streams rings */
	spin_lock_irqsave(&xhci->lock, flags);
	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
			udev, eps, num_eps);
	if (changed_ep_bitmask == 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}

	/* Use the xhci_command structure from the first endpoint.  We may have
	 * allocated too many, but the driver may call xhci_free_streams() for
	 * each endpoint it grouped into one call to xhci_alloc_streams().
	 */
	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
	command = vdev->eps[ep_index].stream_info->free_streams_command;
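	/*
	 * Illustrative note: if three endpoints were grouped into one
	 * xhci_alloc_streams() call, each endpoint's stream_info owns a
	 * preallocated free_streams_command, but only the first endpoint's
	 * command is actually used here; the others stay as unused spares
	 * until the stream data is freed.
	 */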
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
			EP_GETTING_NO_STREAMS;

		xhci_endpoint_copy(xhci, command->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
				&vdev->eps[ep_index]);
	}
	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Issue and wait for the configure endpoint command,
	 * which must succeed.
	 */
	ret = xhci_configure_endpoint(xhci, udev, command,
			false, true);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the streams rings intact.
	 */
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal endpoint ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}
/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the default
 * control endpoint.  The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration.  If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish.  Remove all structures
 * associated with the endpoints that were disabled.  Clear the input device
 * structure?  Cache the rings?  Reset the control endpoint 0 max packet size?
 */
int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	int ret, i;
	unsigned long flags;
	struct xhci_hcd *xhci;
	unsigned int slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_command *reset_device_cmd;
	int timeleft;
	int last_freed_endpoint;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	slot_id = udev->slot_id;
	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
		xhci_dbg(xhci, "%s called with invalid slot ID %u\n",
				__func__, slot_id);
		return -EINVAL;
	}

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway.  Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */
	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}
	/* Attempt to submit the Reset Device command to the command ring */
	spin_lock_irqsave(&xhci->lock, flags);
	reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
	ret = xhci_queue_reset_device(xhci, slot_id);
	if (ret) {
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the Reset Device command to finish */
	timeleft = wait_for_completion_interruptible_timeout(
			reset_device_cmd->completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for reset device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		spin_lock_irqsave(&xhci->lock, flags);
		/* The timeout might have raced with the event ring handler, so
		 * only delete from the list if the item isn't poisoned.
		 */
		if (reset_device_cmd->cmd_list.next != LIST_POISON1)
			list_del(&reset_device_cmd->cmd_list);
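		/*
		 * Note (generic list.h behavior): list_del() poisons the
		 * removed entry's ->next with LIST_POISON1, so a poisoned
		 * pointer is taken as proof that the event handler already
		 * deleted this command from virt_dev->cmd_list.
		 */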
		spin_unlock_irqrestore(&xhci->lock, flags);
		ret = -ETIME;
		goto command_cleanup;
	}

	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
	 * unless we tried to reset a slot ID that wasn't enabled,
	 * or the device wasn't in the addressed or configured state.
	 */
	ret = reset_device_cmd->status;
	switch (ret) {
	case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
	case COMP_CTX_STATE: /* 0.96 completion code for same thing */
		xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
				slot_id,
				xhci_get_slot_state(xhci, virt_dev->out_ctx));
		xhci_info(xhci, "Not freeing device rings.\n");
		/* Don't treat this as an error.  May change my mind later. */
		ret = 0;
		goto command_cleanup;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful reset device command.\n");
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, ret))
			break;
		xhci_warn(xhci, "Unknown completion code %u for "
				"reset device command.\n", ret);
		ret = -EINVAL;
		goto command_cleanup;
	}

	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
	last_freed_endpoint = 1;
	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].ring)
			continue;
		xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
		last_freed_endpoint = i;
	}
	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
	ret = 0;

command_cleanup:
	xhci_free_command(xhci, reset_device_cmd);
	return ret;
}
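/*
 * Usage sketch (assumed caller behavior, per the comment above): the USB
 * core's usb_reset_and_verify_device() reaches this function through the
 * HCD's reset_device hook, then re-addresses the device via
 * xhci_address_device() and re-installs the old configuration and alternate
 * settings through the normal bandwidth allocation path.
 */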
/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *virt_dev;
	unsigned long flags;
	u32 state;
	int i;

	if (udev->slot_id == 0)
		return;
	virt_dev = xhci->devs[udev->slot_id];
	if (!virt_dev)
		return;

	/* Stop any wayward timer functions (which may grab the lock) */
	for (i = 0; i < 31; ++i) {
		virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
	}

	spin_lock_irqsave(&xhci->lock, flags);
	/* Don't disable the slot if the host controller is dead. */
	state = xhci_readl(xhci, &xhci->op_regs->status);
	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci_free_virt_device(xhci, udev->slot_id);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	/*
	 * Event command completion handler will free any data structures
	 * associated with the slot.  XXX Can free sleep?
	 */
}
/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int timeleft;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* XXX: how much time for xHC slot assignment? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the enable slot request */
		return 0;
	}

	if (!xhci->slot_id) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		return 0;
	}
	/* xhci_alloc_virt_device() does not touch rings; no need to lock */
	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
		/* Disable slot, if we can do it without mem alloc */
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		spin_lock_irqsave(&xhci->lock, flags);
		if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
			xhci_ring_cmd_db(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}
	udev->slot_id = xhci->slot_id;
	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
	return 1;
}
/*
 * Issue an Address Device command (which will issue a SetAddress request to
 * the device).
 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
 * we should only issue and wait on one address command at the same time.
 *
 * We add one to the device address issued by the hardware because the USB core
 * uses address 1 for the root hubs (even though they're not really devices).
 */
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	unsigned long flags;
	int timeleft;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;

	if (!udev->slot_id) {
		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
		return -EINVAL;
	}

	virt_dev = xhci->devs[udev->slot_id];

	/* If this is a Set Address to an unconfigured device, setup ep 0 */
	if (!udev->config)
		xhci_setup_addressable_virt_dev(xhci, udev);
	else
		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
	/* Otherwise, assume the core has the device configured how it wants */
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
			udev->slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting the
	 * command on a timeout."
	 */
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the address device command */
		return -ETIME;
	}

	switch (virt_dev->cmd_status) {
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
				udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to set address.\n");
		ret = -EPROTO;
		break;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful Address Device command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
		ret = -EINVAL;
		break;
	}
	if (ret)
		return ret;
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
			udev->slot_id,
			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
			(unsigned long long)
				xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
			(unsigned long long)virt_dev->out_ctx->dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
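	/*
	 * Worked example (illustrative numbers): if the xHC assigned hardware
	 * address 3, DEV_ADDR_MASK extracts 3 from dev_state and udev->devnum
	 * becomes 4, leaving devnum 1 free for the root hub.
	 */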
	/* Zero the input context control for later use */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
	/* XXX Meh, not sure if anyone else but choose_address uses this. */
	set_bit(udev->devnum, udev->bus->devmap.devicemap);

	return 0;
}
/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	ctrl_ctx->add_flags |= SLOT_FLAG;
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= DEV_HUB;
	if (tt->multi)
		slot_ctx->dev_info |= DEV_MTT;
	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild);
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		slot_ctx->tt_info |= TT_THINK_TIME(think_time);
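		/*
		 * Worked example (illustrative numbers): a hub reporting a
		 * think time of 666 ns encodes as (666 / 666) - 1 = 0, i.e.
		 * 8 FS bit times; 2664 ns encodes as 3, i.e. 32 FS bit times.
		 */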
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}
int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size.  Why? */
	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}
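/*
 * Note (USB timing, for illustration): MFINDEX counts 125 us microframes and
 * there are eight per 1 ms frame, so the >> 3 divides by 8; e.g. a microframe
 * index of 800 corresponds to frame number 100.
 */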
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
static int __init xhci_hcd_init(void)
{
	int retval;

	retval = xhci_register_pci();
	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering PCI driver.\n");
		return retval;
	}

	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
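	/*
	 * Size arithmetic, spelled out: each "32" above is one 32-bit
	 * register, so 8*32/8 means eight 32-bit words = 32 bytes, and
	 * xhci_run_regs comes to (8 + 8*128) words because each of the 128
	 * interrupter register sets is itself eight 32-bit registers.
	 */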
	return 0;
}
module_init(xhci_hcd_init);
static void __exit xhci_hcd_cleanup(void)
{
	xhci_unregister_pci();
}
module_exit(xhci_hcd_cleanup);