/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "xhci.h"

#define XHCI_INIT_VALUE 0x0

/* Add verbose debugging later, just print everything for now */
void xhci_dbg_regs(struct xhci_hcd *xhci)
{
        u32 temp;

        xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
                        xhci->cap_regs);
        temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
        xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
                        &xhci->cap_regs->hc_capbase, temp);
        xhci_dbg(xhci, "//   CAPLENGTH: 0x%x\n",
                        (unsigned int) HC_LENGTH(temp));
        xhci_dbg(xhci, "//   HCIVERSION: 0x%x\n",
                        (unsigned int) HC_VERSION(temp));

        xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);

        temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
        xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
                        &xhci->cap_regs->run_regs_off,
                        (unsigned int) temp & RTSOFF_MASK);
        xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);

        temp = xhci_readl(xhci, &xhci->cap_regs->db_off);
        xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
        xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
}
static void xhci_print_cap_regs(struct xhci_hcd *xhci)
{
        u32 temp;

        xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);

        temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
        xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n",
                        (unsigned int) temp);
        xhci_dbg(xhci, "CAPLENGTH: 0x%x\n",
                        (unsigned int) HC_LENGTH(temp));
        xhci_dbg(xhci, "HCIVERSION: 0x%x\n",
                        (unsigned int) HC_VERSION(temp));

        temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
        xhci_dbg(xhci, "HCSPARAMS 1: 0x%x\n",
                        (unsigned int) temp);
        xhci_dbg(xhci, "  Max device slots: %u\n",
                        (unsigned int) HCS_MAX_SLOTS(temp));
        xhci_dbg(xhci, "  Max interrupters: %u\n",
                        (unsigned int) HCS_MAX_INTRS(temp));
        xhci_dbg(xhci, "  Max ports: %u\n",
                        (unsigned int) HCS_MAX_PORTS(temp));

        temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
        xhci_dbg(xhci, "HCSPARAMS 2: 0x%x\n",
                        (unsigned int) temp);
        xhci_dbg(xhci, "  Isoc scheduling threshold: %u\n",
                        (unsigned int) HCS_IST(temp));
        xhci_dbg(xhci, "  Maximum allowed segments in event ring: %u\n",
                        (unsigned int) HCS_ERST_MAX(temp));

        temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
        xhci_dbg(xhci, "HCSPARAMS 3 0x%x:\n",
                        (unsigned int) temp);
        xhci_dbg(xhci, "  Worst case U1 device exit latency: %u\n",
                        (unsigned int) HCS_U1_LATENCY(temp));
        xhci_dbg(xhci, "  Worst case U2 device exit latency: %u\n",
                        (unsigned int) HCS_U2_LATENCY(temp));

        temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
        xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
        xhci_dbg(xhci, "  HC generates %s bit addresses\n",
                        HCC_64BIT_ADDR(temp) ? "64" : "32");
        xhci_dbg(xhci, "  FIXME: more HCCPARAMS debugging\n");

        temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
        xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
}
static void xhci_print_command_reg(struct xhci_hcd *xhci)
{
        u32 temp;

        temp = xhci_readl(xhci, &xhci->op_regs->command);
        xhci_dbg(xhci, "USBCMD 0x%x:\n", temp);
        xhci_dbg(xhci, "  HC is %s\n",
                        (temp & CMD_RUN) ? "running" : "being stopped");
        xhci_dbg(xhci, "  HC has %sfinished hard reset\n",
                        (temp & CMD_RESET) ? "not " : "");
        xhci_dbg(xhci, "  Event Interrupts %s\n",
                        (temp & CMD_EIE) ? "enabled " : "disabled");
        xhci_dbg(xhci, "  Host System Error Interrupts %s\n",
                        (temp & CMD_HSEIE) ? "enabled " : "disabled");
        xhci_dbg(xhci, "  HC has %sfinished light reset\n",
                        (temp & CMD_LRESET) ? "not " : "");
}
static void xhci_print_status(struct xhci_hcd *xhci)
{
        u32 temp;

        temp = xhci_readl(xhci, &xhci->op_regs->status);
        xhci_dbg(xhci, "USBSTS 0x%x:\n", temp);
        xhci_dbg(xhci, "  Event ring is %sempty\n",
                        (temp & STS_EINT) ? "not " : "");
        xhci_dbg(xhci, "  %sHost System Error\n",
                        (temp & STS_FATAL) ? "WARNING: " : "No ");
        xhci_dbg(xhci, "  HC is %s\n",
                        (temp & STS_HALT) ? "halted" : "running");
}
static void xhci_print_op_regs(struct xhci_hcd *xhci)
{
        xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
        xhci_print_command_reg(xhci);
        xhci_print_status(xhci);
}
static void xhci_print_ports(struct xhci_hcd *xhci)
{
        u32 __iomem *addr;
        int i, j;
        int ports;
        char *names[NUM_PORT_REGS] = {
                "status",
                "power",
                "link",
                "reserved",
        };

        ports = HCS_MAX_PORTS(xhci->hcs_params1);
        addr = &xhci->op_regs->port_status_base;
        for (i = 0; i < ports; i++) {
                for (j = 0; j < NUM_PORT_REGS; ++j) {
                        xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
                                        addr, names[j],
                                        (unsigned int) xhci_readl(xhci, addr));
                        addr++;
                }
        }
}
void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
{
        void *addr;
        u32 temp;
        u64 temp_64;

        addr = &ir_set->irq_pending;
        temp = xhci_readl(xhci, addr);
        if (temp == XHCI_INIT_VALUE)
                return;

        xhci_dbg(xhci, "  %p: ir_set[%i]\n", ir_set, set_num);

        xhci_dbg(xhci, "  %p: ir_set.pending = 0x%x\n", addr,
                        (unsigned int)temp);

        addr = &ir_set->irq_control;
        temp = xhci_readl(xhci, addr);
        xhci_dbg(xhci, "  %p: ir_set.control = 0x%x\n", addr,
                        (unsigned int)temp);

        addr = &ir_set->erst_size;
        temp = xhci_readl(xhci, addr);
        xhci_dbg(xhci, "  %p: ir_set.erst_size = 0x%x\n", addr,
                        (unsigned int)temp);

        addr = &ir_set->rsvd;
        temp = xhci_readl(xhci, addr);
        if (temp != XHCI_INIT_VALUE)
                xhci_dbg(xhci, "  WARN: %p: ir_set.rsvd = 0x%x\n",
                                addr, (unsigned int)temp);

        addr = &ir_set->erst_base;
        temp_64 = xhci_read_64(xhci, addr);
        xhci_dbg(xhci, "  %p: ir_set.erst_base = @%08llx\n",
                        addr, temp_64);

        addr = &ir_set->erst_dequeue;
        temp_64 = xhci_read_64(xhci, addr);
        xhci_dbg(xhci, "  %p: ir_set.erst_dequeue = @%08llx\n",
                        addr, temp_64);
}
void xhci_print_run_regs(struct xhci_hcd *xhci)
{
        u32 temp;
        int i;

        xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
        temp = xhci_readl(xhci, &xhci->run_regs->microframe_index);
        xhci_dbg(xhci, "  %p: Microframe index = 0x%x\n",
                        &xhci->run_regs->microframe_index,
                        (unsigned int) temp);
        for (i = 0; i < 7; ++i) {
                temp = xhci_readl(xhci, &xhci->run_regs->rsvd[i]);
                if (temp != XHCI_INIT_VALUE)
                        xhci_dbg(xhci, "  WARN: %p: Rsvd[%i] = 0x%x\n",
                                        &xhci->run_regs->rsvd[i],
                                        i, (unsigned int) temp);
        }
}
void xhci_print_registers(struct xhci_hcd *xhci)
{
        xhci_print_cap_regs(xhci);
        xhci_print_op_regs(xhci);
        xhci_print_ports(xhci);
}
void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
{
        int i;
        for (i = 0; i < 4; ++i)
                xhci_dbg(xhci, "Offset 0x%x = 0x%x\n",
                                i*4, trb->generic.field[i]);
}
/**
 * Debug a transfer request block (TRB).
 */
void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
{
        u64 address;
        u32 type = xhci_readl(xhci, &trb->link.control) & TRB_TYPE_BITMASK;

        switch (type) {
        case TRB_TYPE(TRB_LINK):
                xhci_dbg(xhci, "Link TRB:\n");
                xhci_print_trb_offsets(xhci, trb);

                address = trb->link.segment_ptr;
                xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);

                xhci_dbg(xhci, "Interrupter target = 0x%x\n",
                                GET_INTR_TARGET(trb->link.intr_target));
                xhci_dbg(xhci, "Cycle bit = %u\n",
                                (unsigned int) (trb->link.control & TRB_CYCLE));
                xhci_dbg(xhci, "Toggle cycle bit = %u\n",
                                (unsigned int) (trb->link.control & LINK_TOGGLE));
                xhci_dbg(xhci, "No Snoop bit = %u\n",
                                (unsigned int) (trb->link.control & TRB_NO_SNOOP));
                break;
        case TRB_TYPE(TRB_TRANSFER):
                address = trb->trans_event.buffer;
                /*
                 * FIXME: look at flags to figure out if it's an address or if
                 * the data is directly in the buffer field.
                 */
                xhci_dbg(xhci, "DMA address or buffer contents = %llu\n", address);
                break;
        case TRB_TYPE(TRB_COMPLETION):
                address = trb->event_cmd.cmd_trb;
                xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
                xhci_dbg(xhci, "Completion status = %u\n",
                                (unsigned int) GET_COMP_CODE(trb->event_cmd.status));
                xhci_dbg(xhci, "Flags = 0x%x\n",
                                (unsigned int) trb->event_cmd.flags);
                break;
        default:
                xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
                                (unsigned int) (type >> 10));
                xhci_print_trb_offsets(xhci, trb);
                break;
        }
}
/**
 * Debug a segment with an xHCI ring.
 *
 * @return The Link TRB of the segment, or NULL if there is no Link TRB
 * (which is a bug, since all segments must have a Link TRB).
 *
 * Prints out all TRBs in the segment, even those after the Link TRB.
 *
 * XXX: should we print out TRBs that the HC owns?  As long as we don't
 * write, that should be fine...  We shouldn't expect that the memory pointed to
 * by the TRB is valid at all.  Do we care about ones the HC owns?  Probably,
 * for HC debugging.
 */
void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
        int i;
        u32 addr = (u32) seg->dma;
        union xhci_trb *trb = seg->trbs;

        for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
                trb = &seg->trbs[i];
                xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
                                lower_32_bits(trb->link.segment_ptr),
                                upper_32_bits(trb->link.segment_ptr),
                                (unsigned int) trb->link.intr_target,
                                (unsigned int) trb->link.control);
                addr += sizeof(*trb);
        }
}
void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
        xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
                        ring->dequeue,
                        (unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg,
                                        ring->dequeue));
        xhci_dbg(xhci, "Ring deq updated %u times\n",
                        ring->deq_updates);
        xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
                        ring->enqueue,
                        (unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg,
                                        ring->enqueue));
        xhci_dbg(xhci, "Ring enq updated %u times\n",
                        ring->enq_updates);
}
/**
 * Debugging for an xHCI ring, which is a queue broken into multiple segments.
 *
 * Print out each segment in the ring.  Check that the DMA address in
 * each link segment actually matches the segment's stored DMA address.
 * Check that the link end bit is only set at the end of the ring.
 * Check that the dequeue and enqueue pointers point to real data in this ring
 * (not some other ring).
 */
void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
        /* FIXME: Throw an error if any segment doesn't have a Link TRB */
        struct xhci_segment *seg;
        struct xhci_segment *first_seg = ring->first_seg;
        xhci_debug_segment(xhci, first_seg);

        if (!ring->enq_updates && !ring->deq_updates) {
                xhci_dbg(xhci, "  Ring has not been updated\n");
                return;
        }
        for (seg = first_seg->next; seg != first_seg; seg = seg->next)
                xhci_debug_segment(xhci, seg);
}
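
/*
 * Example (hypothetical): dumping the command ring together with its
 * enqueue/dequeue pointers.  The wrapper name and the use of xhci->cmd_ring
 * are assumptions for illustration only; the block is compiled out.
 */
#if 0
static void xhci_dbg_dump_cmd_ring(struct xhci_hcd *xhci)
{
        xhci_debug_ring(xhci, xhci->cmd_ring);
        xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
}
#endif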
void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
{
        u32 addr = (u32) erst->erst_dma_addr;
        int i;
        struct xhci_erst_entry *entry;

        for (i = 0; i < erst->num_entries; ++i) {
                entry = &erst->entries[i];
                xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
                                (unsigned int) addr,
                                lower_32_bits(entry->seg_addr),
                                upper_32_bits(entry->seg_addr),
                                (unsigned int) entry->seg_size,
                                (unsigned int) entry->rsvd);
                addr += sizeof(*entry);
        }
}
void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
{
        u64 val;

        val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
                        lower_32_bits(val));
        xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
                        upper_32_bits(val));
}
/* Print the last 32 bytes for 64-byte contexts */
static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
{
        int i;
        for (i = 0; i < 4; ++i) {
                xhci_dbg(xhci, "@%p (virt) @%08llx "
                                "(dma) %#08llx - rsvd64[%d]\n",
                                &ctx[4 + i], (unsigned long long)dma,
                                ctx[4 + i], i);
                dma += 8;
        }
}
void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
{
        /* Fields are 32 bits wide, DMA addresses are in bytes */
        int field_size = 32 / 8;
        int i;

        struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
        dma_addr_t dma = ctx->dma + ((unsigned long)slot_ctx - (unsigned long)ctx);
        int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);

        xhci_dbg(xhci, "Slot Context:\n");
        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
                        &slot_ctx->dev_info,
                        (unsigned long long)dma, slot_ctx->dev_info);
        dma += field_size;
        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
                        &slot_ctx->dev_info2,
                        (unsigned long long)dma, slot_ctx->dev_info2);
        dma += field_size;
        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
                        &slot_ctx->tt_info,
                        (unsigned long long)dma, slot_ctx->tt_info);
        dma += field_size;
        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
                        &slot_ctx->dev_state,
                        (unsigned long long)dma, slot_ctx->dev_state);
        dma += field_size;
        for (i = 0; i < 4; ++i) {
                xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
                                &slot_ctx->reserved[i], (unsigned long long)dma,
                                slot_ctx->reserved[i], i);
                dma += field_size;
        }

        if (csz)
                dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
}
void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
                struct xhci_container_ctx *ctx,
                unsigned int last_ep)
{
        int i, j;
        int last_ep_ctx = 31;
        /* Fields are 32 bits wide, DMA addresses are in bytes */
        int field_size = 32 / 8;
        int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);

        if (last_ep < 31)
                last_ep_ctx = last_ep + 1;
        for (i = 0; i < last_ep_ctx; ++i) {
                struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
                dma_addr_t dma = ctx->dma +
                        ((unsigned long)ep_ctx - (unsigned long)ctx);

                xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
                xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
                                &ep_ctx->ep_info,
                                (unsigned long long)dma, ep_ctx->ep_info);
                dma += field_size;
                xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
                                &ep_ctx->ep_info2,
                                (unsigned long long)dma, ep_ctx->ep_info2);
                dma += field_size;
                xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
                                &ep_ctx->deq,
                                (unsigned long long)dma, ep_ctx->deq);
                dma += 2 * field_size;
                xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
                                &ep_ctx->tx_info,
                                (unsigned long long)dma, ep_ctx->tx_info);
                dma += field_size;
                for (j = 0; j < 3; ++j) {
                        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
                                        &ep_ctx->reserved[j],
                                        (unsigned long long)dma,
                                        ep_ctx->reserved[j], j);
                        dma += field_size;
                }

                if (csz)
                        dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
        }
}
void xhci_dbg_ctx(struct xhci_hcd *xhci,
                struct xhci_container_ctx *ctx,
                unsigned int last_ep)
{
        int i;
        /* Fields are 32 bits wide, DMA addresses are in bytes */
        int field_size = 32 / 8;
        struct xhci_slot_ctx *slot_ctx;
        dma_addr_t dma = ctx->dma;
        int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);

        if (ctx->type == XHCI_CTX_TYPE_INPUT) {
                struct xhci_input_control_ctx *ctrl_ctx =
                        xhci_get_input_control_ctx(xhci, ctx);
                xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
                                &ctrl_ctx->drop_flags, (unsigned long long)dma,
                                ctrl_ctx->drop_flags);
                dma += field_size;
                xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
                                &ctrl_ctx->add_flags, (unsigned long long)dma,
                                ctrl_ctx->add_flags);
                dma += field_size;
                for (i = 0; i < 6; ++i) {
                        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
                                        &ctrl_ctx->rsvd2[i], (unsigned long long)dma,
                                        ctrl_ctx->rsvd2[i], i);
                        dma += field_size;
                }

                if (csz)
                        dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
        }

        slot_ctx = xhci_get_slot_ctx(xhci, ctx);
        xhci_dbg_slot_ctx(xhci, ctx);
        xhci_dbg_ep_ctx(xhci, ctx, last_ep);
}
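
/*
 * Example (hypothetical): dumping a device's input context before issuing a
 * Configure Endpoint command.  virt_dev and last_ep stand in for values a
 * caller would already have; the block is compiled out.
 */
#if 0
static void xhci_dbg_dump_input_ctx(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev, unsigned int last_ep)
{
        xhci_dbg_ctx(xhci, virt_dev->in_ctx, last_ep);
}
#endif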