/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>

#include "xhci.h"
/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;

	seg = kzalloc(sizeof *seg, flags);
	if (!seg)
		return NULL;
	xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}
	xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
			seg->trbs, (unsigned long long)dma);

	memset(seg->trbs, 0, SEGMENT_SIZE);
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (!seg)
		return;
	if (seg->trbs) {
		xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
				seg->trbs, (unsigned long long)seg->dma);
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
	kfree(seg);
}
/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, bool link_trbs)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		if (xhci_link_trb_quirk(xhci))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
	}
	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
			(unsigned long long)prev->dma,
			(unsigned long long)next->dma);
}
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	if (!ring || !ring->first_seg)
		return;
	first_seg = ring->first_seg;
	seg = first_seg->next;
	xhci_dbg(xhci, "Freeing ring at %p\n", ring);
	while (seg != first_seg) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first_seg);
	ring->first_seg = NULL;
	kfree(ring);
}
static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0. The producer must write 1 to the cycle
	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;
	/* Not necessary for new rings, but needed for re-initialized rings */
	ring->enq_updates = 0;
	ring->deq_updates = 0;
}
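/*
 * Illustrative sketch (not part of the driver): how a consumer decides
 * whether it owns a TRB on a ring initialized above.  TRB_CYCLE is the
 * cycle bit mask from xhci.h; ownership passes when the TRB's cycle bit
 * matches the consumer cycle state (CCS) held in ring->cycle_state.
 */
static inline bool xhci_example_trb_is_ours(union xhci_trb *trb,
		struct xhci_ring *ring)
{
	return (trb->link.control & TRB_CYCLE) == ring->cycle_state;
}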
/**
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, bool link_trbs, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *prev;

	ring = kzalloc(sizeof *(ring), flags);
	xhci_dbg(xhci, "Allocating ring at %p\n", ring);
	if (!ring)
		return NULL;

	INIT_LIST_HEAD(&ring->td_list);
	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc(xhci, flags);
	if (!ring->first_seg)
		goto fail;
	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, flags);
		if (!next)
			goto fail;
		xhci_link_segments(xhci, prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);

	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
		xhci_dbg(xhci, "Wrote link toggle flag to"
				" segment %p (virtual), 0x%llx (DMA)\n",
				prev, (unsigned long long)prev->dma);
	}
	xhci_initialize_ring_info(ring);
	return ring;

fail:
	xhci_ring_free(xhci, ring);
	return NULL;
}
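/*
 * Illustrative usage sketch (not part of the driver): a one-segment
 * transfer ring with link TRBs, matching how endpoint 0's ring is
 * allocated in xhci_alloc_virt_device() below.
 */
#if 0
	struct xhci_ring *ring = xhci_ring_alloc(xhci, 1, true, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;
	/* ... queue TRBs at ring->enqueue, toggling ring->cycle_state ... */
	xhci_ring_free(xhci, ring);
#endif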
void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		unsigned int ep_index)
{
	int rings_cached;

	/* Store at the current count, then bump it, so the cache fills
	 * slots 0..XHCI_MAX_RINGS_CACHED-1 without overrunning the array.
	 */
	rings_cached = virt_dev->num_rings_cached;
	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
		virt_dev->ring_cache[rings_cached] =
			virt_dev->eps[ep_index].ring;
		virt_dev->num_rings_cached++;
		xhci_dbg(xhci, "Cached old ring, "
				"%d ring%s cached\n",
				virt_dev->num_rings_cached,
				(virt_dev->num_rings_cached > 1) ? "s" : "");
	} else {
		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
		xhci_dbg(xhci, "Ring cache full (%d rings), "
				"freeing ring\n",
				virt_dev->num_rings_cached);
	}
	virt_dev->eps[ep_index].ring = NULL;
}
/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
 * pointers to the beginning of the ring.
 */
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
		struct xhci_ring *ring)
{
	struct xhci_segment *seg = ring->first_seg;

	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
		/* All endpoint rings have link TRBs */
		xhci_link_segments(xhci, seg, seg->next, 1);
		seg = seg->next;
	} while (seg != ring->first_seg);
	xhci_initialize_ring_info(ring);
	/* td list should be empty since all URBs have been cancelled,
	 * but just in case...
	 */
	INIT_LIST_HEAD(&ring->td_list);
}
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
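/*
 * Example of the context sizing below: a device context holds 32 entries
 * (one slot context plus 31 endpoint contexts), so it is 32 * 64 = 2048
 * bytes with 64-byte contexts and 32 * 32 = 1024 bytes otherwise.  An
 * input context carries one extra CTX_SIZE entry for the input control
 * context that prefixes it.
 */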
struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
		int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);

	if (!ctx)
		return NULL;

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	memset(ctx->bytes, 0, ctx->size);
	return ctx;
}
void xhci_free_container_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}
struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx,
		unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
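/*
 * Worked example for the offset math above (illustrative): with 32-byte
 * contexts, endpoint context 0 of an input context lives at
 * ctx->bytes + 2 * 32, i.e. one CTX_SIZE slot past the input control
 * context and one past the slot context.  In an output (device) context
 * the same endpoint sits one CTX_SIZE earlier, since there is no input
 * control context.
 */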
static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	init_timer(&ep->stop_cmd_timer);
	ep->stop_cmd_timer.data = (unsigned long) ep;
	ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
	ep->xhci = xhci;
}
/* All the xhci_tds in the ring's TD list should be freed at this point */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;

	for (i = 0; i < 31; ++i)
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);

	if (dev->ring_cache) {
		for (i = 0; i < dev->num_rings_cached; i++)
			xhci_ring_free(xhci, dev->ring_cache[i]);
		kfree(dev->ring_cache);
	}

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;
	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;
	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!dev->eps[0].ring)
		goto fail;

	/* Allocate pointers to the ring cache */
	dev->ring_cache = kzalloc(
			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
			flags);
	if (!dev->ring_cache)
		goto fail;
	dev->num_rings_cached = 0;

	init_completion(&dev->cmd_completion);
	INIT_LIST_HEAD(&dev->cmd_list);

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
			(unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);

	return 1;
fail:
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}
/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct usb_device	*top_dev;
	struct xhci_slot_ctx	*slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 2) New slot context and endpoint 0 context are valid*/
	ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= LAST_CTX(1);

	slot_ctx->dev_info |= (u32) udev->route;
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}
	/* Find the root hub port this device is under */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
	xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);

	/* Is this a LS/FS device under a HS hub? */
	if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
			udev->tt) {
		slot_ctx->tt_info = udev->tt->hub->slot_id;
		slot_ctx->tt_info |= udev->ttport << 8;
		if (udev->tt->multi)
			slot_ctx->dev_info |= DEV_MTT;
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */

	ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
	/*
	 * XXX: Not sure about wireless USB devices.
	 */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= MAX_PACKET(8);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		BUG();
	}
	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= MAX_BURST(0);
	ep0_ctx->ep_info2 |= ERROR_COUNT(3);

	ep0_ctx->deq =
		dev->eps[0].ring->first_seg->dma;
	ep0_ctx->deq |= dev->eps[0].ring->cycle_state;

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}
/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
				usb_endpoint_xfer_bulk(&ep->desc))
			interval = ep->desc.bInterval;
		/* Fall through - SS and HS isoc/int have same decoding */
	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
				usb_endpoint_xfer_isoc(&ep->desc)) {
			if (ep->desc.bInterval == 0)
				interval = 0;
			else
				interval = ep->desc.bInterval - 1;
			if (interval > 15)
				interval = 15;
			/* Only warn if clamping actually changed the value */
			if (interval != ep->desc.bInterval - 1)
				dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
						ep->desc.bEndpointAddress, 1 << interval);
		}
		break;
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		/* Convert bInterval (in 1-255 frames) to microframes and round down to
		 * nearest power of 2.
		 */
		if (usb_endpoint_xfer_int(&ep->desc) ||
				usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = fls(8*ep->desc.bInterval) - 1;
			if (interval > 10)
				interval = 10;
			if (interval < 3)
				interval = 3;
			if ((1 << interval) != 8*ep->desc.bInterval)
				dev_warn(&udev->dev,
						"ep %#x - rounding interval"
						" to %d microframes, "
						"ep desc says %d microframes\n",
						ep->desc.bEndpointAddress,
						1 << interval,
						8*ep->desc.bInterval);
		}
		break;
	default:
		BUG();
	}
	return EP_INTERVAL(interval);
}
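/*
 * Worked example (illustrative): a full-speed interrupt endpoint with
 * bInterval = 10 frames gives interval = fls(80) - 1 = 6, i.e. an xHCI
 * Interval of 2^6 = 64 microframes (8ms), rounded down from the 80
 * microframes (10ms) the descriptor asked for, so the warning above fires.
 */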
/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed != USB_SPEED_SUPER || !ep->ss_ep_comp ||
			!usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp->desc.bmAttributes;
}
static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		BUG();
	}
	return type;
}
/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
			usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	if (udev->speed == USB_SPEED_SUPER) {
		if (ep->ss_ep_comp)
			return ep->ss_ep_comp->desc.wBytesPerInterval;
		xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
		/* Assume no bursts, no multiple opportunities to send. */
		return ep->desc.wMaxPacketSize;
	}

	max_packet = ep->desc.wMaxPacketSize & 0x3ff;
	max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
	/* A 0 in max burst means 1 transfer per ESIT */
	return max_packet * (max_burst + 1);
}
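/*
 * Worked example (illustrative): a high-bandwidth high-speed isoc endpoint
 * with wMaxPacketSize = 0x1300 decodes to max_packet = 0x300 = 768 bytes
 * and max_burst = (0x1300 & 0x1800) >> 11 = 2 (three transactions per
 * microframe), so the max ESIT payload is 768 * 3 = 2304 bytes.
 */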
int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	unsigned int max_burst;
	u32 max_esit_payload;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 1, true, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring) {
		/* Attempt to use the ring cache */
		if (virt_dev->num_rings_cached == 0)
			return -ENOMEM;
		virt_dev->num_rings_cached--;
		virt_dev->eps[ep_index].new_ring =
			virt_dev->ring_cache[virt_dev->num_rings_cached];
		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
	}
	ep_ring = virt_dev->eps[ep_index].new_ring;
	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;

	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
	ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc;
	 * error count = 0 means infinite retries.
	 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		ep_ctx->ep_info2 = ERROR_COUNT(3);
	else
		ep_ctx->ep_info2 = ERROR_COUNT(1);

	ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);

	/* Set the max packet size and max burst */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		max_packet = ep->desc.wMaxPacketSize;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
		/* dig out max burst from ep companion desc */
		if (!ep->ss_ep_comp) {
			xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
			max_packet = 0;
		} else {
			max_packet = ep->ss_ep_comp->desc.bMaxBurst;
		}
		ep_ctx->ep_info2 |= MAX_BURST(max_packet);
		break;
	case USB_SPEED_HIGH:
		/* bits 11:12 specify the number of additional transaction
		 * opportunities per microframe (USB 2.0, section 9.6.6)
		 */
		if (usb_endpoint_xfer_isoc(&ep->desc) ||
				usb_endpoint_xfer_int(&ep->desc)) {
			max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
			ep_ctx->ep_info2 |= MAX_BURST(max_burst);
		}
		/* Fall through */
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		max_packet = ep->desc.wMaxPacketSize & 0x3ff;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
		break;
	default:
		BUG();
	}
	max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
	ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);

	/*
	 * XXX no idea how to calculate the average TRB buffer length for bulk
	 * endpoints, as the driver gives us no clue how big each scatter gather
	 * list entry (or buffer) is going to be.
	 *
	 * For isochronous and interrupt endpoints, we set it to the max
	 * available, until we have new API in the USB core to allow drivers to
	 * declare how much bandwidth they actually need.
	 *
	 * Normally, it would be calculated by taking the total of the buffer
	 * lengths in the TD and then dividing by the number of TRBs in a TD,
	 * including link TRBs, No-op TRBs, and Event data TRBs.  Since we don't
	 * use Event Data TRBs, and we don't chain in a link TRB on short
	 * transfers, we're basically dividing by 1.
	 */
	ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);

	/* FIXME Debug endpoint context */
	return 0;
}
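/*
 * Illustrative call sequence (a sketch, not the driver's actual caller;
 * in practice this runs from the USB core's add-endpoint path): fill in
 * the input endpoint context, then issue a Configure Endpoint command so
 * the xHC picks the new ring up.
 */
#if 0
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0)
		return -ENOMEM;
	/* ... queue a Configure Endpoint command for virt_dev->in_ctx ... */
#endif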
void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds or fails.
	 */
}
/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}
/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.  Only the context entries field matters,
 * but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array =
		pci_alloc_consistent(to_pci_dev(dev),
				num_sp * sizeof(u64),
				&xhci->scratchpad->sp_dma);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->scratchpad->sp_dma_buffers =
		kzalloc(sizeof(dma_addr_t) * num_sp, flags);

	if (!xhci->scratchpad->sp_dma_buffers)
		goto fail_sp4;

	xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = pci_alloc_consistent(to_pci_dev(dev),
				xhci->page_size, &dma);
		if (!buf)
			goto fail_sp5;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
		xhci->scratchpad->sp_dma_buffers[i] = dma;
	}

	return 0;

 fail_sp5:
	for (i = i - 1; i >= 0; i--) {
		pci_free_consistent(to_pci_dev(dev), xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);

 fail_sp4:
	kfree(xhci->scratchpad->sp_buffers);

 fail_sp3:
	pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);

 fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

 fail_sp:
	return -ENOMEM;
}
static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		pci_free_consistent(pdev, xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);
	kfree(xhci->scratchpad->sp_buffers);
	pci_free_consistent(pdev, num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_in_ctx, bool allocate_completion,
		gfp_t mem_flags)
{
	struct xhci_command *command;

	command = kzalloc(sizeof(*command), mem_flags);
	if (!command)
		return NULL;

	if (allocate_in_ctx) {
		command->in_ctx =
			xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
					mem_flags);
		if (!command->in_ctx) {
			kfree(command);
			return NULL;
		}
	}

	if (allocate_completion) {
		command->completion =
			kzalloc(sizeof(struct completion), mem_flags);
		if (!command->completion) {
			xhci_free_container_ctx(xhci, command->in_ctx);
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}
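/*
 * Illustrative usage sketch (not part of the driver): allocate a command
 * with an input context and a completion to wait on, then release both
 * with xhci_free_command() below.
 */
#if 0
	struct xhci_command *cmd =
		xhci_alloc_command(xhci, true, true, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;
	/* ... queue the command, then wait_for_completion(cmd->completion) ... */
	xhci_free_command(xhci, cmd);
#endif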
void xhci_free_command(struct xhci_hcd *xhci,
		struct xhci_command *command)
{
	xhci_free_container_ctx(xhci,
			command->in_ctx);
	kfree(command->completion);
	kfree(command);
}
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int size;
	int i;

	/* Free the Event Ring Segment Table and the actual Event Ring */
	if (xhci->ir_set) {
		xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
		xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
		xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
	}
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		pci_free_consistent(pdev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg(xhci, "Freed ERST\n");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg(xhci, "Freed event ring\n");

	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg(xhci, "Freed command ring\n");

	for (i = 1; i < MAX_HC_SLOTS; ++i)
		xhci_free_virt_device(xhci, i);

	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg(xhci, "Freed segment pool\n");

	if (xhci->device_pool)
		dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg(xhci, "Freed device context pool\n");

	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	if (xhci->dcbaa)
		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);
	xhci->page_size = 0;
	xhci->page_shift = 0;
}
static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *input_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t input_dma,
		struct xhci_segment *result_seg,
		char *test_name, int test_number)
{
	unsigned long long start_dma;
	unsigned long long end_dma;
	struct xhci_segment *seg;

	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

	seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
	if (seg != result_seg) {
		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
				test_name, test_number);
		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
				"input DMA 0x%llx\n",
				input_seg,
				(unsigned long long) input_dma);
		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
				"ending TRB %p (0x%llx DMA)\n",
				start_trb, start_dma,
				end_trb, end_dma);
		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
				result_seg, seg);
		return -1;
	}
	return 0;
}
/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
{
	struct {
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} simple_test_vector [] = {
		/* A zeroed DMA field should fail */
		{ 0, NULL },
		/* One TRB before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 16, NULL },
		/* One byte before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 1, NULL },
		/* Starting TRB should succeed */
		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
		/* Ending TRB should succeed */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
			xhci->event_ring->first_seg },
		/* One byte after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
		/* One TRB after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
		/* An address of all ones should fail */
		{ (dma_addr_t) (~0), NULL },
	};
	struct {
		struct xhci_segment	*input_seg;
		union xhci_trb		*start_trb;
		union xhci_trb		*end_trb;
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} complex_test_vector [] = {
		/* Test feeding a valid DMA address from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid start and end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->cmd_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[0],
			.end_trb = &xhci->event_ring->first_seg->trbs[3],
			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[3],
			.end_trb = &xhci->event_ring->first_seg->trbs[6],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
			.result_seg = NULL,
		},
		/* TRB not in this ring, and we have a wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
	};

	unsigned int num_tests;
	int i, ret;

	num_tests = sizeof(simple_test_vector) / sizeof(simple_test_vector[0]);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				xhci->event_ring->first_seg,
				xhci->event_ring->first_seg->trbs,
				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
				simple_test_vector[i].input_dma,
				simple_test_vector[i].result_seg,
				"Simple", i);
		if (ret < 0)
			return ret;
	}

	num_tests = sizeof(complex_test_vector) / sizeof(complex_test_vector[0]);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				complex_test_vector[i].input_seg,
				complex_test_vector[i].start_trb,
				complex_test_vector[i].end_trb,
				complex_test_vector[i].input_dma,
				complex_test_vector[i].result_seg,
				"Complex", i);
		if (ret < 0)
			return ret;
	}
	xhci_dbg(xhci, "TRB math tests passed.\n");
	return 0;
}
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t	dma;
	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int	val, val2;
	u64		val_64;
	struct xhci_segment	*seg;
	u32 page_size;
	int i;

	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
			(unsigned int) val);
	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
			(unsigned int) val);
	xhci_writel(xhci, val, &xhci->op_regs->config_reg);

	/*
	 * Section 5.4.8 - doorbell array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
	xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(*xhci->dcbaa), &dma);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments,
	 * so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			SEGMENT_SIZE, 64, xhci->page_size);

	/* See Table 46 and Note on Figure 55 */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
			" from cap regs base addr\n", val);
	xhci->dba = (void *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = (void *) xhci->run_regs->ir_set;

	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg(xhci, "// Allocating event ring\n");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
		goto fail;

	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
			(unsigned long long)dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr = seg->dma;
		entry->seg_size = TRBS_PER_SEGMENT;
		entry->rsvd = 0;
		seg = seg->next;
	}

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
			val);
	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
	/* set the segment table base address */
	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	init_completion(&xhci->addr_dev);
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = NULL;

	if (scratchpad_alloc(xhci, flags))
		goto fail;

	return 0;

fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}