/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/dmapool.h>

#include "xhci.h"
/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
        struct xhci_segment *seg;
        dma_addr_t dma;

        seg = kzalloc(sizeof *seg, flags);
        if (!seg)
                return NULL;
        xhci_dbg(xhci, "Allocating priv segment structure at 0x%x\n",
                        (unsigned int) seg);

        seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
        if (!seg->trbs) {
                kfree(seg);
                return NULL;
        }
        xhci_dbg(xhci, "// Allocating segment at 0x%x (virtual) 0x%x (DMA)\n",
                        (unsigned int) seg->trbs, (u32) dma);

        memset(seg->trbs, 0, SEGMENT_SIZE);
        seg->dma = dma;
        seg->next = NULL;

        return seg;
}
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
        if (!seg)
                return;
        if (seg->trbs) {
                xhci_dbg(xhci, "Freeing DMA segment at 0x%x"
                                " (virtual) 0x%x (DMA)\n",
                                (unsigned int) seg->trbs, (u32) seg->dma);
                dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
                seg->trbs = NULL;
        }
        xhci_dbg(xhci, "Freeing priv segment structure at 0x%x\n",
                        (unsigned int) seg);
        kfree(seg);
}
/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
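/*
 * Illustrative note (added, not part of the original comment): in a
 * two-segment ring the Link TRB of segment A points at segment B and the
 * Link TRB of segment B points back at segment A, with the Toggle Cycle bit
 * set on the last link so the producer flips its cycle bit each time it
 * wraps around the ring.
 */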
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
                struct xhci_segment *next, bool link_trbs)
{
        u32 val;

        if (!prev || !next)
                return;
        prev->next = next;
        if (link_trbs) {
                prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;

                /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
                val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
                val &= ~TRB_TYPE_BITMASK;
                val |= TRB_TYPE(TRB_LINK);
                prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
        }
        xhci_dbg(xhci, "Linking segment 0x%x to segment 0x%x (DMA)\n",
                        prev->dma, next->dma);
}
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
        struct xhci_segment *seg;
        struct xhci_segment *first_seg;

        if (!ring || !ring->first_seg)
                return;
        first_seg = ring->first_seg;
        seg = first_seg->next;
        xhci_dbg(xhci, "Freeing ring at 0x%x\n", (unsigned int) ring);
        while (seg != first_seg) {
                struct xhci_segment *next = seg->next;
                xhci_segment_free(xhci, seg);
                seg = next;
        }
        xhci_segment_free(xhci, first_seg);
        ring->first_seg = NULL;
        kfree(ring);
}
/**
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
                unsigned int num_segs, bool link_trbs, gfp_t flags)
{
        struct xhci_ring *ring;
        struct xhci_segment *prev;

        ring = kzalloc(sizeof *(ring), flags);
        xhci_dbg(xhci, "Allocating ring at 0x%x\n", (unsigned int) ring);
        if (!ring)
                return NULL;

        INIT_LIST_HEAD(&ring->td_list);
        if (num_segs == 0)
                return ring;

        ring->first_seg = xhci_segment_alloc(xhci, flags);
        if (!ring->first_seg)
                goto fail;
        num_segs--;

        prev = ring->first_seg;
        while (num_segs > 0) {
                struct xhci_segment *next;

                next = xhci_segment_alloc(xhci, flags);
                if (!next)
                        goto fail;
                xhci_link_segments(xhci, prev, next, link_trbs);

                prev = next;
                num_segs--;
        }
        xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);

        if (link_trbs) {
                /* See section 4.9.2.1 and 6.4.4.1 */
                prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
                xhci_dbg(xhci, "Wrote link toggle flag to"
                                " segment 0x%x (virtual), 0x%x (DMA)\n",
                                (unsigned int) prev, (u32) prev->dma);
        }
        /* The ring is empty, so the enqueue pointer == dequeue pointer */
        ring->enqueue = ring->first_seg->trbs;
        ring->enq_seg = ring->first_seg;
        ring->dequeue = ring->enqueue;
        ring->deq_seg = ring->first_seg;
        /* The ring is initialized to 0. The producer must write 1 to the cycle
         * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
         * compare CCS to the cycle bit to check ownership, so CCS = 1.
         */
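        /*
         * Illustrative note (added): when the producer passes a Link TRB with
         * the Toggle Cycle bit set it inverts its cycle bit, so TRBs written
         * on the previous pass around the ring are no longer seen as owned by
         * the consumer.
         */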
        ring->cycle_state = 1;

        return ring;

fail:
        xhci_ring_free(xhci, ring);
        return NULL;
}
/* All the xhci_tds in the ring's TD list should be freed at this point */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
        struct xhci_virt_device *dev;
        int i;

        /* Slot ID 0 is reserved */
        if (slot_id == 0 || !xhci->devs[slot_id])
                return;

        dev = xhci->devs[slot_id];
        xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0;
        xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;

        for (i = 0; i < 31; ++i)
                if (dev->ep_rings[i])
                        xhci_ring_free(xhci, dev->ep_rings[i]);

        if (dev->in_ctx)
                dma_pool_free(xhci->device_pool,
                                dev->in_ctx, dev->in_ctx_dma);
        if (dev->out_ctx)
                dma_pool_free(xhci->device_pool,
                                dev->out_ctx, dev->out_ctx_dma);

        kfree(xhci->devs[slot_id]);
        xhci->devs[slot_id] = NULL;
}
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
                struct usb_device *udev, gfp_t flags)
{
        dma_addr_t dma;
        struct xhci_virt_device *dev;

        /* Slot ID 0 is reserved */
        if (slot_id == 0 || xhci->devs[slot_id]) {
                xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
                return 0;
        }

        xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
        if (!xhci->devs[slot_id])
                return 0;
        dev = xhci->devs[slot_id];

        /* Allocate the (output) device context that will be used in the HC */
        dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
        if (!dev->out_ctx)
                goto fail;
        dev->out_ctx_dma = dma;
        xhci_dbg(xhci, "Slot %d output ctx = 0x%x (dma)\n", slot_id, dma);
        memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));

        /* Allocate the (input) device context for address device command */
        dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
        if (!dev->in_ctx)
                goto fail;
        dev->in_ctx_dma = dma;
        xhci_dbg(xhci, "Slot %d input ctx = 0x%x (dma)\n", slot_id, dma);
        memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));

        /* Allocate endpoint 0 ring */
        dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
        if (!dev->ep_rings[0])
                goto fail;

        init_completion(&dev->cmd_completion);

        /*
         * Point to output device context in dcbaa; skip the output control
         * context, which is eight 32 bit fields (or 32 bytes long)
         */
        xhci->dcbaa->dev_context_ptrs[2*slot_id] =
                (u32) dev->out_ctx_dma + (32);
        xhci_dbg(xhci, "Set slot id %d dcbaa entry 0x%x to 0x%x\n",
                        slot_id,
                        (unsigned int) &xhci->dcbaa->dev_context_ptrs[2*slot_id],
                        dev->out_ctx_dma);
        xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;

        return 1;
fail:
        xhci_free_virt_device(xhci, slot_id);
        return 0;
}
/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
        struct xhci_virt_device *dev;
        struct xhci_ep_ctx *ep0_ctx;
        struct usb_device *top_dev;

        dev = xhci->devs[udev->slot_id];
        /* Slot ID 0 is reserved */
        if (udev->slot_id == 0 || !dev) {
                xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
                                udev->slot_id);
                return -EINVAL;
        }
        ep0_ctx = &dev->in_ctx->ep[0];

        /* 2) New slot context and endpoint 0 context are valid */
        dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG;

        /* 3) Only the control endpoint is valid - one endpoint context */
        dev->in_ctx->slot.dev_info |= LAST_CTX(1);

        switch (udev->speed) {
        case USB_SPEED_SUPER:
                dev->in_ctx->slot.dev_info |= (u32) udev->route;
                dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS;
                break;
        case USB_SPEED_HIGH:
                dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS;
                break;
        case USB_SPEED_FULL:
                dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS;
                break;
        case USB_SPEED_LOW:
                dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS;
                break;
        case USB_SPEED_VARIABLE:
                xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
                return -EINVAL;
        default:
                /* Speed was set earlier, this shouldn't happen. */
                BUG();
        }
        /* Find the root hub port this device is under */
        for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
                        top_dev = top_dev->parent)
                /* Found device below root hub */;
        dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
        xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);

        /* Is this a LS/FS device under a HS hub? */
        /*
         * FIXME: I don't think this is right, where does the TT info for the
         * roothub or parent hub come from?
         */
        if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
                        udev->tt) {
                dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id;
                dev->in_ctx->slot.tt_info |= udev->ttport << 8;
        }
        xhci_dbg(xhci, "udev->tt = 0x%x\n", (unsigned int) udev->tt);
        xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

        /* Step 4 - ring already allocated */

        ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
        /*
         * See section 4.3 bullet 6:
         * The default Max Packet size for ep0 is "8 bytes for a USB2
         * LS/FS/HS device or 512 bytes for a USB3 SS device"
         * XXX: Not sure about wireless USB devices.
         */
        if (udev->speed == USB_SPEED_SUPER)
                ep0_ctx->ep_info2 |= MAX_PACKET(512);
        else
                ep0_ctx->ep_info2 |= MAX_PACKET(8);
        /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
        ep0_ctx->ep_info2 |= MAX_BURST(0);
        ep0_ctx->ep_info2 |= ERROR_COUNT(3);

        ep0_ctx->deq[0] = dev->ep_rings[0]->first_seg->dma;
        ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state;
        ep0_ctx->deq[1] = 0;

        /* Steps 7 and 8 were done in xhci_alloc_virt_device() */

        return 0;
}
/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
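/*
 * Worked examples (added for clarity, not part of the original comment):
 * a high-speed interrupt endpoint with bInterval = 4 gets Interval = 3,
 * i.e. a service period of 2^3 * 125us = 1ms.  A full-speed interrupt
 * endpoint with bInterval = 10 frames (10ms) is converted below with
 * fls(8 * 10) - 1 = 6, i.e. 2^6 * 125us = 8ms, rounded down to the nearest
 * power-of-2 number of microframes.
 */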
static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        unsigned int interval = 0;

        switch (udev->speed) {
        case USB_SPEED_HIGH:
                /* Max NAK rate */
                if (usb_endpoint_xfer_control(&ep->desc) ||
                                usb_endpoint_xfer_bulk(&ep->desc))
                        interval = ep->desc.bInterval;
                /* Fall through - SS and HS isoc/int have same decoding */
        case USB_SPEED_SUPER:
                if (usb_endpoint_xfer_int(&ep->desc) ||
                                usb_endpoint_xfer_isoc(&ep->desc)) {
                        if (ep->desc.bInterval == 0)
                                interval = 0;
                        else
                                interval = ep->desc.bInterval - 1;
                        if (interval > 15)
                                interval = 15;
                        if (interval != ep->desc.bInterval + 1)
                                dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
                                                ep->desc.bEndpointAddress, 1 << interval);
                }
                break;
        /* Convert bInterval (in 1-255 frames) to microframes and round down to
         * nearest power of 2.
         */
        case USB_SPEED_FULL:
        case USB_SPEED_LOW:
                if (usb_endpoint_xfer_int(&ep->desc) ||
                                usb_endpoint_xfer_isoc(&ep->desc)) {
                        interval = fls(8*ep->desc.bInterval) - 1;
                        if (interval > 10)
                                interval = 10;
                        if (interval < 3)
                                interval = 3;
                        if ((1 << interval) != 8*ep->desc.bInterval)
                                dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
                                                ep->desc.bEndpointAddress, 1 << interval);
                }
                break;
        default:
                BUG();
        }
        return EP_INTERVAL(interval);
}
static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        int in;
        u32 type;

        in = usb_endpoint_dir_in(&ep->desc);
        if (usb_endpoint_xfer_control(&ep->desc)) {
                type = EP_TYPE(CTRL_EP);
        } else if (usb_endpoint_xfer_bulk(&ep->desc)) {
                if (in)
                        type = EP_TYPE(BULK_IN_EP);
                else
                        type = EP_TYPE(BULK_OUT_EP);
        } else if (usb_endpoint_xfer_isoc(&ep->desc)) {
                if (in)
                        type = EP_TYPE(ISOC_IN_EP);
                else
                        type = EP_TYPE(ISOC_OUT_EP);
        } else if (usb_endpoint_xfer_int(&ep->desc)) {
                if (in)
                        type = EP_TYPE(INT_IN_EP);
                else
                        type = EP_TYPE(INT_OUT_EP);
        } else {
                BUG();
        }
        return type;
}
int xhci_endpoint_init(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_ring *ep_ring;
        unsigned int max_packet;
        unsigned int max_burst;

        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = &virt_dev->in_ctx->ep[ep_index];

        /* Set up the endpoint ring */
        virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, GFP_KERNEL);
        if (!virt_dev->new_ep_rings[ep_index])
                return -ENOMEM;
        ep_ring = virt_dev->new_ep_rings[ep_index];
        ep_ctx->deq[1] = 0;
        ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state;

        ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);

        /* FIXME dig Mult and streams info out of ep companion desc */

        /* Allow 3 retries for everything but isoc */
        if (!usb_endpoint_xfer_isoc(&ep->desc))
                ep_ctx->ep_info2 = ERROR_COUNT(3);
        else
                ep_ctx->ep_info2 = ERROR_COUNT(0);

        ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);

        /* Set the max packet size and max burst */
        switch (udev->speed) {
        case USB_SPEED_SUPER:
                max_packet = ep->desc.wMaxPacketSize;
                ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
                /* FIXME dig out burst from ep companion desc */
                break;
        case USB_SPEED_HIGH:
                /* bits 11:12 specify the number of additional transaction
                 * opportunities per microframe (USB 2.0, section 9.6.6)
                 */
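                /*
                 * Example (added): a high-speed isoc endpoint reporting
                 * wMaxPacketSize = 0x1400 encodes a 1024-byte max packet in
                 * bits 10:0 and "2 additional transactions" in bits 12:11,
                 * so max_burst below decodes to (0x1400 & 0x1800) >> 11 = 2.
                 */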
                if (usb_endpoint_xfer_isoc(&ep->desc) ||
                                usb_endpoint_xfer_int(&ep->desc)) {
                        max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
                        ep_ctx->ep_info2 |= MAX_BURST(max_burst);
                }
                /* Fall through */
        case USB_SPEED_FULL:
        case USB_SPEED_LOW:
                max_packet = ep->desc.wMaxPacketSize & 0x3ff;
                ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
                break;
        default:
                BUG();
        }
        /* FIXME Debug endpoint context */
        return 0;
}
void xhci_endpoint_zero(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                struct usb_host_endpoint *ep)
{
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;

        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = &virt_dev->in_ctx->ep[ep_index];

        ep_ctx->ep_info = 0;
        ep_ctx->ep_info2 = 0;
        ep_ctx->deq[0] = 0;
        ep_ctx->deq[1] = 0;
        ep_ctx->tx_info = 0;
        /* Don't free the endpoint ring until the set interface or configuration
         * request succeeds.
         */
}
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        int size;
        int i;

        /* Free the Event Ring Segment Table and the actual Event Ring */
        xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
        xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
        xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
        xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
        xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
        size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
        if (xhci->erst.entries)
                pci_free_consistent(pdev, size,
                                xhci->erst.entries, xhci->erst.erst_dma_addr);
        xhci->erst.entries = NULL;
        xhci_dbg(xhci, "Freed ERST\n");
        if (xhci->event_ring)
                xhci_ring_free(xhci, xhci->event_ring);
        xhci->event_ring = NULL;
        xhci_dbg(xhci, "Freed event ring\n");

        xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
        xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
        if (xhci->cmd_ring)
                xhci_ring_free(xhci, xhci->cmd_ring);
        xhci->cmd_ring = NULL;
        xhci_dbg(xhci, "Freed command ring\n");

        for (i = 1; i < MAX_HC_SLOTS; ++i)
                xhci_free_virt_device(xhci, i);

        if (xhci->segment_pool)
                dma_pool_destroy(xhci->segment_pool);
        xhci->segment_pool = NULL;
        xhci_dbg(xhci, "Freed segment pool\n");

        if (xhci->device_pool)
                dma_pool_destroy(xhci->device_pool);
        xhci->device_pool = NULL;
        xhci_dbg(xhci, "Freed device context pool\n");

        xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
        xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
        if (xhci->dcbaa)
                pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
                                xhci->dcbaa, xhci->dcbaa->dma);
        xhci->dcbaa = NULL;

        xhci->page_size = 0;
        xhci->page_shift = 0;
}
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
        dma_addr_t dma;
        struct device *dev = xhci_to_hcd(xhci)->self.controller;
        unsigned int val, val2;
        struct xhci_segment *seg;
        u32 page_size;
        int i;

        page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
        xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
        for (i = 0; i < 16; i++) {
                if ((0x1 & page_size) != 0)
                        break;
                page_size = page_size >> 1;
        }
        if (i < 16)
                xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
        else
                xhci_warn(xhci, "WARN: no supported page size\n");
        /* Use 4K pages, since that's common and the minimum the HC supports */
        xhci->page_shift = 12;
        xhci->page_size = 1 << xhci->page_shift;
        xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);

        /*
         * Program the Number of Device Slots Enabled field in the CONFIG
         * register with the max value of slots the HC can handle.
         */
        val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
        xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
                        (unsigned int) val);
        val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
        val |= (val2 & ~HCS_SLOTS_MASK);
        xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
                        (unsigned int) val);
        xhci_writel(xhci, val, &xhci->op_regs->config_reg);

        /*
         * Section 5.4.8 - doorbell array must be
         * "physically contiguous and 64-byte (cache line) aligned".
         */
        xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
                        sizeof(*xhci->dcbaa), &dma);
        if (!xhci->dcbaa)
                goto fail;
        memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
        xhci->dcbaa->dma = dma;
        xhci_dbg(xhci, "// Device context base array address = 0x%x (DMA), 0x%x (virt)\n",
                        xhci->dcbaa->dma, (unsigned int) xhci->dcbaa);
        xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
        xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);

        /*
         * Initialize the ring segment pool.  The ring must be a contiguous
         * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
         * however, the command ring segment needs 64-byte aligned segments,
         * so we pick the greater alignment need.
         */
        xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
                        SEGMENT_SIZE, 64, xhci->page_size);
        /* See Table 46 and Note on Figure 55 */
        /* FIXME support 64-byte contexts */
        xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
                        sizeof(struct xhci_device_control),
                        64, xhci->page_size);
        if (!xhci->segment_pool || !xhci->device_pool)
                goto fail;

        /* Set up the command ring to have one segment for now. */
        xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
        if (!xhci->cmd_ring)
                goto fail;
        xhci_dbg(xhci, "Allocated command ring at 0x%x\n", (unsigned int) xhci->cmd_ring);
        xhci_dbg(xhci, "First segment DMA is 0x%x\n", (unsigned int) xhci->cmd_ring->first_seg->dma);

        /* Set the address in the Command Ring Control register */
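        /*
         * Note (added): the low bits of the Command Ring Control register hold
         * status/control flags (bit 0 is the Ring Cycle State), which is why
         * the segment DMA address is masked with CMD_RING_ADDR_MASK below and
         * the ring's cycle state is OR'd into the low bit.
         */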
        val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
        val = (val & ~CMD_RING_ADDR_MASK) |
                (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
                xhci->cmd_ring->cycle_state;
        xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
        xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
        xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
        xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
        xhci_dbg_cmd_ptrs(xhci);

        val = xhci_readl(xhci, &xhci->cap_regs->db_off);
        val &= DBOFF_MASK;
        xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
                        " from cap regs base addr\n", val);
        xhci->dba = (void *) xhci->cap_regs + val;

        xhci_print_run_regs(xhci);
        /* Set ir_set to interrupt register set 0 */
        xhci->ir_set = (void *) xhci->run_regs->ir_set;

        /*
         * Event ring setup: Allocate a normal ring, but also setup
         * the event ring segment table (ERST).  Section 4.9.3.
         */
        xhci_dbg(xhci, "// Allocating event ring\n");
        xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
        if (!xhci->event_ring)
                goto fail;

        xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
                        sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
        if (!xhci->erst.entries)
                goto fail;
        xhci_dbg(xhci, "// Allocated event ring segment table at 0x%x\n", dma);

        memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
        xhci->erst.num_entries = ERST_NUM_SEGS;
        xhci->erst.erst_dma_addr = dma;
        xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = 0x%x, dma addr = 0x%x\n",
                        xhci->erst.num_entries,
                        (unsigned int) xhci->erst.entries,
                        xhci->erst.erst_dma_addr);

        /* set ring base address and size for each segment table entry */
        for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
                struct xhci_erst_entry *entry = &xhci->erst.entries[val];
                entry->seg_addr[1] = 0;
                entry->seg_addr[0] = seg->dma;
                entry->seg_size = TRBS_PER_SEGMENT;
                entry->rsvd = 0;
                seg = seg->next;
        }

        /* set ERST count with the number of entries in the segment table */
        val = xhci_readl(xhci, &xhci->ir_set->erst_size);
        val &= ERST_SIZE_MASK;
        val |= ERST_NUM_SEGS;
        xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
                        val);
        xhci_writel(xhci, val, &xhci->ir_set->erst_size);

        xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
        /* set the segment table base address */
        xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%x\n",
                        xhci->erst.erst_dma_addr);
        xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
        val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
        val &= ERST_PTR_MASK;
        val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
        xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);

        /* Set the event ring dequeue address */
        set_hc_event_deq(xhci);
        xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
        xhci_print_ir_set(xhci, xhci->ir_set, 0);

        /*
         * XXX: Might need to set the Interrupter Moderation Register to
         * something other than the default (~1ms minimum between interrupts).
         * See section 5.5.1.2.
         */
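        /*
         * Note (added): the IMOD interval field counts in 250 ns increments,
         * so the ~1 ms default corresponds to an interval value of about 4000.
         */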
        init_completion(&xhci->addr_dev);
        for (i = 0; i < MAX_HC_SLOTS; ++i)
                xhci->devs[i] = 0;

        return 0;
fail:
        xhci_warn(xhci, "Couldn't initialize memory\n");
        xhci_mem_cleanup(xhci);
        return -ENOMEM;
}