2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
13 * Copyright (c) 2017, Joyent, Inc.
17 * Extensible Host Controller Interface (xHCI) USB Driver
19 * The xhci driver is an HCI driver for USB that bridges the gap between client
20 * device drivers and implements the actual way that we talk to devices. The
21 * xhci specification provides access to USB 3.x capable devices, as well as all
22 * prior generations. Like other host controllers, it both provides the way to
23 * talk to devices and also is treated like a hub (often called the root hub).
25 * This driver is part of the USBA (USB Architecture). It implements the HCDI
26 * (host controller device interface) end of USBA. These entry points are used
27 * by the USBA on behalf of client device drivers to access their devices. The
28 * driver also provides notifications to deal with hot plug events, which are
29 * quite common in USB.
35 * To properly understand the xhci driver and the design of the USBA HCDI
36 * interfaces it implements, it helps to have a bit of background into how USB
37 * devices are structured and understand how they work at a high-level.
39 * USB devices, like PCI devices, are broken down into different classes of
40 * device. For example, with USB you have hubs, human-input devices (keyboards,
41 * mice, etc.), mass storage, etc. Every device also has a vendor and device ID.
42 * Many client drivers bind to an entire class of device, for example, the hubd
43 * driver (to hubs) or scsa2usb (USB storage). However, there are other drivers
44 * that bind to explicit IDs such as usbsprl (specific USB to Serial devices).
46 * USB SPEEDS AND VERSIONS
48 * USB devices are often referred to in two different ways. One way they're
49 * described is with the USB version that they conform to. In the wild, you're
50 * most likely going to see USB 1.1, 2.0, 2.1, and 3.0. However, you may also
51 * see devices referred to as 'full-', 'low-', 'high-', and 'super-' speed
54 * The latter description describes the maximum theoretical speed of a given
55 * device. For example, a super-speed device theoretically caps out around 5
56 * Gbit/s, whereas a low-speed device caps out at 1.5 Mbit/s.
58 * In general, each speed usually corresponds to a specific USB protocol
59 * generation. For example, all USB 3.0 devices are super-speed devices. All
60 * 'high-speed' devices are USB 2.x devices. Full-speed devices are special in
61 * that they can either be USB 1.x or USB 2.x devices. Low-speed devices are
62 * only a USB 1.x thing, they did not jump the fire line to USB 2.x.
64 * USB 3.0 devices and ports generally have the wiring for both USB 2.0 and USB
65 * 3.0. When a USB 3.x device is plugged into a USB 2.0 port or hub, then it
66 * will report its version as USB 2.1, to indicate that it is actually a USB 3.x
71 * A given USB device is made up of endpoints. A request, or transfer, is made
72 * to a specific USB endpoint. These endpoints can provide different services
73 * and have different expectations around the size of the data that'll be used
74 * in a given request and the periodicity of requests. Endpoints themselves are
75 * either used to make one-shot requests, for example, making requests to a mass
76 * storage device for a given sector, or for making periodic requests where you
77 * end up polling on the endpoint, for example, polling on a USB keyboard for
80 * Each endpoint encodes two different pieces of information: a direction and a
81 * type. There are two different directions: IN and OUT. These refer to the
82 * general direction that data moves relative to the operating system. For
83 * example, an IN transfer transfers data in to the operating system, from the
84 * device. An OUT transfer transfers data from the operating system, out to the
87 * There are four different kinds of endpoints:
89 * BULK These transfers are large transfers of data to or from
90 * a device. The most common use for bulk transfers is for
91 * mass storage devices. Though they are often also used by
92 * network devices and more. Bulk endpoints do not have an
93 * explicit time component to them. They are always used
94 * for one-shot transfers.
96 * CONTROL These transfers are used to manipulate devices
97 * themselves and are used for USB protocol level
98 * operations (whether device-specific, class-specific, or
99 * generic across all of USB). Unlike other transfers,
100 * control transfers are always bi-directional and use
101 * different kinds of transfers.
103 * INTERRUPT Interrupt transfers are used for small transfers that
104 * happen infrequently, but need reasonable latency. A good
105 * example of interrupt transfers is to receive input from
106 * a USB keyboard. Interrupt-IN transfers are generally
107 * polled. Meaning that a client (device driver) opens up
108 * an interrupt-IN pipe to poll on it, and receives
109 * periodic updates whenever there is information
110 * available. However, Interrupt transfers can be used
111 * as one-shot transfers both going IN and OUT.
113 * ISOCHRONOUS These transfers are things that happen once per
114 * time-interval at a very regular rate. A good example of
115 * these transfers are for audio and video. A device may
116 * describe an interval as 10ms at which point it will read
117 * or write the next batch of data every 10ms and transform
118 * it for the user. There are no one-shot Isochronous-IN
119 * transfers. There are one-shot Isochronous-OUT transfers,
120 * but these are used by device drivers to always provide
121 * the system with sufficient data.
123 * To find out information about the endpoints, USB devices have a series of
124 * descriptors that cover different aspects of the device. For example, there
125 * are endpoint descriptors which cover the properties of endpoints such as the
126 * maximum packet size or polling interval.
128 * Descriptors exist at all levels of USB. For example, there are general
129 * descriptors for every device. The USB device descriptor is described in
130 * usb_dev_descr(9S). Host controllers will look at these descriptors to ensure
131 * that they program the device correctly; however, they are more often used by
132 * client device drivers. There are also descriptors that exist at a class
133 * level. For example, the hub class has a class-specific descriptor which
134 * describes properties of the hub. That information is requested for and used
137 * All of the different descriptors are gathered by the system and placed into a
138 * tree which USBA sometimes calls the 'Configuration Cloud'. Client device
139 * drivers gain access to this cloud and then use them to open endpoints, which
140 * are called pipes in USBA (and some revisions of the USB specification).
142 * Each pipe gives access to a specific endpoint on the device which can be used
143 * to perform transfers of a specific type and direction. For example, a mass
144 * storage device often has three different endpoints, the default control
145 * endpoint (which every device has), a Bulk-IN endpoint, and a Bulk-OUT
146 * endpoint. The device driver ends up with three open pipes. One to the default
147 * control endpoint to configure the device, and then the other two are used to
150 * These routines translate more or less directly into calls to a host
151 * controller driver. A request to open a pipe takes an endpoint descriptor that
152 * describes the properties of the pipe, and the host controller driver (this
153 * driver) goes through and does any work necessary to allow the client device
154 * driver to access it. Once the pipe is open, it either makes one-shot
155 * transfers specific to the transfer type or it starts performing a periodic
156 * poll of an endpoint.
158 * All of these different actions translate into requests to the host
159 * controller. The host controller driver itself is in charge of making sure
160 * that all of the required resources for polling are allocated with a request
161 * and then proceed to give the driver's periodic callbacks.
163 * HUBS AND HOST CONTROLLERS
165 * Every device is always plugged into a hub, even if the device is itself a
166 * hub. This continues until we reach what we call the root-hub. The root-hub is
167 * special in that it is not an actual USB hub, but is integrated into the host
168 * controller and is manipulated in its own way. For example, the host
169 * controller is used to turn on and off a given port's power. This may happen
170 * over any interface, though the most common way is through PCI.
172 * In addition to the normal character device that exists for a host controller
173 * driver, as part of attaching, the host controller binds to an instance of the
174 * hubd driver. While the root-hub is a bit of a fiction, everyone models the
175 * root-hub as the same as any other hub that's plugged in. The hub kernel
176 * module doesn't know that the hub isn't a physical device that's been plugged
177 * in. The host controller driver simulates that view by taking hub requests
178 * that are made and translating them into corresponding requests that are
179 * understood by the host controller, for example, reading and writing to a
180 * memory mapped register.
182 * The hub driver polls for changes in device state using an Interrupt-IN
183 * request, which is the same as is done for the root-hub. This allows the host
184 * controller driver to not have to know about the implementation of device hot
185 * plug, merely react to requests from a hub, the same as if it were an external
186 * device. When the hub driver detects a change, it will go through the
187 * corresponding state machine and attach or detach the corresponding client
188 * device driver, depending if the device was inserted or removed.
190 * We detect the changes for the Interrupt-IN primarily based on the port state
191 * change events that are delivered to the event ring. Whenever any event is
192 * fired, we use this to update the hub driver about _all_ ports with
193 * outstanding events. This more closely matches how a hub is supposed to behave
194 * and leaves things less likely for the hub driver to end up without clearing a
197 * PACKET SIZES AND BURSTING
199 * A given USB endpoint has an explicit packet size and a number of packets that
200 * can be sent per time interval. These concepts are abstracted away from client
201 * device drives usually, though they sometimes inform the upper bounds of what
202 * a device can perform.
204 * The host controller uses this information to transform arbitrary transfer
205 * requests into USB protocol packets. One of the nice things about the host
206 * controllers is that they abstract away all of the signaling and semantics of
207 * the actual USB protocols, allowing for life to be slightly easier in the
210 * That said, if the host controller is not programmed correctly, these can end
211 * up causing transaction errors and other problems in response to the data that
212 * the host controller is trying to send or receive.
218 * The driver is made up of the following files. Many of these have their own
219 * theory statements to describe what they do. Here, we touch on each of the
220 * purpose of each of these files.
222 * xhci_command.c: This file contains the logic to issue commands to the
223 * controller as well as the actual functions that the
224 * other parts of the driver use to cause those commands.
226 * xhci_context.c: This file manages various data structures used by the
227 * controller to manage the controller's and device's
228 * context data structures. See more in the xHCI Overview
229 * and General Design for more information.
231 * xhci_dma.c: This manages the allocation of DMA memory and DMA
232 * attributes for controller, whether memory is for a
233 * transfer or something else. This file also deals with
234 * all the logic of getting data in and out of DMA buffers.
236 * xhci_endpoint.c: This manages all of the logic of handling endpoints or
237 * pipes. It deals with endpoint configuration, I/O
238 * scheduling, timeouts, and callbacks to USBA.
240 * xhci_event.c: This manages callbacks from the hardware to the driver.
241 * This covers command completion notifications and I/O
244 * xhci_hub.c: This manages the virtual root-hub. It basically
245 * implements and translates all of the USB level requests
246 * into xhci specific implements. It also contains the
247 * functions to register this hub with USBA.
249 * xhci_intr.c: This manages the underlying interrupt allocation,
250 * interrupt moderation, and interrupt routines.
252 * xhci_quirks.c: This manages information about buggy hardware that's
253 * been collected and experienced primarily from other
256 * xhci_ring.c: This manages the abstraction of a ring in xhci, which is
257 * the primary of communication between the driver and the
258 * hardware, whether for the controller or a device.
260 * xhci_usba.c: This implements all of the HCDI functions required by
261 * USBA. This is the main entry point that drivers and the
262 * kernel frameworks will reach to start any operation.
263 * Many functions here will end up in the command and
266 * xhci.c: This provides the main kernel DDI interfaces and
267 * performs device initialization.
269 * xhci.h: This is the primary header file which defines
270 * illumos-specific data structures and constants to manage
273 * xhcireg.h: This header file defines all of the register offsets,
274 * masks, and related macros. It also contains all of the
275 * constants that are used in various structures as defined
276 * by the specification, such as command offsets, etc.
278 * xhci_ioctl.h: This contains a few private ioctls that are used by a
279 * private debugging command. These are private.
281 * cmd/xhci/xhci_portsc: This is a private utility that can be useful for
282 * debugging xhci state. It is the only consumer of
283 * xhci_ioctl.h and the private ioctls.
285 * ----------------------------------
286 * xHCI Overview and Structure Layout
287 * ----------------------------------
289 * The design and structure of this driver follows from the way that the xHCI
290 * specification tells us that we have to work with hardware. First we'll give a
291 * rough summary of how that works, though the xHCI 1.1 specification should be
292 * referenced when going through this.
294 * There are three primary parts of the hardware -- registers, contexts, and
295 * rings. The registers are memory mapped registers that come in four sets,
296 * though all are found within the first BAR. These are used to program and
297 * control the hardware and aspects of the devices. Beyond more traditional
298 * device programming there are two primary sets of registers that are
301 * o Port Status and Control Registers (XHCI_PORTSC)
302 * o Doorbell Array (XHCI_DOORBELL)
304 * The port status and control registers are used to get and manipulate the
305 * status of a given device. For example, turning on and off the power to it.
306 * The Doorbell Array is used to kick off I/O operations and start the
307 * processing of an I/O ring.
309 * The contexts are data structures that represent various pieces of information
310 * in the controller. These contexts are generally filled out by the driver and
311 * then acknowledged and consumed by the hardware. There are controller-wide
312 * contexts (mostly managed in xhci_context.c) that are used to point to the
313 * contexts that exist for each device in the system. The primary context is
314 * called the Device Context Base Address Array (DCBAA).
316 * Each device in the system is allocated a 'slot', which is used to index into
317 * the DCBAA. Slots are assigned based on issuing commands to the controller.
318 * There are a fixed number of slots that determine the maximum number of
319 * devices that can end up being supported in the system. Note this includes all
320 * the devices plugged into the USB device tree, not just devices plugged into
321 * ports on the chassis.
323 * For each device, there is a context structure that describes properties of
324 * the device. For example, what speed is the device, is it a hub, etc. The
325 * context has slots for the device and for each endpoint on the device. As
326 * endpoints are enabled, their context information which describes things like
327 * the maximum packet size, is filled in and enabled. The mapping between these
328 * contexts look like:
332 * +--------+ Device Context
333 * | Slot 0 |------------------>+--------------+
334 * +--------+ | Slot Context |
335 * | ... | +--------------+ +----------+
336 * +--------+ +------+ | Endpoint 0 |------>| I/O Ring |
337 * | Slot n |-->| NULL | | Context (Bi) | +----------+
338 * +--------+ +------+ +--------------+
351 * These contexts are always owned by the controller, though we can read them
352 * after various operations complete. Commands that toggle device state use a
353 * specific input context, which is a variant of the device context. The only
354 * difference is that it has an input context structure ahead of it to say which
355 * sections of the device context should be evaluated.
357 * Each active endpoint points us to an I/O ring, which leads us to the third
358 * main data structure that's used by the device: rings. Rings are made up of
359 * transfer request blocks (TRBs), which are joined together to form a given
360 * transfer description (TD) which represents a single I/O request.
362 * These rings are used to issue I/O to individual endpoints, to issue commands
363 * to the controller, and to receive notification of changes and completions.
364 * Issued commands go on the special ring called the command ring while the
365 * change and completion notifications go on the event ring. More details are
366 * available in xhci_ring.c. Each of these structures is represented by an
369 * Each ring can be made up of one or more disjoint regions of DMA; however, we
370 * only use a single one. This also impacts some additional registers and
371 * structures that exist. The event ring has an indirection table called the
372 * Event Ring Segment Table (ERST). Each entry in the table (a segment)
373 * describes a chunk of the event ring.
375 * One other thing worth calling out is the scratchpad. The scratchpad is a way
376 * for the controller to be given arbitrary memory by the OS that it can use.
377 * There are two parts to the scratchpad. The first part is an array whose
378 * entries contain pointers to the actual addresses for the pages. The second
379 * part that we allocate are the actual pages themselves.
381 * -----------------------------
382 * Endpoint State and Management
383 * -----------------------------
385 * Endpoint management is one of the key parts to the xhci driver as every
386 * endpoint is a pipe that a device driver uses, so they are our primary
387 * currency. Endpoints are enabled and disabled when the client device drivers
388 * open and close a pipe. When an endpoint is enabled, we have to fill in an
389 * endpoint's context structure with information about the endpoint. These
390 * basically tell the controller important properties which it uses to ensure
391 * that there is adequate bandwidth for the device.
393 * Each endpoint has its own ring as described in the previous section. We place
394 * TRBs (transfer request blocks) onto a given ring to request I/O be performed.
395 * Responses are placed on the event ring, in other words, the rings associated
396 * with an endpoint are purely for producing I/O.
398 * Endpoints have a defined state machine as described in xHCI 1.1 / 4.8.3.
399 * These states generally correspond with the state of the endpoint to process
400 * I/O and handle timeouts. The driver basically follows a similar state machine
401 * as described there. There are some deviations. For example, what they
402 * describe as 'running' we break into both the Idle and Running states below.
403 * We also have a notion of timed out and quiescing. The following image
404 * summarizes the states and transitions:
406 * +------+ +-----------+
407 * | Idle |---------*--------------------->| Running |<-+
408 * +------+ . I/O queued on +-----------+ |
409 * ^ ring and timeout | | | |
410 * | scheduled. | | | |
412 * +-----*---------------------------------+ | | |
413 * | . No I/Os remain | | |
415 * | +------*------------------+ | |
420 * | +-----------+ +--------+ |
421 * | | Timed Out | | Halted | |
422 * | +-----------+ +--------+ |
424 * | | +-----------+ | |
425 * | +-->| Quiescing |<----------+ |
427 * | No TRBs. | . TRBs |
428 * | remain . | . Remain |
429 * +----------*----<------+-------->-------*-----------+
431 * Normally, a given endpoint will oscillate between having TRBs scheduled and
432 * not. Every time a new I/O is added to the endpoint, we'll ring the doorbell,
433 * making sure that we're processing the ring, presuming that the endpoint isn't
434 * in one of the error states.
436 * To detect device hangs, we have an active timeout(9F) per active endpoint
437 * that ticks at a one second rate while we still have TRBs outstanding on an
438 * endpoint. Once all outstanding TRBs have been processed, the timeout will
439 * stop itself and there will be no active checking until the endpoint has I/O
440 * scheduled on it again.
442 * There are two primary ways that things can go wrong on the endpoint. We can
443 * either have a timeout or an event that transitions the endpoint to the Halted
444 * state. In the halted state, we need to issue explicit commands to reset the
445 * endpoint before removing the I/O.
447 * The way we handle both a timeout and a halted condition is similar, but the
448 * way they are triggered is different. When we detect a halted condition, we
449 * don't immediately clean it up, and wait for the client device driver (or USBA
450 * on its behalf) to issue a pipe reset. When we detect a timeout, we
451 * immediately take action (assuming no other action is ongoing).
453 * In both cases, we quiesce the device, which takes care of dealing with taking
454 * the endpoint from whatever state it may be in and taking the appropriate
455 * actions based on the state machine in xHCI 1.1 / 4.8.3. The end of quiescing
456 * leaves the device stopped, which allows us to update the ring's pointer and
457 * remove any TRBs that are causing problems.
459 * As part of all this, we ensure that we can only be quiescing the device from
460 * a given path at a time. Any requests to schedule I/O during this time will
463 * The following image describes the state machine for the timeout logic. It
464 * ties into the image above.
466 * +----------+ +---------+
467 * | Disabled |-----*--------------------->| Enabled |<--+
468 * +----------+ . TRBs scheduled +---------+ *. 1 sec timer
469 * ^ and no active | | | | fires and
470 * | timer. | | | | another
471 * | | | +--+--+ quiesce, in
472 * | | | | a bad state,
473 * +------*------------------------------+ | ^ or decrement
474 * | . 1 sec timer | | I/O timeout
476 * | no TRBs or | +--------------+
477 * | endpoint shutdown | |
478 * | *. . timer counter |
481 * | +--------------+ |
482 * +-------------*---------------<--| Quiesce ring |->---*-------+
483 * . No more | and fail I/O | . restart
484 * I/Os +--------------+ timer as
487 * As we described above, when there are active TRBs and I/Os, a 1 second
488 * timeout(9F) will be active. Each second, we decrement a counter on the
489 * current, active I/O until either a new I/O takes the head, or the counter
490 * reaches zero. If the counter reaches zero, then we go through, quiesce the
491 * ring, and then clean things up.
497 * It's worth calling out periodic endpoints explicitly, as they operate
498 * somewhat differently. Periodic endpoints are limited to Interrupt-IN and
499 * Isochronous-IN. The USBA often uses the term polling for these. That's
500 * because the client only needs to make a single API call; however, they'll
501 * receive multiple callbacks until either an error occurs or polling is
502 * requested to be terminated.
504 * When we have one of these periodic requests, we end up always rescheduling
505 * I/O requests, as well as, having a specific number of pre-existing I/O
506 * requests to cover the periodic needs, in case of latency spikes. Normally,
507 * when replying to a request, we use the request handle that we were given.
508 * However, when we have a periodic request, we're required to duplicate the
509 * handle before giving them data.
511 * However, the duplication is a bit tricky. For everything that was duplicated,
512 * the framework expects us to submit data. Because of that we, don't duplicate
513 * them until they are needed. This minimizes the likelihood that we have
514 * outstanding requests to deal with when we encounter a fatal polling failure.
516 * Most of the polling setup logic happens in xhci_usba.c in
517 * xhci_hcdi_periodic_init(). The consumption and duplication is handled in
524 * The following images relate the core data structures. The primary structure
525 * in the system is the xhci_t. This is the per-controller data structure that
526 * exists for each instance of the driver. From there, each device in the system
527 * is represented by an xhci_device_t and each endpoint is represented by an
528 * xhci_endpoint_t. For each client that opens a given endpoint, there is an
529 * xhci_pipe_t. For each I/O related ring, there is an xhci_ring_t in the
532 * +------------------------+
537 * | uint_t ---+--> Capability regs offset
538 * | uint_t ---+--> Operational regs offset
539 * | uint_t ---+--> Runtime regs offset
540 * | uint_t ---+--> Doorbell regs offset
541 * | xhci_state_flags_t ---+--> Device state flags
542 * | xhci_quirks_t ---+--> Device quirk flags
543 * | xhci_capability_t ---+--> Controller capability structure
544 * | xhci_dcbaa_t ---+----------------------------------+
545 * | xhci_scratchpad_t ---+---------+ |
546 * | xhci_command_ing_t ---+------+ | v
547 * | xhci_event_ring_t ---+----+ | | +---------------------+
548 * | xhci_usba_t ---+--+ | | | | Device Context |
549 * +------------------------+ | | | | | Base Address |
550 * | | | | | Array Structure |
551 * | | | | | xhci_dcbaa_t |
552 * +-------------------------------+ | | | | |
553 * | +-------------------------------+ | | DCBAA KVA <-+-- uint64_t * |
554 * | | +----------------------------+ | DMA Buffer <-+-- xhci_dma_buffer_t |
555 * | | v | +---------------------+
556 * | | +--------------------------+ +-----------------------+
557 * | | | Event Ring | |
558 * | | | Management | |
559 * | | | xhci_event_ring_t | v
560 * | | | | Event Ring +----------------------+
561 * | | | xhci_event_segment_t * --|-> Segment VA | Scratchpad (Extra |
562 * | | | xhci_dma_buffer_t --|-> Segment DMA Buf. | Controller Memory) |
563 * | | | xhci_ring_t --|--+ | xhci_scratchpad_t |
564 * | | +--------------------------+ | Scratchpad | |
565 * | | | Base Array KVA <-+- uint64_t * |
566 * | +------------+ | Array DMA Buf. <-+- xhci_dma_buffer_t |
567 * | v | Scratchpad DMA <-+- xhci_dma_buffer_t * |
568 * | +---------------------------+ | Buffer per page +----------------------+
569 * | | Command Ring | |
570 * | | xhci_command_ring_t | +------------------------------+
572 * | | xhci_ring_t --+-> Command Ring --->------------+
573 * | | list_t --+-> Command List v
574 * | | timeout_id_t --+-> Timeout State +---------------------+
575 * | | xhci_command_ring_state_t +-> State Flags | I/O Ring |
576 * | +---------------------------+ | xhci_ring_t |
578 * | Ring DMA Buf. <-+-- xhci_dma_buffer_t |
579 * | Ring Length <-+-- uint_t |
580 * | Ring Entry KVA <-+-- xhci_trb_t * |
581 * | +---------------------------+ Ring Head <-+-- uint_t |
582 * +--->| USBA State | Ring Tail <-+-- uint_t |
583 * | xhci_usba_t | Ring Cycle <-+-- uint_t |
584 * | | +---------------------+
585 * | usba_hcdi_ops_t * -+-> USBA Ops Vector ^
586 * | usb_dev_dscr_t -+-> USB Virtual Device Descriptor |
587 * | usb_ss_hub_descr_t -+-> USB Virtual Hub Descriptor |
588 * | usba_pipe_handle_data_t * +-> Interrupt polling client |
589 * | usb_intr_req_t -+-> Interrupt polling request |
590 * | uint32_t --+-> Interrupt polling device mask |
591 * | list_t --+-> Pipe List (Active Users) |
592 * | list_t --+-------------------+ |
593 * +---------------------------+ | ^
596 * +-------------------------------+ +---------------+ |
597 * | USB Device |------------>| USB Device |--> ... |
598 * | xhci_device_t | | xhci_device_t | |
599 * | | +---------------+ |
600 * | usb_port_t --+-> USB Port plugged into |
601 * | uint8_t --+-> Slot Number |
602 * | boolean_t --+-> Address Assigned |
603 * | usba_device_t * --+-> USBA Device State |
604 * | xhci_dma_buffer_t --+-> Input Context DMA Buffer |
605 * | xhci_input_context_t * --+-> Input Context KVA |
606 * | xhci_slot_contex_t * --+-> Input Slot Context KVA |
607 * | xhci_endpoint_context_t *[] --+-> Input Endpoint Context KVA |
608 * | xhci_dma_buffer_t --+-> Output Context DMA Buffer |
609 * | xhci_slot_context_t * --+-> Output Slot Context KVA ^
610 * | xhci_endpoint_context_t *[] --+-> Output Endpoint Context KVA |
611 * | xhci_endpoint_t *[] --+-> Endpoint Tracking ---+ |
612 * +-------------------------------+ | |
615 * +------------------------------+ +-----------------+ |
616 * | Endpoint Data |----------->| Endpoint Data |--> ... |
617 * | xhci_endpoint_t | | xhci_endpoint_t | |
618 * | | +-----------------+ |
619 * | int --+-> Endpoint Number |
620 * | int --+-> Endpoint Type |
621 * | xhci_endpoint_state_t --+-> Endpoint State |
622 * | timeout_id_t --+-> Endpoint Timeout State |
623 * | usba_pipe_handle_data_t * --+-> USBA Client Handle |
624 * | xhci_ring_t --+-> Endpoint I/O Ring -------->--------+
625 * | list_t --+-> Transfer List --------+
626 * +------------------------------+ |
628 * +-------------------------+ +--------------------+
629 * | Transfer Structure |----------------->| Transfer Structure |-> ...
630 * | xhci_transfer_t | | xhci_transfer_t |
631 * | | +--------------------+
632 * | xhci_dma_buffer_t --+-> I/O DMA Buffer
633 * | uint_t --+-> Number of TRBs
634 * | uint_t --+-> Short transfer data
635 * | uint_t --+-> Timeout seconds remaining
636 * | usb_cr_t --+-> USB Transfer return value
637 * | boolean_t --+-> Data direction
638 * | xhci_trb_t * --+-> Host-order transfer requests for I/O
639 * | usb_isoc_pkt_descr_t * -+-> Isochronous only response data
640 * | usb_opaque_t --+-> USBA Request Handle
641 * +-------------------------+
647 * There are three different tiers of locks that exist in the driver. First,
648 * there is a lock for each controller: xhci_t`xhci_lock. This protects all the
649 * data for that instance of the controller. If there are multiple instances of
650 * the xHCI controller in the system, each one is independent and protected
651 * separately. The two do not share any data.
653 * From there, there are two other, specific locks in the system:
655 * o xhci_command_ring_t`xcr_lock
656 * o xhci_device_t`xd_imtx
658 * There is only one xcr_lock per controller, like the xhci_lock. It protects
659 * the state of the command ring. However, there is on xd_imtx per device.
660 * Recall that each device is scoped to a given controller. This protects the
661 * input slot context for a given device.
663 * There are a few important rules to keep in mind here that are true
664 * universally throughout the driver:
666 * 1) Always grab the xhci_t`xhci_lock, before grabbing any of the other locks.
667 * 2) A given xhci_device_t`xd_imtx, must be taken before grabbing the
668 * xhci_command_ring_t`xcr_lock.
669 * 3) A given thread can only hold one of the given xhci_device_t`xd_imtx locks
670 * at a given time. In other words, we should never be manipulating the input
671 * context of two different devices at once.
672 * 4) It is safe to hold the xhci_device_t`xd_imtx while tearing down the
673 * endpoint timer. Conversely, the endpoint specific logic should never enter
676 * --------------------
677 * Relationship to EHCI
678 * --------------------
680 * On some Intel chipsets, a given physical port on the system may be routed to
681 * one of the EHCI or xHCI controllers. This association can be dynamically
682 * changed by writing to platform specific registers as handled by the quirk
683 * logic in xhci_quirk.c.
685 * As these ports may support USB 3.x speeds, we always route all such ports to
686 * the xHCI controller, when supported. In addition, to minimize disruptions
687 * from devices being enumerated and attached to the EHCI driver and then
688 * disappearing, we generally attempt to load the xHCI controller before the
689 * EHCI controller. This logic is not done in the driver; however, it is done in
690 * other parts of the kernel like in uts/common/io/consconfig_dacf.c in the
691 * function consconfig_load_drivres().
697 * The primary future work in this driver spans two different, but related
698 * areas. The first area is around controller resets and how they tie into FM.
699 * Presently, we do not have a good way to handle controllers coming and going
700 * in the broader USB stack or properly reconfigure the device after a reset.
701 * Secondly, we don't handle the suspend and resume of devices and drivers.
704 #include <sys/param.h>
705 #include <sys/modctl.h>
706 #include <sys/conf.h>
707 #include <sys/devops.h>
709 #include <sys/sunddi.h>
710 #include <sys/cmn_err.h>
711 #include <sys/ddifm.h>
713 #include <sys/class.h>
714 #include <sys/policy.h>
716 #include <sys/usb/hcd/xhci/xhci.h>
717 #include <sys/usb/hcd/xhci/xhci_ioctl.h>
720 * We want to use the first BAR to access its registers. The regs[] array is
721 * ordered based on the rules for the PCI supplement to IEEE 1275. So regs[1]
722 * will always be the first BAR.
724 #define XHCI_REG_NUMBER 1
727 * This task queue exists as a global taskq that is used for resetting the
728 * device in the face of FM or runtime errors. Each instance of the device
729 * (xhci_t) happens to have a single taskq_dispatch_ent already allocated so we
730 * know that we should always be able to dispatch such an event.
732 static taskq_t
*xhci_taskq
;
735 * Global soft state for per-instance data. Note that we must use the soft state
736 * routines and cannot use the ddi_set_driver_private() routines. The USB
737 * framework presumes that it can use the dip's private data.
739 void *xhci_soft_state
;
742 * This is the time in us that we wait after a controller resets before we
743 * consider reading any register. There are some controllers that want at least
744 * 1 ms, therefore we default to 10 ms.
746 clock_t xhci_reset_delay
= 10000;
749 xhci_error(xhci_t
*xhcip
, const char *fmt
, ...)
754 if (xhcip
!= NULL
&& xhcip
->xhci_dip
!= NULL
) {
755 vdev_err(xhcip
->xhci_dip
, CE_WARN
, fmt
, ap
);
757 vcmn_err(CE_WARN
, fmt
, ap
);
763 xhci_log(xhci_t
*xhcip
, const char *fmt
, ...)
768 if (xhcip
!= NULL
&& xhcip
->xhci_dip
!= NULL
) {
769 vdev_err(xhcip
->xhci_dip
, CE_NOTE
, fmt
, ap
);
771 vcmn_err(CE_NOTE
, fmt
, ap
);
777 * USBA is in charge of creating device nodes for us. USBA explicitly ORs in the
778 * constant HUBD_IS_ROOT_HUB, so we have to undo that when we're looking at
779 * things here. A simple bitwise-and will take care of this. And hey, it could
780 * always be more complex, USBA could clone!
783 xhci_get_dip(dev_t dev
)
786 int instance
= getminor(dev
) & ~HUBD_IS_ROOT_HUB
;
788 xhcip
= ddi_get_soft_state(xhci_soft_state
, instance
);
790 return (xhcip
->xhci_dip
);
795 xhci_get8(xhci_t
*xhcip
, xhci_reg_type_t rtt
, uintptr_t off
)
797 uintptr_t addr
, roff
;
801 roff
= xhcip
->xhci_regs_capoff
;
804 roff
= xhcip
->xhci_regs_operoff
;
807 roff
= xhcip
->xhci_regs_runoff
;
810 roff
= xhcip
->xhci_regs_dooroff
;
813 panic("called %s with bad reg type: %d", __func__
, rtt
);
815 ASSERT(roff
!= PCI_EINVAL32
);
816 addr
= roff
+ off
+ (uintptr_t)xhcip
->xhci_regs_base
;
818 return (ddi_get8(xhcip
->xhci_regs_handle
, (void *)addr
));
822 xhci_get16(xhci_t
*xhcip
, xhci_reg_type_t rtt
, uintptr_t off
)
824 uintptr_t addr
, roff
;
828 roff
= xhcip
->xhci_regs_capoff
;
831 roff
= xhcip
->xhci_regs_operoff
;
834 roff
= xhcip
->xhci_regs_runoff
;
837 roff
= xhcip
->xhci_regs_dooroff
;
840 panic("called %s with bad reg type: %d", __func__
, rtt
);
842 ASSERT(roff
!= PCI_EINVAL32
);
843 addr
= roff
+ off
+ (uintptr_t)xhcip
->xhci_regs_base
;
845 return (ddi_get16(xhcip
->xhci_regs_handle
, (void *)addr
));
849 xhci_get32(xhci_t
*xhcip
, xhci_reg_type_t rtt
, uintptr_t off
)
851 uintptr_t addr
, roff
;
855 roff
= xhcip
->xhci_regs_capoff
;
858 roff
= xhcip
->xhci_regs_operoff
;
861 roff
= xhcip
->xhci_regs_runoff
;
864 roff
= xhcip
->xhci_regs_dooroff
;
867 panic("called %s with bad reg type: %d", __func__
, rtt
);
869 ASSERT(roff
!= PCI_EINVAL32
);
870 addr
= roff
+ off
+ (uintptr_t)xhcip
->xhci_regs_base
;
872 return (ddi_get32(xhcip
->xhci_regs_handle
, (void *)addr
));
876 xhci_get64(xhci_t
*xhcip
, xhci_reg_type_t rtt
, uintptr_t off
)
878 uintptr_t addr
, roff
;
882 roff
= xhcip
->xhci_regs_capoff
;
885 roff
= xhcip
->xhci_regs_operoff
;
888 roff
= xhcip
->xhci_regs_runoff
;
891 roff
= xhcip
->xhci_regs_dooroff
;
894 panic("called %s with bad reg type: %d", __func__
, rtt
);
896 ASSERT(roff
!= PCI_EINVAL32
);
897 addr
= roff
+ off
+ (uintptr_t)xhcip
->xhci_regs_base
;
899 return (ddi_get64(xhcip
->xhci_regs_handle
, (void *)addr
));
903 xhci_put8(xhci_t
*xhcip
, xhci_reg_type_t rtt
, uintptr_t off
, uint8_t val
)
905 uintptr_t addr
, roff
;
909 roff
= xhcip
->xhci_regs_capoff
;
912 roff
= xhcip
->xhci_regs_operoff
;
915 roff
= xhcip
->xhci_regs_runoff
;
918 roff
= xhcip
->xhci_regs_dooroff
;
921 panic("called %s with bad reg type: %d", __func__
, rtt
);
923 ASSERT(roff
!= PCI_EINVAL32
);
924 addr
= roff
+ off
+ (uintptr_t)xhcip
->xhci_regs_base
;
926 ddi_put8(xhcip
->xhci_regs_handle
, (void *)addr
, val
);
930 xhci_put16(xhci_t
*xhcip
, xhci_reg_type_t rtt
, uintptr_t off
, uint16_t val
)
932 uintptr_t addr
, roff
;
936 roff
= xhcip
->xhci_regs_capoff
;
939 roff
= xhcip
->xhci_regs_operoff
;
942 roff
= xhcip
->xhci_regs_runoff
;
945 roff
= xhcip
->xhci_regs_dooroff
;
948 panic("called %s with bad reg type: %d", __func__
, rtt
);
950 ASSERT(roff
!= PCI_EINVAL32
);
951 addr
= roff
+ off
+ (uintptr_t)xhcip
->xhci_regs_base
;
953 ddi_put16(xhcip
->xhci_regs_handle
, (void *)addr
, val
);
957 xhci_put32(xhci_t
*xhcip
, xhci_reg_type_t rtt
, uintptr_t off
, uint32_t val
)
959 uintptr_t addr
, roff
;
963 roff
= xhcip
->xhci_regs_capoff
;
966 roff
= xhcip
->xhci_regs_operoff
;
969 roff
= xhcip
->xhci_regs_runoff
;
972 roff
= xhcip
->xhci_regs_dooroff
;
975 panic("called %s with bad reg type: %d", __func__
, rtt
);
977 ASSERT(roff
!= PCI_EINVAL32
);
978 addr
= roff
+ off
+ (uintptr_t)xhcip
->xhci_regs_base
;
980 ddi_put32(xhcip
->xhci_regs_handle
, (void *)addr
, val
);
984 xhci_put64(xhci_t
*xhcip
, xhci_reg_type_t rtt
, uintptr_t off
, uint64_t val
)
986 uintptr_t addr
, roff
;
990 roff
= xhcip
->xhci_regs_capoff
;
993 roff
= xhcip
->xhci_regs_operoff
;
996 roff
= xhcip
->xhci_regs_runoff
;
999 roff
= xhcip
->xhci_regs_dooroff
;
1002 panic("called %s with bad reg type: %d", __func__
, rtt
);
1004 ASSERT(roff
!= PCI_EINVAL32
);
1005 addr
= roff
+ off
+ (uintptr_t)xhcip
->xhci_regs_base
;
1007 ddi_put64(xhcip
->xhci_regs_handle
, (void *)addr
, val
);
1011 xhci_check_regs_acc(xhci_t
*xhcip
)
1016 * Treat the case where we can't check as fine so we can treat the code
1019 if (!DDI_FM_ACC_ERR_CAP(xhcip
->xhci_fm_caps
))
1022 ddi_fm_acc_err_get(xhcip
->xhci_regs_handle
, &de
, DDI_FME_VERSION
);
1023 ddi_fm_acc_err_clear(xhcip
->xhci_regs_handle
, DDI_FME_VERSION
);
1024 return (de
.fme_status
);
1028 * As a leaf PCIe driver, we just post the ereport and continue on.
1032 xhci_fm_error_cb(dev_info_t
*dip
, ddi_fm_error_t
*err
, const void *impl_data
)
1034 pci_ereport_post(dip
, err
, NULL
);
1035 return (err
->fme_status
);
1039 xhci_fm_fini(xhci_t
*xhcip
)
1041 if (xhcip
->xhci_fm_caps
== 0)
1044 if (DDI_FM_ERRCB_CAP(xhcip
->xhci_fm_caps
))
1045 ddi_fm_handler_unregister(xhcip
->xhci_dip
);
1047 if (DDI_FM_EREPORT_CAP(xhcip
->xhci_fm_caps
) ||
1048 DDI_FM_ERRCB_CAP(xhcip
->xhci_fm_caps
))
1049 pci_ereport_teardown(xhcip
->xhci_dip
);
1051 ddi_fm_fini(xhcip
->xhci_dip
);
1055 xhci_fm_init(xhci_t
*xhcip
)
1057 ddi_iblock_cookie_t iblk
;
1058 int def
= DDI_FM_EREPORT_CAPABLE
| DDI_FM_ACCCHK_CAPABLE
|
1059 DDI_FM_DMACHK_CAPABLE
| DDI_FM_ERRCB_CAPABLE
;
1061 xhcip
->xhci_fm_caps
= ddi_prop_get_int(DDI_DEV_T_ANY
, xhcip
->xhci_dip
,
1062 DDI_PROP_DONTPASS
, "fm_capable", def
);
1064 if (xhcip
->xhci_fm_caps
< 0) {
1065 xhcip
->xhci_fm_caps
= 0;
1066 } else if (xhcip
->xhci_fm_caps
& ~def
) {
1067 xhcip
->xhci_fm_caps
&= def
;
1070 if (xhcip
->xhci_fm_caps
== 0)
1073 ddi_fm_init(xhcip
->xhci_dip
, &xhcip
->xhci_fm_caps
, &iblk
);
1074 if (DDI_FM_EREPORT_CAP(xhcip
->xhci_fm_caps
) ||
1075 DDI_FM_ERRCB_CAP(xhcip
->xhci_fm_caps
)) {
1076 pci_ereport_setup(xhcip
->xhci_dip
);
1079 if (DDI_FM_ERRCB_CAP(xhcip
->xhci_fm_caps
)) {
1080 ddi_fm_handler_register(xhcip
->xhci_dip
,
1081 xhci_fm_error_cb
, xhcip
);
1086 xhci_reg_poll(xhci_t
*xhcip
, xhci_reg_type_t rt
, int reg
, uint32_t mask
,
1087 uint32_t targ
, uint_t tries
, int delay_ms
)
1091 for (i
= 0; i
< tries
; i
++) {
1092 uint32_t val
= xhci_get32(xhcip
, rt
, reg
);
1093 if (xhci_check_regs_acc(xhcip
) != DDI_FM_OK
) {
1094 ddi_fm_service_impact(xhcip
->xhci_dip
,
1099 if ((val
& mask
) == targ
)
1102 delay(drv_usectohz(delay_ms
* 1000));
1108 xhci_regs_map(xhci_t
*xhcip
)
1112 ddi_device_acc_attr_t da
;
1114 if (ddi_dev_regsize(xhcip
->xhci_dip
, XHCI_REG_NUMBER
, &memsize
) !=
1116 xhci_error(xhcip
, "failed to get register set size");
1120 bzero(&da
, sizeof (ddi_device_acc_attr_t
));
1121 da
.devacc_attr_version
= DDI_DEVICE_ATTR_V0
;
1122 da
.devacc_attr_endian_flags
= DDI_STRUCTURE_LE_ACC
;
1123 da
.devacc_attr_dataorder
= DDI_STRICTORDER_ACC
;
1124 if (DDI_FM_ACC_ERR_CAP(xhcip
->xhci_fm_caps
)) {
1125 da
.devacc_attr_access
= DDI_FLAGERR_ACC
;
1127 da
.devacc_attr_access
= DDI_DEFAULT_ACC
;
1130 ret
= ddi_regs_map_setup(xhcip
->xhci_dip
, XHCI_REG_NUMBER
,
1131 &xhcip
->xhci_regs_base
, 0, memsize
, &da
, &xhcip
->xhci_regs_handle
);
1133 if (ret
!= DDI_SUCCESS
) {
1134 xhci_error(xhcip
, "failed to map device registers: %d", ret
);
1142 xhci_regs_init(xhci_t
*xhcip
)
1145 * The capabilities always begin at offset zero.
1147 xhcip
->xhci_regs_capoff
= 0;
1148 xhcip
->xhci_regs_operoff
= xhci_get8(xhcip
, XHCI_R_CAP
, XHCI_CAPLENGTH
);
1149 xhcip
->xhci_regs_runoff
= xhci_get32(xhcip
, XHCI_R_CAP
, XHCI_RTSOFF
);
1150 xhcip
->xhci_regs_runoff
&= ~0x1f;
1151 xhcip
->xhci_regs_dooroff
= xhci_get32(xhcip
, XHCI_R_CAP
, XHCI_DBOFF
);
1152 xhcip
->xhci_regs_dooroff
&= ~0x3;
1154 if (xhci_check_regs_acc(xhcip
) != DDI_FM_OK
) {
1155 xhci_error(xhcip
, "failed to initialize controller register "
1156 "offsets: encountered FM register error");
1157 ddi_fm_service_impact(xhcip
->xhci_dip
, DDI_SERVICE_LOST
);
1165 * Read various parameters from PCI configuration space and from the Capability
1166 * registers that we'll need to register the device. We cache all of the
1167 * Capability registers.
1170 xhci_read_params(xhci_t
*xhcip
)
1174 uint32_t struc1
, struc2
, struc3
, cap1
, cap2
, pgsz
;
1175 uint32_t psize
, pbit
, capreg
;
1176 xhci_capability_t
*xcap
;
1180 * While it's tempting to do a 16-bit read at offset 0x2, unfortunately,
1181 * a few emulated systems don't support reading at offset 0x2 for the
1182 * version. Instead we need to read the caplength register and get the
1185 capreg
= xhci_get32(xhcip
, XHCI_R_CAP
, XHCI_CAPLENGTH
);
1186 vers
= XHCI_VERSION_MASK(capreg
);
1187 usb
= pci_config_get8(xhcip
->xhci_cfg_handle
, PCI_XHCI_USBREV
);
1188 struc1
= xhci_get32(xhcip
, XHCI_R_CAP
, XHCI_HCSPARAMS1
);
1189 struc2
= xhci_get32(xhcip
, XHCI_R_CAP
, XHCI_HCSPARAMS2
);
1190 struc3
= xhci_get32(xhcip
, XHCI_R_CAP
, XHCI_HCSPARAMS3
);
1191 cap1
= xhci_get32(xhcip
, XHCI_R_CAP
, XHCI_HCCPARAMS1
);
1192 cap2
= xhci_get32(xhcip
, XHCI_R_CAP
, XHCI_HCCPARAMS2
);
1193 pgsz
= xhci_get32(xhcip
, XHCI_R_OPER
, XHCI_PAGESIZE
);
1194 if (xhci_check_regs_acc(xhcip
) != DDI_FM_OK
) {
1195 xhci_error(xhcip
, "failed to read controller parameters: "
1196 "encountered FM register error");
1197 ddi_fm_service_impact(xhcip
->xhci_dip
, DDI_SERVICE_LOST
);
1201 xcap
= &xhcip
->xhci_caps
;
1202 xcap
->xcap_usb_vers
= usb
;
1203 xcap
->xcap_hci_vers
= vers
;
1204 xcap
->xcap_max_slots
= XHCI_HCS1_DEVSLOT_MAX(struc1
);
1205 xcap
->xcap_max_intrs
= XHCI_HCS1_IRQ_MAX(struc1
);
1206 xcap
->xcap_max_ports
= XHCI_HCS1_N_PORTS(struc1
);
1207 if (xcap
->xcap_max_ports
> MAX_PORTS
) {
1208 xhci_error(xhcip
, "Root hub has %d ports, but system only "
1209 "supports %d, limiting to %d\n", xcap
->xcap_max_ports
,
1210 MAX_PORTS
, MAX_PORTS
);
1211 xcap
->xcap_max_ports
= MAX_PORTS
;
1214 xcap
->xcap_ist_micro
= XHCI_HCS2_IST_MICRO(struc2
);
1215 xcap
->xcap_ist
= XHCI_HCS2_IST(struc2
);
1216 xcap
->xcap_max_esrt
= XHCI_HCS2_ERST_MAX(struc2
);
1217 xcap
->xcap_scratch_restore
= XHCI_HCS2_SPR(struc2
);
1218 xcap
->xcap_max_scratch
= XHCI_HCS2_SPB_MAX(struc2
);
1220 xcap
->xcap_u1_lat
= XHCI_HCS3_U1_DEL(struc3
);
1221 xcap
->xcap_u2_lat
= XHCI_HCS3_U2_DEL(struc3
);
1223 xcap
->xcap_flags
= XHCI_HCC1_FLAGS_MASK(cap1
);
1224 xcap
->xcap_max_psa
= XHCI_HCC1_PSA_SZ_MAX(cap1
);
1225 xcap
->xcap_xecp_off
= XHCI_HCC1_XECP(cap1
);
1226 xcap
->xcap_flags2
= XHCI_HCC2_FLAGS_MASK(cap2
);
1229 * We don't have documentation for what changed from before xHCI 0.96,
1230 * so we just refuse to support versions before 0.96. We also will
1231 * ignore anything with a major version greater than 1.
1233 if (xcap
->xcap_hci_vers
< 0x96 || xcap
->xcap_hci_vers
>= 0x200) {
1234 xhci_error(xhcip
, "Encountered unsupported xHCI version 0.%2x",
1235 xcap
->xcap_hci_vers
);
1240 * Determine the smallest size page that the controller supports and
1241 * make sure that it matches our pagesize. We basically check here for
1242 * the presence of 4k and 8k pages. The basis of the pagesize is used
1243 * extensively throughout the code and specification. While we could
1244 * support other page sizes here, given that we don't support systems
1245 * with it at this time, it doesn't make much sense.
1249 pbit
= XHCI_PAGESIZE_4K
;
1251 } else if (ps
== 0x2000) {
1252 pbit
= XHCI_PAGESIZE_8K
;
1255 xhci_error(xhcip
, "Encountered host page size that the driver "
1256 "doesn't know how to handle: %lx\n", ps
);
1260 if (!(pgsz
& pbit
)) {
1261 xhci_error(xhcip
, "Encountered controller that didn't support "
1262 "the host page size (%d), supports: %x", psize
, pgsz
);
1265 xcap
->xcap_pagesize
= psize
;
1271 * Apply known workarounds and issues. These reports come from other
1272 * Operating Systems and have been collected over time.
1275 xhci_identify(xhci_t
*xhcip
)
1277 xhci_quirks_populate(xhcip
);
1279 if (xhcip
->xhci_quirks
& XHCI_QUIRK_NO_MSI
) {
1280 xhcip
->xhci_caps
.xcap_intr_types
= DDI_INTR_TYPE_FIXED
;
1282 xhcip
->xhci_caps
.xcap_intr_types
= DDI_INTR_TYPE_FIXED
|
1283 DDI_INTR_TYPE_MSI
| DDI_INTR_TYPE_MSIX
;
1286 if (xhcip
->xhci_quirks
& XHCI_QUIRK_32_ONLY
) {
1287 xhcip
->xhci_caps
.xcap_flags
&= ~XCAP_AC64
;
1294 xhci_alloc_intr_handle(xhci_t
*xhcip
, int type
)
1299 * Normally a well-behaving driver would more carefully request an
1300 * amount of interrupts based on the number available, etc. But since we
1301 * only actually want a single interrupt, we're just going to go ahead
1302 * and ask for a single interrupt.
1304 ret
= ddi_intr_alloc(xhcip
->xhci_dip
, &xhcip
->xhci_intr_hdl
, type
, 0,
1305 XHCI_NINTR
, &xhcip
->xhci_intr_num
, DDI_INTR_ALLOC_NORMAL
);
1306 if (ret
!= DDI_SUCCESS
) {
1307 xhci_log(xhcip
, "!failed to allocate interrupts of type %d: %d",
1311 xhcip
->xhci_intr_type
= type
;
1317 xhci_alloc_intrs(xhci_t
*xhcip
)
1319 int intr_types
, ret
;
1321 if (XHCI_NINTR
> xhcip
->xhci_caps
.xcap_max_intrs
) {
1322 xhci_error(xhcip
, "controller does not support the minimum "
1323 "number of interrupts required (%d), supports %d",
1324 XHCI_NINTR
, xhcip
->xhci_caps
.xcap_max_intrs
);
1328 if ((ret
= ddi_intr_get_supported_types(xhcip
->xhci_dip
,
1329 &intr_types
)) != DDI_SUCCESS
) {
1330 xhci_error(xhcip
, "failed to get supported interrupt types: "
1336 * Mask off interrupt types we've already ruled out due to quirks or
1339 intr_types
&= xhcip
->xhci_caps
.xcap_intr_types
;
1340 if (intr_types
& DDI_INTR_TYPE_MSIX
) {
1341 if (xhci_alloc_intr_handle(xhcip
, DDI_INTR_TYPE_MSIX
))
1345 if (intr_types
& DDI_INTR_TYPE_MSI
) {
1346 if (xhci_alloc_intr_handle(xhcip
, DDI_INTR_TYPE_MSI
))
1350 if (intr_types
& DDI_INTR_TYPE_FIXED
) {
1351 if (xhci_alloc_intr_handle(xhcip
, DDI_INTR_TYPE_FIXED
))
1355 xhci_error(xhcip
, "failed to allocate an interrupt, supported types: "
1356 "0x%x", intr_types
);
1361 xhci_add_intr_handler(xhci_t
*xhcip
)
1365 if ((ret
= ddi_intr_get_pri(xhcip
->xhci_intr_hdl
,
1366 &xhcip
->xhci_intr_pri
)) != DDI_SUCCESS
) {
1367 xhci_error(xhcip
, "failed to get interrupt priority: %d", ret
);
1371 if ((ret
= ddi_intr_get_cap(xhcip
->xhci_intr_hdl
,
1372 &xhcip
->xhci_intr_caps
)) != DDI_SUCCESS
) {
1373 xhci_error(xhcip
, "failed to get interrupt capabilities: %d",
1378 if ((ret
= ddi_intr_add_handler(xhcip
->xhci_intr_hdl
, xhci_intr
, xhcip
,
1379 (uintptr_t)0)) != DDI_SUCCESS
) {
1380 xhci_error(xhcip
, "failed to add interrupt handler: %d", ret
);
1387 * Find a capability with an identifier whose value is 'id'. The 'init' argument
1388 * gives us the offset to start searching at. See xHCI 1.1 / 7 for more
1389 * information. This is more or less exactly like PCI capabilities.
1392 xhci_find_ext_cap(xhci_t
*xhcip
, uint32_t id
, uint32_t init
, uint32_t *outp
)
1398 * If we have no offset, we're done.
1400 if (xhcip
->xhci_caps
.xcap_xecp_off
== 0)
1403 off
= xhcip
->xhci_caps
.xcap_xecp_off
<< 2;
1408 cap_hdr
= xhci_get32(xhcip
, XHCI_R_CAP
, off
);
1409 if (xhci_check_regs_acc(xhcip
) != DDI_FM_OK
) {
1410 xhci_error(xhcip
, "failed to read xhci extended "
1411 "capabilities at offset 0x%x: encountered FM "
1412 "register error", off
);
1413 ddi_fm_service_impact(xhcip
->xhci_dip
,
1418 if (cap_hdr
== PCI_EINVAL32
)
1420 if (XHCI_XECP_ID(cap_hdr
) == id
&&
1421 (init
== UINT32_MAX
|| off
> init
)) {
1425 next
= XHCI_XECP_NEXT(cap_hdr
);
1427 * Watch out for overflow if we somehow end up with a more than
1430 if (next
<< 2 > (INT32_MAX
- off
))
1432 } while (next
!= 0);
1438 * For mostly information purposes, we'd like to walk to augment the devinfo
1439 * tree with the number of ports that support USB 2 and USB 3. Note though that
1440 * these ports may be overlapping. Many ports can support both USB 2 and USB 3
1441 * and are wired up to the same physical port, even though they show up as
1442 * separate 'ports' in the xhci sense.
1445 xhci_port_count(xhci_t
*xhcip
)
1447 uint_t nusb2
= 0, nusb3
= 0;
1448 uint32_t off
= UINT32_MAX
;
1450 while (xhci_find_ext_cap(xhcip
, XHCI_ID_PROTOCOLS
, off
, &off
) ==
1452 uint32_t rvers
, rport
;
1455 * See xHCI 1.1 / 7.2 for the format of this. The first uint32_t
1456 * has version information while the third uint32_t has the port
1459 rvers
= xhci_get32(xhcip
, XHCI_R_CAP
, off
);
1460 rport
= xhci_get32(xhcip
, XHCI_R_CAP
, off
+ 8);
1461 if (xhci_check_regs_acc(xhcip
) != DDI_FM_OK
) {
1462 xhci_error(xhcip
, "failed to read xhci port counts: "
1463 "encountered fatal FM register error");
1464 ddi_fm_service_impact(xhcip
->xhci_dip
,
1469 rvers
= XHCI_XECP_PROT_MAJOR(rvers
);
1470 rport
= XHCI_XECP_PROT_PCOUNT(rport
);
1474 } else if (rvers
<= 2) {
1477 xhci_error(xhcip
, "encountered port capabilities with "
1478 "unknown major USB version: %d\n", rvers
);
1482 (void) ddi_prop_update_int(DDI_DEV_T_NONE
, xhcip
->xhci_dip
,
1483 "usb2-capable-ports", nusb2
);
1484 (void) ddi_prop_update_int(DDI_DEV_T_NONE
, xhcip
->xhci_dip
,
1485 "usb3-capable-ports", nusb3
);
1491 * Take over control from the BIOS or other firmware, if applicable.
1494 xhci_controller_takeover(xhci_t
*xhcip
)
1500 * If we can't find the legacy capability, then there's nothing to do.
1502 if (xhci_find_ext_cap(xhcip
, XHCI_ID_USB_LEGACY
, UINT32_MAX
, &off
) ==
1505 val
= xhci_get32(xhcip
, XHCI_R_CAP
, off
);
1506 if (xhci_check_regs_acc(xhcip
) != DDI_FM_OK
) {
1507 xhci_error(xhcip
, "failed to read BIOS take over registers: "
1508 "encountered fatal FM register error");
1509 ddi_fm_service_impact(xhcip
->xhci_dip
, DDI_SERVICE_LOST
);
1513 if (val
& XHCI_BIOS_OWNED
) {
1514 val
|= XHCI_OS_OWNED
;
1515 xhci_put32(xhcip
, XHCI_R_CAP
, off
, val
);
1516 if (xhci_check_regs_acc(xhcip
) != DDI_FM_OK
) {
1517 xhci_error(xhcip
, "failed to write BIOS take over "
1518 "registers: encountered fatal FM register error");
1519 ddi_fm_service_impact(xhcip
->xhci_dip
,
1525 * Wait up to 5 seconds for things to change. While this number
1526 * isn't specified in the xHCI spec, it seems to be the de facto
1527 * value that various systems are using today. We'll use a 10ms
1528 * interval to check.
1530 ret
= xhci_reg_poll(xhcip
, XHCI_R_CAP
, off
,
1531 XHCI_BIOS_OWNED
| XHCI_OS_OWNED
, XHCI_OS_OWNED
, 500, 10);
1534 if (ret
== ETIMEDOUT
) {
1535 xhci_log(xhcip
, "!timed out waiting for firmware to "
1536 "hand off, taking over");
1537 val
&= ~XHCI_BIOS_OWNED
;
1538 xhci_put32(xhcip
, XHCI_R_CAP
, off
, val
);
1539 if (xhci_check_regs_acc(xhcip
) != DDI_FM_OK
) {
1540 xhci_error(xhcip
, "failed to write forced "
1541 "takeover: encountered fatal FM register "
1543 ddi_fm_service_impact(xhcip
->xhci_dip
,
1550 val
= xhci_get32(xhcip
, XHCI_R_CAP
, off
+ XHCI_XECP_LEGCTLSTS
);
1551 if (xhci_check_regs_acc(xhcip
) != DDI_FM_OK
) {
1552 xhci_error(xhcip
, "failed to read legacy control registers: "
1553 "encountered fatal FM register error");
1554 ddi_fm_service_impact(xhcip
->xhci_dip
, DDI_SERVICE_LOST
);
1557 val
&= XHCI_XECP_SMI_MASK
;
1558 val
|= XHCI_XECP_CLEAR_SMI
;
1559 xhci_put32(xhcip
, XHCI_R_CAP
, off
+ XHCI_XECP_LEGCTLSTS
, val
);
1560 if (xhci_check_regs_acc(xhcip
) != DDI_FM_OK
) {
1561 xhci_error(xhcip
, "failed to write legacy control registers: "
1562 "encountered fatal FM register error");
1563 ddi_fm_service_impact(xhcip
->xhci_dip
, DDI_SERVICE_LOST
);
1571 xhci_controller_stop(xhci_t
*xhcip
)
1575 cmdreg
= xhci_get32(xhcip
, XHCI_R_OPER
, XHCI_USBCMD
);
1576 if (xhci_check_regs_acc(xhcip
) != DDI_FM_OK
) {
1577 xhci_error(xhcip
, "failed to read USB Command register: "
1578 "encountered fatal FM register error");
1579 ddi_fm_service_impact(xhcip
->xhci_dip
, DDI_SERVICE_LOST
);
1583 cmdreg
&= ~(XHCI_CMD_RS
| XHCI_CMD_INTE
);
1584 xhci_put32(xhcip
, XHCI_R_OPER
, XHCI_USBCMD
, cmdreg
);
1585 if (xhci_check_regs_acc(xhcip
) != DDI_FM_OK
) {
1586 xhci_error(xhcip
, "failed to write USB Command register: "
1587 "encountered fatal FM register error");
1588 ddi_fm_service_impact(xhcip
->xhci_dip
, DDI_SERVICE_LOST
);
1593 * Wait up to 50ms for this to occur. The specification says that this
1594 * should stop within 16ms, but we give ourselves a bit more time just
1597 return (xhci_reg_poll(xhcip
, XHCI_R_OPER
, XHCI_USBSTS
, XHCI_STS_HCH
,
1598 XHCI_STS_HCH
, 50, 10));
1602 xhci_controller_reset(xhci_t
*xhcip
)
1607 cmdreg
= xhci_get32(xhcip
, XHCI_R_OPER
, XHCI_USBCMD
);
1608 if (xhci_check_regs_acc(xhcip
) != DDI_FM_OK
) {
1609 xhci_error(xhcip
, "failed to read USB Command register for "
1610 "reset: encountered fatal FM register error");
1611 ddi_fm_service_impact(xhcip
->xhci_dip
, DDI_SERVICE_LOST
);
1615 cmdreg
|= XHCI_CMD_HCRST
;
1616 xhci_put32(xhcip
, XHCI_R_OPER
, XHCI_USBCMD
, cmdreg
);
1617 if (xhci_check_regs_acc(xhcip
) != DDI_FM_OK
) {
1618 xhci_error(xhcip
, "failed to write USB Command register for "
1619 "reset: encountered fatal FM register error");
1620 ddi_fm_service_impact(xhcip
->xhci_dip
, DDI_SERVICE_LOST
);
1625 * Some controllers apparently don't want to be touched for at least 1ms
1626 * after we initiate the reset. Therefore give all controllers this
1627 * moment to breathe.
1629 delay(drv_usectohz(xhci_reset_delay
));
1632 * To tell that the reset has completed we first verify that the reset
1633 * has finished and that the USBCMD register no longer has the reset bit
1634 * asserted. However, once that's done we have to go verify that CNR
1635 * (Controller Not Ready) is no longer asserted.
1637 if ((ret
= xhci_reg_poll(xhcip
, XHCI_R_OPER
, XHCI_USBCMD
,
1638 XHCI_CMD_HCRST
, 0, 500, 10)) != 0)
1641 return (xhci_reg_poll(xhcip
, XHCI_R_OPER
, XHCI_USBSTS
,
1642 XHCI_STS_CNR
, 0, 500, 10));
1646 * Take care of all the required initialization before we can actually enable
1647 * the controller. This means that we need to:
1649 * o Program the maximum number of slots
1650 * o Program the DCBAAP and allocate the scratchpad
1651 * o Program the Command Ring
1652 * o Initialize the Event Ring
1653 * o Enable interrupts (set imod)
1656 xhci_controller_configure(xhci_t
*xhcip
)
1661 config
= xhci_get32(xhcip
, XHCI_R_OPER
, XHCI_CONFIG
);
1662 config
&= ~XHCI_CONFIG_SLOTS_MASK
;
1663 config
|= xhcip
->xhci_caps
.xcap_max_slots
;
1664 xhci_put32(xhcip
, XHCI_R_OPER
, XHCI_CONFIG
, config
);
1665 if (xhci_check_regs_acc(xhcip
) != DDI_FM_OK
) {
1666 ddi_fm_service_impact(xhcip
->xhci_dip
, DDI_SERVICE_LOST
);
1670 if ((ret
= xhci_context_init(xhcip
)) != 0) {
1673 reason
= "fatal FM I/O error occurred";
1674 } else if (ret
== ENOMEM
) {
1675 reason
= "unable to allocate DMA memory";
1677 reason
= "unexpected error occurred";
1680 xhci_error(xhcip
, "failed to initialize xhci context "
1681 "registers: %s (%d)", reason
, ret
);
1685 if ((ret
= xhci_command_ring_init(xhcip
)) != 0) {
1686 xhci_error(xhcip
, "failed to initialize commands: %d", ret
);
1690 if ((ret
= xhci_event_init(xhcip
)) != 0) {
1691 xhci_error(xhcip
, "failed to initialize events: %d", ret
);
1695 if ((ret
= xhci_intr_conf(xhcip
)) != 0) {
1696 xhci_error(xhcip
, "failed to configure interrupts: %d", ret
);
1704 xhci_controller_start(xhci_t
*xhcip
)
1708 reg
= xhci_get32(xhcip
, XHCI_R_OPER
, XHCI_USBCMD
);
1709 if (xhci_check_regs_acc(xhcip
) != DDI_FM_OK
) {
1710 xhci_error(xhcip
, "failed to read USB Command register for "
1711 "start: encountered fatal FM register error");
1712 ddi_fm_service_impact(xhcip
->xhci_dip
, DDI_SERVICE_LOST
);
1717 xhci_put32(xhcip
, XHCI_R_OPER
, XHCI_USBCMD
, reg
);
1718 if (xhci_check_regs_acc(xhcip
) != DDI_FM_OK
) {
1719 xhci_error(xhcip
, "failed to write USB Command register for "
1720 "start: encountered fatal FM register error");
1721 ddi_fm_service_impact(xhcip
->xhci_dip
, DDI_SERVICE_LOST
);
1725 return (xhci_reg_poll(xhcip
, XHCI_R_OPER
, XHCI_USBSTS
,
1726 XHCI_STS_HCH
, 0, 500, 10));
1731 xhci_reset_task(void *arg
)
1734 * Longer term, we'd like to properly perform a controller reset.
1735 * However, that requires a bit more assistance from USBA to work
1736 * properly and tear down devices. In the meantime, we panic.
1738 panic("XHCI runtime reset required");
1742 * This function is called when we've detected a fatal FM condition that has
1743 * resulted in a loss of service and we need to force a reset of the controller
1744 * as a whole. Only one such reset may be ongoing at a time.
1747 xhci_fm_runtime_reset(xhci_t
*xhcip
)
1749 boolean_t locked
= B_FALSE
;
1751 if (mutex_owned(&xhcip
->xhci_lock
)) {
1754 mutex_enter(&xhcip
->xhci_lock
);
1758 * If we're already in the error state than a reset is already ongoing
1759 * and there is nothing for us to do here.
1761 if (xhcip
->xhci_state
& XHCI_S_ERROR
) {
1765 xhcip
->xhci_state
|= XHCI_S_ERROR
;
1766 ddi_fm_service_impact(xhcip
->xhci_dip
, DDI_SERVICE_LOST
);
1767 taskq_dispatch_ent(xhci_taskq
, xhci_reset_task
, xhcip
, 0,
1771 mutex_exit(&xhcip
->xhci_lock
);
1776 xhci_ioctl_portsc(xhci_t
*xhcip
, intptr_t arg
)
1779 xhci_ioctl_portsc_t xhi
;
1781 bzero(&xhi
, sizeof (xhci_ioctl_portsc_t
));
1782 xhi
.xhi_nports
= xhcip
->xhci_caps
.xcap_max_ports
;
1783 for (i
= 1; i
<= xhcip
->xhci_caps
.xcap_max_ports
; i
++) {
1784 xhi
.xhi_portsc
[i
] = xhci_get32(xhcip
, XHCI_R_OPER
,
1788 if (ddi_copyout(&xhi
, (void *)(uintptr_t)arg
, sizeof (xhi
), 0) != 0)
1795 xhci_ioctl_clear(xhci_t
*xhcip
, intptr_t arg
)
1798 xhci_ioctl_clear_t xic
;
1800 if (ddi_copyin((const void *)(uintptr_t)arg
, &xic
, sizeof (xic
),
1804 if (xic
.xic_port
== 0 || xic
.xic_port
>
1805 xhcip
->xhci_caps
.xcap_max_ports
)
1808 reg
= xhci_get32(xhcip
, XHCI_R_OPER
, XHCI_PORTSC(xic
.xic_port
));
1809 reg
&= ~XHCI_PS_CLEAR
;
1810 reg
|= XHCI_PS_CSC
| XHCI_PS_PEC
| XHCI_PS_WRC
| XHCI_PS_OCC
|
1811 XHCI_PS_PRC
| XHCI_PS_PLC
| XHCI_PS_CEC
;
1812 xhci_put32(xhcip
, XHCI_R_OPER
, XHCI_PORTSC(xic
.xic_port
), reg
);
1818 xhci_ioctl_setpls(xhci_t
*xhcip
, intptr_t arg
)
1821 xhci_ioctl_setpls_t xis
;
1823 if (ddi_copyin((const void *)(uintptr_t)arg
, &xis
, sizeof (xis
),
1827 if (xis
.xis_port
== 0 || xis
.xis_port
>
1828 xhcip
->xhci_caps
.xcap_max_ports
)
1831 if (xis
.xis_pls
& ~0xf)
1834 reg
= xhci_get32(xhcip
, XHCI_R_OPER
, XHCI_PORTSC(xis
.xis_port
));
1835 reg
&= ~XHCI_PS_CLEAR
;
1836 reg
|= XHCI_PS_PLS_SET(xis
.xis_pls
);
1838 xhci_put32(xhcip
, XHCI_R_OPER
, XHCI_PORTSC(xis
.xis_port
), reg
);
1844 xhci_open(dev_t
*devp
, int flags
, int otyp
, cred_t
*credp
)
1846 dev_info_t
*dip
= xhci_get_dip(*devp
);
1848 return (usba_hubdi_open(dip
, devp
, flags
, otyp
, credp
));
1852 xhci_ioctl(dev_t dev
, int cmd
, intptr_t arg
, int mode
, cred_t
*credp
,
1855 dev_info_t
*dip
= xhci_get_dip(dev
);
1857 if (cmd
== XHCI_IOCTL_PORTSC
||
1858 cmd
== XHCI_IOCTL_CLEAR
||
1859 cmd
== XHCI_IOCTL_SETPLS
) {
1860 xhci_t
*xhcip
= ddi_get_soft_state(xhci_soft_state
,
1861 getminor(dev
) & ~HUBD_IS_ROOT_HUB
);
1863 if (secpolicy_xhci(credp
) != 0 ||
1864 crgetzoneid(credp
) != GLOBAL_ZONEID
)
1870 if (!(mode
& FWRITE
))
1873 if (cmd
== XHCI_IOCTL_PORTSC
)
1874 return (xhci_ioctl_portsc(xhcip
, arg
));
1875 else if (cmd
== XHCI_IOCTL_CLEAR
)
1876 return (xhci_ioctl_clear(xhcip
, arg
));
1878 return (xhci_ioctl_setpls(xhcip
, arg
));
1881 return (usba_hubdi_ioctl(dip
, dev
, cmd
, arg
, mode
, credp
, rvalp
));
1885 xhci_close(dev_t dev
, int flag
, int otyp
, cred_t
*credp
)
1887 dev_info_t
*dip
= xhci_get_dip(dev
);
1889 return (usba_hubdi_close(dip
, dev
, flag
, otyp
, credp
));
1893 * We try to clean up everything that we can. The only thing that we let stop us
1894 * at this time is a failure to remove the root hub, which is realistically the
1895 * equivalent of our EBUSY case.
1898 xhci_cleanup(xhci_t
*xhcip
)
1902 if (xhcip
->xhci_seq
& XHCI_ATTACH_ROOT_HUB
) {
1903 if ((ret
= xhci_root_hub_fini(xhcip
)) != 0)
1907 if (xhcip
->xhci_seq
& XHCI_ATTACH_USBA
) {
1908 xhci_hcd_fini(xhcip
);
1911 if (xhcip
->xhci_seq
& XHCI_ATTACH_STARTED
) {
1912 mutex_enter(&xhcip
->xhci_lock
);
1913 while (xhcip
->xhci_state
& XHCI_S_ERROR
)
1914 cv_wait(&xhcip
->xhci_statecv
, &xhcip
->xhci_lock
);
1915 mutex_exit(&xhcip
->xhci_lock
);
1917 (void) xhci_controller_stop(xhcip
);
1921 * Always release the context, command, and event data. They handle the
1922 * fact that they me be in an arbitrary state or unallocated.
1924 xhci_event_fini(xhcip
);
1925 xhci_command_ring_fini(xhcip
);
1926 xhci_context_fini(xhcip
);
1928 if (xhcip
->xhci_seq
& XHCI_ATTACH_INTR_ENABLE
) {
1929 (void) xhci_ddi_intr_disable(xhcip
);
1932 if (xhcip
->xhci_seq
& XHCI_ATTACH_SYNCH
) {
1933 cv_destroy(&xhcip
->xhci_statecv
);
1934 mutex_destroy(&xhcip
->xhci_lock
);
1937 if (xhcip
->xhci_seq
& XHCI_ATTACH_INTR_ADD
) {
1938 if ((ret
= ddi_intr_remove_handler(xhcip
->xhci_intr_hdl
)) !=
1940 xhci_error(xhcip
, "failed to remove interrupt "
1941 "handler: %d", ret
);
1945 if (xhcip
->xhci_seq
& XHCI_ATTACH_INTR_ALLOC
) {
1946 if ((ret
= ddi_intr_free(xhcip
->xhci_intr_hdl
)) !=
1948 xhci_error(xhcip
, "failed to free interrupts: %d", ret
);
1952 if (xhcip
->xhci_seq
& XHCI_ATTACH_REGS_MAP
) {
1953 ddi_regs_map_free(&xhcip
->xhci_regs_handle
);
1954 xhcip
->xhci_regs_handle
= NULL
;
1957 if (xhcip
->xhci_seq
& XHCI_ATTACH_PCI_CONFIG
) {
1958 pci_config_teardown(&xhcip
->xhci_cfg_handle
);
1959 xhcip
->xhci_cfg_handle
= NULL
;
1962 if (xhcip
->xhci_seq
& XHCI_ATTACH_FM
) {
1963 xhci_fm_fini(xhcip
);
1964 xhcip
->xhci_fm_caps
= 0;
1967 inst
= ddi_get_instance(xhcip
->xhci_dip
);
1968 xhcip
->xhci_dip
= NULL
;
1969 ddi_soft_state_free(xhci_soft_state
, inst
);
1971 return (DDI_SUCCESS
);
1975 xhci_attach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
)
1977 int ret
, inst
, route
;
1980 if (cmd
!= DDI_ATTACH
)
1981 return (DDI_FAILURE
);
1983 inst
= ddi_get_instance(dip
);
1984 if (ddi_soft_state_zalloc(xhci_soft_state
, inst
) != 0)
1985 return (DDI_FAILURE
);
1986 xhcip
= ddi_get_soft_state(xhci_soft_state
, ddi_get_instance(dip
));
1987 xhcip
->xhci_dip
= dip
;
1989 xhcip
->xhci_regs_capoff
= PCI_EINVAL32
;
1990 xhcip
->xhci_regs_operoff
= PCI_EINVAL32
;
1991 xhcip
->xhci_regs_runoff
= PCI_EINVAL32
;
1992 xhcip
->xhci_regs_dooroff
= PCI_EINVAL32
;
1994 xhci_fm_init(xhcip
);
1995 xhcip
->xhci_seq
|= XHCI_ATTACH_FM
;
1997 if (pci_config_setup(xhcip
->xhci_dip
, &xhcip
->xhci_cfg_handle
) !=
2001 xhcip
->xhci_seq
|= XHCI_ATTACH_PCI_CONFIG
;
2002 xhcip
->xhci_vendor_id
= pci_config_get16(xhcip
->xhci_cfg_handle
,
2004 xhcip
->xhci_device_id
= pci_config_get16(xhcip
->xhci_cfg_handle
,
2007 if (xhci_regs_map(xhcip
) == B_FALSE
) {
2011 xhcip
->xhci_seq
|= XHCI_ATTACH_REGS_MAP
;
2013 if (xhci_regs_init(xhcip
) == B_FALSE
)
2016 if (xhci_read_params(xhcip
) == B_FALSE
)
2019 if (xhci_identify(xhcip
) == B_FALSE
)
2022 if (xhci_alloc_intrs(xhcip
) == B_FALSE
)
2024 xhcip
->xhci_seq
|= XHCI_ATTACH_INTR_ALLOC
;
2026 if (xhci_add_intr_handler(xhcip
) == B_FALSE
)
2028 xhcip
->xhci_seq
|= XHCI_ATTACH_INTR_ADD
;
2030 mutex_init(&xhcip
->xhci_lock
, NULL
, MUTEX_DRIVER
,
2031 (void *)(uintptr_t)xhcip
->xhci_intr_pri
);
2032 cv_init(&xhcip
->xhci_statecv
, NULL
, CV_DRIVER
, NULL
);
2033 xhcip
->xhci_seq
|= XHCI_ATTACH_SYNCH
;
2035 if (xhci_port_count(xhcip
) == B_FALSE
)
2038 if (xhci_controller_takeover(xhcip
) == B_FALSE
)
2042 * We don't enable interrupts until after we take over the controller
2043 * from the BIOS. We've observed cases where this can cause spurious
2046 if (xhci_ddi_intr_enable(xhcip
) == B_FALSE
)
2048 xhcip
->xhci_seq
|= XHCI_ATTACH_INTR_ENABLE
;
2050 if ((ret
= xhci_controller_stop(xhcip
)) != 0) {
2051 xhci_error(xhcip
, "failed to stop controller: %s",
2052 ret
== EIO
? "encountered FM register error" :
2053 "timed out while waiting for controller");
2057 if ((ret
= xhci_controller_reset(xhcip
)) != 0) {
2058 xhci_error(xhcip
, "failed to reset controller: %s",
2059 ret
== EIO
? "encountered FM register error" :
2060 "timed out while waiting for controller");
2064 if ((ret
= xhci_controller_configure(xhcip
)) != 0) {
2065 xhci_error(xhcip
, "failed to configure controller: %d", ret
);
2070 * Some systems support having ports routed to both an ehci and xhci
2071 * controller. If we support it and the user hasn't requested otherwise
2072 * via a driver.conf tuning, we reroute it now.
2074 route
= ddi_prop_get_int(DDI_DEV_T_ANY
, xhcip
->xhci_dip
,
2075 DDI_PROP_DONTPASS
, "xhci-reroute", XHCI_PROP_REROUTE_DEFAULT
);
2076 if (route
!= XHCI_PROP_REROUTE_DISABLE
&&
2077 (xhcip
->xhci_quirks
& XHCI_QUIRK_INTC_EHCI
))
2078 (void) xhci_reroute_intel(xhcip
);
2080 if ((ret
= xhci_controller_start(xhcip
)) != 0) {
2081 xhci_log(xhcip
, "failed to reset controller: %s",
2082 ret
== EIO
? "encountered FM register error" :
2083 "timed out while waiting for controller");
2086 xhcip
->xhci_seq
|= XHCI_ATTACH_STARTED
;
2089 * Finally, register ourselves with the USB framework itself.
2091 if ((ret
= xhci_hcd_init(xhcip
)) != 0) {
2092 xhci_error(xhcip
, "failed to register hcd with usba");
2095 xhcip
->xhci_seq
|= XHCI_ATTACH_USBA
;
2097 if ((ret
= xhci_root_hub_init(xhcip
)) != 0) {
2098 xhci_error(xhcip
, "failed to load the root hub driver");
2101 xhcip
->xhci_seq
|= XHCI_ATTACH_ROOT_HUB
;
2103 return (DDI_SUCCESS
);
2106 (void) xhci_cleanup(xhcip
);
2107 return (DDI_FAILURE
);
2111 xhci_detach(dev_info_t
*dip
, ddi_detach_cmd_t cmd
)
2115 if (cmd
!= DDI_DETACH
)
2116 return (DDI_FAILURE
);
2118 xhcip
= ddi_get_soft_state(xhci_soft_state
, ddi_get_instance(dip
));
2119 if (xhcip
== NULL
) {
2120 dev_err(dip
, CE_WARN
, "detach called without soft state!");
2121 return (DDI_FAILURE
);
2124 return (xhci_cleanup(xhcip
));
2129 xhci_getinfo(dev_info_t
*dip
, ddi_info_cmd_t cmd
, void *arg
, void **outp
)
2135 case DDI_INFO_DEVT2DEVINFO
:
2137 *outp
= xhci_get_dip(dev
);
2139 return (DDI_FAILURE
);
2141 case DDI_INFO_DEVT2INSTANCE
:
2143 inst
= getminor(dev
) & ~HUBD_IS_ROOT_HUB
;
2144 *outp
= (void *)(uintptr_t)inst
;
2147 return (DDI_FAILURE
);
2150 return (DDI_SUCCESS
);
2153 static struct cb_ops xhci_cb_ops
= {
2154 xhci_open
, /* cb_open */
2155 xhci_close
, /* cb_close */
2156 nodev
, /* cb_strategy */
2157 nodev
, /* cb_print */
2158 nodev
, /* cb_dump */
2159 nodev
, /* cb_read */
2160 nodev
, /* cb_write */
2161 xhci_ioctl
, /* cb_ioctl */
2162 nodev
, /* cb_devmap */
2163 nodev
, /* cb_mmap */
2164 nodev
, /* cb_segmap */
2165 nochpoll
, /* cb_chpoll */
2166 ddi_prop_op
, /* cb_prop_op */
2167 NULL
, /* cb_stream */
2168 D_MP
| D_HOTPLUG
, /* cb_flag */
2169 CB_REV
, /* cb_rev */
2170 nodev
, /* cb_aread */
2171 nodev
/* cb_awrite */
2174 static struct dev_ops xhci_dev_ops
= {
2175 DEVO_REV
, /* devo_rev */
2176 0, /* devo_refcnt */
2177 xhci_getinfo
, /* devo_getinfo */
2178 nulldev
, /* devo_identify */
2179 nulldev
, /* devo_probe */
2180 xhci_attach
, /* devo_attach */
2181 xhci_detach
, /* devo_detach */
2182 nodev
, /* devo_reset */
2183 &xhci_cb_ops
, /* devo_cb_ops */
2184 &usba_hubdi_busops
, /* devo_bus_ops */
2185 usba_hubdi_root_hub_power
, /* devo_power */
2186 ddi_quiesce_not_supported
/* devo_quiesce */
2189 static struct modldrv xhci_modldrv
= {
2195 static struct modlinkage xhci_modlinkage
= {
2206 if ((ret
= ddi_soft_state_init(&xhci_soft_state
, sizeof (xhci_t
),
2211 xhci_taskq
= taskq_create("xhci_taskq", 1, minclsyspri
, 0, 0, 0);
2212 if (xhci_taskq
== NULL
) {
2213 ddi_soft_state_fini(&xhci_soft_state
);
2217 if ((ret
= mod_install(&xhci_modlinkage
)) != 0) {
2218 taskq_destroy(xhci_taskq
);
2226 _info(struct modinfo
*modinfop
)
2228 return (mod_info(&xhci_modlinkage
, modinfop
));
2236 if ((ret
= mod_remove(&xhci_modlinkage
)) != 0)
2239 if (xhci_taskq
!= NULL
) {
2240 taskq_destroy(xhci_taskq
);
2244 ddi_soft_state_fini(&xhci_soft_state
);