/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 */

/*
 * EHCI Host Controller Driver (EHCI)
 *
 * The EHCI driver is a software driver which interfaces to the Universal
 * Serial Bus layer (USBA) and the Host Controller (HC). The interface to
 * the Host Controller is defined by the EHCI Host Controller Interface.
 *
 * This module contains the main EHCI driver code which handles all USB
 * transfers, bandwidth allocations and other general functionalities.
 */

#include <sys/usb/hcd/ehci/ehcid.h>
#include <sys/usb/hcd/ehci/ehci_isoch.h>
#include <sys/usb/hcd/ehci/ehci_xfer.h>

/*
 * EHCI MSI tunable:
 *
 * By default MSI is enabled on all supported platforms except for the
 * EHCI controller of the ULI1575 South bridge.
 */
boolean_t ehci_enable_msi = B_TRUE;

/* Pointer to the state structure */
extern void *ehci_statep;

extern void ehci_handle_endpoint_reclaimation(ehci_state_t *);

extern uint_t ehci_vt62x2_workaround;
extern int force_ehci_off;

/* Adjustable variables for the size of the pools */
int ehci_qh_pool_size = EHCI_QH_POOL_SIZE;
int ehci_qtd_pool_size = EHCI_QTD_POOL_SIZE;

/*
 * Initialize the values which determine the order in which the 32 ms
 * interrupt QH lists are executed by the host controller in the
 * lattice tree.
 */
static uchar_t ehci_index[EHCI_NUM_INTR_QH_LISTS] =
	{0x00, 0x10, 0x08, 0x18,
	0x04, 0x14, 0x0c, 0x1c,
	0x02, 0x12, 0x0a, 0x1a,
	0x06, 0x16, 0x0e, 0x1e,
	0x01, 0x11, 0x09, 0x19,
	0x05, 0x15, 0x0d, 0x1d,
	0x03, 0x13, 0x0b, 0x1b,
	0x07, 0x17, 0x0f, 0x1f};
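
/*
 * The table above is simply the 5-bit bit-reversal of 0..31; walking the
 * 32 interrupt QH lists in this order spreads them evenly across
 * consecutive frames.
 */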

/*
 * Initialize the values which are used to calculate start split mask
 * for the low/full/high speed interrupt and isochronous endpoints.
 */
static uint_t ehci_start_split_mask[15] = {
	/*
	 * For high/full/low speed usb devices. For high speed
	 * devices with a polling interval greater than or equal
	 * to 8 microframes (1 microframe = 125us).
	 */
	0x01,	/* 00000001 */
	0x02,	/* 00000010 */
	0x04,	/* 00000100 */
	0x08,	/* 00001000 */
	0x10,	/* 00010000 */
	0x20,	/* 00100000 */
	0x40,	/* 01000000 */
	0x80,	/* 10000000 */

	/* Only for high speed devices with polling interval of 4 microframes */
	0x11,	/* 00010001 */
	0x22,	/* 00100010 */
	0x44,	/* 01000100 */
	0x88,	/* 10001000 */

	/* Only for high speed devices with polling interval of 2 microframes */
	0x55,	/* 01010101 */
	0xaa,	/* 10101010 */

	/* Only for high speed devices with polling interval of 1 microframe */
	0xff	/* 11111111 */
};

/*
 * Initialize the values which are used to calculate complete split mask
 * for the low/full speed interrupt and isochronous endpoints.
 */
static uint_t ehci_intr_complete_split_mask[7] = {
	/* Only full/low speed devices */
	0x1c,	/* 00011100 */
	0x38,	/* 00111000 */
	0x70,	/* 01110000 */
	0xe0,	/* 11100000 */
	0x00,	/* Need FSTN feature */
	0x00,	/* Need FSTN feature */
	0x00	/* Need FSTN feature */
};
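
/*
 * The zero entries above mark split schedules that would require the EHCI
 * FSTN (Frame Span Traversal Node) feature, which this driver does not use.
 */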

/*
 * EHCI Internal Function Prototypes
 */

/* Host Controller Driver (HCD) initialization functions */
void		ehci_set_dma_attributes(ehci_state_t	*ehcip);
int		ehci_allocate_pools(ehci_state_t	*ehcip);
void		ehci_decode_ddi_dma_addr_bind_handle_result(
				ehci_state_t		*ehcip,
				int			result);
int		ehci_map_regs(ehci_state_t	*ehcip);
int		ehci_register_intrs_and_init_mutex(
				ehci_state_t		*ehcip);
static int	ehci_add_intrs(ehci_state_t	*ehcip,
				int		intr_type);
int		ehci_init_ctlr(ehci_state_t	*ehcip,
				int		init_type);
static int	ehci_take_control(ehci_state_t	*ehcip);
static int	ehci_init_periodic_frame_lst_table(
				ehci_state_t		*ehcip);
static void	ehci_build_interrupt_lattice(
				ehci_state_t		*ehcip);
usba_hcdi_ops_t	*ehci_alloc_hcdi_ops(ehci_state_t	*ehcip);

/* Host Controller Driver (HCD) deinitialization functions */
int		ehci_cleanup(ehci_state_t	*ehcip);
static void	ehci_rem_intrs(ehci_state_t	*ehcip);
int		ehci_cpr_suspend(ehci_state_t	*ehcip);
int		ehci_cpr_resume(ehci_state_t	*ehcip);

/* Bandwidth Allocation functions */
int		ehci_allocate_bandwidth(ehci_state_t	*ehcip,
				usba_pipe_handle_data_t	*ph,
				uint_t			*pnode,
				uchar_t			*smask,
				uchar_t			*cmask);
static int	ehci_allocate_high_speed_bandwidth(
				ehci_state_t		*ehcip,
				usba_pipe_handle_data_t	*ph,
				uint_t			*hnode,
				uchar_t			*smask,
				uchar_t			*cmask);
static int	ehci_allocate_classic_tt_bandwidth(
				ehci_state_t		*ehcip,
				usba_pipe_handle_data_t	*ph,
				uint_t			pnode);
void		ehci_deallocate_bandwidth(ehci_state_t	*ehcip,
				usba_pipe_handle_data_t	*ph,
				uint_t			pnode,
				uchar_t			smask,
				uchar_t			cmask);
static void	ehci_deallocate_high_speed_bandwidth(
				ehci_state_t		*ehcip,
				usba_pipe_handle_data_t	*ph,
				uint_t			hnode,
				uchar_t			smask,
				uchar_t			cmask);
static void	ehci_deallocate_classic_tt_bandwidth(
				ehci_state_t		*ehcip,
				usba_pipe_handle_data_t	*ph,
				uint_t			pnode);
static int	ehci_compute_high_speed_bandwidth(
				ehci_state_t		*ehcip,
				usb_ep_descr_t		*endpoint,
				usb_port_status_t	port_status,
				uint_t			*sbandwidth,
				uint_t			*cbandwidth);
static int	ehci_compute_classic_bandwidth(
				usb_ep_descr_t		*endpoint,
				usb_port_status_t	port_status,
				uint_t			*bandwidth);
int		ehci_adjust_polling_interval(
				ehci_state_t		*ehcip,
				usb_ep_descr_t		*endpoint,
				usb_port_status_t	port_status);
static int	ehci_adjust_high_speed_polling_interval(
				ehci_state_t		*ehcip,
				usb_ep_descr_t		*endpoint);
static uint_t	ehci_lattice_height(uint_t		interval);
static uint_t	ehci_lattice_parent(uint_t		node);
static uint_t	ehci_find_periodic_node(
				uint_t			leaf,
				int			interval);
static uint_t	ehci_leftmost_leaf(uint_t	node,
				uint_t		height);
static uint_t	ehci_pow_2(uint_t x);
static uint_t	ehci_log_2(uint_t x);
static int	ehci_find_bestfit_hs_mask(
				ehci_state_t		*ehcip,
				uchar_t			*smask,
				uint_t			*pnode,
				usb_ep_descr_t		*endpoint,
				uint_t			bandwidth,
				int			interval);
static int	ehci_find_bestfit_ls_intr_mask(
				ehci_state_t		*ehcip,
				uchar_t			*smask,
				uchar_t			*cmask,
				uint_t			*pnode,
				uint_t			sbandwidth,
				uint_t			cbandwidth,
				int			interval);
static int	ehci_find_bestfit_sitd_in_mask(
				ehci_state_t		*ehcip,
				uchar_t			*smask,
				uchar_t			*cmask,
				uint_t			*pnode,
				uint_t			sbandwidth,
				uint_t			cbandwidth,
				int			interval);
static int	ehci_find_bestfit_sitd_out_mask(
				ehci_state_t		*ehcip,
				uchar_t			*smask,
				uint_t			*pnode,
				uint_t			sbandwidth,
				int			interval);
static uint_t	ehci_calculate_bw_availability_mask(
				ehci_state_t		*ehcip,
				uint_t			bandwidth,
				int			leaf,
				int			leaf_count,
				uchar_t			*bw_mask);
static void	ehci_update_bw_availability(
				ehci_state_t		*ehcip,
				int			bandwidth,
				int			leftmost_leaf,
				int			leaf_count,
				uchar_t			mask);

/* Miscellaneous functions */
ehci_state_t	*ehci_obtain_state(
				dev_info_t		*dip);
int		ehci_state_is_operational(
				ehci_state_t		*ehcip);
int		ehci_do_soft_reset(
				ehci_state_t		*ehcip);
usb_req_attrs_t	ehci_get_xfer_attrs(ehci_state_t	*ehcip,
				ehci_pipe_private_t	*pp,
				ehci_trans_wrapper_t	*tw);
usb_frame_number_t ehci_get_current_frame_number(
				ehci_state_t		*ehcip);
static void	ehci_cpr_cleanup(
				ehci_state_t		*ehcip);
int		ehci_wait_for_sof(
				ehci_state_t		*ehcip);
void		ehci_toggle_scheduler(
				ehci_state_t		*ehcip);
void		ehci_print_caps(ehci_state_t	*ehcip);
void		ehci_print_regs(ehci_state_t	*ehcip);
void		ehci_print_qh(ehci_state_t	*ehcip,
				ehci_qh_t	*qh);
void		ehci_print_qtd(ehci_state_t	*ehcip,
				ehci_qtd_t	*qtd);
void		ehci_create_stats(ehci_state_t	*ehcip);
void		ehci_destroy_stats(ehci_state_t	*ehcip);
void		ehci_do_intrs_stats(ehci_state_t	*ehcip,
				int		val);
void		ehci_do_byte_stats(ehci_state_t	*ehcip,
				size_t		len,
				uint8_t		attr,
				uint8_t		addr);

/*
 * check if this ehci controller can support PM
 */
int
ehci_hcdi_pm_support(dev_info_t *dip)
{
	ehci_state_t *ehcip = ddi_get_soft_state(ehci_statep,
	    ddi_get_instance(dip));

	if (((ehcip->ehci_vendor_id == PCI_VENDOR_NEC_COMBO) &&
	    (ehcip->ehci_device_id == PCI_DEVICE_NEC_COMBO)) ||

	    ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
	    (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) ||

	    (ehcip->ehci_vendor_id == PCI_VENDOR_VIA)) {

		return (USB_SUCCESS);
	}

	return (USB_FAILURE);
}

void
ehci_dma_attr_workaround(ehci_state_t *ehcip)
{
	/*
	 * Some Nvidia chips cannot handle a qh dma address above 2G.
	 * Bit 31 of the dma address might be omitted, which can cause
	 * a system crash or other unpredictable results. So force the
	 * dma address to be allocated below 2G to make ehci work.
	 */
	if (PCI_VENDOR_NVIDIA == ehcip->ehci_vendor_id) {
		switch (ehcip->ehci_device_id) {
		case PCI_DEVICE_NVIDIA_CK804:
		case PCI_DEVICE_NVIDIA_MCP04:
			USB_DPRINTF_L2(PRINT_MASK_ATTA,
			    ehcip->ehci_log_hdl,
			    "ehci_dma_attr_workaround: NVIDIA dma "
			    "workaround enabled, force dma address "
			    "to be allocated below 2G");
			ehcip->ehci_dma_attr.dma_attr_addr_hi =
			    0x7fffffffull;
			break;
		default:
			break;
		}
	}
}

/*
 * Host Controller Driver (HCD) initialization functions
 */

/*
 * ehci_set_dma_attributes:
 *
 * Set the limits in the DMA attributes structure. Most of the values used
 * in the DMA limit structures are the default values as specified by the
 * Writing PCI device drivers document.
 */
void
ehci_set_dma_attributes(ehci_state_t *ehcip)
{
	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_set_dma_attributes:");

	/* Initialize the DMA attributes */
	ehcip->ehci_dma_attr.dma_attr_version = DMA_ATTR_V0;
	ehcip->ehci_dma_attr.dma_attr_addr_lo = 0x00000000ull;
	ehcip->ehci_dma_attr.dma_attr_addr_hi = 0xfffffffeull;

	/* 32 bit addressing */
	ehcip->ehci_dma_attr.dma_attr_count_max = EHCI_DMA_ATTR_COUNT_MAX;

	/* Byte alignment */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;

	/*
	 * Since the PCI specification requires only byte alignment, the
	 * burst size field should be set to 1 for PCI devices.
	 */
	ehcip->ehci_dma_attr.dma_attr_burstsizes = 0x1;

	ehcip->ehci_dma_attr.dma_attr_minxfer = 0x1;
	ehcip->ehci_dma_attr.dma_attr_maxxfer = EHCI_DMA_ATTR_MAX_XFER;
	ehcip->ehci_dma_attr.dma_attr_seg = 0xffffffffull;
	ehcip->ehci_dma_attr.dma_attr_sgllen = 1;
	ehcip->ehci_dma_attr.dma_attr_granular = EHCI_DMA_ATTR_GRANULAR;
	ehcip->ehci_dma_attr.dma_attr_flags = 0;
	ehci_dma_attr_workaround(ehcip);
}

/*
 * ehci_allocate_pools:
 *
 * Allocate the system memory for the Endpoint Descriptor (QH) and for the
 * Transfer Descriptor (QTD) pools. Both QH and QTD structures must be aligned
 * to a 16 byte boundary.
 */
int
ehci_allocate_pools(ehci_state_t *ehcip)
{
	ddi_device_acc_attr_t	dev_attr;
	size_t			real_length;
	int			result;
	uint_t			ccount;
	int			i;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_allocate_pools:");

	/* The host controller will be little endian */
	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Byte alignment */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_TD_QH_ALIGNMENT;

	/* Allocate the QTD pool DMA handle */
	if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
	    DDI_DMA_SLEEP, 0,
	    &ehcip->ehci_qtd_pool_dma_handle) != DDI_SUCCESS) {

		goto failure;
	}

	/* Allocate the memory for the QTD pool */
	if (ddi_dma_mem_alloc(ehcip->ehci_qtd_pool_dma_handle,
	    ehci_qtd_pool_size * sizeof (ehci_qtd_t),
	    &dev_attr,
	    DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    0,
	    (caddr_t *)&ehcip->ehci_qtd_pool_addr,
	    &real_length,
	    &ehcip->ehci_qtd_pool_mem_handle)) {

		goto failure;
	}

	/* Map the QTD pool into the I/O address space */
	result = ddi_dma_addr_bind_handle(
	    ehcip->ehci_qtd_pool_dma_handle,
	    NULL,
	    (caddr_t)ehcip->ehci_qtd_pool_addr,
	    real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    NULL,
	    &ehcip->ehci_qtd_pool_cookie,
	    &ccount);

	bzero((void *)ehcip->ehci_qtd_pool_addr,
	    ehci_qtd_pool_size * sizeof (ehci_qtd_t));

	/* Process the result */
	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "ehci_allocate_pools: More than 1 cookie");

			goto failure;
		}
	} else {
		USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_allocate_pools: Result = %d", result);

		ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);

		goto failure;
	}

	/*
	 * DMA addresses for QTD pools are bound
	 */
	ehcip->ehci_dma_addr_bind_flag |= EHCI_QTD_POOL_BOUND;

	/* Initialize the QTD pool */
	for (i = 0; i < ehci_qtd_pool_size; i ++) {
		Set_QTD(ehcip->ehci_qtd_pool_addr[i].
		    qtd_state, EHCI_QTD_FREE);
	}

	/* Allocate the QH pool DMA handle */
	if (ddi_dma_alloc_handle(ehcip->ehci_dip,
	    &ehcip->ehci_dma_attr,
	    DDI_DMA_SLEEP,
	    0,
	    &ehcip->ehci_qh_pool_dma_handle) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_allocate_pools: ddi_dma_alloc_handle failed");

		goto failure;
	}

	/* Allocate the memory for the QH pool */
	if (ddi_dma_mem_alloc(ehcip->ehci_qh_pool_dma_handle,
	    ehci_qh_pool_size * sizeof (ehci_qh_t),
	    &dev_attr,
	    DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    0,
	    (caddr_t *)&ehcip->ehci_qh_pool_addr,
	    &real_length,
	    &ehcip->ehci_qh_pool_mem_handle) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_allocate_pools: ddi_dma_mem_alloc failed");

		goto failure;
	}

	result = ddi_dma_addr_bind_handle(ehcip->ehci_qh_pool_dma_handle,
	    NULL,
	    (caddr_t)ehcip->ehci_qh_pool_addr,
	    real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    NULL,
	    &ehcip->ehci_qh_pool_cookie,
	    &ccount);

	bzero((void *)ehcip->ehci_qh_pool_addr,
	    ehci_qh_pool_size * sizeof (ehci_qh_t));

	/* Process the result */
	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "ehci_allocate_pools: More than 1 cookie");

			goto failure;
		}
	} else {
		ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);

		goto failure;
	}

	/*
	 * DMA addresses for QH pools are bound
	 */
	ehcip->ehci_dma_addr_bind_flag |= EHCI_QH_POOL_BOUND;

	/* Initialize the QH pool */
	for (i = 0; i < ehci_qh_pool_size; i ++) {
		Set_QH(ehcip->ehci_qh_pool_addr[i].qh_state, EHCI_QH_FREE);
	}

	/* Byte alignment */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;

	return (DDI_SUCCESS);

failure:
	/* Byte alignment */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;

	return (DDI_FAILURE);
}

/*
 * ehci_decode_ddi_dma_addr_bind_handle_result:
 *
 * Process the return values of ddi_dma_addr_bind_handle()
 */
void
ehci_decode_ddi_dma_addr_bind_handle_result(
	ehci_state_t	*ehcip,
	int		result)
{
	USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
	    "ehci_decode_ddi_dma_addr_bind_handle_result:");

	switch (result) {
	case DDI_DMA_PARTIAL_MAP:
		USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
		    "Partial transfers not allowed");
		break;
	case DDI_DMA_INUSE:
		USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
		    "Handle is in use");
		break;
	case DDI_DMA_NORESOURCES:
		USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
		    "No resources");
		break;
	case DDI_DMA_NOMAPPING:
		USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
		    "No mapping");
		break;
	case DDI_DMA_TOOBIG:
		USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
		    "Object is too big");
		break;
	default:
		USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
		    "Unknown dma error");
	}
}

/*
 * ehci_map_regs:
 *
 * The Host Controller (HC) contains a set of on-chip operational registers
 * which should be mapped into a non-cacheable portion of the system
 * addressable space.
 */
int
ehci_map_regs(ehci_state_t *ehcip)
{
	ddi_device_acc_attr_t	attr;
	uint16_t		cmd_reg;
	uint_t			length;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_map_regs:");

	/* Check to make sure we have memory access */
	if (pci_config_setup(ehcip->ehci_dip,
	    &ehcip->ehci_config_handle) != DDI_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_map_regs: Config error");

		return (DDI_FAILURE);
	}

	/* Make sure Memory Access Enable is set */
	cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);

	if (!(cmd_reg & PCI_COMM_MAE)) {

		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_map_regs: Memory base address access disabled");

		return (DDI_FAILURE);
	}

	/* The host controller will be little endian */
	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Map in EHCI Capability registers */
	if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
	    (caddr_t *)&ehcip->ehci_capsp, 0,
	    sizeof (ehci_caps_t), &attr,
	    &ehcip->ehci_caps_handle) != DDI_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_map_regs: Map setup error");

		return (DDI_FAILURE);
	}

	length = ddi_get8(ehcip->ehci_caps_handle,
	    (uint8_t *)&ehcip->ehci_capsp->ehci_caps_length);

	/* Free the original mapping */
	ddi_regs_map_free(&ehcip->ehci_caps_handle);

	/* Re-map in EHCI Capability and Operational registers */
	if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
	    (caddr_t *)&ehcip->ehci_capsp, 0,
	    length + sizeof (ehci_regs_t), &attr,
	    &ehcip->ehci_caps_handle) != DDI_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_map_regs: Map setup error");

		return (DDI_FAILURE);
	}

	/* Get the pointer to the EHCI Operational Registers */
	ehcip->ehci_regsp = (ehci_regs_t *)
	    ((uintptr_t)ehcip->ehci_capsp + length);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_map_regs: Capsp 0x%p Regsp 0x%p\n",
	    (void *)ehcip->ehci_capsp, (void *)ehcip->ehci_regsp);

	return (DDI_SUCCESS);
}

/*
 * The following simulated polling is for debugging purposes only.
 * It is activated on x86 by setting usb-polling=true in GRUB or ehci.conf.
 */
static int
ehci_is_polled(dev_info_t *dip)
{
	int	ret;
	char	*propval;

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
	    "usb-polling", &propval) != DDI_SUCCESS)

		return (0);

	ret = (strcmp(propval, "true") == 0);
	ddi_prop_free(propval);

	return (ret);
}

static void
ehci_poll_intr(void *arg)
{
	/* poll every msec */
	for (;;) {
		(void) ehci_intr(arg, NULL);
		ddi_msleep(1);
	}
}

/*
 * ehci_register_intrs_and_init_mutex:
 *
 * Register interrupts and initialize the mutex and condition variables.
 */
int
ehci_register_intrs_and_init_mutex(ehci_state_t *ehcip)
{
	int	intr_types;

#if defined(__x86)
	uint8_t iline;
#endif

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_register_intrs_and_init_mutex:");

	/*
	 * There is a known MSI hardware bug with the EHCI controller
	 * of the ULI1575 southbridge. Hence MSI is disabled for this chip.
	 */
	if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
	    (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) {
		ehcip->ehci_msi_enabled = B_FALSE;
	} else {
		/* Set the MSI enable flag from the global EHCI MSI tunable */
		ehcip->ehci_msi_enabled = ehci_enable_msi;
	}

	/* launch polling thread instead of enabling pci interrupt */
	if (ehci_is_polled(ehcip->ehci_dip)) {
		extern pri_t maxclsyspri;

		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_register_intrs_and_init_mutex: "
		    "running in simulated polled mode");

		(void) thread_create(NULL, 0, ehci_poll_intr, ehcip, 0, &p0,
		    TS_RUN, maxclsyspri);

		goto skip_intr;
	}

#if defined(__x86)
	/*
	 * Make sure that the interrupt pin is connected to the
	 * interrupt controller on x86. Interrupt line 255 means
	 * "unknown" or "not connected" (PCI spec 6.2.4, footnote 43).
	 * If we would return failure when interrupt line equals 255, then
	 * high speed devices will be routed to companion host controllers.
	 * However, it is not necessary to return failure here, and
	 * o/uhci codes don't check the interrupt line either.
	 * But it's good to log a message here for debug purposes.
	 */
	iline = pci_config_get8(ehcip->ehci_config_handle,
	    PCI_CONF_ILINE);

	if (iline == 255) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_register_intrs_and_init_mutex: "
		    "interrupt line value out of range (%d)",
		    iline);
	}
#endif	/* __x86 */

	/* Get supported interrupt types */
	if (ddi_intr_get_supported_types(ehcip->ehci_dip,
	    &intr_types) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_register_intrs_and_init_mutex: "
		    "ddi_intr_get_supported_types failed");

		return (DDI_FAILURE);
	}

	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_register_intrs_and_init_mutex: "
	    "supported interrupt types 0x%x", intr_types);

	if ((intr_types & DDI_INTR_TYPE_MSI) && ehcip->ehci_msi_enabled) {
		if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_MSI)
		    != DDI_SUCCESS) {
			USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "ehci_register_intrs_and_init_mutex: MSI "
			    "registration failed, trying FIXED interrupt \n");
		} else {
			USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "ehci_register_intrs_and_init_mutex: "
			    "Using MSI interrupt type\n");

			ehcip->ehci_intr_type = DDI_INTR_TYPE_MSI;
			ehcip->ehci_flags |= EHCI_INTR;
		}
	}

	if ((!(ehcip->ehci_flags & EHCI_INTR)) &&
	    (intr_types & DDI_INTR_TYPE_FIXED)) {
		if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_FIXED)
		    != DDI_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "ehci_register_intrs_and_init_mutex: "
			    "FIXED interrupt registration failed\n");

			return (DDI_FAILURE);
		}

		USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_register_intrs_and_init_mutex: "
		    "Using FIXED interrupt type\n");

		ehcip->ehci_intr_type = DDI_INTR_TYPE_FIXED;
		ehcip->ehci_flags |= EHCI_INTR;
	}

skip_intr:
	/* Create prototype for advance on async schedule */
	cv_init(&ehcip->ehci_async_schedule_advance_cv,
	    NULL, CV_DRIVER, NULL);

	return (DDI_SUCCESS);
}

/*
 * ehci_add_intrs:
 *
 * Register FIXED or MSI interrupts.
 */
static int
ehci_add_intrs(ehci_state_t *ehcip, int intr_type)
{
	int	actual, avail, intr_size, count = 0;
	int	i, flag, ret;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_add_intrs: interrupt type 0x%x", intr_type);

	/* Get number of interrupts */
	ret = ddi_intr_get_nintrs(ehcip->ehci_dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count == 0)) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_add_intrs: ddi_intr_get_nintrs() failure, "
		    "ret: %d, count: %d", ret, count);

		return (DDI_FAILURE);
	}

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(ehcip->ehci_dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_add_intrs: ddi_intr_get_navail() failure, "
		    "ret: %d, count: %d", ret, count);

		return (DDI_FAILURE);
	}

	if (avail < count) {
		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_add_intrs: nintrs() "
		    "returned %d, navail returned %d\n", count, avail);
	}

	/* Allocate an array of interrupt handles */
	intr_size = count * sizeof (ddi_intr_handle_t);
	ehcip->ehci_htable = kmem_zalloc(intr_size, KM_SLEEP);

	flag = (intr_type == DDI_INTR_TYPE_MSI) ?
	    DDI_INTR_ALLOC_STRICT : DDI_INTR_ALLOC_NORMAL;

	/* call ddi_intr_alloc() */
	ret = ddi_intr_alloc(ehcip->ehci_dip, ehcip->ehci_htable,
	    intr_type, 0, count, &actual, flag);

	if ((ret != DDI_SUCCESS) || (actual == 0)) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_add_intrs: ddi_intr_alloc() failed %d", ret);

		kmem_free(ehcip->ehci_htable, intr_size);

		return (DDI_FAILURE);
	}

	if (actual < count) {
		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_add_intrs: Requested: %d, Received: %d\n",
		    count, actual);

		for (i = 0; i < actual; i++)
			(void) ddi_intr_free(ehcip->ehci_htable[i]);

		kmem_free(ehcip->ehci_htable, intr_size);

		return (DDI_FAILURE);
	}

	ehcip->ehci_intr_cnt = actual;

	if ((ret = ddi_intr_get_pri(ehcip->ehci_htable[0],
	    &ehcip->ehci_intr_pri)) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_add_intrs: ddi_intr_get_pri() failed %d", ret);

		for (i = 0; i < actual; i++)
			(void) ddi_intr_free(ehcip->ehci_htable[i]);

		kmem_free(ehcip->ehci_htable, intr_size);

		return (DDI_FAILURE);
	}

	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_add_intrs: Supported Interrupt priority 0x%x",
	    ehcip->ehci_intr_pri);

	/* Test for high level mutex */
	if (ehcip->ehci_intr_pri >= ddi_intr_get_hilevel_pri()) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_add_intrs: Hi level interrupt not supported");

		for (i = 0; i < actual; i++)
			(void) ddi_intr_free(ehcip->ehci_htable[i]);

		kmem_free(ehcip->ehci_htable, intr_size);

		return (DDI_FAILURE);
	}

	/* Initialize the mutex */
	mutex_init(&ehcip->ehci_int_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ehcip->ehci_intr_pri));

	/* Call ddi_intr_add_handler() */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(ehcip->ehci_htable[i],
		    ehci_intr, (caddr_t)ehcip,
		    (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "ehci_add_intrs: ddi_intr_add_handler() "
			    "failed %d", ret);

			for (i = 0; i < actual; i++)
				(void) ddi_intr_free(ehcip->ehci_htable[i]);

			mutex_destroy(&ehcip->ehci_int_mutex);
			kmem_free(ehcip->ehci_htable, intr_size);

			return (DDI_FAILURE);
		}
	}

	if ((ret = ddi_intr_get_cap(ehcip->ehci_htable[0],
	    &ehcip->ehci_intr_cap)) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_add_intrs: ddi_intr_get_cap() failed %d", ret);

		for (i = 0; i < actual; i++) {
			(void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
			(void) ddi_intr_free(ehcip->ehci_htable[i]);
		}

		mutex_destroy(&ehcip->ehci_int_mutex);
		kmem_free(ehcip->ehci_htable, intr_size);

		return (DDI_FAILURE);
	}

	/* Enable all interrupts */
	if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(ehcip->ehci_htable,
		    ehcip->ehci_intr_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < ehcip->ehci_intr_cnt; i++)
			(void) ddi_intr_enable(ehcip->ehci_htable[i]);
	}

	return (DDI_SUCCESS);
}

/*
 * ehci_init_hardware
 *
 * take control from BIOS, reset EHCI host controller, and check version, etc.
 */
int
ehci_init_hardware(ehci_state_t *ehcip)
{
	int			revision;
	uint16_t		cmd_reg;
	int			abort_on_BIOS_take_over_failure;

	/* Take control from the BIOS */
	if (ehci_take_control(ehcip) != USB_SUCCESS) {

		/* read .conf file properties */
		abort_on_BIOS_take_over_failure =
		    ddi_prop_get_int(DDI_DEV_T_ANY,
		    ehcip->ehci_dip, DDI_PROP_DONTPASS,
		    "abort-on-BIOS-take-over-failure", 0);

		if (abort_on_BIOS_take_over_failure) {

			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "Unable to take control from BIOS.");

			return (DDI_FAILURE);
		}

		USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "Unable to take control from BIOS. Failure is ignored.");
	}

	/* set Memory Master Enable */
	cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);
	cmd_reg |= (PCI_COMM_MAE | PCI_COMM_ME);
	pci_config_put16(ehcip->ehci_config_handle, PCI_CONF_COMM, cmd_reg);

	/* Reset the EHCI host controller */
	Set_OpReg(ehci_command,
	    Get_OpReg(ehci_command) | EHCI_CMD_HOST_CTRL_RESET);

	/* Wait 10ms for reset to complete */
	drv_usecwait(EHCI_RESET_TIMEWAIT);

	ASSERT(Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED);

	/* Verify the version number */
	revision = Get_16Cap(ehci_version);

	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_init_hardware: Revision 0x%x", revision);

	/*
	 * EHCI driver supports EHCI host controllers compliant to
	 * 0.95 and higher revisions of EHCI specifications.
	 */
	if (revision < EHCI_REVISION_0_95) {

		USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "Revision 0x%x is not supported", revision);

		return (DDI_FAILURE);
	}

	if (ehcip->ehci_hc_soft_state == EHCI_CTLR_INIT_STATE) {

		/* Initialize the Frame list base address area */
		if (ehci_init_periodic_frame_lst_table(ehcip) != DDI_SUCCESS) {

			return (DDI_FAILURE);
		}

		/*
		 * For performance reasons, do not insert anything into the
		 * asynchronous list or activate the asynch list schedule until
		 * there is a valid QH.
		 */
		ehcip->ehci_head_of_async_sched_list = NULL;

		if ((ehcip->ehci_vendor_id == PCI_VENDOR_VIA) &&
		    (ehci_vt62x2_workaround & EHCI_VIA_ASYNC_SCHEDULE)) {
			/*
			 * The driver is unable to reliably stop the asynch
			 * list schedule on VIA VT6202 controllers, so we
			 * always keep a dummy QH on the list.
			 */
			ehci_qh_t *dummy_async_qh =
			    ehci_alloc_qh(ehcip, NULL, 0);

			Set_QH(dummy_async_qh->qh_link_ptr,
			    ((ehci_qh_cpu_to_iommu(ehcip, dummy_async_qh) &
			    EHCI_QH_LINK_PTR) | EHCI_QH_LINK_REF_QH));

			/* Set this QH to be the "head" of the circular list */
			Set_QH(dummy_async_qh->qh_ctrl,
			    Get_QH(dummy_async_qh->qh_ctrl) |
			    EHCI_QH_CTRL_RECLAIM_HEAD);

			Set_QH(dummy_async_qh->qh_next_qtd,
			    EHCI_QH_NEXT_QTD_PTR_VALID);
			Set_QH(dummy_async_qh->qh_alt_next_qtd,
			    EHCI_QH_ALT_NEXT_QTD_PTR_VALID);

			ehcip->ehci_head_of_async_sched_list = dummy_async_qh;
			ehcip->ehci_open_async_count++;
			ehcip->ehci_async_req_count++;
		}
	}

	return (DDI_SUCCESS);
}

/*
 * ehci_init_workaround
 *
 * some workarounds during initializing ehci
 */
int
ehci_init_workaround(ehci_state_t *ehcip)
{
	/*
	 * Acer Labs Inc. M5273 EHCI controller does not send
	 * interrupts unless the Root hub ports are routed to the EHCI
	 * host controller; so route the ports now, before we test for
	 * the presence of SOFs interrupts.
	 */
	if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
		/* Route all Root hub ports to EHCI host controller */
		Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);
	}

	/*
	 * VIA chips have some issues and may not work reliably.
	 * Revisions >= 0x80 are part of a southbridge and appear
	 * to be reliable with the workaround.
	 * For revisions < 0x80, complain if we were bound using the
	 * class compatible name, else proceed. This will allow the
	 * user to bind ehci specifically to this chip and not have
	 * the warnings.
	 */
	if (ehcip->ehci_vendor_id == PCI_VENDOR_VIA) {

		if (ehcip->ehci_rev_id >= PCI_VIA_REVISION_6212) {

			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "ehci_init_workaround: Applying VIA workarounds "
			    "for the 6212 chip.");

		} else if (strcmp(DEVI(ehcip->ehci_dip)->devi_binding_name,
		    "pciclass,0c0320") == 0) {

			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "Due to recently discovered incompatibilities");
			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "with this USB controller, USB2.x transfer");
			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "support has been disabled. This device will");
			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "continue to function as a USB1.x controller.");
			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "If you are interested in enabling USB2.x");
			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "support please, refer to the ehci(7D) man page.");
			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "Please also refer to www.sun.com/io for");
			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "Solaris Ready products and to");
			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "www.sun.com/bigadmin/hcl for additional");
			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "compatible USB products.");

			return (DDI_FAILURE);

		} else if (ehci_vt62x2_workaround) {

			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "Applying VIA workarounds");
		}
	}

	return (DDI_SUCCESS);
}

/*
 * ehci_init_check_status
 *
 * Check if the EHCI host controller is running.
 */
int
ehci_init_check_status(ehci_state_t *ehcip)
{
	clock_t			sof_time_wait;

	/*
	 * Get the number of clock ticks to wait.
	 * This is based on the maximum time it takes for a frame list rollover
	 * and maximum time wait for SOFs to begin.
	 */
	sof_time_wait = drv_usectohz((EHCI_NUM_PERIODIC_FRAME_LISTS * 1000) +
	    EHCI_SOF_TIMEWAIT);

	/* Tell the ISR to broadcast ehci_async_schedule_advance_cv */
	ehcip->ehci_flags |= EHCI_CV_INTR;

	/* We need to add a delay to allow the chip time to start running */
	(void) cv_reltimedwait(&ehcip->ehci_async_schedule_advance_cv,
	    &ehcip->ehci_int_mutex, sof_time_wait, TR_CLOCK_TICK);

	/*
	 * Check that the EHCI host controller is running, otherwise
	 * return failure.
	 */
	if ((ehcip->ehci_flags & EHCI_CV_INTR) ||
	    (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {

		USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "No SOF interrupts have been received, this USB EHCI host "
		    "controller is unusable");

		/*
		 * Route all Root hub ports to Classic host
		 * controller, in case this is an unusable ALI M5273
		 * EHCI controller.
		 */
		if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
			Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);
		}

		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * ehci_init_ctlr:
 *
 * Initialize the Host Controller (HC).
 */
int
ehci_init_ctlr(ehci_state_t *ehcip, int init_type)
{
	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_init_ctlr:");

	if (init_type == EHCI_NORMAL_INITIALIZATION) {

		if (ehci_init_hardware(ehcip) != DDI_SUCCESS) {

			return (DDI_FAILURE);
		}
	}

	/*
	 * Check for the Asynchronous schedule park capability feature. If this
	 * feature is supported, then program the ehci command register with
	 * appropriate values.
	 */
	if (Get_Cap(ehci_hcc_params) & EHCI_HCC_ASYNC_SCHED_PARK_CAP) {

		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_init_ctlr: Async park mode is supported");

		Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
		    (EHCI_CMD_ASYNC_PARK_ENABLE |
		    EHCI_CMD_ASYNC_PARK_COUNT_3)));
	}

	/*
	 * Check for the programmable periodic frame list feature. If this
	 * feature is supported, then program the ehci command register with
	 * the 1024 frame list value.
	 */
	if (Get_Cap(ehci_hcc_params) & EHCI_HCC_PROG_FRAME_LIST_FLAG) {

		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_init_ctlr: Variable programmable periodic "
		    "frame list is supported");

		Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
		    EHCI_CMD_FRAME_1024_SIZE));
	}

	/*
	 * Currently the EHCI driver doesn't support 64 bit addressing.
	 *
	 * If we were using the 64 bit addressing capability, then we would
	 * program the ehci_ctrl_segment register with the 4 Gigabyte segment
	 * where all of the interface data structures are allocated.
	 */
	if (Get_Cap(ehci_hcc_params) & EHCI_HCC_64BIT_ADDR_CAP) {

		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_init_ctlr: EHCI driver doesn't support "
		    "64 bit addressing");
	}

	/* 64 bit addressing is not supported */
	Set_OpReg(ehci_ctrl_segment, 0x00000000);

	/* Turn on/off the schedulers */
	ehci_toggle_scheduler(ehcip);

	/* Set host controller soft state to operational */
	ehcip->ehci_hc_soft_state = EHCI_CTLR_OPERATIONAL_STATE;

	/*
	 * Set the Periodic Frame List Base Address register with the
	 * starting physical address of the Periodic Frame List.
	 */
	Set_OpReg(ehci_periodic_list_base,
	    (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
	    EHCI_PERIODIC_LIST_BASE));

	/*
	 * Set ehci_interrupt to enable all interrupts except Root
	 * Hub Status change interrupt.
	 */
	Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
	    EHCI_INTR_FRAME_LIST_ROLLOVER | EHCI_INTR_USB_ERROR |
	    EHCI_INTR_USB);

	/*
	 * Set the desired interrupt threshold and turn on the EHCI host
	 * controller.
	 */
	Set_OpReg(ehci_command,
	    ((Get_OpReg(ehci_command) & ~EHCI_CMD_INTR_THRESHOLD) |
	    (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));

	ASSERT(Get_OpReg(ehci_command) & EHCI_CMD_HOST_CTRL_RUN);

	if (init_type == EHCI_NORMAL_INITIALIZATION) {

		if (ehci_init_workaround(ehcip) != DDI_SUCCESS) {

			/* Set host controller soft state to error */
			ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;

			return (DDI_FAILURE);
		}

		if (ehci_init_check_status(ehcip) != DDI_SUCCESS) {

			/* Set host controller soft state to error */
			ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;

			return (DDI_FAILURE);
		}

		USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_init_ctlr: SOF's have started");
	}

	/* Route all Root hub ports to EHCI host controller */
	Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);

	return (DDI_SUCCESS);
}

/*
 * ehci_take_control:
 *
 * Handshake to take EHCI control from the BIOS if necessary. It's only valid
 * for x86 machines, because sparc doesn't have a BIOS.
 * On x86 machines, the take-control process includes:
 * o get the base address of the extended capability list
 * o find out the capability for handoff synchronization in the list.
 * o check if the BIOS has owned the host controller.
 * o set the OS Owned semaphore bit, ask the BIOS to release the ownership.
 * o wait for a constant time and check if the BIOS has relinquished control.
 */
/* ARGSUSED */
static int
ehci_take_control(ehci_state_t *ehcip)
{
#if defined(__x86)
	uint32_t		extended_cap;
	uint32_t		extended_cap_offset;
	uint32_t		extended_cap_id;
	uint_t			retry;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_take_control:");

	/*
	 * According to EHCI Spec 2.2.4, get the EECP base address from the
	 * HCCPARAMS register.
	 */
	extended_cap_offset = (Get_Cap(ehci_hcc_params) & EHCI_HCC_EECP) >>
	    EHCI_HCC_EECP_SHIFT;

	/*
	 * According to EHCI Spec 2.2.4, if the extended capability offset is
	 * less than 40h then it is not valid. This means we don't need to
	 * worry about BIOS handoff.
	 */
	if (extended_cap_offset < EHCI_HCC_EECP_MIN_OFFSET) {

		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_take_control: Hardware doesn't support legacy.");

		goto success;
	}

	/*
	 * According to EHCI Spec 2.1.7, a zero offset indicates the
	 * end of the extended capability list.
	 */
	while (extended_cap_offset) {

		/* Get the extended capability value. */
		extended_cap = pci_config_get32(ehcip->ehci_config_handle,
		    extended_cap_offset);

		/*
		 * It's possible that we'll receive an invalid PCI read here due
		 * to something going wrong due to platform firmware. This has
		 * been observed in the wild depending on the version of ACPI in
		 * use. If this happens, we'll assume that the capability does
		 * not exist and that we do not need to take control from the
		 * BIOS.
		 */
		if (extended_cap == PCI_EINVAL32) {
			extended_cap_id = EHCI_EX_CAP_ID_RESERVED;
			break;
		}

		/* Get the capability ID */
		extended_cap_id = (extended_cap & EHCI_EX_CAP_ID) >>
		    EHCI_EX_CAP_ID_SHIFT;

		/* Check if the card supports legacy */
		if (extended_cap_id == EHCI_EX_CAP_ID_BIOS_HANDOFF) {
			break;
		}

		/* Get the offset of the next capability */
		extended_cap_offset = (extended_cap & EHCI_EX_CAP_NEXT_PTR) >>
		    EHCI_EX_CAP_NEXT_PTR_SHIFT;
	}

	/*
	 * Unable to find legacy support in hardware's extended capability list.
	 * This means we don't need to worry about BIOS handoff.
	 */
	if (extended_cap_id != EHCI_EX_CAP_ID_BIOS_HANDOFF) {

		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_take_control: Hardware doesn't support legacy");

		goto success;
	}

	/* Check if the BIOS has owned it. */
	if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {

		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_take_control: BIOS does not own EHCI");

		goto success;
	}

	/*
	 * According to EHCI Spec 5.1, the OS driver initiates an ownership
	 * request by setting the OS Owned semaphore to a one. The OS
	 * waits for the BIOS Owned bit to go to a zero before attempting
	 * to use the EHCI controller. The time that the OS must wait for the
	 * BIOS to respond to the request for ownership is beyond the scope of
	 * this specification.
	 * It waits up to EHCI_TAKEOVER_WAIT_COUNT*EHCI_TAKEOVER_DELAY ms
	 * for the BIOS to release the ownership.
	 */
	extended_cap |= EHCI_LEGSUP_OS_OWNED_SEM;
	pci_config_put32(ehcip->ehci_config_handle, extended_cap_offset,
	    extended_cap);

	for (retry = 0; retry < EHCI_TAKEOVER_WAIT_COUNT; retry++) {

		/* wait a special interval */
#ifndef __lock_lint
		delay(drv_usectohz(EHCI_TAKEOVER_DELAY));
#endif
		/* Check to see if the BIOS has released the ownership */
		extended_cap = pci_config_get32(
		    ehcip->ehci_config_handle, extended_cap_offset);

		if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {

			USB_DPRINTF_L3(PRINT_MASK_ATTA,
			    ehcip->ehci_log_hdl,
			    "ehci_take_control: BIOS has released "
			    "the ownership. retry = %d", retry);

			goto success;
		}
	}

	USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_take_control: take control from BIOS failed.");

	return (USB_FAILURE);

success:

#endif	/* __x86 */
	return (USB_SUCCESS);
}

/*
 * ehci_init_periodic_frame_lst_table:
 *
 * Allocate the system memory and initialize the Host Controller
 * Periodic Frame List table area. The start of the Periodic
 * Frame List Table area must be 4096 byte aligned.
 */
static int
ehci_init_periodic_frame_lst_table(ehci_state_t *ehcip)
{
	ddi_device_acc_attr_t	dev_attr;
	size_t			real_length;
	uint_t			ccount;
	int			result;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_init_periodic_frame_lst_table:");

	/* The host controller will be little endian */
	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Force the required 4K restrictive alignment */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_PFL_ALIGNMENT;

	/* Create space for the Periodic Frame List */
	if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
	    DDI_DMA_SLEEP, 0, &ehcip->ehci_pflt_dma_handle) != DDI_SUCCESS) {

		goto failure;
	}

	if (ddi_dma_mem_alloc(ehcip->ehci_pflt_dma_handle,
	    sizeof (ehci_periodic_frame_list_t),
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    0, (caddr_t *)&ehcip->ehci_periodic_frame_list_tablep,
	    &real_length, &ehcip->ehci_pflt_mem_handle)) {

		goto failure;
	}

	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_init_periodic_frame_lst_table: "
	    "Real length %lu", real_length);

	/* Map the whole Periodic Frame List into the I/O address space */
	result = ddi_dma_addr_bind_handle(ehcip->ehci_pflt_dma_handle,
	    NULL, (caddr_t)ehcip->ehci_periodic_frame_list_tablep,
	    real_length, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &ehcip->ehci_pflt_cookie, &ccount);

	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "ehci_init_periodic_frame_lst_table: "
			    "More than 1 cookie");

			goto failure;
		}
	} else {
		ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);

		goto failure;
	}

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_init_periodic_frame_lst_table: virtual 0x%p physical 0x%x",
	    (void *)ehcip->ehci_periodic_frame_list_tablep,
	    ehcip->ehci_pflt_cookie.dmac_address);

	/*
	 * DMA addresses for the Periodic Frame List are bound.
	 */
	ehcip->ehci_dma_addr_bind_flag |= EHCI_PFLT_DMA_BOUND;

	bzero((void *)ehcip->ehci_periodic_frame_list_tablep, real_length);

	/* Initialize the Periodic Frame List */
	ehci_build_interrupt_lattice(ehcip);

	/* Reset Byte Alignment to Default */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;

	return (DDI_SUCCESS);
failure:
	/* Byte alignment */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;

	return (DDI_FAILURE);
}

/*
 * ehci_build_interrupt_lattice:
 *
 * Construct the interrupt lattice tree using static Endpoint Descriptors
 * (QH). This interrupt lattice tree will have a total of 32 interrupt QH
 * lists and the Host Controller (HC) processes one interrupt QH list in
 * every frame. The Host Controller traverses the periodic schedule by
 * constructing an array offset reference from the Periodic List Base Address
 * register and bits 12 to 3 of the Frame Index register. It fetches the
 * element and begins traversing the graph of linked schedule data structures.
 */
static void
ehci_build_interrupt_lattice(ehci_state_t *ehcip)
{
	ehci_qh_t	*list_array = ehcip->ehci_qh_pool_addr;
	ushort_t	ehci_index[EHCI_NUM_PERIODIC_FRAME_LISTS];
	ehci_periodic_frame_list_t *periodic_frame_list =
	    ehcip->ehci_periodic_frame_list_tablep;
	ushort_t	*temp, num_of_nodes;
	uintptr_t	addr;
	int		i, j, k;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_build_interrupt_lattice:");

	/*
	 * Reserve the first 63 Endpoint Descriptor (QH) structures
	 * in the pool as static endpoints & these are required for
	 * constructing the interrupt lattice tree.
	 */
	for (i = 0; i < EHCI_NUM_STATIC_NODES; i++) {
		Set_QH(list_array[i].qh_state, EHCI_QH_STATIC);
		Set_QH(list_array[i].qh_status, EHCI_QH_STS_HALTED);
		Set_QH(list_array[i].qh_next_qtd, EHCI_QH_NEXT_QTD_PTR_VALID);
		Set_QH(list_array[i].qh_alt_next_qtd,
		    EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
	}

	/*
	 * Make sure that the last Endpoint on the periodic frame list
	 * terminates the periodic schedule.
	 */
	Set_QH(list_array[0].qh_link_ptr, EHCI_QH_LINK_PTR_VALID);

	/* Build the interrupt lattice tree */
	for (i = 0; i < (EHCI_NUM_STATIC_NODES / 2); i++) {
		/*
		 * The next pointer in the host controller endpoint
		 * descriptor must contain an iommu address. Calculate
		 * the offset into the cpu address and add this to the
		 * starting iommu address.
		 */
		addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)&list_array[i]);

		Set_QH(list_array[2*i + 1].qh_link_ptr,
		    addr | EHCI_QH_LINK_REF_QH);
		Set_QH(list_array[2*i + 2].qh_link_ptr,
		    addr | EHCI_QH_LINK_REF_QH);
	}

	/* Build the tree bottom */
	temp = (unsigned short *)
	    kmem_zalloc(EHCI_NUM_PERIODIC_FRAME_LISTS * 2, KM_SLEEP);

	num_of_nodes = 1;

	/*
	 * Initialize the values which are used for setting up head pointers
	 * for the 32ms scheduling lists which starts from the Periodic Frame
	 * List.
	 */
	for (i = 0; i < ehci_log_2(EHCI_NUM_PERIODIC_FRAME_LISTS); i++) {
		for (j = 0, k = 0; k < num_of_nodes; k++, j++) {
			ehci_index[j++] = temp[k];
			ehci_index[j] = temp[k] + ehci_pow_2(i);
		}

		num_of_nodes *= 2;
		for (k = 0; k < num_of_nodes; k++)
			temp[k] = ehci_index[k];
	}

	kmem_free((void *)temp, (EHCI_NUM_PERIODIC_FRAME_LISTS * 2));
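
	/*
	 * At this point the local ehci_index[] holds the bit-reversed
	 * ordering of the 1024 frame list entries; in the loop below this
	 * makes each of the 32 leaf QH lists get referenced by every 32nd
	 * entry of the Periodic Frame List.
	 */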

	/*
	 * Initialize the interrupt list in the Periodic Frame List Table
	 * so that it points to the bottom of the tree.
	 */
	for (i = 0, j = 0; i < ehci_pow_2(TREE_HEIGHT); i++) {
		addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)
		    (&list_array[((EHCI_NUM_STATIC_NODES + 1) / 2) + i - 1]));

		ASSERT(addr);

		for (k = 0; k < ehci_pow_2(TREE_HEIGHT); k++) {
			Set_PFLT(periodic_frame_list->
			    ehci_periodic_frame_list_table[ehci_index[j++]],
			    (uint32_t)(addr | EHCI_QH_LINK_REF_QH));
		}
	}
}

/*
 * ehci_alloc_hcdi_ops:
 *
 * The HCDI interfaces or entry points are the software interfaces used by
 * the Universal Serial Bus Driver (USBA) to access the services of the
 * Host Controller Driver (HCD). During HCD initialization, inform USBA
 * about all available HCDI interfaces or entry points.
 */
usba_hcdi_ops_t *
ehci_alloc_hcdi_ops(ehci_state_t *ehcip)
{
	usba_hcdi_ops_t	*usba_hcdi_ops;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_alloc_hcdi_ops:");

	usba_hcdi_ops = usba_alloc_hcdi_ops();

	usba_hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION;

	usba_hcdi_ops->usba_hcdi_pm_support = ehci_hcdi_pm_support;
	usba_hcdi_ops->usba_hcdi_pipe_open = ehci_hcdi_pipe_open;
	usba_hcdi_ops->usba_hcdi_pipe_close = ehci_hcdi_pipe_close;

	usba_hcdi_ops->usba_hcdi_pipe_reset = ehci_hcdi_pipe_reset;
	usba_hcdi_ops->usba_hcdi_pipe_reset_data_toggle =
	    ehci_hcdi_pipe_reset_data_toggle;

	usba_hcdi_ops->usba_hcdi_pipe_ctrl_xfer = ehci_hcdi_pipe_ctrl_xfer;
	usba_hcdi_ops->usba_hcdi_pipe_bulk_xfer = ehci_hcdi_pipe_bulk_xfer;
	usba_hcdi_ops->usba_hcdi_pipe_intr_xfer = ehci_hcdi_pipe_intr_xfer;
	usba_hcdi_ops->usba_hcdi_pipe_isoc_xfer = ehci_hcdi_pipe_isoc_xfer;

	usba_hcdi_ops->usba_hcdi_bulk_transfer_size =
	    ehci_hcdi_bulk_transfer_size;

	usba_hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
	    ehci_hcdi_pipe_stop_intr_polling;
	usba_hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
	    ehci_hcdi_pipe_stop_isoc_polling;

	usba_hcdi_ops->usba_hcdi_get_current_frame_number =
	    ehci_hcdi_get_current_frame_number;
	usba_hcdi_ops->usba_hcdi_get_max_isoc_pkts =
	    ehci_hcdi_get_max_isoc_pkts;

	usba_hcdi_ops->usba_hcdi_console_input_init =
	    ehci_hcdi_polled_input_init;
	usba_hcdi_ops->usba_hcdi_console_input_enter =
	    ehci_hcdi_polled_input_enter;
	usba_hcdi_ops->usba_hcdi_console_read =
	    ehci_hcdi_polled_read;
	usba_hcdi_ops->usba_hcdi_console_input_exit =
	    ehci_hcdi_polled_input_exit;
	usba_hcdi_ops->usba_hcdi_console_input_fini =
	    ehci_hcdi_polled_input_fini;

	usba_hcdi_ops->usba_hcdi_console_output_init =
	    ehci_hcdi_polled_output_init;
	usba_hcdi_ops->usba_hcdi_console_output_enter =
	    ehci_hcdi_polled_output_enter;
	usba_hcdi_ops->usba_hcdi_console_write =
	    ehci_hcdi_polled_write;
	usba_hcdi_ops->usba_hcdi_console_output_exit =
	    ehci_hcdi_polled_output_exit;
	usba_hcdi_ops->usba_hcdi_console_output_fini =
	    ehci_hcdi_polled_output_fini;

	return (usba_hcdi_ops);
}

/*
 * Host Controller Driver (HCD) deinitialization functions
 */

/*
 * ehci_cleanup:
 *
 * Cleanup on attach failure or detach
 */
int
ehci_cleanup(ehci_state_t *ehcip)
{
	ehci_trans_wrapper_t	*tw;
	ehci_pipe_private_t	*pp;
	ehci_qtd_t		*qtd;
	int			i, ctrl, rval;
	int			flags = ehcip->ehci_flags;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_cleanup:");

	if (flags & EHCI_RHREG) {
		/* Unload the root hub driver */
		if (ehci_unload_root_hub_driver(ehcip) != USB_SUCCESS) {

			return (DDI_FAILURE);
		}
	}

	if (flags & EHCI_USBAREG) {
		/* Unregister this HCD instance with USBA */
		usba_hcdi_unregister(ehcip->ehci_dip);
	}

	if (flags & EHCI_INTR) {

		mutex_enter(&ehcip->ehci_int_mutex);

		/* Disable all EHCI QH list processing */
		Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
		    ~(EHCI_CMD_ASYNC_SCHED_ENABLE |
		    EHCI_CMD_PERIODIC_SCHED_ENABLE)));

		/* Disable all EHCI interrupts */
		Set_OpReg(ehci_interrupt, 0);

		/* wait for the next SOF */
		(void) ehci_wait_for_sof(ehcip);

		/* Route all Root hub ports to Classic host controller */
		Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);

		/* Stop the EHCI host controller */
		Set_OpReg(ehci_command,
		    Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);

		mutex_exit(&ehcip->ehci_int_mutex);

		/* Wait for some time */
		delay(drv_usectohz(EHCI_TIMEWAIT));

		ehci_rem_intrs(ehcip);
	}

	/* Unmap the EHCI registers */
	if (ehcip->ehci_caps_handle) {
		ddi_regs_map_free(&ehcip->ehci_caps_handle);
	}

	if (ehcip->ehci_config_handle) {
		pci_config_teardown(&ehcip->ehci_config_handle);
	}

	/* Free all the buffers */
	if (ehcip->ehci_qtd_pool_addr && ehcip->ehci_qtd_pool_mem_handle) {
		for (i = 0; i < ehci_qtd_pool_size; i ++) {
			qtd = &ehcip->ehci_qtd_pool_addr[i];
			ctrl = Get_QTD(ehcip->
			    ehci_qtd_pool_addr[i].qtd_state);

			if ((ctrl != EHCI_QTD_FREE) &&
			    (ctrl != EHCI_QTD_DUMMY) &&
			    (qtd->qtd_trans_wrapper)) {

				mutex_enter(&ehcip->ehci_int_mutex);

				tw = (ehci_trans_wrapper_t *)
				    EHCI_LOOKUP_ID((uint32_t)
				    Get_QTD(qtd->qtd_trans_wrapper));

				/* Obtain the pipe private structure */
				pp = tw->tw_pipe_private;

				/* Stop the transfer timer */
				ehci_stop_xfer_timer(ehcip, tw,
				    EHCI_REMOVE_XFER_ALWAYS);

				ehci_deallocate_tw(ehcip, pp, tw);

				mutex_exit(&ehcip->ehci_int_mutex);
			}
		}

		/*
		 * If the EHCI_QTD_POOL_BOUND flag is set, then unbind
		 * the handle for the QTD pools.
		 */
		if ((ehcip->ehci_dma_addr_bind_flag &
		    EHCI_QTD_POOL_BOUND) == EHCI_QTD_POOL_BOUND) {

			rval = ddi_dma_unbind_handle(
			    ehcip->ehci_qtd_pool_dma_handle);

			ASSERT(rval == DDI_SUCCESS);
		}
		ddi_dma_mem_free(&ehcip->ehci_qtd_pool_mem_handle);
	}

	/* Free the QTD pool */
	if (ehcip->ehci_qtd_pool_dma_handle) {
		ddi_dma_free_handle(&ehcip->ehci_qtd_pool_dma_handle);
	}

	if (ehcip->ehci_qh_pool_addr && ehcip->ehci_qh_pool_mem_handle) {
		/*
		 * If the EHCI_QH_POOL_BOUND flag is set, then unbind
		 * the handle for the QH pools.
		 */
		if ((ehcip->ehci_dma_addr_bind_flag &
		    EHCI_QH_POOL_BOUND) == EHCI_QH_POOL_BOUND) {

			rval = ddi_dma_unbind_handle(
			    ehcip->ehci_qh_pool_dma_handle);

			ASSERT(rval == DDI_SUCCESS);
		}

		ddi_dma_mem_free(&ehcip->ehci_qh_pool_mem_handle);
	}

	/* Free the QH pool */
	if (ehcip->ehci_qh_pool_dma_handle) {
		ddi_dma_free_handle(&ehcip->ehci_qh_pool_dma_handle);
	}

	/* Free the Periodic frame list table (PFLT) area */
	if (ehcip->ehci_periodic_frame_list_tablep &&
	    ehcip->ehci_pflt_mem_handle) {
		/*
		 * If the EHCI_PFLT_DMA_BOUND flag is set, then unbind
		 * the handle for the PFLT.
		 */
		if ((ehcip->ehci_dma_addr_bind_flag &
		    EHCI_PFLT_DMA_BOUND) == EHCI_PFLT_DMA_BOUND) {

			rval = ddi_dma_unbind_handle(
			    ehcip->ehci_pflt_dma_handle);

			ASSERT(rval == DDI_SUCCESS);
		}

		ddi_dma_mem_free(&ehcip->ehci_pflt_mem_handle);
	}

	(void) ehci_isoc_cleanup(ehcip);

	if (ehcip->ehci_pflt_dma_handle) {
		ddi_dma_free_handle(&ehcip->ehci_pflt_dma_handle);
	}

	if (flags & EHCI_INTR) {
		/* Destroy the mutex */
		mutex_destroy(&ehcip->ehci_int_mutex);

		/* Destroy the async schedule advance condition variable */
		cv_destroy(&ehcip->ehci_async_schedule_advance_cv);
	}

	/* clean up kstat structs */
	ehci_destroy_stats(ehcip);

	/* Free ehci hcdi ops */
	if (ehcip->ehci_hcdi_ops) {
		usba_free_hcdi_ops(ehcip->ehci_hcdi_ops);
	}

	if (flags & EHCI_ZALLOC) {

		usb_free_log_hdl(ehcip->ehci_log_hdl);

		/* Remove all properties that might have been created */
		ddi_prop_remove_all(ehcip->ehci_dip);

		/* Free the soft state */
		ddi_soft_state_free(ehci_statep,
		    ddi_get_instance(ehcip->ehci_dip));
	}

	return (DDI_SUCCESS);
}
1976 * ehci_rem_intrs:
1978 * Unregister FIXED or MSI interrupts
1980 static void
1981 ehci_rem_intrs(ehci_state_t *ehcip)
1983 int i;
1985 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1986 "ehci_rem_intrs: interrupt type 0x%x", ehcip->ehci_intr_type);
1988 /* Disable all interrupts */
1989 if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
1990 (void) ddi_intr_block_disable(ehcip->ehci_htable,
1991 ehcip->ehci_intr_cnt);
1992 } else {
1993 for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
1994 (void) ddi_intr_disable(ehcip->ehci_htable[i]);
1998 /* Call ddi_intr_remove_handler() */
1999 for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
2000 (void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
2001 (void) ddi_intr_free(ehcip->ehci_htable[i]);
2004 kmem_free(ehcip->ehci_htable,
2005 ehcip->ehci_intr_cnt * sizeof (ddi_intr_handle_t));
2010 * ehci_cpr_suspend
2013 ehci_cpr_suspend(ehci_state_t *ehcip)
2015 int i;
2017 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2018 "ehci_cpr_suspend:");
2020 /* Call into the root hub and suspend it */
2021 if (usba_hubdi_detach(ehcip->ehci_dip, DDI_SUSPEND) != DDI_SUCCESS) {
2023 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2024 "ehci_cpr_suspend: root hub fails to suspend");
2026 return (DDI_FAILURE);
2029 /* Only root hub's intr pipe should be open at this time */
2030 mutex_enter(&ehcip->ehci_int_mutex);
2032 ASSERT(ehcip->ehci_open_pipe_count == 0);
2034 /* Just wait till all resources are reclaimed */
2035 i = 0;
2036 while ((ehcip->ehci_reclaim_list != NULL) && (i++ < 3)) {
2037 ehci_handle_endpoint_reclaimation(ehcip);
2038 (void) ehci_wait_for_sof(ehcip);
2040 ASSERT(ehcip->ehci_reclaim_list == NULL);
2042 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2043 "ehci_cpr_suspend: Disable HC QH list processing");
2045 /* Disable all EHCI QH list processing */
2046 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
2047 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE)));
2049 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2050 "ehci_cpr_suspend: Disable HC interrupts");
2052 /* Disable all EHCI interrupts */
2053 Set_OpReg(ehci_interrupt, 0);
2055 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2056 "ehci_cpr_suspend: Wait for the next SOF");
2058 /* Wait for the next SOF */
2059 if (ehci_wait_for_sof(ehcip) != USB_SUCCESS) {
2061 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2062 "ehci_cpr_suspend: ehci host controller suspend failed");
2064 mutex_exit(&ehcip->ehci_int_mutex);
2065 return (DDI_FAILURE);
2069 * Stop the ehci host controller
2070 * if usb keyboard is not connected.
2072 if (ehcip->ehci_polled_kbd_count == 0 || force_ehci_off != 0) {
2073 Set_OpReg(ehci_command,
2074 Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
2078 /* Set host controller soft state to suspend */
2079 ehcip->ehci_hc_soft_state = EHCI_CTLR_SUSPEND_STATE;
2081 mutex_exit(&ehcip->ehci_int_mutex);
2083 return (DDI_SUCCESS);
2088 * ehci_cpr_resume
2091 ehci_cpr_resume(ehci_state_t *ehcip)
2093 mutex_enter(&ehcip->ehci_int_mutex);
2095 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2096 "ehci_cpr_resume: Restart the controller");
2098 /* Cleanup ehci specific information across cpr */
2099 ehci_cpr_cleanup(ehcip);
2101 /* Restart the controller */
2102 if (ehci_init_ctlr(ehcip, EHCI_NORMAL_INITIALIZATION) != DDI_SUCCESS) {
2104 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2105 "ehci_cpr_resume: ehci host controller resume failed ");
2107 mutex_exit(&ehcip->ehci_int_mutex);
2109 return (DDI_FAILURE);
2112 mutex_exit(&ehcip->ehci_int_mutex);
2114 /* Now resume the root hub */
2115 if (usba_hubdi_attach(ehcip->ehci_dip, DDI_RESUME) != DDI_SUCCESS) {
2117 return (DDI_FAILURE);
2120 return (DDI_SUCCESS);
2125 * Bandwidth Allocation functions
2129 * ehci_allocate_bandwidth:
2131 * Figure out whether or not this interval may be supported. Return the index
2132 * into the lattice if it can be supported. Return allocation failure if it
2133 * cannot be supported.
2136 ehci_allocate_bandwidth(
2137 ehci_state_t *ehcip,
2138 usba_pipe_handle_data_t *ph,
2139 uint_t *pnode,
2140 uchar_t *smask,
2141 uchar_t *cmask)
2143 int error = USB_SUCCESS;
2145 /* This routine is protected by the ehci_int_mutex */
2146 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2148 /* Reset the pnode to the last checked pnode */
2149 *pnode = 0;
2151 /* Allocate high speed bandwidth */
2152 if ((error = ehci_allocate_high_speed_bandwidth(ehcip,
2153 ph, pnode, smask, cmask)) != USB_SUCCESS) {
2155 return (error);
2159 * For low/full speed usb devices, allocate classic TT bandwidth
2160 * in addition to high speed bandwidth.
2162 if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {
2164 /* Allocate classic TT bandwidth */
2165 if ((error = ehci_allocate_classic_tt_bandwidth(
2166 ehcip, ph, *pnode)) != USB_SUCCESS) {
2168 /* Deallocate high speed bandwidth */
2169 ehci_deallocate_high_speed_bandwidth(
2170 ehcip, ph, *pnode, *smask, *cmask);
2174 return (error);
2179 * ehci_allocate_high_speed_bandwidth:
2181 * Allocate high speed bandwidth for the low/full/high speed interrupt and
2182 * isochronous endpoints.
2184 static int
2185 ehci_allocate_high_speed_bandwidth(
2186 ehci_state_t *ehcip,
2187 usba_pipe_handle_data_t *ph,
2188 uint_t *pnode,
2189 uchar_t *smask,
2190 uchar_t *cmask)
2192 uint_t sbandwidth, cbandwidth;
2193 int interval;
2194 usb_ep_descr_t *endpoint = &ph->p_ep;
2195 usba_device_t *child_ud;
2196 usb_port_status_t port_status;
2197 int error;
2199 /* This routine is protected by the ehci_int_mutex */
2200 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2202 /* Get child's usba device structure */
2203 child_ud = ph->p_usba_device;
2205 mutex_enter(&child_ud->usb_mutex);
2207 /* Get the current usb device's port status */
2208 port_status = ph->p_usba_device->usb_port_status;
2210 mutex_exit(&child_ud->usb_mutex);
2213 * Calculate the length in bytes of a transaction on this
2214 * periodic endpoint. Return failure if maximum packet is
2215 * zero.
2217 error = ehci_compute_high_speed_bandwidth(ehcip, endpoint,
2218 port_status, &sbandwidth, &cbandwidth);
2219 if (error != USB_SUCCESS) {
2221 return (error);
2225 * Adjust polling interval to be a power of 2.
2226 * If this interval can't be supported, return
2227 * allocation failure.
2229 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2230 if (interval == USB_FAILURE) {
2232 return (USB_FAILURE);
2235 if (port_status == USBA_HIGH_SPEED_DEV) {
2236 /* Allocate bandwidth for high speed devices */
2237 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2238 USB_EP_ATTR_ISOCH) {
2239 error = USB_SUCCESS;
2240 } else {
2242 error = ehci_find_bestfit_hs_mask(ehcip, smask, pnode,
2243 endpoint, sbandwidth, interval);
2246 *cmask = 0x00;
2248 } else {
2249 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2250 USB_EP_ATTR_INTR) {
2252 /* Allocate bandwidth for low speed interrupt */
2253 error = ehci_find_bestfit_ls_intr_mask(ehcip,
2254 smask, cmask, pnode, sbandwidth, cbandwidth,
2255 interval);
2256 } else {
2257 if ((endpoint->bEndpointAddress &
2258 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2260 /* Allocate bandwidth for sitd in */
2261 error = ehci_find_bestfit_sitd_in_mask(ehcip,
2262 smask, cmask, pnode, sbandwidth, cbandwidth,
2263 interval);
2264 } else {
2266 /* Allocate bandwidth for sitd out */
2267 error = ehci_find_bestfit_sitd_out_mask(ehcip,
2268 smask, pnode, sbandwidth, interval);
2269 *cmask = 0x00;
2274 if (error != USB_SUCCESS) {
2275 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2276 "ehci_allocate_high_speed_bandwidth: Reached maximum "
2277 "bandwidth value and cannot allocate bandwidth for a "
2278 "given high-speed periodic endpoint");
2280 return (USB_NO_BANDWIDTH);
2283 return (error);
2288 * ehci_allocate_classic_tt_bandwidth:
2290 * Allocate classic TT bandwidth for the low/full speed interrupt and
2291 * isochronous endpoints.
2293 static int
2294 ehci_allocate_classic_tt_bandwidth(
2295 ehci_state_t *ehcip,
2296 usba_pipe_handle_data_t *ph,
2297 uint_t pnode)
2299 uint_t bandwidth, min;
2300 uint_t height, leftmost, list;
2301 usb_ep_descr_t *endpoint = &ph->p_ep;
2302 usba_device_t *child_ud, *parent_ud;
2303 usb_port_status_t port_status;
2304 int i, interval;
2306 /* This routine is protected by the ehci_int_mutex */
2307 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2309 /* Get child's usba device structure */
2310 child_ud = ph->p_usba_device;
2312 mutex_enter(&child_ud->usb_mutex);
2314 /* Get the current usb device's port status */
2315 port_status = child_ud->usb_port_status;
2317 /* Get the parent high speed hub's usba device structure */
2318 parent_ud = child_ud->usb_hs_hub_usba_dev;
2320 mutex_exit(&child_ud->usb_mutex);
2322 USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2323 "ehci_allocate_classic_tt_bandwidth: "
2324 "child_ud 0x%p parent_ud 0x%p",
2325 (void *)child_ud, (void *)parent_ud);
2328 * Calculate the length in bytes of a transaction on this
2329 * periodic endpoint. Return failure if maximum packet is
2330 * zero.
2332 if (ehci_compute_classic_bandwidth(endpoint,
2333 port_status, &bandwidth) != USB_SUCCESS) {
2335 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2336 "ehci_allocate_classic_tt_bandwidth: Periodic endpoint "
2337 "with zero endpoint maximum packet size is not supported");
2339 return (USB_NOT_SUPPORTED);
2342 USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2343 "ehci_allocate_classic_tt_bandwidth: bandwidth %d", bandwidth);
2345 mutex_enter(&parent_ud->usb_mutex);
2348 * If the length in bytes plus the allocated bandwidth exceeds
2349 * the maximum, return bandwidth allocation failure.
2351 if ((parent_ud->usb_hs_hub_min_bandwidth + bandwidth) >
2352 FS_PERIODIC_BANDWIDTH) {
2354 mutex_exit(&parent_ud->usb_mutex);
2356 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2357 "ehci_allocate_classic_tt_bandwidth: Reached maximum "
2358 "bandwidth value and cannot allocate bandwidth for a "
2359 "given low/full speed periodic endpoint");
2361 return (USB_NO_BANDWIDTH);
2364 mutex_exit(&parent_ud->usb_mutex);
2366 /* Adjust polling interval to be a power of 2 */
2367 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2369 /* Find the height in the tree */
2370 height = ehci_lattice_height(interval);
2372 /* Find the leftmost leaf in the subtree specified by the node. */
2373 leftmost = ehci_leftmost_leaf(pnode, height);
2375 mutex_enter(&parent_ud->usb_mutex);
2377 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2378 list = ehci_index[leftmost + i];
2380 if ((parent_ud->usb_hs_hub_bandwidth[list] +
2381 bandwidth) > FS_PERIODIC_BANDWIDTH) {
2383 mutex_exit(&parent_ud->usb_mutex);
2385 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2386 "ehci_allocate_classic_tt_bandwidth: Reached "
2387 "maximum bandwidth value and cannot allocate "
2388 "bandwidth for low/full periodic endpoint");
2390 return (USB_NO_BANDWIDTH);
2395 * All the leaves for this node must be updated with the bandwidth.
2397 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2398 list = ehci_index[leftmost + i];
2399 parent_ud->usb_hs_hub_bandwidth[list] += bandwidth;
2402 /* Find the leaf with the smallest allocated bandwidth */
2403 min = parent_ud->usb_hs_hub_bandwidth[0];
2405 for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
2406 if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
2407 min = parent_ud->usb_hs_hub_bandwidth[i];
2411 /* Save the minimum for later use */
2412 parent_ud->usb_hs_hub_min_bandwidth = min;
2414 mutex_exit(&parent_ud->usb_mutex);
2416 return (USB_SUCCESS);
2421 * ehci_deallocate_bandwidth:
2423 * Deallocate bandwidth for the given node in the lattice and the length
2424 * of transfer.
2426 void
2427 ehci_deallocate_bandwidth(
2428 ehci_state_t *ehcip,
2429 usba_pipe_handle_data_t *ph,
2430 uint_t pnode,
2431 uchar_t smask,
2432 uchar_t cmask)
2434 /* This routine is protected by the ehci_int_mutex */
2435 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2437 ehci_deallocate_high_speed_bandwidth(ehcip, ph, pnode, smask, cmask);
2440 * For low/full speed usb devices, deallocate classic TT bandwidth
2441 * in addition to high speed bandwidth.
2443 if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {
2445 /* Deallocate classic TT bandwidth */
2446 ehci_deallocate_classic_tt_bandwidth(ehcip, ph, pnode);
2452 * ehci_deallocate_high_speed_bandwidth:
2454 * Deallocate high speed bandwidth of an interrupt or isochronous endpoint.
2456 static void
2457 ehci_deallocate_high_speed_bandwidth(
2458 ehci_state_t *ehcip,
2459 usba_pipe_handle_data_t *ph,
2460 uint_t pnode,
2461 uchar_t smask,
2462 uchar_t cmask)
2464 uint_t height, leftmost;
2465 uint_t list_count;
2466 uint_t sbandwidth, cbandwidth;
2467 int interval;
2468 usb_ep_descr_t *endpoint = &ph->p_ep;
2469 usba_device_t *child_ud;
2470 usb_port_status_t port_status;
2472 /* This routine is protected by the ehci_int_mutex */
2473 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2475 /* Get child's usba device structure */
2476 child_ud = ph->p_usba_device;
2478 mutex_enter(&child_ud->usb_mutex);
2480 /* Get the current usb device's port status */
2481 port_status = ph->p_usba_device->usb_port_status;
2483 mutex_exit(&child_ud->usb_mutex);
2485 (void) ehci_compute_high_speed_bandwidth(ehcip, endpoint,
2486 port_status, &sbandwidth, &cbandwidth);
2488 /* Adjust polling interval to be a power of 2 */
2489 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2491 /* Find the height in the tree */
2492 height = ehci_lattice_height(interval);
2495 * Find the leftmost leaf in the subtree specified by the node
2497 leftmost = ehci_leftmost_leaf(pnode, height);
2499 list_count = EHCI_NUM_INTR_QH_LISTS/interval;
2501 /* Delete the bandwidth from the appropriate lists */
2502 if (port_status == USBA_HIGH_SPEED_DEV) {
2504 ehci_update_bw_availability(ehcip, -sbandwidth,
2505 leftmost, list_count, smask);
2506 } else {
2507 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2508 USB_EP_ATTR_INTR) {
2510 ehci_update_bw_availability(ehcip, -sbandwidth,
2511 leftmost, list_count, smask);
2512 ehci_update_bw_availability(ehcip, -cbandwidth,
2513 leftmost, list_count, cmask);
2514 } else {
2515 if ((endpoint->bEndpointAddress &
2516 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2518 ehci_update_bw_availability(ehcip, -sbandwidth,
2519 leftmost, list_count, smask);
2520 ehci_update_bw_availability(ehcip,
2521 -MAX_UFRAME_SITD_XFER, leftmost,
2522 list_count, cmask);
2523 } else {
2525 ehci_update_bw_availability(ehcip,
2526 -MAX_UFRAME_SITD_XFER, leftmost,
2527 list_count, smask);
2534 * ehci_deallocate_classic_tt_bandwidth:
2536 * Deallocate classic TT bandwidth of an interrupt or isochronous endpoint.
2538 static void
2539 ehci_deallocate_classic_tt_bandwidth(
2540 ehci_state_t *ehcip,
2541 usba_pipe_handle_data_t *ph,
2542 uint_t pnode)
2544 uint_t bandwidth, height, leftmost, list, min;
2545 int i, interval;
2546 usb_ep_descr_t *endpoint = &ph->p_ep;
2547 usba_device_t *child_ud, *parent_ud;
2548 usb_port_status_t port_status;
2550 /* This routine is protected by the ehci_int_mutex */
2551 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2553 /* Get child's usba device structure */
2554 child_ud = ph->p_usba_device;
2556 mutex_enter(&child_ud->usb_mutex);
2558 /* Get the current usb device's port status */
2559 port_status = child_ud->usb_port_status;
2561 /* Get the parent high speed hub's usba device structure */
2562 parent_ud = child_ud->usb_hs_hub_usba_dev;
2564 mutex_exit(&child_ud->usb_mutex);
2566 /* Obtain the bandwidth */
2567 (void) ehci_compute_classic_bandwidth(endpoint,
2568 port_status, &bandwidth);
2570 /* Adjust polling interval to be a power of 2 */
2571 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2573 /* Find the height in the tree */
2574 height = ehci_lattice_height(interval);
2576 /* Find the leftmost leaf in the subtree specified by the node */
2577 leftmost = ehci_leftmost_leaf(pnode, height);
2579 mutex_enter(&parent_ud->usb_mutex);
2581 /* Delete the bandwidth from the appropriate lists */
2582 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2583 list = ehci_index[leftmost + i];
2584 parent_ud->usb_hs_hub_bandwidth[list] -= bandwidth;
2587 /* Find the leaf with the smallest allocated bandwidth */
2588 min = parent_ud->usb_hs_hub_bandwidth[0];
2590 for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
2591 if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
2592 min = parent_ud->usb_hs_hub_bandwidth[i];
2596 /* Save the minimum for later use */
2597 parent_ud->usb_hs_hub_min_bandwidth = min;
2599 mutex_exit(&parent_ud->usb_mutex);
2604 * ehci_compute_high_speed_bandwidth:
2606 * Given a periodic endpoint (interrupt or isochronous) determine the total
2607 * bandwidth for one transaction. The EHCI host controller traverses the
2608 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
2609 * services an endpoint, only a single transaction attempt is made. The HC
2610 * moves to the next Endpoint Descriptor after the first transaction attempt
2611 * rather than finishing the entire Transfer Descriptor. Therefore, when a
2612 * Transfer Descriptor is inserted into the lattice, we will only count the
2613 * number of bytes for one transaction.
2615 * The following are the formulas used for calculating bandwidth in terms
2616 * of bytes for a single USB high speed transaction. The protocol
2617 * overheads will be different for each type of USB transfer and all these
2618 * formulas and protocol overheads are derived from section 5.11.3 of the
2619 * USB 2.0 Specification.
2621 * High-Speed:
2622 * Protocol overhead + ((MaxPktSz * 7)/6) + Host_Delay
2624 * Split Transaction: (Low/Full speed devices connected behind usb2.0 hub)
2626 * Protocol overhead + Split transaction overhead +
2627 * ((MaxPktSz * 7)/6) + Host_Delay;
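/*
 * Worked example (an illustration, not text from the specification):
 * for a high speed interrupt endpoint with wMaxPacketSize = 64 and one
 * transaction per micro-frame, the bit-stuffed payload computed below
 * is (64 * 7) / 6 = 74 bytes, so the start split bandwidth becomes
 * EHCI_HOST_CONTROLLER_DELAY + HS_NON_ISOC_PROTO_OVERHEAD + 74 bytes
 * per micro-frame and the complete split bandwidth is zero. The actual
 * constant values are defined in the driver headers.
 */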
2629 /* ARGSUSED */
2630 static int
2631 ehci_compute_high_speed_bandwidth(
2632 ehci_state_t *ehcip,
2633 usb_ep_descr_t *endpoint,
2634 usb_port_status_t port_status,
2635 uint_t *sbandwidth,
2636 uint_t *cbandwidth)
2638 ushort_t maxpacketsize = endpoint->wMaxPacketSize;
2640 /* Return failure if endpoint maximum packet is zero */
2641 if (maxpacketsize == 0) {
2642 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2643 "ehci_allocate_high_speed_bandwidth: Periodic endpoint "
2644 "with zero endpoint maximum packet size is not supported");
2646 return (USB_NOT_SUPPORTED);
2649 /* Add bit-stuffing overhead */
2650 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6);
2652 /* Add Host Controller specific delay to required bandwidth */
2653 *sbandwidth = EHCI_HOST_CONTROLLER_DELAY;
2655 /* Add xfer specific protocol overheads */
2656 if ((endpoint->bmAttributes &
2657 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) {
2658 /* High speed interrupt transaction */
2659 *sbandwidth += HS_NON_ISOC_PROTO_OVERHEAD;
2660 } else {
2661 /* Isochronous transaction */
2662 *sbandwidth += HS_ISOC_PROTO_OVERHEAD;
2666 * For low/full speed devices, add split transaction specific
2667 * overheads.
2669 if (port_status != USBA_HIGH_SPEED_DEV) {
2671 * Add start and complete split transaction
2672 * token overheads.
2674 *cbandwidth = *sbandwidth + COMPLETE_SPLIT_OVERHEAD;
2675 *sbandwidth += START_SPLIT_OVERHEAD;
2677 /* Add data overhead depending on data direction */
2678 if ((endpoint->bEndpointAddress &
2679 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2680 *cbandwidth += maxpacketsize;
2681 } else {
2682 if ((endpoint->bmAttributes &
2683 USB_EP_ATTR_MASK) == USB_EP_ATTR_ISOCH) {
2684 /* There are no complete splits for isoch out */
2685 *cbandwidth = 0;
2687 *sbandwidth += maxpacketsize;
2689 } else {
2690 uint_t xactions;
2692 /* Get the max transactions per microframe */
2693 xactions = ((maxpacketsize & USB_EP_MAX_XACTS_MASK) >>
2694 USB_EP_MAX_XACTS_SHIFT) + 1;
2696 /* High speed transaction */
2697 *sbandwidth += maxpacketsize;
2699 /* Calculate bandwidth per micro-frame */
2700 *sbandwidth *= xactions;
2702 *cbandwidth = 0;
2705 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2706 "ehci_allocate_high_speed_bandwidth: "
2707 "Start split bandwidth %d Complete split bandwidth %d",
2708 *sbandwidth, *cbandwidth);
2710 return (USB_SUCCESS);
2715 * ehci_compute_classic_bandwidth:
2717 * Given a periodic endpoint (interrupt or isochronous) determine the total
2718 * bandwidth for one transaction. The EHCI host controller traverses the
2719 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
2720 * services an endpoint, only a single transaction attempt is made. The HC
2721 * moves to the next Endpoint Descriptor after the first transaction attempt
2722 * rather than finishing the entire Transfer Descriptor. Therefore, when a
2723 * Transfer Descriptor is inserted into the lattice, we will only count the
2724 * number of bytes for one transaction.
2726 * The following are the formulas used for calculating bandwidth in terms
2727 * of bytes for a single low/full speed (classic) transaction. The protocol
2728 * overheads will be different for each type of USB transfer and all these
2729 * formulas and protocol overheads are derived from section 5.11.3 of the
2730 * USB 2.0 Specification.
2732 * Low-Speed:
2733 * Protocol overhead + Hub LS overhead +
2734 * (Low Speed clock * ((MaxPktSz * 7)/6)) + TT_Delay
2736 * Full-Speed:
2737 * Protocol overhead + ((MaxPktSz * 7)/6) + TT_Delay
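/*
 * Worked example (an illustration): for a low speed interrupt endpoint
 * with wMaxPacketSize = 8, the bit-stuffed payload computed below is
 * (8 * 7) / 6 = 9 bytes, so the classic TT bandwidth becomes
 * TT_DELAY + LOW_SPEED_PROTO_OVERHEAD + HUB_LOW_SPEED_PROTO_OVERHEAD +
 * (LOW_SPEED_CLOCK * 9) bytes. The actual constant values are defined
 * in the driver headers.
 */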
2739 /* ARGSUSED */
2740 static int
2741 ehci_compute_classic_bandwidth(
2742 usb_ep_descr_t *endpoint,
2743 usb_port_status_t port_status,
2744 uint_t *bandwidth)
2746 ushort_t maxpacketsize = endpoint->wMaxPacketSize;
2749 * If endpoint maximum packet is zero, then return immediately.
2751 if (maxpacketsize == 0) {
2753 return (USB_NOT_SUPPORTED);
2756 /* Add TT delay to required bandwidth */
2757 *bandwidth = TT_DELAY;
2759 /* Add bit-stuffing overhead */
2760 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6);
2762 switch (port_status) {
2763 case USBA_LOW_SPEED_DEV:
2764 /* Low speed interrupt transaction */
2765 *bandwidth += (LOW_SPEED_PROTO_OVERHEAD +
2766 HUB_LOW_SPEED_PROTO_OVERHEAD +
2767 (LOW_SPEED_CLOCK * maxpacketsize));
2768 break;
2769 case USBA_FULL_SPEED_DEV:
2770 /* Full speed transaction */
2771 *bandwidth += maxpacketsize;
2773 /* Add xfer specific protocol overheads */
2774 if ((endpoint->bmAttributes &
2775 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) {
2776 /* Full speed interrupt transaction */
2777 *bandwidth += FS_NON_ISOC_PROTO_OVERHEAD;
2778 } else {
2779 /* Isochronous and input transaction */
2780 if ((endpoint->bEndpointAddress &
2781 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2782 *bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD;
2783 } else {
2784 /* Isochronous and output transaction */
2785 *bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD;
2788 break;
2791 return (USB_SUCCESS);
2796 * ehci_adjust_polling_interval:
2798 * Adjust the polling interval according to the usb device speed.
2800 /* ARGSUSED */
2802 ehci_adjust_polling_interval(
2803 ehci_state_t *ehcip,
2804 usb_ep_descr_t *endpoint,
2805 usb_port_status_t port_status)
2807 uint_t interval;
2808 int i = 0;
2810 /* Get the polling interval */
2811 interval = endpoint->bInterval;
2813 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2814 "ehci_adjust_polling_interval: Polling interval 0x%x", interval);
2817 * According to the USB 2.0 Specification, a high-speed endpoint's
2818 * polling interval is specified in terms of 125us micro-frames,
2819 * whereas a full/low speed endpoint's polling interval is
2820 * specified in milliseconds.
2822 * A high speed interrupt/isochronous endpoint can specify a
2823 * desired polling interval between 1 and 16 micro-frames,
2824 * whereas full/low speed endpoints can specify between 1 and 255
2825 * milliseconds.
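/*
 * For example (illustration only), a high speed endpoint reporting
 * bInterval = 4 polls every 2^(4 - 1) = 8 micro-frames, i.e. once per
 * millisecond, while a full speed endpoint reporting bInterval = 10
 * polls every 10 ms.
 */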
2827 switch (port_status) {
2828 case USBA_LOW_SPEED_DEV:
2830 * Low speed endpoints are limited to specifying
2831 * only 8ms to 255ms in this driver. If a device
2832 * reports a polling interval that is less than 8ms,
2833 * it will use 8 ms instead.
2835 if (interval < LS_MIN_POLL_INTERVAL) {
2837 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2838 "Low speed endpoint's poll interval of %d ms "
2839 "is below threshold. Rounding up to %d ms",
2840 interval, LS_MIN_POLL_INTERVAL);
2842 interval = LS_MIN_POLL_INTERVAL;
2846 * Return an error if the polling interval is greater
2847 * than 255ms.
2849 if (interval > LS_MAX_POLL_INTERVAL) {
2851 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2852 "Low speed endpoint's poll interval is "
2853 "greater than %d ms", LS_MAX_POLL_INTERVAL);
2855 return (USB_FAILURE);
2857 break;
2859 case USBA_FULL_SPEED_DEV:
2861 * Return an error if the polling interval is less
2862 * than 1ms or greater than 255ms.
2864 if ((interval < FS_MIN_POLL_INTERVAL) &&
2865 (interval > FS_MAX_POLL_INTERVAL)) {
2867 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2868 "Full speed endpoint's poll interval must "
2869 "be between %d and %d ms", FS_MIN_POLL_INTERVAL,
2870 FS_MAX_POLL_INTERVAL);
2872 return (USB_FAILURE);
2874 break;
2875 case USBA_HIGH_SPEED_DEV:
2877 * Return an error if the polling interval is less than 1
2878 * or greater than 16. Convert this value to 125us
2879 * units using 2^(bInterval - 1). Refer to the USB 2.0 spec,
2880 * page 51, for details.
2882 if ((interval < HS_MIN_POLL_INTERVAL) &&
2883 (interval > HS_MAX_POLL_INTERVAL)) {
2885 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2886 "High speed endpoint's poll interval "
2887 "must be between %d and %d units",
2888 HS_MIN_POLL_INTERVAL, HS_MAX_POLL_INTERVAL);
2890 return (USB_FAILURE);
2893 /* Adjust high speed device polling interval */
2894 interval =
2895 ehci_adjust_high_speed_polling_interval(ehcip, endpoint);
2897 break;
2901 * If polling interval is greater than 32ms,
2902 * clamp the polling interval to 32ms.
2904 if (interval > EHCI_NUM_INTR_QH_LISTS) {
2905 interval = EHCI_NUM_INTR_QH_LISTS;
2909 * Find the largest power of 2 that is less
2910 * than or equal to the interval.
2912 while ((ehci_pow_2(i)) <= interval) {
2913 i++;
2916 return (ehci_pow_2((i - 1)));
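/*
 * Worked example: a full/low speed polling interval of 10 ms is rounded
 * down here to 8 ms, since the loop above stops at i = 4 (2^4 = 16 > 10)
 * and ehci_pow_2(3) = 8 is returned.
 */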
2921 * ehci_adjust_high_speed_polling_interval:
2923 /* ARGSUSED */
2924 static int
2925 ehci_adjust_high_speed_polling_interval(
2926 ehci_state_t *ehcip,
2927 usb_ep_descr_t *endpoint)
2929 uint_t interval;
2931 /* Get the polling interval */
2932 interval = ehci_pow_2(endpoint->bInterval - 1);
2935 * Convert the polling interval from micro-frames
2936 * (125us units) to milliseconds.
2938 if (interval <= EHCI_MAX_UFRAMES) {
2939 interval = 1;
2940 } else {
2941 interval = interval/EHCI_MAX_UFRAMES;
2944 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2945 "ehci_adjust_high_speed_polling_interval: "
2946 "High speed adjusted interval 0x%x", interval);
2948 return (interval);
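/*
 * Worked example (assuming EHCI_MAX_UFRAMES is 8): bInterval = 1 gives
 * 2^0 = 1 micro-frame, which is rounded up to a 1 ms interval, while
 * bInterval = 6 gives 32 micro-frames, i.e. a 4 ms interval.
 */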
2953 * ehci_lattice_height:
2955 * Given the requested interval, find the height in the tree at which the
2956 * nodes for this interval fall. The height is measured as the number of
2957 * nodes from the leaf to the level specified by the interval. The root of
2958 * the tree is at height TREE_HEIGHT.
2960 static uint_t
2961 ehci_lattice_height(uint_t interval)
2963 return (TREE_HEIGHT - (ehci_log_2(interval)));
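/*
 * Worked example (assuming TREE_HEIGHT is 5 for this 32-list lattice):
 * a 32 ms interval maps to height 5 - log2(32) = 0, i.e. a leaf, while
 * a 1 ms interval maps to height 5, i.e. the root of the lattice.
 */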
2968 * ehci_lattice_parent:
2970 * Given a node in the lattice, find the index of the parent node
2972 static uint_t
2973 ehci_lattice_parent(uint_t node)
2975 if ((node % 2) == 0) {
2977 return ((node/2) - 1);
2978 } else {
2980 return ((node + 1)/2 - 1);
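/*
 * In this zero-based lattice the children of node n are nodes 2n + 1
 * and 2n + 2, so, for example, node 7 (odd: (7 + 1)/2 - 1) and node 8
 * (even: 8/2 - 1) both map back to parent node 3.
 */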
2986 * ehci_find_periodic_node:
2988 * Based on the "real" array leaf node and interval, get the periodic node.
2990 static uint_t
2991 ehci_find_periodic_node(uint_t leaf, int interval)
2993 uint_t lattice_leaf;
2994 uint_t height = ehci_lattice_height(interval);
2995 uint_t pnode;
2996 int i;
2998 /* Get the leaf number in the lattice */
2999 lattice_leaf = leaf + EHCI_NUM_INTR_QH_LISTS - 1;
3001 /* Get the node in the lattice based on the height and leaf */
3002 pnode = lattice_leaf;
3003 for (i = 0; i < height; i++) {
3004 pnode = ehci_lattice_parent(pnode);
3007 return (pnode);
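/*
 * Worked example (assuming TREE_HEIGHT is 5): for array leaf 0 and an
 * 8 ms interval, lattice_leaf = 0 + 32 - 1 = 31 and height = 2, so the
 * walk above goes 31 -> 15 -> 7 and pnode 7 is returned.
 */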
3012 * ehci_leftmost_leaf:
3014 * Find the leftmost leaf in the subtree specified by the node. Height refers
3015 * to number of nodes from the bottom of the tree to the node, including the
3016 * node.
3018 * The formula for a zero based tree is:
3019 * 2^H * Node + 2^H - 1
3020 * The leaves of the tree form an array; convert the node number to an
3021 * array index by subtracting the number of nodes not in the array:
3022 * 2^H * Node + 2^H - 1 - (EHCI_NUM_INTR_QH_LISTS - 1) =
3023 * 2^H * Node + 2^H - EHCI_NUM_INTR_QH_LISTS =
3024 * 2^H * (Node + 1) - EHCI_NUM_INTR_QH_LISTS
3026 * 1 2
3027 * 0 1 2 3
3029 static uint_t
3030 ehci_leftmost_leaf(
3031 uint_t node,
3032 uint_t height)
3034 return ((ehci_pow_2(height) * (node + 1)) - EHCI_NUM_INTR_QH_LISTS);
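/*
 * Worked example: ehci_leftmost_leaf(7, 2) = 2^2 * (7 + 1) - 32 = 0 and
 * ehci_leftmost_leaf(8, 2) = 2^2 * (8 + 1) - 32 = 4, the array leaves
 * of the two subtrees rooted two levels above the leaf row.
 */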
3039 * ehci_pow_2:
3041 * Compute 2 to the power
3043 static uint_t
3044 ehci_pow_2(uint_t x)
3046 if (x == 0) {
3048 return (1);
3049 } else {
3051 return (2 << (x - 1));
3057 * ehci_log_2:
3059 * Compute log base 2 of x
3061 static uint_t
3062 ehci_log_2(uint_t x)
3064 int i = 0;
3066 while (x != 1) {
3067 x = x >> 1;
3068 i++;
3071 return (i);
3076 * ehci_find_bestfit_hs_mask:
3078 * Find the smask in the bandwidth allocation, and update the
3079 * bandwidth allocation.
3081 static int
3082 ehci_find_bestfit_hs_mask(
3083 ehci_state_t *ehcip,
3084 uchar_t *smask,
3085 uint_t *pnode,
3086 usb_ep_descr_t *endpoint,
3087 uint_t bandwidth,
3088 int interval)
3090 int i;
3091 uint_t elements, index;
3092 int array_leaf, best_array_leaf;
3093 uint_t node_bandwidth, best_node_bandwidth;
3094 uint_t leaf_count;
3095 uchar_t bw_mask;
3096 uchar_t best_smask;
3098 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3099 "ehci_find_bestfit_hs_mask: ");
3101 /* Get all the valid smasks */
3102 switch (ehci_pow_2(endpoint->bInterval - 1)) {
3103 case EHCI_INTR_1US_POLL:
3104 index = EHCI_1US_MASK_INDEX;
3105 elements = EHCI_INTR_1US_POLL;
3106 break;
3107 case EHCI_INTR_2US_POLL:
3108 index = EHCI_2US_MASK_INDEX;
3109 elements = EHCI_INTR_2US_POLL;
3110 break;
3111 case EHCI_INTR_4US_POLL:
3112 index = EHCI_4US_MASK_INDEX;
3113 elements = EHCI_INTR_4US_POLL;
3114 break;
3115 case EHCI_INTR_XUS_POLL:
3116 default:
3117 index = EHCI_XUS_MASK_INDEX;
3118 elements = EHCI_INTR_XUS_POLL;
3119 break;
3122 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3125 * Because of the way the leaves are set up, we will automatically
3126 * hit the leftmost leaf of every possible node with this interval.
3128 best_smask = 0x00;
3129 best_node_bandwidth = 0;
3130 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3131 /* Find the bandwidth mask */
3132 node_bandwidth = ehci_calculate_bw_availability_mask(ehcip,
3133 bandwidth, ehci_index[array_leaf], leaf_count, &bw_mask);
3136 * If this node cannot support our requirements skip to the
3137 * next leaf.
3139 if (bw_mask == 0x00) {
3140 continue;
3144 * Now make sure our bandwidth requirements can be
3145 * satisfied with one of smasks in this node.
3147 *smask = 0x00;
3148 for (i = index; i < (index + elements); i++) {
3149 /* Check the start split mask value */
3150 if (ehci_start_split_mask[index] & bw_mask) {
3151 *smask = ehci_start_split_mask[index];
3152 break;
3157 * If an appropriate smask is found save the information if:
3158 * o best_smask has not been found yet.
3159 * - or -
3160 * o This is the node with the least amount of bandwidth
3162 if ((*smask != 0x00) &&
3163 ((best_smask == 0x00) ||
3164 (best_node_bandwidth > node_bandwidth))) {
3166 best_node_bandwidth = node_bandwidth;
3167 best_array_leaf = array_leaf;
3168 best_smask = *smask;
3173 * If we find a node that can handle the bandwidth, populate the
3174 * appropriate variables and return success.
3176 if (best_smask) {
3177 *smask = best_smask;
3178 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3179 interval);
3180 ehci_update_bw_availability(ehcip, bandwidth,
3181 ehci_index[best_array_leaf], leaf_count, best_smask);
3183 return (USB_SUCCESS);
3186 return (USB_FAILURE);
3191 * ehci_find_bestfit_ls_intr_mask:
3193 * Find the smask and cmask in the bandwidth allocation.
3195 static int
3196 ehci_find_bestfit_ls_intr_mask(
3197 ehci_state_t *ehcip,
3198 uchar_t *smask,
3199 uchar_t *cmask,
3200 uint_t *pnode,
3201 uint_t sbandwidth,
3202 uint_t cbandwidth,
3203 int interval)
3205 int i;
3206 uint_t elements, index;
3207 int array_leaf, best_array_leaf;
3208 uint_t node_sbandwidth, node_cbandwidth;
3209 uint_t best_node_bandwidth;
3210 uint_t leaf_count;
3211 uchar_t bw_smask, bw_cmask;
3212 uchar_t best_smask, best_cmask;
3214 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3215 "ehci_find_bestfit_ls_intr_mask: ");
3217 /* For low and full speed devices */
3218 index = EHCI_XUS_MASK_INDEX;
3219 elements = EHCI_INTR_4MS_POLL;
3221 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3224 * Because of the way the leaves are set up, we will automatically
3225 * hit the leftmost leaf of every possible node with this interval.
3227 best_smask = 0x00;
3228 best_node_bandwidth = 0;
3229 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3230 /* Find the bandwidth mask */
3231 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3232 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
3233 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3234 cbandwidth, ehci_index[array_leaf], leaf_count, &bw_cmask);
3237 * If this node cannot support our requirements skip to the
3238 * next leaf.
3240 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
3241 continue;
3245 * Now make sure our bandwidth requirements can be
3246 * satisfied with one of smasks in this node.
3248 *smask = 0x00;
3249 *cmask = 0x00;
3250 for (i = index; i < (index + elements); i++) {
3251 /* Check the start split mask value */
3252 if ((ehci_start_split_mask[index] & bw_smask) &&
3253 (ehci_intr_complete_split_mask[index] & bw_cmask)) {
3254 *smask = ehci_start_split_mask[index];
3255 *cmask = ehci_intr_complete_split_mask[index];
3256 break;
3261 * If an appropriate smask is found save the information if:
3262 * o best_smask has not been found yet.
3263 * - or -
3264 * o This is the node with the least amount of bandwidth
3266 if ((*smask != 0x00) &&
3267 ((best_smask == 0x00) ||
3268 (best_node_bandwidth >
3269 (node_sbandwidth + node_cbandwidth)))) {
3270 best_node_bandwidth = node_sbandwidth + node_cbandwidth;
3271 best_array_leaf = array_leaf;
3272 best_smask = *smask;
3273 best_cmask = *cmask;
3278 * If we find a node that can handle the bandwidth, populate the
3279 * appropriate variables and return success.
3281 if (best_smask) {
3282 *smask = best_smask;
3283 *cmask = best_cmask;
3284 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3285 interval);
3286 ehci_update_bw_availability(ehcip, sbandwidth,
3287 ehci_index[best_array_leaf], leaf_count, best_smask);
3288 ehci_update_bw_availability(ehcip, cbandwidth,
3289 ehci_index[best_array_leaf], leaf_count, best_cmask);
3291 return (USB_SUCCESS);
3294 return (USB_FAILURE);
3299 * ehci_find_bestfit_sitd_in_mask:
3301 * Find the smask and cmask in the bandwidth allocation.
3303 static int
3304 ehci_find_bestfit_sitd_in_mask(
3305 ehci_state_t *ehcip,
3306 uchar_t *smask,
3307 uchar_t *cmask,
3308 uint_t *pnode,
3309 uint_t sbandwidth,
3310 uint_t cbandwidth,
3311 int interval)
3313 int i, uFrames, found;
3314 int array_leaf, best_array_leaf;
3315 uint_t node_sbandwidth, node_cbandwidth;
3316 uint_t best_node_bandwidth;
3317 uint_t leaf_count;
3318 uchar_t bw_smask, bw_cmask;
3319 uchar_t best_smask, best_cmask;
3321 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3322 "ehci_find_bestfit_sitd_in_mask: ");
3324 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3327 * Because of the way the leaves are set up, we will automatically
3328 * hit the leftmost leaf of every possible node with this interval.
3329 * You may only send MAX_UFRAME_SITD_XFER raw bytes per uFrame.
3332 * Need to add an additional 2 uFrames, if the "L"ast
3333 * complete split is before uFrame 6. See section
3334 * 11.8.4 in USB 2.0 Spec. Currently we do not support
3335 * the "Back Ptr" which means we support on IN of
3336 * ~4*MAX_UFRAME_SITD_XFER bandwidth/
3338 uFrames = (cbandwidth / MAX_UFRAME_SITD_XFER) + 2;
3339 if (cbandwidth % MAX_UFRAME_SITD_XFER) {
3340 uFrames++;
3342 if (uFrames > 6) {
3344 return (USB_FAILURE);
3346 *smask = 0x1;
3347 *cmask = 0x00;
3348 for (i = 0; i < uFrames; i++) {
3349 *cmask = *cmask << 1;
3350 *cmask |= 0x1;
3352 /* cmask must start 2 frames after the smask */
3353 *cmask = *cmask << 2;
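/*
 * Worked example (assuming cbandwidth is non-zero and smaller than
 * MAX_UFRAME_SITD_XFER): uFrames = 0 + 2 + 1 = 3, so the loop above
 * builds cmask = 0x07 and the shift turns it into 0x1c. Together with
 * smask = 0x01 this requests a start split in micro-frame 0 and
 * complete splits in micro-frames 2, 3 and 4, before the search below
 * slides both masks to find a free position.
 */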
3355 found = 0;
3356 best_smask = 0x00;
3357 best_node_bandwidth = 0;
3358 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3359 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3360 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
3361 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3362 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
3363 &bw_cmask);
3366 * If this node cannot support our requirements skip to the
3367 * next leaf.
3369 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
3370 continue;
3373 for (i = 0; i < (EHCI_MAX_UFRAMES - uFrames - 2); i++) {
3374 if ((*smask & bw_smask) && (*cmask & bw_cmask)) {
3375 found = 1;
3376 break;
3378 *smask = *smask << 1;
3379 *cmask = *cmask << 1;
3383 * If an appropriate smask is found save the information if:
3384 * o best_smask has not been found yet.
3385 * - or -
3386 * o This is the node with the least amount of bandwidth
3388 if (found &&
3389 ((best_smask == 0x00) ||
3390 (best_node_bandwidth >
3391 (node_sbandwidth + node_cbandwidth)))) {
3392 best_node_bandwidth = node_sbandwidth + node_cbandwidth;
3393 best_array_leaf = array_leaf;
3394 best_smask = *smask;
3395 best_cmask = *cmask;
3400 * If we find a node that can handle the bandwidth, populate the
3401 * appropriate variables and return success.
3403 if (best_smask) {
3404 *smask = best_smask;
3405 *cmask = best_cmask;
3406 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3407 interval);
3408 ehci_update_bw_availability(ehcip, sbandwidth,
3409 ehci_index[best_array_leaf], leaf_count, best_smask);
3410 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
3411 ehci_index[best_array_leaf], leaf_count, best_cmask);
3413 return (USB_SUCCESS);
3416 return (USB_FAILURE);
3421 * ehci_find_bestfit_sitd_out_mask:
3423 * Find the smask in the bandwidth allocation.
3425 static int
3426 ehci_find_bestfit_sitd_out_mask(
3427 ehci_state_t *ehcip,
3428 uchar_t *smask,
3429 uint_t *pnode,
3430 uint_t sbandwidth,
3431 int interval)
3433 int i, uFrames, found;
3434 int array_leaf, best_array_leaf;
3435 uint_t node_sbandwidth;
3436 uint_t best_node_bandwidth;
3437 uint_t leaf_count;
3438 uchar_t bw_smask;
3439 uchar_t best_smask;
3441 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3442 "ehci_find_bestfit_sitd_out_mask: ");
3444 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3447 * Because of the way the leaves are set up, we will automatically
3448 * hit the leftmost leaf of every possible node with this interval.
3449 * You may only send MAX_UFRAME_SITD_XFER raw bytes per uFrame.
3451 *smask = 0x00;
3452 uFrames = sbandwidth / MAX_UFRAME_SITD_XFER;
3453 if (sbandwidth % MAX_UFRAME_SITD_XFER) {
3454 uFrames++;
3456 for (i = 0; i < uFrames; i++) {
3457 *smask = *smask << 1;
3458 *smask |= 0x1;
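/*
 * Worked example: if sbandwidth needs two micro-frames worth of
 * MAX_UFRAME_SITD_XFER bytes, uFrames = 2 and the loop above builds
 * smask = 0x03. The search below then slides this two-bit window
 * across the micro-frames, stopping once it reaches the last
 * micro-frame, since a start split is not allowed in the 8th uFrame.
 */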
3461 found = 0;
3462 best_smask = 0x00;
3463 best_node_bandwidth = 0;
3464 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3465 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3466 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
3467 &bw_smask);
3470 * If this node cannot support our requirements skip to the
3471 * next leaf.
3473 if (bw_smask == 0x00) {
3474 continue;
3477 /* You cannot have a start split on the 8th uFrame */
3478 for (i = 0; (*smask & 0x80) == 0; i++) {
3479 if (*smask & bw_smask) {
3480 found = 1;
3481 break;
3483 *smask = *smask << 1;
3487 * If an appropriate smask is found save the information if:
3488 * o best_smask has not been found yet.
3489 * - or -
3490 * o This is the node with the least amount of bandwidth
3492 if (found &&
3493 ((best_smask == 0x00) ||
3494 (best_node_bandwidth > node_sbandwidth))) {
3495 best_node_bandwidth = node_sbandwidth;
3496 best_array_leaf = array_leaf;
3497 best_smask = *smask;
3502 * If we find a node that can handle the bandwidth, populate the
3503 * appropriate variables and return success.
3505 if (best_smask) {
3506 *smask = best_smask;
3507 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3508 interval);
3509 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
3510 ehci_index[best_array_leaf], leaf_count, best_smask);
3512 return (USB_SUCCESS);
3515 return (USB_FAILURE);
3520 * ehci_calculate_bw_availability_mask:
3522 * Returns the "total bandwidth used" in this node.
3523 * Populates bw_mask with the uFrames that can support the bandwidth.
3525 * If none of the uFrames can support this bandwidth, then bw_mask
3526 * will return 0x00 and the "total bandwidth used" will be invalid.
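/*
 * For example (illustration only), a returned bw_mask of 0x3f means
 * that micro-frames 0 through 5 of every examined frame can absorb the
 * requested bandwidth without exceeding HS_PERIODIC_BANDWIDTH, while
 * micro-frames 6 and 7 cannot; bit j corresponds to micro-frame j.
 */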
3528 static uint_t
3529 ehci_calculate_bw_availability_mask(
3530 ehci_state_t *ehcip,
3531 uint_t bandwidth,
3532 int leaf,
3533 int leaf_count,
3534 uchar_t *bw_mask)
3536 int i, j;
3537 uchar_t bw_uframe;
3538 int uframe_total;
3539 ehci_frame_bandwidth_t *fbp;
3540 uint_t total_bandwidth = 0;
3542 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3543 "ehci_calculate_bw_availability_mask: leaf %d leaf count %d",
3544 leaf, leaf_count);
3546 /* Start by saying all uFrames are available */
3547 *bw_mask = 0xFF;
3549 for (i = 0; (i < leaf_count) || (*bw_mask == 0x00); i++) {
3550 fbp = &ehcip->ehci_frame_bandwidth[leaf + i];
3552 total_bandwidth += fbp->ehci_allocated_frame_bandwidth;
3554 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3556 * If the uFrame in bw_mask is available check to see if
3557 * it can support the additional bandwidth.
3559 bw_uframe = (*bw_mask & (0x1 << j));
3560 uframe_total =
3561 fbp->ehci_micro_frame_bandwidth[j] +
3562 bandwidth;
3563 if ((bw_uframe) &&
3564 (uframe_total > HS_PERIODIC_BANDWIDTH)) {
3565 *bw_mask = *bw_mask & ~bw_uframe;
3570 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3571 "ehci_calculate_bw_availability_mask: bandwidth mask 0x%x",
3572 *bw_mask);
3574 return (total_bandwidth);
3579 * ehci_update_bw_availability:
3581 * The leftmost leaf needs to be in terms of array position and
3582 * not the actual lattice position.
3584 static void
3585 ehci_update_bw_availability(
3586 ehci_state_t *ehcip,
3587 int bandwidth,
3588 int leftmost_leaf,
3589 int leaf_count,
3590 uchar_t mask)
3592 int i, j;
3593 ehci_frame_bandwidth_t *fbp;
3594 int uFrame_bandwidth[8];
3596 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3597 "ehci_update_bw_availability: "
3598 "leaf %d count %d bandwidth 0x%x mask 0x%x",
3599 leftmost_leaf, leaf_count, bandwidth, mask);
3601 ASSERT(leftmost_leaf < 32);
3602 ASSERT(leftmost_leaf >= 0);
3604 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3605 if (mask & 0x1) {
3606 uFrame_bandwidth[j] = bandwidth;
3607 } else {
3608 uFrame_bandwidth[j] = 0;
3611 mask = mask >> 1;
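/*
 * Worked example: with mask = 0x1c and bandwidth = 74, the loop above
 * yields uFrame_bandwidth = {0, 0, 74, 74, 74, 0, 0, 0}, so each
 * affected leaf below gets 74 bytes added to micro-frames 2 through 4
 * and 222 bytes added to its allocated frame bandwidth total.
 */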
3614 /* Update all the affected leaves with the bandwidth */
3615 for (i = 0; i < leaf_count; i++) {
3616 fbp = &ehcip->ehci_frame_bandwidth[leftmost_leaf + i];
3618 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3619 fbp->ehci_micro_frame_bandwidth[j] +=
3620 uFrame_bandwidth[j];
3621 fbp->ehci_allocated_frame_bandwidth +=
3622 uFrame_bandwidth[j];
3628 * Miscellaneous functions
3632 * ehci_obtain_state:
3634 * NOTE: This function is also called from POLLED MODE.
3636 ehci_state_t *
3637 ehci_obtain_state(dev_info_t *dip)
3639 int instance = ddi_get_instance(dip);
3641 ehci_state_t *state = ddi_get_soft_state(ehci_statep, instance);
3643 ASSERT(state != NULL);
3645 return (state);
3650 * ehci_state_is_operational:
3652 * Check the Host controller state and return proper values.
3655 ehci_state_is_operational(ehci_state_t *ehcip)
3657 int val;
3659 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3661 switch (ehcip->ehci_hc_soft_state) {
3662 case EHCI_CTLR_INIT_STATE:
3663 case EHCI_CTLR_SUSPEND_STATE:
3664 val = USB_FAILURE;
3665 break;
3666 case EHCI_CTLR_OPERATIONAL_STATE:
3667 val = USB_SUCCESS;
3668 break;
3669 case EHCI_CTLR_ERROR_STATE:
3670 val = USB_HC_HARDWARE_ERROR;
3671 break;
3672 default:
3673 val = USB_FAILURE;
3674 break;
3677 return (val);
3682 * ehci_do_soft_reset
3684 * Do soft reset of ehci host controller.
3687 ehci_do_soft_reset(ehci_state_t *ehcip)
3689 usb_frame_number_t before_frame_number, after_frame_number;
3690 ehci_regs_t *ehci_save_regs;
3692 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3694 /* Increment host controller error count */
3695 ehcip->ehci_hc_error++;
3697 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3698 "ehci_do_soft_reset:"
3699 "Reset ehci host controller 0x%x", ehcip->ehci_hc_error);
3702 * Allocate space for saving current Host Controller
3703 * registers. Don't do any recovery if allocation
3704 * fails.
3706 ehci_save_regs = (ehci_regs_t *)
3707 kmem_zalloc(sizeof (ehci_regs_t), KM_NOSLEEP);
3709 if (ehci_save_regs == NULL) {
3710 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3711 "ehci_do_soft_reset: kmem_zalloc failed");
3713 return (USB_FAILURE);
3716 /* Save current ehci registers */
3717 ehci_save_regs->ehci_command = Get_OpReg(ehci_command);
3718 ehci_save_regs->ehci_interrupt = Get_OpReg(ehci_interrupt);
3719 ehci_save_regs->ehci_ctrl_segment = Get_OpReg(ehci_ctrl_segment);
3720 ehci_save_regs->ehci_async_list_addr = Get_OpReg(ehci_async_list_addr);
3721 ehci_save_regs->ehci_config_flag = Get_OpReg(ehci_config_flag);
3722 ehci_save_regs->ehci_periodic_list_base =
3723 Get_OpReg(ehci_periodic_list_base);
3725 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3726 "ehci_do_soft_reset: Save reg = 0x%p", (void *)ehci_save_regs);
3728 /* Disable all list processing and interrupts */
3729 Set_OpReg(ehci_command, Get_OpReg(ehci_command) &
3730 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE));
3732 /* Disable all EHCI interrupts */
3733 Set_OpReg(ehci_interrupt, 0);
3735 /* Wait for a few milliseconds */
3736 drv_usecwait(EHCI_SOF_TIMEWAIT);
3738 /* Do light soft reset of ehci host controller */
3739 Set_OpReg(ehci_command,
3740 Get_OpReg(ehci_command) | EHCI_CMD_LIGHT_HC_RESET);
3742 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3743 "ehci_do_soft_reset: Reset in progress");
3745 /* Wait for reset to complete */
3746 drv_usecwait(EHCI_RESET_TIMEWAIT);
3749 * Restore the previously saved EHCI register values
3750 * into the current EHCI registers.
3752 Set_OpReg(ehci_ctrl_segment, (uint32_t)
3753 ehci_save_regs->ehci_ctrl_segment);
3755 Set_OpReg(ehci_periodic_list_base, (uint32_t)
3756 ehci_save_regs->ehci_periodic_list_base);
3758 Set_OpReg(ehci_async_list_addr, (uint32_t)
3759 ehci_save_regs->ehci_async_list_addr);
3762 * For some reason this register might get nulled out by
3763 * the Uli M1575 South Bridge. To work around the hardware
3764 * problem, check the value after write and retry if the
3765 * last write fails.
3767 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
3768 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
3769 (ehci_save_regs->ehci_async_list_addr !=
3770 Get_OpReg(ehci_async_list_addr))) {
3771 int retry = 0;
3773 Set_OpRegRetry(ehci_async_list_addr, (uint32_t)
3774 ehci_save_regs->ehci_async_list_addr, retry);
3775 if (retry >= EHCI_MAX_RETRY) {
3776 USB_DPRINTF_L2(PRINT_MASK_ATTA,
3777 ehcip->ehci_log_hdl, "ehci_do_soft_reset:"
3778 " ASYNCLISTADDR write failed.");
3780 return (USB_FAILURE);
3782 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
3783 "ehci_do_soft_reset: ASYNCLISTADDR "
3784 "write failed, retry=%d", retry);
3787 Set_OpReg(ehci_config_flag, (uint32_t)
3788 ehci_save_regs->ehci_config_flag);
3790 /* Enable both Asynchronous and Periodic Schedule if necessary */
3791 ehci_toggle_scheduler(ehcip);
3794 * Set ehci_interrupt to enable all interrupts except Root
3795 * Hub Status change and frame list rollover interrupts.
3797 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
3798 EHCI_INTR_FRAME_LIST_ROLLOVER |
3799 EHCI_INTR_USB_ERROR |
3800 EHCI_INTR_USB);
3803 * Deallocate the space that was allocated for saving
3804 * HC registers.
3806 kmem_free((void *) ehci_save_regs, sizeof (ehci_regs_t));
3809 * Set the desired interrupt threshold, frame list size (if
3810 * applicable) and turn on the EHCI host controller.
3812 Set_OpReg(ehci_command, ((Get_OpReg(ehci_command) &
3813 ~EHCI_CMD_INTR_THRESHOLD) |
3814 (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));
3816 /* Wait 10ms for EHCI to start sending SOF */
3817 drv_usecwait(EHCI_RESET_TIMEWAIT);
3820 * Get the current usb frame number before waiting for
3821 * a few milliseconds.
3823 before_frame_number = ehci_get_current_frame_number(ehcip);
3825 /* Wait for a few milliseconds */
3826 drv_usecwait(EHCI_SOF_TIMEWAIT);
3829 * Get the current usb frame number after waiting for
3830 * a few milliseconds.
3832 after_frame_number = ehci_get_current_frame_number(ehcip);
3834 USB_DPRINTF_L4(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3835 "ehci_do_soft_reset: Before Frame Number 0x%llx "
3836 "After Frame Number 0x%llx",
3837 (unsigned long long)before_frame_number,
3838 (unsigned long long)after_frame_number);
3840 if ((after_frame_number <= before_frame_number) &&
3841 (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {
3843 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3844 "ehci_do_soft_reset: Soft reset failed");
3846 return (USB_FAILURE);
3849 return (USB_SUCCESS);
3854 * ehci_get_xfer_attrs:
3856 * Get the attributes of a particular xfer.
3858 * NOTE: This function is also called from POLLED MODE.
3860 usb_req_attrs_t
3861 ehci_get_xfer_attrs(
3862 ehci_state_t *ehcip,
3863 ehci_pipe_private_t *pp,
3864 ehci_trans_wrapper_t *tw)
3866 usb_ep_descr_t *eptd = &pp->pp_pipe_handle->p_ep;
3867 usb_req_attrs_t attrs = USB_ATTRS_NONE;
3869 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3870 "ehci_get_xfer_attrs:");
3872 switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
3873 case USB_EP_ATTR_CONTROL:
3874 attrs = ((usb_ctrl_req_t *)
3875 tw->tw_curr_xfer_reqp)->ctrl_attributes;
3876 break;
3877 case USB_EP_ATTR_BULK:
3878 attrs = ((usb_bulk_req_t *)
3879 tw->tw_curr_xfer_reqp)->bulk_attributes;
3880 break;
3881 case USB_EP_ATTR_INTR:
3882 attrs = ((usb_intr_req_t *)
3883 tw->tw_curr_xfer_reqp)->intr_attributes;
3884 break;
3887 return (attrs);
3892 * ehci_get_current_frame_number:
3894 * Get the current software based usb frame number.
3896 usb_frame_number_t
3897 ehci_get_current_frame_number(ehci_state_t *ehcip)
3899 usb_frame_number_t usb_frame_number;
3900 usb_frame_number_t ehci_fno, micro_frame_number;
3902 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3904 ehci_fno = ehcip->ehci_fno;
3905 micro_frame_number = Get_OpReg(ehci_frame_index) & 0x3FFF;
3908 * Calculate current software based usb frame number.
3910 * This code accounts for the fact that the frame number is
3911 * updated by the Host Controller before the ehci driver
3912 * gets a FrameListRollover interrupt that will adjust the
3913 * higher part of the frame number.
3915 * Refer to the EHCI specification 1.0, section 2.3.2, page 21.
3917 micro_frame_number = ((micro_frame_number & 0x1FFF) |
3918 ehci_fno) + (((micro_frame_number & 0x3FFF) ^
3919 ehci_fno) & 0x2000);
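/*
 * Arithmetic illustration of the expression above: if ehci_fno is 0
 * and the hardware frame index reads 0x2005 micro-frames (it has
 * passed 0x1fff but the rollover interrupt has not yet been serviced),
 * then (0x0005 | 0) + ((0x2005 ^ 0) & 0x2000) = 0x2005, keeping the
 * software micro-frame count monotonic across the rollover.
 */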
3922 * A micro-frame is equivalent to 125 usec. Eight
3923 * micro-frames are equivalent to one millisecond,
3924 * or one usb frame.
3926 usb_frame_number = micro_frame_number >>
3927 EHCI_uFRAMES_PER_USB_FRAME_SHIFT;
3929 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3930 "ehci_get_current_frame_number: "
3931 "Current usb uframe number = 0x%llx "
3932 "Current usb frame number = 0x%llx",
3933 (unsigned long long)micro_frame_number,
3934 (unsigned long long)usb_frame_number);
3936 return (usb_frame_number);
3941 * ehci_cpr_cleanup:
3943 * Clean up ehci state and other ehci specific information across
3944 * Check Point Resume (CPR).
3946 static void
3947 ehci_cpr_cleanup(ehci_state_t *ehcip)
3949 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3951 /* Reset software part of usb frame number */
3952 ehcip->ehci_fno = 0;
3957 * ehci_wait_for_sof:
3959 * Wait for a couple of SOF interrupts
3962 ehci_wait_for_sof(ehci_state_t *ehcip)
3964 usb_frame_number_t before_frame_number, after_frame_number;
3965 int error = USB_SUCCESS;
3967 USB_DPRINTF_L4(PRINT_MASK_LISTS,
3968 ehcip->ehci_log_hdl, "ehci_wait_for_sof");
3970 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3972 error = ehci_state_is_operational(ehcip);
3974 if (error != USB_SUCCESS) {
3976 return (error);
3979 /* Get the current usb frame number before waiting for two SOFs */
3980 before_frame_number = ehci_get_current_frame_number(ehcip);
3982 mutex_exit(&ehcip->ehci_int_mutex);
3984 /* Wait for a few milliseconds */
3985 delay(drv_usectohz(EHCI_SOF_TIMEWAIT));
3987 mutex_enter(&ehcip->ehci_int_mutex);
3989 /* Get the current usb frame number after woken up */
3990 after_frame_number = ehci_get_current_frame_number(ehcip);
3992 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3993 "ehci_wait_for_sof: framenumber: before 0x%llx "
3994 "after 0x%llx",
3995 (unsigned long long)before_frame_number,
3996 (unsigned long long)after_frame_number);
3998 /* Return failure, if usb frame number has not been changed */
3999 if (after_frame_number <= before_frame_number) {
4001 if ((ehci_do_soft_reset(ehcip)) != USB_SUCCESS) {
4003 USB_DPRINTF_L0(PRINT_MASK_LISTS,
4004 ehcip->ehci_log_hdl, "No SOF interrupts");
4006 /* Set host controller soft state to error */
4007 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
4009 return (USB_FAILURE);
4014 return (USB_SUCCESS);
4018 * Toggle the async/periodic schedule based on the open pipe count.
4019 * During pipe cleanup (in the pipe reset case), the pipe's QH is temporarily
4020 * disabled, but the TW on the pipe is not freed. In this case, we need
4021 * to disable the async/periodic schedule for some non-compatible hardware.
4022 * Otherwise, the hardware will overwrite software's configuration of
4023 * the QH.
4025 void
4026 ehci_toggle_scheduler_on_pipe(ehci_state_t *ehcip)
4028 uint_t temp_reg, cmd_reg;
4030 cmd_reg = Get_OpReg(ehci_command);
4031 temp_reg = cmd_reg;
4034 * Enable/Disable asynchronous scheduler, and
4035 * turn on/off async list door bell
4037 if (ehcip->ehci_open_async_count) {
4038 if ((ehcip->ehci_async_req_count > 0) &&
4039 ((cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE) == 0)) {
4041 * For some reason this address might get nulled out by
4042 * the ehci chip. Set it here just in case it is null.
4044 Set_OpReg(ehci_async_list_addr,
4045 ehci_qh_cpu_to_iommu(ehcip,
4046 ehcip->ehci_head_of_async_sched_list));
4049 * For some reason this register might get nulled out by
4050 * the Uli M1575 Southbridge. To work around the HW
4051 * problem, check the value after write and retry if the
4052 * last write fails.
4054 * If the ASYNCLISTADDR remains "stuck" after
4055 * EHCI_MAX_RETRY retries, then the M1575 is broken
4056 * and is stuck in an inconsistent state and is about
4057 * to crash the machine with a trn_oor panic when it
4058 * does a DMA read from 0x0. It is better to panic
4059 * now rather than wait for the trn_oor crash; this
4060 * way Customer Service will have a clean signature
4061 * that indicts the M1575 chip rather than a
4062 * mysterious and hard-to-diagnose trn_oor panic.
4064 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4065 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
4066 (ehci_qh_cpu_to_iommu(ehcip,
4067 ehcip->ehci_head_of_async_sched_list) !=
4068 Get_OpReg(ehci_async_list_addr))) {
4069 int retry = 0;
4071 Set_OpRegRetry(ehci_async_list_addr,
4072 ehci_qh_cpu_to_iommu(ehcip,
4073 ehcip->ehci_head_of_async_sched_list),
4074 retry);
4075 if (retry >= EHCI_MAX_RETRY)
4076 cmn_err(CE_PANIC,
4077 "ehci_toggle_scheduler_on_pipe: "
4078 "ASYNCLISTADDR write failed.");
4080 USB_DPRINTF_L2(PRINT_MASK_ATTA,
4081 ehcip->ehci_log_hdl,
4082 "ehci_toggle_scheduler_on_pipe:"
4083 " ASYNCLISTADDR write failed, retry=%d",
4084 retry);
4087 cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE;
4089 } else {
4090 cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE;
4093 if (ehcip->ehci_open_periodic_count) {
4094 if ((ehcip->ehci_periodic_req_count > 0) &&
4095 ((cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE) == 0)) {
4097 * For some reason this address gets nulled out by
4098 * the ehci chip. Set it here just in case it is null.
4100 Set_OpReg(ehci_periodic_list_base,
4101 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
4102 0xFFFFF000));
4103 cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
4105 } else {
4106 cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE;
4109 /* Just an optimization */
4110 if (temp_reg != cmd_reg) {
4111 Set_OpReg(ehci_command, cmd_reg);
4116 /*
4117 * ehci_toggle_scheduler:
4118 *
4119 * Turn the scheduler on or off based on the pipe open count.
4120 */
4121 void
4122 ehci_toggle_scheduler(ehci_state_t *ehcip)
4123 {
4124 uint_t temp_reg, cmd_reg;
4126 /*
4127 * For performance, we only need to change the scheduler enable bits
4128 * when the async or periodic request count has just reached 1 or
4129 * dropped to 0.
4130 *
4131 * The relevant bits are already enabled if both the async and
4132 * periodic req counts are > 1, OR the async req count is > 1 and
4133 * there is no periodic pipe, OR the periodic req count is > 1 and
4134 * there is no async pipe.
4135 */
4135 if (((ehcip->ehci_async_req_count > 1) &&
4136 (ehcip->ehci_periodic_req_count > 1)) ||
4137 ((ehcip->ehci_async_req_count > 1) &&
4138 (ehcip->ehci_open_periodic_count == 0)) ||
4139 ((ehcip->ehci_periodic_req_count > 1) &&
4140 (ehcip->ehci_open_async_count == 0))) {
4141 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4142 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4143 "async/periodic bits no need to change");
4145 return;
4148 cmd_reg = Get_OpReg(ehci_command);
4149 temp_reg = cmd_reg;
4151 /*
4152 * Enable/disable the asynchronous scheduler, and
4153 * turn the async list doorbell on/off.
4154 */
4155 if (ehcip->ehci_async_req_count > 1) {
4156 /* the async bit is already enabled */
4157 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4158 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4159 "async bit already enabled: cmd_reg=0x%x", cmd_reg);
4160 } else if (ehcip->ehci_async_req_count == 1) {
4161 if (!(cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE)) {
4162 /*
4163 * For some reason this address might get nulled out by
4164 * the ehci chip. Set it here just in case it is null.
4165 * If it's not null, we should not reset the
4166 * ASYNCLISTADDR, because it's updated by hardware to
4167 * point to the next queue head to be executed.
4168 */
4169 if (!Get_OpReg(ehci_async_list_addr)) {
4170 Set_OpReg(ehci_async_list_addr,
4171 ehci_qh_cpu_to_iommu(ehcip,
4172 ehcip->ehci_head_of_async_sched_list));
4175 /*
4176 * For some reason this register might get nulled out by
4177 * the Uli M1575 Southbridge. To work around the HW
4178 * problem, check the value after the write and retry if the
4179 * last write fails.
4180 *
4181 * If the ASYNCLISTADDR remains "stuck" after
4182 * EHCI_MAX_RETRY retries, then the M1575 is broken
4183 * and is stuck in an inconsistent state and is about
4184 * to crash the machine with a trn_oor panic when it
4185 * does a DMA read from 0x0. It is better to panic
4186 * now rather than wait for the trn_oor crash; this
4187 * way Customer Service will have a clean signature
4188 * that indicts the M1575 chip rather than a
4189 * mysterious and hard-to-diagnose trn_oor panic.
4190 */
4191 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4192 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
4193 (ehci_qh_cpu_to_iommu(ehcip,
4194 ehcip->ehci_head_of_async_sched_list) !=
4195 Get_OpReg(ehci_async_list_addr))) {
4196 int retry = 0;
4198 Set_OpRegRetry(ehci_async_list_addr,
4199 ehci_qh_cpu_to_iommu(ehcip,
4200 ehcip->ehci_head_of_async_sched_list),
4201 retry);
4202 if (retry >= EHCI_MAX_RETRY)
4203 cmn_err(CE_PANIC,
4204 "ehci_toggle_scheduler: "
4205 "ASYNCLISTADDR write failed.");
4207 USB_DPRINTF_L3(PRINT_MASK_ATTA,
4208 ehcip->ehci_log_hdl,
4209 "ehci_toggle_scheduler: ASYNCLISTADDR "
4210 "write failed, retry=%d", retry);
4213 cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE;
4214 } else {
4215 cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE;
4218 if (ehcip->ehci_periodic_req_count > 1) {
4219 /* the periodic bit is already enabled. */
4220 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4221 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4222 "periodic bit already enabled: cmd_reg=0x%x", cmd_reg);
4223 } else if (ehcip->ehci_periodic_req_count == 1) {
4224 if (!(cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE)) {
4225 /*
4226 * For some reason this address gets nulled out by
4227 * the ehci chip. Set it here just in case it is null.
4228 */
4229 Set_OpReg(ehci_periodic_list_base,
4230 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
4231 0xFFFFF000));
4233 cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
4234 } else {
4235 cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE;
4238 /* Just an optimization */
4239 if (temp_reg != cmd_reg) {
4240 Set_OpReg(ehci_command, cmd_reg);
4242 /* To make sure the command register is updated correctly */
4243 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4244 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) {
4245 int retry = 0;
4247 Set_OpRegRetry(ehci_command, cmd_reg, retry);
4248 USB_DPRINTF_L3(PRINT_MASK_ATTA,
4249 ehcip->ehci_log_hdl,
4250 "ehci_toggle_scheduler: CMD write failed, retry=%d",
4251 retry);
4257 /*
4258 * ehci print functions
4259 */
4261 /*
4262 * ehci_print_caps:
4263 */
4264 void
4265 ehci_print_caps(ehci_state_t *ehcip)
4266 {
4267 uint_t i;
4269 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4270 "\n\tUSB 2.0 Host Controller Characteristics\n");
4272 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4273 "Caps Length: 0x%x Version: 0x%x\n",
4274 Get_8Cap(ehci_caps_length), Get_16Cap(ehci_version));
4276 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4277 "Structural Parameters\n");
4278 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4279 "Port indicators: %s", (Get_Cap(ehci_hcs_params) &
4280 EHCI_HCS_PORT_INDICATOR) ? "Yes" : "No");
4281 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4282 "No of Classic host controllers: 0x%x",
4283 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_COMP_CTRLS)
4284 >> EHCI_HCS_NUM_COMP_CTRL_SHIFT);
4285 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4286 "No of ports per Classic host controller: 0x%x",
4287 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS_CC)
4288 >> EHCI_HCS_NUM_PORTS_CC_SHIFT);
4289 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4290 "Port routing rules: %s", (Get_Cap(ehci_hcs_params) &
4291 EHCI_HCS_PORT_ROUTING_RULES) ? "Yes" : "No");
4292 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4293 "Port power control: %s", (Get_Cap(ehci_hcs_params) &
4294 EHCI_HCS_PORT_POWER_CONTROL) ? "Yes" : "No");
4295 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4296 "No of root hub ports: 0x%x\n",
4297 Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS);
4299 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4300 "Capability Parameters\n");
4301 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4302 "EHCI extended capability: %s", (Get_Cap(ehci_hcc_params) &
4303 EHCI_HCC_EECP) ? "Yes" : "No");
4304 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4305 "Isoch schedule threshold: 0x%x",
4306 Get_Cap(ehci_hcc_params) & EHCI_HCC_ISOCH_SCHED_THRESHOLD);
4307 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4308 "Async schedule park capability: %s", (Get_Cap(ehci_hcc_params) &
4309 EHCI_HCC_ASYNC_SCHED_PARK_CAP) ? "Yes" : "No");
4310 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4311 "Programmable frame list flag: %s", (Get_Cap(ehci_hcc_params) &
4312 EHCI_HCC_PROG_FRAME_LIST_FLAG) ? "256/512/1024" : "1024");
4313 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4314 "64bit addressing capability: %s\n", (Get_Cap(ehci_hcc_params) &
4315 EHCI_HCC_64BIT_ADDR_CAP) ? "Yes" : "No");
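/*
 * When the 64-bit addressing capability is reported, the controller
 * takes the upper 32 bits of all schedule addresses from CTRLDSSEGMENT
 * (printed as "Control Segment" by ehci_print_regs() below).
 */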
4317 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4318 "Classic Port Route Description");
4320 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) {
4321 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4322 "\tPort Route 0x%x: 0x%x", i, Get_8Cap(ehci_port_route[i]));
4327 /*
4328 * ehci_print_regs:
4329 */
4330 void
4331 ehci_print_regs(ehci_state_t *ehcip)
4332 {
4333 uint_t i;
4335 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4336 "\n\tEHCI%d Operational Registers\n",
4337 ddi_get_instance(ehcip->ehci_dip));
4339 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4340 "Command: 0x%x Status: 0x%x",
4341 Get_OpReg(ehci_command), Get_OpReg(ehci_status));
4342 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4343 "Interrupt: 0x%x Frame Index: 0x%x",
4344 Get_OpReg(ehci_interrupt), Get_OpReg(ehci_frame_index));
4345 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4346 "Control Segment: 0x%x Periodic List Base: 0x%x",
4347 Get_OpReg(ehci_ctrl_segment), Get_OpReg(ehci_periodic_list_base));
4348 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4349 "Async List Addr: 0x%x Config Flag: 0x%x",
4350 Get_OpReg(ehci_async_list_addr), Get_OpReg(ehci_config_flag));
4352 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4353 "Root Hub Port Status");
4355 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) {
4356 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4357 "\tPort Status 0x%x: 0x%x ", i,
4358 Get_OpReg(ehci_rh_port_status[i]));
4363 /*
4364 * ehci_print_qh:
4365 */
4366 void
4367 ehci_print_qh(
4368 ehci_state_t *ehcip,
4369 ehci_qh_t *qh)
4370 {
4371 uint_t i;
4373 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4374 "ehci_print_qh: qh = 0x%p", (void *)qh);
4376 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4377 "\tqh_link_ptr: 0x%x ", Get_QH(qh->qh_link_ptr));
4378 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4379 "\tqh_ctrl: 0x%x ", Get_QH(qh->qh_ctrl));
4380 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4381 "\tqh_split_ctrl: 0x%x ", Get_QH(qh->qh_split_ctrl));
4382 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4383 "\tqh_curr_qtd: 0x%x ", Get_QH(qh->qh_curr_qtd));
4384 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4385 "\tqh_next_qtd: 0x%x ", Get_QH(qh->qh_next_qtd));
4386 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4387 "\tqh_alt_next_qtd: 0x%x ", Get_QH(qh->qh_alt_next_qtd));
4388 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4389 "\tqh_status: 0x%x ", Get_QH(qh->qh_status));
4391 for (i = 0; i < 5; i++) {
4392 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4393 "\tqh_buf[%d]: 0x%x ", i, Get_QH(qh->qh_buf[i]));
4396 for (i = 0; i < 5; i++) {
4397 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4398 "\tqh_buf_high[%d]: 0x%x ",
4399 i, Get_QH(qh->qh_buf_high[i]));
4402 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4403 "\tqh_dummy_qtd: 0x%x ", Get_QH(qh->qh_dummy_qtd));
4404 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4405 "\tqh_prev: 0x%x ", Get_QH(qh->qh_prev));
4406 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4407 "\tqh_state: 0x%x ", Get_QH(qh->qh_state));
4408 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4409 "\tqh_reclaim_next: 0x%x ", Get_QH(qh->qh_reclaim_next));
4410 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4411 "\tqh_reclaim_frame: 0x%x ", Get_QH(qh->qh_reclaim_frame));
4415 /*
4416 * ehci_print_qtd:
4417 */
4418 void
4419 ehci_print_qtd(
4420 ehci_state_t *ehcip,
4421 ehci_qtd_t *qtd)
4422 {
4423 uint_t i;
4425 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4426 "ehci_print_qtd: qtd = 0x%p", (void *)qtd);
4428 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4429 "\tqtd_next_qtd: 0x%x ", Get_QTD(qtd->qtd_next_qtd));
4430 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4431 "\tqtd_alt_next_qtd: 0x%x ", Get_QTD(qtd->qtd_alt_next_qtd));
4432 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4433 "\tqtd_ctrl: 0x%x ", Get_QTD(qtd->qtd_ctrl));
4435 for (i = 0; i < 5; i++) {
4436 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4437 "\tqtd_buf[%d]: 0x%x ", i, Get_QTD(qtd->qtd_buf[i]));
4440 for (i = 0; i < 5; i++) {
4441 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4442 "\tqtd_buf_high[%d]: 0x%x ",
4443 i, Get_QTD(qtd->qtd_buf_high[i]));
4446 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4447 "\tqtd_trans_wrapper: 0x%x ", Get_QTD(qtd->qtd_trans_wrapper));
4448 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4449 "\tqtd_tw_next_qtd: 0x%x ", Get_QTD(qtd->qtd_tw_next_qtd));
4450 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4451 "\tqtd_active_qtd_next: 0x%x ", Get_QTD(qtd->qtd_active_qtd_next));
4452 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4453 "\tqtd_active_qtd_prev: 0x%x ", Get_QTD(qtd->qtd_active_qtd_prev));
4454 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4455 "\tqtd_state: 0x%x ", Get_QTD(qtd->qtd_state));
4456 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4457 "\tqtd_ctrl_phase: 0x%x ", Get_QTD(qtd->qtd_ctrl_phase));
4458 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4459 "\tqtd_xfer_offs: 0x%x ", Get_QTD(qtd->qtd_xfer_offs));
4460 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4461 "\tqtd_xfer_len: 0x%x ", Get_QTD(qtd->qtd_xfer_len));
4464 /*
4465 * ehci kstat functions
4466 */
4468 /*
4469 * ehci_create_stats:
4470 *
4471 * Allocate and initialize the ehci kstat structures
4472 */
4473 void
4474 ehci_create_stats(ehci_state_t *ehcip)
4475 {
4476 char kstatname[KSTAT_STRLEN];
4477 const char *dname = ddi_driver_name(ehcip->ehci_dip);
4478 char *usbtypes[USB_N_COUNT_KSTATS] =
4479 {"ctrl", "isoch", "bulk", "intr"};
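/*
 * The order here presumably mirrors the USB_EP_ATTR_* transfer-type
 * values used to index ehci_count_stats (control, isoch, bulk, intr);
 * see the EHCI_*_STATS macros in ehcid.h.
 */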
4480 uint_t instance = ehcip->ehci_instance;
4481 ehci_intrs_stats_t *isp;
4482 int i;
4484 if (EHCI_INTRS_STATS(ehcip) == NULL) {
4485 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs",
4486 dname, instance);
4487 EHCI_INTRS_STATS(ehcip) = kstat_create("usba", instance,
4488 kstatname, "usb_interrupts", KSTAT_TYPE_NAMED,
4489 sizeof (ehci_intrs_stats_t) / sizeof (kstat_named_t),
4490 KSTAT_FLAG_PERSISTENT);
4492 if (EHCI_INTRS_STATS(ehcip)) {
4493 isp = EHCI_INTRS_STATS_DATA(ehcip);
4494 kstat_named_init(&isp->ehci_sts_total,
4495 "Interrupts Total", KSTAT_DATA_UINT64);
4496 kstat_named_init(&isp->ehci_sts_not_claimed,
4497 "Not Claimed", KSTAT_DATA_UINT64);
4498 kstat_named_init(&isp->ehci_sts_async_sched_status,
4499 "Async schedule status", KSTAT_DATA_UINT64);
4500 kstat_named_init(&isp->ehci_sts_periodic_sched_status,
4501 "Periodic sched status", KSTAT_DATA_UINT64);
4502 kstat_named_init(&isp->ehci_sts_empty_async_schedule,
4503 "Empty async schedule", KSTAT_DATA_UINT64);
4504 kstat_named_init(&isp->ehci_sts_host_ctrl_halted,
4505 "Host controller Halted", KSTAT_DATA_UINT64);
4506 kstat_named_init(&isp->ehci_sts_async_advance_intr,
4507 "Intr on async advance", KSTAT_DATA_UINT64);
4508 kstat_named_init(&isp->ehci_sts_host_system_error_intr,
4509 "Host system error", KSTAT_DATA_UINT64);
4510 kstat_named_init(&isp->ehci_sts_frm_list_rollover_intr,
4511 "Frame list rollover", KSTAT_DATA_UINT64);
4512 kstat_named_init(&isp->ehci_sts_rh_port_change_intr,
4513 "Port change detect", KSTAT_DATA_UINT64);
4514 kstat_named_init(&isp->ehci_sts_usb_error_intr,
4515 "USB error interrupt", KSTAT_DATA_UINT64);
4516 kstat_named_init(&isp->ehci_sts_usb_intr,
4517 "USB interrupt", KSTAT_DATA_UINT64);
4519 EHCI_INTRS_STATS(ehcip)->ks_private = ehcip;
4520 EHCI_INTRS_STATS(ehcip)->ks_update = nulldev;
4521 kstat_install(EHCI_INTRS_STATS(ehcip));
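/*
 * Once installed, these counters are visible from userland via
 * kstat(1M), e.g. something like "kstat -m usba -n ehci0,intrs"
 * for instance 0 (the kstat name comes from the snprintf above).
 */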
4525 if (EHCI_TOTAL_STATS(ehcip) == NULL) {
4526 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total",
4527 dname, instance);
4528 EHCI_TOTAL_STATS(ehcip) = kstat_create("usba", instance,
4529 kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1,
4530 KSTAT_FLAG_PERSISTENT);
4532 if (EHCI_TOTAL_STATS(ehcip)) {
4533 kstat_install(EHCI_TOTAL_STATS(ehcip));
4537 for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
4538 if (ehcip->ehci_count_stats[i] == NULL) {
4539 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s",
4540 dname, instance, usbtypes[i]);
4541 ehcip->ehci_count_stats[i] = kstat_create("usba",
4542 instance, kstatname, "usb_byte_count",
4543 KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
4545 if (ehcip->ehci_count_stats[i]) {
4546 kstat_install(ehcip->ehci_count_stats[i]);
4553 /*
4554 * ehci_destroy_stats:
4555 *
4556 * Clean up ehci kstat structures
4557 */
4558 void
4559 ehci_destroy_stats(ehci_state_t *ehcip)
4560 {
4561 int i;
4563 if (EHCI_INTRS_STATS(ehcip)) {
4564 kstat_delete(EHCI_INTRS_STATS(ehcip));
4565 EHCI_INTRS_STATS(ehcip) = NULL;
4568 if (EHCI_TOTAL_STATS(ehcip)) {
4569 kstat_delete(EHCI_TOTAL_STATS(ehcip));
4570 EHCI_TOTAL_STATS(ehcip) = NULL;
4573 for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
4574 if (ehcip->ehci_count_stats[i]) {
4575 kstat_delete(ehcip->ehci_count_stats[i]);
4576 ehcip->ehci_count_stats[i] = NULL;
4582 /*
4583 * ehci_do_intrs_stats:
4584 *
4585 * ehci status information
4586 */
4587 void
4588 ehci_do_intrs_stats(
4589 ehci_state_t *ehcip,
4590 int val)
4591 {
4592 if (EHCI_INTRS_STATS(ehcip)) {
4593 EHCI_INTRS_STATS_DATA(ehcip)->ehci_sts_total.value.ui64++;
4594 switch (val) {
4595 case EHCI_STS_ASYNC_SCHED_STATUS:
4596 EHCI_INTRS_STATS_DATA(ehcip)->
4597 ehci_sts_async_sched_status.value.ui64++;
4598 break;
4599 case EHCI_STS_PERIODIC_SCHED_STATUS:
4600 EHCI_INTRS_STATS_DATA(ehcip)->
4601 ehci_sts_periodic_sched_status.value.ui64++;
4602 break;
4603 case EHCI_STS_EMPTY_ASYNC_SCHEDULE:
4604 EHCI_INTRS_STATS_DATA(ehcip)->
4605 ehci_sts_empty_async_schedule.value.ui64++;
4606 break;
4607 case EHCI_STS_HOST_CTRL_HALTED:
4608 EHCI_INTRS_STATS_DATA(ehcip)->
4609 ehci_sts_host_ctrl_halted.value.ui64++;
4610 break;
4611 case EHCI_STS_ASYNC_ADVANCE_INTR:
4612 EHCI_INTRS_STATS_DATA(ehcip)->
4613 ehci_sts_async_advance_intr.value.ui64++;
4614 break;
4615 case EHCI_STS_HOST_SYSTEM_ERROR_INTR:
4616 EHCI_INTRS_STATS_DATA(ehcip)->
4617 ehci_sts_host_system_error_intr.value.ui64++;
4618 break;
4619 case EHCI_STS_FRM_LIST_ROLLOVER_INTR:
4620 EHCI_INTRS_STATS_DATA(ehcip)->
4621 ehci_sts_frm_list_rollover_intr.value.ui64++;
4622 break;
4623 case EHCI_STS_RH_PORT_CHANGE_INTR:
4624 EHCI_INTRS_STATS_DATA(ehcip)->
4625 ehci_sts_rh_port_change_intr.value.ui64++;
4626 break;
4627 case EHCI_STS_USB_ERROR_INTR:
4628 EHCI_INTRS_STATS_DATA(ehcip)->
4629 ehci_sts_usb_error_intr.value.ui64++;
4630 break;
4631 case EHCI_STS_USB_INTR:
4632 EHCI_INTRS_STATS_DATA(ehcip)->
4633 ehci_sts_usb_intr.value.ui64++;
4634 break;
4635 default:
4636 EHCI_INTRS_STATS_DATA(ehcip)->
4637 ehci_sts_not_claimed.value.ui64++;
4638 break;
4644 /*
4645 * ehci_do_byte_stats:
4646 *
4647 * ehci data xfer information
4648 */
4649 void
4650 ehci_do_byte_stats(
4651 ehci_state_t *ehcip,
4652 size_t len,
4653 uint8_t attr,
4654 uint8_t addr)
4655 {
4656 uint8_t type = attr & USB_EP_ATTR_MASK;
4657 uint8_t dir = addr & USB_EP_DIR_MASK;
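/*
 * The transfer type (from the endpoint attributes) and the direction
 * bit (from the endpoint address) select which byte-count kstats are
 * updated: the per-controller total plus one of ctrl/isoch/bulk/intr.
 */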
4659 if (dir == USB_EP_DIR_IN) {
4660 EHCI_TOTAL_STATS_DATA(ehcip)->reads++;
4661 EHCI_TOTAL_STATS_DATA(ehcip)->nread += len;
4662 switch (type) {
4663 case USB_EP_ATTR_CONTROL:
4664 EHCI_CTRL_STATS(ehcip)->reads++;
4665 EHCI_CTRL_STATS(ehcip)->nread += len;
4666 break;
4667 case USB_EP_ATTR_BULK:
4668 EHCI_BULK_STATS(ehcip)->reads++;
4669 EHCI_BULK_STATS(ehcip)->nread += len;
4670 break;
4671 case USB_EP_ATTR_INTR:
4672 EHCI_INTR_STATS(ehcip)->reads++;
4673 EHCI_INTR_STATS(ehcip)->nread += len;
4674 break;
4675 case USB_EP_ATTR_ISOCH:
4676 EHCI_ISOC_STATS(ehcip)->reads++;
4677 EHCI_ISOC_STATS(ehcip)->nread += len;
4678 break;
4680 } else if (dir == USB_EP_DIR_OUT) {
4681 EHCI_TOTAL_STATS_DATA(ehcip)->writes++;
4682 EHCI_TOTAL_STATS_DATA(ehcip)->nwritten += len;
4683 switch (type) {
4684 case USB_EP_ATTR_CONTROL:
4685 EHCI_CTRL_STATS(ehcip)->writes++;
4686 EHCI_CTRL_STATS(ehcip)->nwritten += len;
4687 break;
4688 case USB_EP_ATTR_BULK:
4689 EHCI_BULK_STATS(ehcip)->writes++;
4690 EHCI_BULK_STATS(ehcip)->nwritten += len;
4691 break;
4692 case USB_EP_ATTR_INTR:
4693 EHCI_INTR_STATS(ehcip)->writes++;
4694 EHCI_INTR_STATS(ehcip)->nwritten += len;
4695 break;
4696 case USB_EP_ATTR_ISOCH:
4697 EHCI_ISOC_STATS(ehcip)->writes++;
4698 EHCI_ISOC_STATS(ehcip)->nwritten += len;
4699 break;