/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Portions Copyright (c) 2010, Oracle and/or its affiliates.
 * All rights reserved.
 */

/*
 * immu_regs.c - File that operates on an IMMU unit's registers
 */
#include <sys/dditypes.h>
#include <sys/ddi.h>
#include <sys/archsystm.h>
#include <sys/x86_archext.h>
#include <sys/spl.h>
#include <sys/sysmacros.h>
#include <sys/immu.h>
#include <sys/cpu.h>
#define	get_reg32(immu, offset)	ddi_get32((immu)->immu_regs_handle, \
		(uint32_t *)(immu->immu_regs_addr + (offset)))
#define	get_reg64(immu, offset)	ddi_get64((immu)->immu_regs_handle, \
		(uint64_t *)(immu->immu_regs_addr + (offset)))
#define	put_reg32(immu, offset, val)	ddi_put32((immu)->immu_regs_handle, \
		(uint32_t *)(immu->immu_regs_addr + (offset)), val)
#define	put_reg64(immu, offset, val)	ddi_put64((immu)->immu_regs_handle, \
		(uint64_t *)(immu->immu_regs_addr + (offset)), val)
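
/*
 * All register access goes through the DDI access handle obtained by
 * ddi_regs_map_setup() in setup_regs(), so reads and writes inherit the
 * no-byte-swap, strict-ordering semantics declared in immu_regs_attr below.
 */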

static void immu_regs_inv_wait(immu_inv_wait_t *iwp);

struct immu_flushops immu_regs_flushops = {
	immu_regs_context_fsi,
	immu_regs_context_dsi,
	immu_regs_context_gbl,
	immu_regs_iotlb_psi,
	immu_regs_iotlb_dsi,
	immu_regs_iotlb_gbl,
	immu_regs_inv_wait
};
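
/*
 * This ops vector performs invalidations through the MMIO invalidation
 * registers. When queued invalidation is enabled, a different flushops
 * vector (from the immu_qinv code) is presumably installed instead.
 */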

/*
 * wait max 60s for the hardware completion
 */
#define	IMMU_MAX_WAIT_TIME	60000000
#define	wait_completion(immu, offset, getf, completion, status) \
{ \
	clock_t stick = ddi_get_lbolt(); \
	clock_t ntick; \
	_NOTE(CONSTCOND) \
	while (1) { \
		status = getf(immu, offset); \
		ntick = ddi_get_lbolt(); \
		if (completion) { \
			break; \
		} \
		if (ntick - stick >= drv_usectohz(IMMU_MAX_WAIT_TIME)) { \
			ddi_err(DER_PANIC, NULL, \
			    "immu wait completion time out"); \
			/*NOTREACHED*/ \
		} else { \
			ht_pause(); \
		} \
	} \
}
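
/*
 * Usage sketch: callers poll a register until the completion condition
 * holds, e.g. waiting for the translation-enable status bit to be set:
 *
 *	wait_completion(immu, IMMU_REG_GLOBAL_STS,
 *	    get_reg32, (status & IMMU_GSTS_TES), status);
 *
 * The macro panics if the condition does not hold within 60 seconds.
 */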

static ddi_device_acc_attr_t immu_regs_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * iotlb_flush()
 *	flush the iotlb cache
 */
static void
iotlb_flush(immu_t *immu, uint_t domain_id,
    uint64_t addr, uint_t am, uint_t hint, immu_iotlb_inv_t type)
{
	uint64_t command = 0, iva = 0;
	uint_t iva_offset, iotlb_offset;
	uint64_t status = 0;

	/* no lock needed since cap and excap fields are RDONLY */
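	/*
	 * The ECAP register's IRO field gives the location of the IOTLB
	 * invalidation registers: the IVA register lives at that offset
	 * and the IOTLB invalidate register 8 bytes past it.
	 */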
	iva_offset = IMMU_ECAP_GET_IRO(immu->immu_regs_excap);
	iotlb_offset = iva_offset + 8;

	/*
	 * prepare drain read/write command
	 */
	if (IMMU_CAP_GET_DWD(immu->immu_regs_cap)) {
		command |= TLB_INV_DRAIN_WRITE;
	}

	if (IMMU_CAP_GET_DRD(immu->immu_regs_cap)) {
		command |= TLB_INV_DRAIN_READ;
	}

	/*
	 * Encode the invalidation type requested by the caller:
	 * page-selective, domain-selective or global. The fallback from
	 * page-selective to domain-selective invalidation on hardware
	 * that doesn't support it happens in immu_regs_iotlb_psi().
	 */
	switch (type) {
	case IOTLB_PSI:
		command |= TLB_INV_PAGE | TLB_INV_IVT |
		    TLB_INV_DID(domain_id);
		iva = addr | am | TLB_IVA_HINT(hint);
		break;
	case IOTLB_DSI:
		command |= TLB_INV_DOMAIN | TLB_INV_IVT |
		    TLB_INV_DID(domain_id);
		break;
	case IOTLB_GLOBAL:
		command |= TLB_INV_GLOBAL | TLB_INV_IVT;
		break;
	default:
		ddi_err(DER_MODE, NULL, "%s: incorrect iotlb flush type",
		    immu->immu_name);
		return;
	}

	if (iva)
		put_reg64(immu, iva_offset, iva);
	put_reg64(immu, iotlb_offset, command);
	wait_completion(immu, iotlb_offset, get_reg64,
	    (!(status & TLB_INV_IVT)), status);
}

/*
 * immu_regs_iotlb_psi()
 *	iotlb page specific invalidation
 */
/*ARGSUSED*/
void
immu_regs_iotlb_psi(immu_t *immu, uint_t did, uint64_t dvma, uint_t snpages,
    uint_t hint, immu_inv_wait_t *iwp)
{
	int dvma_am;
	int npg_am;
	int max_am;
	int am;
	uint64_t align;
	int npages_left;
	int npages;
	int i;

	if (!IMMU_CAP_GET_PSI(immu->immu_regs_cap)) {
		immu_regs_iotlb_dsi(immu, did, iwp);
		return;
	}

	max_am = IMMU_CAP_GET_MAMV(immu->immu_regs_cap);

	mutex_enter(&(immu->immu_regs_lock));

	npages_left = snpages;
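	/*
	 * A page-selective invalidation covers a naturally aligned,
	 * power-of-two run of pages described by an address mask (am).
	 * Each pass below uses the largest am permitted by the DVMA's
	 * alignment, the number of pages left and the hardware maximum.
	 * Hypothetical example: dvma = 0x201000 with snpages = 8 flushes
	 * 1 page at 0x201000 (4K-aligned only), then 2 pages at 0x202000,
	 * and so on, until the pages or the immu_flush_gran passes run out.
	 */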
	for (i = 0; i < immu_flush_gran && npages_left > 0; i++) {
		/* First calculate alignment of DVMA */

		if (dvma == 0) {
			dvma_am = max_am;
		} else {
			for (align = (1 << 12), dvma_am = 1;
			    (dvma & align) == 0; align <<= 1, dvma_am++)
				;
			dvma_am--;
		}

		/* Calculate the npg_am */
		npages = npages_left;
		for (npg_am = 0, npages >>= 1; npages; npages >>= 1, npg_am++)
			;

		am = MIN(max_am, MIN(dvma_am, npg_am));

		iotlb_flush(immu, did, dvma, am, hint, IOTLB_PSI);

		npages = (1 << am);
		npages_left -= npages;
		dvma += (npages * IMMU_PAGESIZE);
	}

	if (npages_left) {
		iotlb_flush(immu, did, 0, 0, 0, IOTLB_DSI);
	}
	mutex_exit(&(immu->immu_regs_lock));
}

/*
 * immu_regs_iotlb_dsi()
 *	domain specific invalidation
 */
/*ARGSUSED*/
void
immu_regs_iotlb_dsi(immu_t *immu, uint_t domain_id, immu_inv_wait_t *iwp)
{
	mutex_enter(&(immu->immu_regs_lock));
	iotlb_flush(immu, domain_id, 0, 0, 0, IOTLB_DSI);
	mutex_exit(&(immu->immu_regs_lock));
}

/*
 * immu_regs_iotlb_gbl()
 *	global iotlb invalidation
 */
/*ARGSUSED*/
void
immu_regs_iotlb_gbl(immu_t *immu, immu_inv_wait_t *iwp)
{
	mutex_enter(&(immu->immu_regs_lock));
	iotlb_flush(immu, 0, 0, 0, 0, IOTLB_GLOBAL);
	mutex_exit(&(immu->immu_regs_lock));
}
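
/*
 * gaw2agaw()
 *	Round a guest address width up to the next adjusted guest address
 *	width (AGAW) that VT-d page tables can represent: 12 bits of page
 *	offset plus a multiple of 9 bits per level, capped at 64.
 *	For example (hypothetical input), gaw = 36 gives
 *	r = (36 - 12) % 9 = 6, so agaw = 36 + 9 - 6 = 39.
 */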
static int
gaw2agaw(int gaw)
{
	int r, agaw;

	r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;

	if (agaw > 64)
		agaw = 64;

	return (agaw);
}

/*
 * set_agaw()
 *	calculate agaw for an IOMMU unit
 */
static int
set_agaw(immu_t *immu)
{
	int mgaw, magaw, agaw;
	uint_t bitpos;
	int max_sagaw_mask, sagaw_mask, mask;
	int nlevels;

	/*
	 * mgaw is the maximum guest address width.
	 * Addresses above this value will be
	 * blocked by the IOMMU unit.
	 * sagaw is a bitmask that lists all the
	 * AGAWs supported by this IOMMU unit.
	 */
	mgaw = IMMU_CAP_MGAW(immu->immu_regs_cap);
	sagaw_mask = IMMU_CAP_SAGAW(immu->immu_regs_cap);

	magaw = gaw2agaw(mgaw);

	/*
	 * Maximum SAGAW is specified by
	 * the VT-d spec.
	 */
	max_sagaw_mask = ((1 << 5) - 1);

	if (sagaw_mask > max_sagaw_mask) {
		ddi_err(DER_WARN, NULL, "%s: SAGAW bitmask (%x) "
		    "is larger than maximum SAGAW bitmask "
		    "(%x) specified by the Intel VT-d spec",
		    immu->immu_name, sagaw_mask, max_sagaw_mask);
		return (DDI_FAILURE);
	}

	/*
	 * Find a supported AGAW <= magaw
	 *
	 *	sagaw_mask	bitpos	AGAW (bits)	nlevels
	 *	==============================================
	 *	0 0 0 0 1	0	30		2
	 *	0 0 0 1 0	1	39		3
	 *	0 0 1 0 0	2	48		4
	 *	0 1 0 0 0	3	57		5
	 *	1 0 0 0 0	4	64(66)		6
	 */
	nlevels = 0;
	agaw = 0;
	for (mask = 1, bitpos = 0; bitpos < 5;
	    bitpos++, mask <<= 1) {
		if (mask & sagaw_mask) {
			nlevels = bitpos + 2;
			agaw = 30 + (bitpos * 9);
		}
	}

	/* calculated agaw can be > 64 */
	agaw = (agaw > 64) ? 64 : agaw;

	if (agaw < 30 || agaw > magaw) {
		ddi_err(DER_WARN, NULL, "%s: Calculated AGAW (%d) "
		    "is outside valid limits [30,%d] specified by the "
		    "VT-d spec and magaw", immu->immu_name, agaw, magaw);
		return (DDI_FAILURE);
	}

	if (nlevels < 2 || nlevels > 6) {
		ddi_err(DER_WARN, NULL, "%s: Calculated pagetable "
		    "level (%d) is outside valid limits [2,6]",
		    immu->immu_name, nlevels);
		return (DDI_FAILURE);
	}

	ddi_err(DER_LOG, NULL, "Calculated pagetable "
	    "level (%d), agaw = %d", nlevels, agaw);

	immu->immu_dvma_nlevels = nlevels;
	immu->immu_dvma_agaw = agaw;

	return (DDI_SUCCESS);
}

static int
setup_regs(immu_t *immu)
{
	int error;

	/*
	 * This lock may be acquired by the IOMMU interrupt handler
	 */
	mutex_init(&(immu->immu_regs_lock), NULL, MUTEX_DRIVER,
	    (void *)ipltospl(IMMU_INTR_IPL));

	/*
	 * map the register address space
	 */
	error = ddi_regs_map_setup(immu->immu_dip, 0,
	    (caddr_t *)&(immu->immu_regs_addr), (offset_t)0,
	    (offset_t)IMMU_REGSZ, &immu_regs_attr,
	    &(immu->immu_regs_handle));

	if (error == DDI_FAILURE) {
		ddi_err(DER_WARN, NULL, "%s: Intel IOMMU register map failed",
		    immu->immu_name);
		mutex_destroy(&(immu->immu_regs_lock));
		return (DDI_FAILURE);
	}

	/*
	 * get the register value
	 */
	immu->immu_regs_cap = get_reg64(immu, IMMU_REG_CAP);
	immu->immu_regs_excap = get_reg64(immu, IMMU_REG_EXCAP);

	/*
	 * if the hardware access is non-coherent, we need clflush
	 */
	if (IMMU_ECAP_GET_C(immu->immu_regs_excap)) {
		immu->immu_dvma_coherent = B_TRUE;
	} else {
		immu->immu_dvma_coherent = B_FALSE;
		if (!is_x86_feature(x86_featureset, X86FSET_CLFSH)) {
			ddi_err(DER_WARN, NULL,
			    "immu unit %s can't be enabled due to "
			    "missing clflush functionality", immu->immu_name);
			ddi_regs_map_free(&(immu->immu_regs_handle));
			mutex_destroy(&(immu->immu_regs_lock));
			return (DDI_FAILURE);
		}
	}

	/* Setup SNP and TM reserved fields */
	immu->immu_SNP_reserved = immu_regs_is_SNP_reserved(immu);
	immu->immu_TM_reserved = immu_regs_is_TM_reserved(immu);

	if (IMMU_ECAP_GET_CH(immu->immu_regs_excap) && immu_use_tm)
		immu->immu_ptemask = PDTE_MASK_TM;
	else
		immu->immu_ptemask = 0;

	/*
	 * Check for Mobile 4 series chipset
	 */
	if (immu_quirk_mobile4 == B_TRUE &&
	    !IMMU_CAP_GET_RWBF(immu->immu_regs_cap)) {
		ddi_err(DER_LOG, NULL,
		    "IMMU: Mobile 4 chipset quirk detected. "
		    "Force-setting RWBF");
		IMMU_CAP_SET_RWBF(immu->immu_regs_cap);
	}

	/*
	 * retrieve the maximum number of domains
	 */
	immu->immu_max_domains = IMMU_CAP_ND(immu->immu_regs_cap);

	/*
	 * calculate the agaw
	 */
	if (set_agaw(immu) != DDI_SUCCESS) {
		ddi_regs_map_free(&(immu->immu_regs_handle));
		mutex_destroy(&(immu->immu_regs_lock));
		return (DDI_FAILURE);
	}
	immu->immu_regs_cmdval = 0;

	immu->immu_flushops = &immu_regs_flushops;

	return (DDI_SUCCESS);
}

/* ############### Functions exported ################## */

/*
 * immu_regs_setup()
 *	Setup mappings to an IMMU unit's registers
 *	so that they can be read/written
 */
void
immu_regs_setup(list_t *listp)
{
	int i;
	immu_t *immu;

	for (i = 0; i < IMMU_MAXSEG; i++) {
		immu = list_head(listp);
		for (; immu; immu = list_next(listp, immu)) {
			/* do your best, continue on error */
			if (setup_regs(immu) != DDI_SUCCESS) {
				immu->immu_regs_setup = B_FALSE;
			} else {
				immu->immu_regs_setup = B_TRUE;
			}
		}
	}
}

/*
 * immu_regs_resume()
 *	Remap an IMMU unit's registers and restore its
 *	pre-suspend programming
 */
int
immu_regs_resume(immu_t *immu)
{
	int error;

	/*
	 * remap the register address space
	 */
	error = ddi_regs_map_setup(immu->immu_dip, 0,
	    (caddr_t *)&(immu->immu_regs_addr), (offset_t)0,
	    (offset_t)IMMU_REGSZ, &immu_regs_attr,
	    &(immu->immu_regs_handle));
	if (error != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	immu_regs_set_root_table(immu);

	immu_regs_intr_enable(immu, immu->immu_regs_intr_msi_addr,
	    immu->immu_regs_intr_msi_data, immu->immu_regs_intr_uaddr);

	(void) immu_intr_handler(immu);

	immu_regs_intrmap_enable(immu, immu->immu_intrmap_irta_reg);

	immu_regs_qinv_enable(immu, immu->immu_qinv_reg_value);

	return (error);
}

/*
 * immu_regs_suspend()
 */
void
immu_regs_suspend(immu_t *immu)
{
	immu->immu_intrmap_running = B_FALSE;

	/* Finally, unmap the regs */
	ddi_regs_map_free(&(immu->immu_regs_handle));
}
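
/*
 * Per the VT-d spec, each write to the global command register must
 * re-supply the control bits that are already enabled along with the bit
 * being changed; completion is reported through the global status
 * register. The driver therefore caches the enabled bits in
 * immu_regs_cmdval and ORs in the requested bit on every command write.
 */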

/*
 * immu_regs_startup()
 *	set an IMMU unit's registers to start up the unit
 */
void
immu_regs_startup(immu_t *immu)
{
	uint32_t status;

	if (immu->immu_regs_setup == B_FALSE) {
		return;
	}

	mutex_enter(&(immu->immu_regs_lock));
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_TE);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_TES), status);
	immu->immu_regs_cmdval |= IMMU_GCMD_TE;
	immu->immu_regs_running = B_TRUE;
	mutex_exit(&(immu->immu_regs_lock));

	ddi_err(DER_NOTE, NULL, "%s running", immu->immu_name);
}

/*
 * immu_regs_shutdown()
 *	shutdown a unit
 */
void
immu_regs_shutdown(immu_t *immu)
{
	uint32_t status;

	if (immu->immu_regs_running == B_FALSE) {
		return;
	}

	mutex_enter(&(immu->immu_regs_lock));
	immu->immu_regs_cmdval &= ~IMMU_GCMD_TE;
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, !(status & IMMU_GSTS_TES), status);
	immu->immu_regs_running = B_FALSE;
	mutex_exit(&(immu->immu_regs_lock));

	ddi_err(DER_NOTE, NULL, "IOMMU %s stopped", immu->immu_name);
}

/*
 * immu_regs_intr_enable()
 *	Program an IMMU unit's registers to set up the unit's
 *	interrupt handler
 */
void
immu_regs_intr_enable(immu_t *immu, uint32_t msi_addr, uint32_t msi_data,
    uint32_t uaddr)
{
	mutex_enter(&(immu->immu_regs_lock));
	immu->immu_regs_intr_msi_addr = msi_addr;
	immu->immu_regs_intr_uaddr = uaddr;
	immu->immu_regs_intr_msi_data = msi_data;
	put_reg32(immu, IMMU_REG_FEVNT_ADDR, msi_addr);
	put_reg32(immu, IMMU_REG_FEVNT_UADDR, uaddr);
	put_reg32(immu, IMMU_REG_FEVNT_DATA, msi_data);
	put_reg32(immu, IMMU_REG_FEVNT_CON, 0);
	mutex_exit(&(immu->immu_regs_lock));
}

/*
 * immu_regs_passthru_supported()
 *	Returns B_TRUE if passthru is supported
 */
boolean_t
immu_regs_passthru_supported(immu_t *immu)
{
	if (IMMU_ECAP_GET_PT(immu->immu_regs_excap)) {
		return (B_TRUE);
	}

	ddi_err(DER_WARN, NULL, "Passthru not supported");
	return (B_FALSE);
}

/*
 * immu_regs_is_TM_reserved()
 *	Returns B_TRUE if TM field is reserved
 */
boolean_t
immu_regs_is_TM_reserved(immu_t *immu)
{
	if (IMMU_ECAP_GET_DI(immu->immu_regs_excap) ||
	    IMMU_ECAP_GET_CH(immu->immu_regs_excap)) {
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * immu_regs_is_SNP_reserved()
 *	Returns B_TRUE if SNP field is reserved
 */
boolean_t
immu_regs_is_SNP_reserved(immu_t *immu)
{
	return (IMMU_ECAP_GET_SC(immu->immu_regs_excap) ? B_FALSE : B_TRUE);
}

/*
 * immu_regs_wbf_flush()
 *	If required and supported, write to IMMU
 *	unit's regs to flush DMA write buffer(s)
 */
void
immu_regs_wbf_flush(immu_t *immu)
{
	uint32_t status;

	if (!IMMU_CAP_GET_RWBF(immu->immu_regs_cap)) {
		return;
	}

	mutex_enter(&(immu->immu_regs_lock));
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_WBF);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (!(status & IMMU_GSTS_WBFS)), status);
	mutex_exit(&(immu->immu_regs_lock));
}

/*
 * immu_regs_cpu_flush()
 *	flush the cpu cache line after CPU memory writes, so
 *	IOMMU can see the writes
 */
void
immu_regs_cpu_flush(immu_t *immu, caddr_t addr, uint_t size)
{
	uintptr_t startline, endline;

	if (immu->immu_dvma_coherent == B_TRUE)
		return;
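
	/*
	 * Round the start and end addresses down to cache-line boundaries
	 * and flush every line in between. Hypothetical example, with
	 * x86_clflush_size = 64: addr = 0x1030, size = 0x50 gives
	 * startline = 0x1000 and endline = 0x1040, so the lines at 0x1000
	 * and 0x1040 are both flushed.
	 */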
	startline = (uintptr_t)addr & ~(uintptr_t)(x86_clflush_size - 1);
	endline = ((uintptr_t)addr + size - 1) &
	    ~(uintptr_t)(x86_clflush_size - 1);
	while (startline <= endline) {
		clflush_insn((caddr_t)startline);
		startline += x86_clflush_size;
	}

	mfence_insn();
}

/*
 * context_flush()
 *	flush the context cache
 */
static void
context_flush(immu_t *immu, uint8_t function_mask,
    uint16_t sid, uint_t did, immu_context_inv_t type)
{
	uint64_t command = 0;
	uint64_t status;

	/*
	 * define the command
	 */
	switch (type) {
	case CONTEXT_FSI:
		command |= CCMD_INV_ICC | CCMD_INV_DEVICE
		    | CCMD_INV_DID(did)
		    | CCMD_INV_SID(sid) | CCMD_INV_FM(function_mask);
		break;
	case CONTEXT_DSI:
		command |= CCMD_INV_ICC | CCMD_INV_DOMAIN
		    | CCMD_INV_DID(did);
		break;
	case CONTEXT_GLOBAL:
		command |= CCMD_INV_ICC | CCMD_INV_GLOBAL;
		break;
	default:
		ddi_err(DER_PANIC, NULL,
		    "%s: incorrect context cache flush type",
		    immu->immu_name);
		/*NOTREACHED*/
	}

	mutex_enter(&(immu->immu_regs_lock));
	put_reg64(immu, IMMU_REG_CONTEXT_CMD, command);
	wait_completion(immu, IMMU_REG_CONTEXT_CMD, get_reg64,
	    (!(status & CCMD_INV_ICC)), status);
	mutex_exit(&(immu->immu_regs_lock));
}

/*ARGSUSED*/
void
immu_regs_context_fsi(immu_t *immu, uint8_t function_mask,
    uint16_t source_id, uint_t domain_id, immu_inv_wait_t *iwp)
{
	context_flush(immu, function_mask, source_id, domain_id, CONTEXT_FSI);
}

/*ARGSUSED*/
void
immu_regs_context_dsi(immu_t *immu, uint_t domain_id, immu_inv_wait_t *iwp)
{
	context_flush(immu, 0, 0, domain_id, CONTEXT_DSI);
}

/*ARGSUSED*/
void
immu_regs_context_gbl(immu_t *immu, immu_inv_wait_t *iwp)
{
	context_flush(immu, 0, 0, 0, CONTEXT_GLOBAL);
}

/*
 * Nothing to do, all register operations are synchronous.
 */
/*ARGSUSED*/
static void
immu_regs_inv_wait(immu_inv_wait_t *iwp)
{
}

void
immu_regs_set_root_table(immu_t *immu)
{
	uint32_t status;

	mutex_enter(&(immu->immu_regs_lock));
	put_reg64(immu, IMMU_REG_ROOTENTRY,
	    immu->immu_ctx_root->hwpg_paddr);
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_SRTP);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_RTPS), status);
	mutex_exit(&(immu->immu_regs_lock));
}

/* enable queued invalidation interface */
void
immu_regs_qinv_enable(immu_t *immu, uint64_t qinv_reg_value)
{
	uint32_t status;

	if (immu_qinv_enable == B_FALSE)
		return;

	mutex_enter(&immu->immu_regs_lock);
	immu->immu_qinv_reg_value = qinv_reg_value;
	/* Initialize the Invalidation Queue Tail register to zero */
	put_reg64(immu, IMMU_REG_INVAL_QT, 0);

	/* set invalidation queue base address register */
	put_reg64(immu, IMMU_REG_INVAL_QAR, qinv_reg_value);

	/* enable queued invalidation interface */
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_QIE);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_QIES), status);
	mutex_exit(&immu->immu_regs_lock);

	immu->immu_regs_cmdval |= IMMU_GCMD_QIE;
	immu->immu_qinv_running = B_TRUE;
}

/* enable interrupt remapping hardware unit */
void
immu_regs_intrmap_enable(immu_t *immu, uint64_t irta_reg)
{
	uint32_t status;

	if (immu_intrmap_enable == B_FALSE)
		return;

	/* set interrupt remap table pointer */
	mutex_enter(&(immu->immu_regs_lock));
	immu->immu_intrmap_irta_reg = irta_reg;
	put_reg64(immu, IMMU_REG_IRTAR, irta_reg);
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_SIRTP);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_IRTPS), status);
	mutex_exit(&(immu->immu_regs_lock));

	/* global flush intr entry cache */
	immu_qinv_intr_global(immu, &immu->immu_intrmap_inv_wait);

	/* enable interrupt remapping */
	mutex_enter(&(immu->immu_regs_lock));
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_IRE);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_IRES),
	    status);
	immu->immu_regs_cmdval |= IMMU_GCMD_IRE;

	/* set compatible mode */
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_CFI);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_CFIS),
	    status);
	immu->immu_regs_cmdval |= IMMU_GCMD_CFI;
	mutex_exit(&(immu->immu_regs_lock));

	immu->immu_intrmap_running = B_TRUE;
}

uint64_t
immu_regs_get64(immu_t *immu, uint_t reg)
{
	return (get_reg64(immu, reg));
}

uint32_t
immu_regs_get32(immu_t *immu, uint_t reg)
{
	return (get_reg32(immu, reg));
}

void
immu_regs_put64(immu_t *immu, uint_t reg, uint64_t val)
{
	put_reg64(immu, reg, val);
}

void
immu_regs_put32(immu_t *immu, uint_t reg, uint32_t val)
{
	put_reg32(immu, reg, val);
}