4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
26 #include "amd_iommu_acpi.h"
27 #include "amd_iommu_impl.h"
29 static int create_acpi_hash(amd_iommu_acpi_t
*acpi
);
30 static void amd_iommu_acpi_table_fini(amd_iommu_acpi_t
**acpipp
);
32 static void dump_acpi_aliases(void);
38 static amd_iommu_acpi_global_t
*amd_iommu_acpi_global
;
39 static amd_iommu_acpi_ivhd_t
**amd_iommu_acpi_ivhd_hash
;
40 static amd_iommu_acpi_ivmd_t
**amd_iommu_acpi_ivmd_hash
;
43 type_byte_size(char *cp
)
45 uint8_t type8
= *((uint8_t *)cp
);
48 len_bits
= AMD_IOMMU_REG_GET8(&type8
, AMD_IOMMU_ACPI_DEVENTRY_LEN
);
60 cmn_err(CE_WARN
, "%s: Invalid deventry len: %d",
61 amd_iommu_modname
, len_bits
);
68 process_4byte_deventry(ivhd_container_t
*c
, char *cp
)
70 int entry_type
= *((uint8_t *)cp
);
71 ivhd_deventry_t deventry
= {0};
72 ivhd_deventry_t
*devp
;
78 deventry
.idev_len
= 4;
79 deventry
.idev_deviceid
= -1;
80 deventry
.idev_src_deviceid
= -1;
82 for (i
= 0; i
< 2; i
++) {
83 al
.ent8
[i
] = *((uint8_t *)&cp
[i
+ 1]);
88 deventry
.idev_type
= DEVENTRY_ALL
;
91 deventry
.idev_type
= DEVENTRY_SELECT
;
92 deventry
.idev_deviceid
= al
.ent16
;
95 deventry
.idev_type
= DEVENTRY_RANGE
;
96 deventry
.idev_deviceid
= al
.ent16
;
99 deventry
.idev_type
= DEVENTRY_RANGE_END
;
100 deventry
.idev_deviceid
= al
.ent16
;
104 ASSERT(al
.ent16
== 0);
111 devp
= kmem_alloc(sizeof (ivhd_deventry_t
), KM_SLEEP
);
114 if (c
->ivhdc_first_deventry
== NULL
)
115 c
->ivhdc_first_deventry
= devp
;
117 c
->ivhdc_last_deventry
->idev_next
= devp
;
119 c
->ivhdc_last_deventry
= devp
;
124 datsetting8
= (*((uint8_t *)&cp
[3]));
126 devp
->idev_Lint1Pass
= AMD_IOMMU_REG_GET8(&datsetting8
,
127 AMD_IOMMU_ACPI_LINT1PASS
);
129 devp
->idev_Lint0Pass
= AMD_IOMMU_REG_GET8(&datsetting8
,
130 AMD_IOMMU_ACPI_LINT0PASS
);
132 devp
->idev_SysMgt
= AMD_IOMMU_REG_GET8(&datsetting8
,
133 AMD_IOMMU_ACPI_SYSMGT
);
135 ASSERT(AMD_IOMMU_REG_GET8(&datsetting8
,
136 AMD_IOMMU_ACPI_DATRSV
) == 0);
138 devp
->idev_NMIPass
= AMD_IOMMU_REG_GET8(&datsetting8
,
139 AMD_IOMMU_ACPI_NMIPASS
);
141 devp
->idev_ExtIntPass
= AMD_IOMMU_REG_GET8(&datsetting8
,
142 AMD_IOMMU_ACPI_EXTINTPASS
);
144 devp
->idev_INITPass
= AMD_IOMMU_REG_GET8(&datsetting8
,
145 AMD_IOMMU_ACPI_INITPASS
);
149 process_8byte_deventry(ivhd_container_t
*c
, char *cp
)
152 int entry_type
= (uint8_t)*cp
;
153 ivhd_deventry_t deventry
= {0};
154 ivhd_deventry_t
*devp
;
155 align_16_t al1
= {0};
156 align_16_t al2
= {0};
157 align_32_t al3
= {0};
160 /* Length is 8 bytes */
161 deventry
.idev_len
= 8;
162 deventry
.idev_deviceid
= -1;
163 deventry
.idev_src_deviceid
= -1;
165 for (i
= 0; i
< 2; i
++) {
166 al1
.ent8
[i
] = *((uint8_t *)&cp
[i
+1]);
167 al2
.ent8
[i
] = *((uint8_t *)&cp
[i
+5]);
170 datsetting8
= *((uint8_t *)&cp
[3]);
172 switch (entry_type
) {
174 deventry
.idev_type
= DEVENTRY_ALIAS_SELECT
;
175 deventry
.idev_deviceid
= al1
.ent16
;
176 deventry
.idev_src_deviceid
= al2
.ent16
;
181 deventry
.idev_type
= DEVENTRY_ALIAS_RANGE
;
182 deventry
.idev_deviceid
= al1
.ent16
;
183 deventry
.idev_src_deviceid
= al2
.ent16
;
188 deventry
.idev_type
= DEVENTRY_EXTENDED_SELECT
;
189 deventry
.idev_deviceid
= al1
.ent16
;
192 deventry
.idev_type
= DEVENTRY_EXTENDED_RANGE
;
193 deventry
.idev_deviceid
= al1
.ent16
;
196 deventry
.idev_type
= DEVENTRY_SPECIAL_DEVICE
;
197 ASSERT(al1
.ent16
== 0);
198 deventry
.idev_deviceid
= -1;
199 deventry
.idev_handle
= cp
[4];
200 deventry
.idev_variety
= cp
[7];
201 deventry
.idev_src_deviceid
= al2
.ent16
;
204 for (i
= 0; i
< 7; i
++) {
212 devp
= kmem_alloc(sizeof (ivhd_deventry_t
), KM_SLEEP
);
215 if (c
->ivhdc_first_deventry
== NULL
)
216 c
->ivhdc_first_deventry
= devp
;
218 c
->ivhdc_last_deventry
->idev_next
= devp
;
220 c
->ivhdc_last_deventry
= devp
;
222 devp
->idev_Lint1Pass
= AMD_IOMMU_REG_GET8(&datsetting8
,
223 AMD_IOMMU_ACPI_LINT1PASS
);
225 devp
->idev_Lint0Pass
= AMD_IOMMU_REG_GET8(&datsetting8
,
226 AMD_IOMMU_ACPI_LINT0PASS
);
228 devp
->idev_SysMgt
= AMD_IOMMU_REG_GET8(&datsetting8
,
229 AMD_IOMMU_ACPI_SYSMGT
);
231 ASSERT(AMD_IOMMU_REG_GET8(&datsetting8
,
232 AMD_IOMMU_ACPI_DATRSV
) == 0);
234 devp
->idev_NMIPass
= AMD_IOMMU_REG_GET8(&datsetting8
,
235 AMD_IOMMU_ACPI_NMIPASS
);
237 devp
->idev_ExtIntPass
= AMD_IOMMU_REG_GET8(&datsetting8
,
238 AMD_IOMMU_ACPI_EXTINTPASS
);
240 devp
->idev_INITPass
= AMD_IOMMU_REG_GET8(&datsetting8
,
241 AMD_IOMMU_ACPI_INITPASS
);
243 if (entry_type
!= 70 && entry_type
!= 71) {
248 for (i
= 0; i
< 4; i
++) {
249 al3
.ent8
[i
] = *((uint8_t *)&cp
[i
+4]);
252 devp
->idev_AtsDisabled
= AMD_IOMMU_REG_GET8(&al3
.ent32
,
253 AMD_IOMMU_ACPI_ATSDISABLED
);
255 ASSERT(AMD_IOMMU_REG_GET8(&al3
.ent32
, AMD_IOMMU_ACPI_EXTDATRSV
) == 0);
259 process_ivhd(amd_iommu_acpi_t
*acpi
, ivhd_t
*ivhdp
)
263 caddr_t ivhd_tot_end
;
266 ASSERT(ivhdp
->ivhd_type
== 0x10);
268 c
= kmem_zalloc(sizeof (ivhd_container_t
), KM_SLEEP
);
269 c
->ivhdc_ivhd
= kmem_alloc(sizeof (ivhd_t
), KM_SLEEP
);
270 *(c
->ivhdc_ivhd
) = *ivhdp
;
272 if (acpi
->acp_first_ivhdc
== NULL
)
273 acpi
->acp_first_ivhdc
= c
;
275 acpi
->acp_last_ivhdc
->ivhdc_next
= c
;
277 acpi
->acp_last_ivhdc
= c
;
279 ivhd_end
= (caddr_t
)ivhdp
+ sizeof (ivhd_t
);
280 ivhd_tot_end
= (caddr_t
)ivhdp
+ ivhdp
->ivhd_len
;
282 for (cp
= ivhd_end
; cp
< ivhd_tot_end
; cp
+= type_byte_size(cp
)) {
283 /* 16 byte and 32 byte size are currently reserved */
284 switch (type_byte_size(cp
)) {
286 process_4byte_deventry(c
, cp
);
289 process_8byte_deventry(c
, cp
);
296 cmn_err(CE_WARN
, "%s: unsupported length for device "
297 "entry in ACPI IVRS table's IVHD entry",
305 process_ivmd(amd_iommu_acpi_t
*acpi
, ivmd_t
*ivmdp
)
309 ASSERT(ivmdp
->ivmd_type
!= 0x10);
311 c
= kmem_zalloc(sizeof (ivmd_container_t
), KM_SLEEP
);
312 c
->ivmdc_ivmd
= kmem_alloc(sizeof (ivmd_t
), KM_SLEEP
);
313 *(c
->ivmdc_ivmd
) = *ivmdp
;
315 if (acpi
->acp_first_ivmdc
== NULL
)
316 acpi
->acp_first_ivmdc
= c
;
318 acpi
->acp_last_ivmdc
->ivmdc_next
= c
;
320 acpi
->acp_last_ivmdc
= c
;
324 amd_iommu_acpi_init(void)
331 amd_iommu_acpi_t
*acpi
;
332 align_ivhd_t al_vhd
= {0};
333 align_ivmd_t al_vmd
= {0};
335 if (AcpiGetTable(IVRS_SIG
, 1, (ACPI_TABLE_HEADER
**)&ivrsp
) != AE_OK
) {
336 cmn_err(CE_NOTE
, "!amd_iommu: No AMD IOMMU ACPI IVRS table");
337 return (DDI_FAILURE
);
341 * Reserved field must be 0
343 ASSERT(ivrsp
->ivrs_resv
== 0);
345 ASSERT(AMD_IOMMU_REG_GET32(&ivrsp
->ivrs_ivinfo
,
346 AMD_IOMMU_ACPI_IVINFO_RSV1
) == 0);
347 ASSERT(AMD_IOMMU_REG_GET32(&ivrsp
->ivrs_ivinfo
,
348 AMD_IOMMU_ACPI_IVINFO_RSV2
) == 0);
350 ivrsp_end
= (caddr_t
)ivrsp
+ sizeof (struct ivrs
);
351 table_end
= (caddr_t
)ivrsp
+ ivrsp
->ivrs_hdr
.Length
;
353 acpi
= kmem_zalloc(sizeof (amd_iommu_acpi_t
), KM_SLEEP
);
354 acpi
->acp_ivrs
= kmem_alloc(sizeof (ivrs_t
), KM_SLEEP
);
355 *(acpi
->acp_ivrs
) = *ivrsp
;
357 for (cp
= ivrsp_end
; cp
< table_end
; cp
+= (al_vhd
.ivhdp
)->ivhd_len
) {
359 if (al_vhd
.ivhdp
->ivhd_type
== 0x10)
360 process_ivhd(acpi
, al_vhd
.ivhdp
);
363 for (cp
= ivrsp_end
; cp
< table_end
; cp
+= (al_vmd
.ivmdp
)->ivmd_len
) {
365 type8
= al_vmd
.ivmdp
->ivmd_type
;
366 if (type8
== 0x20 || type8
== 0x21 || type8
== 0x22)
367 process_ivmd(acpi
, al_vmd
.ivmdp
);
370 if (create_acpi_hash(acpi
) != DDI_SUCCESS
) {
371 return (DDI_FAILURE
);
374 amd_iommu_acpi_table_fini(&acpi
);
376 ASSERT(acpi
== NULL
);
378 if (amd_iommu_debug
& AMD_IOMMU_DEBUG_ACPI
) {
383 return (DDI_SUCCESS
);
386 static ivhd_deventry_t
*
387 free_ivhd_deventry(ivhd_deventry_t
*devp
)
389 ivhd_deventry_t
*next
= devp
->idev_next
;
391 kmem_free(devp
, sizeof (ivhd_deventry_t
));
396 static ivhd_container_t
*
397 free_ivhd_container(ivhd_container_t
*ivhdcp
)
399 ivhd_container_t
*next
= ivhdcp
->ivhdc_next
;
400 ivhd_deventry_t
*devp
;
402 for (devp
= ivhdcp
->ivhdc_first_deventry
; devp
; ) {
403 devp
= free_ivhd_deventry(devp
);
406 kmem_free(ivhdcp
->ivhdc_ivhd
, sizeof (ivhd_t
));
407 kmem_free(ivhdcp
, sizeof (ivhd_container_t
));
412 static ivmd_container_t
*
413 free_ivmd_container(ivmd_container_t
*ivmdcp
)
415 ivmd_container_t
*next
= ivmdcp
->ivmdc_next
;
417 kmem_free(ivmdcp
->ivmdc_ivmd
, sizeof (ivmd_t
));
418 kmem_free(ivmdcp
, sizeof (ivmd_container_t
));
/*
 * Teardown hook for the parsed ACPI state.  Currently a no-op: the
 * intermediate table state is already freed at the end of
 * amd_iommu_acpi_init().
 */
void
amd_iommu_acpi_fini(void)
{
	/*
	 * TODO: Do we need to free the ACPI table for om GetFirmwareTable()
	 */
}
432 amd_iommu_acpi_table_fini(amd_iommu_acpi_t
**acpipp
)
434 amd_iommu_acpi_t
*acpi
= *acpipp
;
435 ivhd_container_t
*ivhdcp
;
436 ivmd_container_t
*ivmdcp
;
440 for (ivhdcp
= acpi
->acp_first_ivhdc
; ivhdcp
; ) {
441 ivhdcp
= free_ivhd_container(ivhdcp
);
443 for (ivmdcp
= acpi
->acp_first_ivmdc
; ivmdcp
; ) {
444 ivmdcp
= free_ivmd_container(ivmdcp
);
447 kmem_free(acpi
->acp_ivrs
, sizeof (struct ivrs
));
448 kmem_free(acpi
, sizeof (amd_iommu_acpi_t
));
454 deviceid_hashfn(uint16_t deviceid
)
456 return (deviceid
% AMD_IOMMU_ACPI_INFO_HASH_SZ
);
460 add_deventry_info(ivhd_t
*ivhdp
, ivhd_deventry_t
*deventry
,
461 amd_iommu_acpi_ivhd_t
**hash
)
463 static amd_iommu_acpi_ivhd_t
*last
;
464 amd_iommu_acpi_ivhd_t
*acpi_ivhdp
;
466 uint16_t uint16_info
;
469 if (deventry
->idev_type
== DEVENTRY_RANGE_END
) {
473 ASSERT(acpi_ivhdp
->ach_dev_type
== DEVENTRY_RANGE
||
474 acpi_ivhdp
->ach_dev_type
== DEVENTRY_ALIAS_RANGE
||
475 acpi_ivhdp
->ach_dev_type
== DEVENTRY_EXTENDED_RANGE
);
476 ASSERT(acpi_ivhdp
->ach_deviceid_end
== -1);
477 acpi_ivhdp
->ach_deviceid_end
= deventry
->idev_deviceid
;
478 /* TODO ASSERT data is 0 */
482 ASSERT(last
== NULL
);
483 acpi_ivhdp
= kmem_zalloc(sizeof (*acpi_ivhdp
), KM_SLEEP
);
485 uint8_flags
= ivhdp
->ivhd_flags
;
488 ASSERT(AMD_IOMMU_REG_GET8(&uint8_flags
,
489 AMD_IOMMU_ACPI_IVHD_FLAGS_RSV
) == 0);
492 acpi_ivhdp
->ach_IotlbSup
= AMD_IOMMU_REG_GET8(&uint8_flags
,
493 AMD_IOMMU_ACPI_IVHD_FLAGS_IOTLBSUP
);
494 acpi_ivhdp
->ach_Isoc
= AMD_IOMMU_REG_GET8(&uint8_flags
,
495 AMD_IOMMU_ACPI_IVHD_FLAGS_ISOC
);
496 acpi_ivhdp
->ach_ResPassPW
= AMD_IOMMU_REG_GET8(&uint8_flags
,
497 AMD_IOMMU_ACPI_IVHD_FLAGS_RESPASSPW
);
498 acpi_ivhdp
->ach_PassPW
= AMD_IOMMU_REG_GET8(&uint8_flags
,
499 AMD_IOMMU_ACPI_IVHD_FLAGS_PASSPW
);
500 acpi_ivhdp
->ach_HtTunEn
= AMD_IOMMU_REG_GET8(&uint8_flags
,
501 AMD_IOMMU_ACPI_IVHD_FLAGS_HTTUNEN
);
504 acpi_ivhdp
->ach_IOMMU_deviceid
= ivhdp
->ivhd_deviceid
;
505 acpi_ivhdp
->ach_IOMMU_cap_off
= ivhdp
->ivhd_cap_off
;
506 acpi_ivhdp
->ach_IOMMU_reg_base
= ivhdp
->ivhd_reg_base
;
507 acpi_ivhdp
->ach_IOMMU_pci_seg
= ivhdp
->ivhd_pci_seg
;
509 /* IVHD IOMMU info fields */
510 uint16_info
= ivhdp
->ivhd_iommu_info
;
513 ASSERT(AMD_IOMMU_REG_GET16(&uint16_info
,
514 AMD_IOMMU_ACPI_IOMMU_INFO_RSV1
) == 0);
517 acpi_ivhdp
->ach_IOMMU_UnitID
= AMD_IOMMU_REG_GET16(&uint16_info
,
518 AMD_IOMMU_ACPI_IOMMU_INFO_UNITID
);
519 ASSERT(AMD_IOMMU_REG_GET16(&uint16_info
,
520 AMD_IOMMU_ACPI_IOMMU_INFO_RSV2
) == 0);
521 acpi_ivhdp
->ach_IOMMU_MSInum
= AMD_IOMMU_REG_GET16(&uint16_info
,
522 AMD_IOMMU_ACPI_IOMMU_INFO_MSINUM
);
524 /* Initialize deviceids to -1 */
525 acpi_ivhdp
->ach_deviceid_start
= -1;
526 acpi_ivhdp
->ach_deviceid_end
= -1;
527 acpi_ivhdp
->ach_src_deviceid
= -1;
529 /* All range type entries are put on hash entry 0 */
530 switch (deventry
->idev_type
) {
532 acpi_ivhdp
->ach_deviceid_start
= 0;
533 acpi_ivhdp
->ach_deviceid_end
= (uint16_t)-1;
534 acpi_ivhdp
->ach_dev_type
= DEVENTRY_ALL
;
535 idx
= AMD_IOMMU_ACPI_INFO_HASH_SZ
;
537 case DEVENTRY_SELECT
:
538 acpi_ivhdp
->ach_deviceid_start
= deventry
->idev_deviceid
;
539 acpi_ivhdp
->ach_deviceid_end
= deventry
->idev_deviceid
;
540 acpi_ivhdp
->ach_dev_type
= DEVENTRY_SELECT
;
541 idx
= deviceid_hashfn(deventry
->idev_deviceid
);
544 acpi_ivhdp
->ach_deviceid_start
= deventry
->idev_deviceid
;
545 acpi_ivhdp
->ach_deviceid_end
= -1;
546 acpi_ivhdp
->ach_dev_type
= DEVENTRY_RANGE
;
547 idx
= AMD_IOMMU_ACPI_INFO_HASH_SZ
;
550 case DEVENTRY_RANGE_END
:
551 cmn_err(CE_PANIC
, "%s: Unexpected Range End Deventry",
555 case DEVENTRY_ALIAS_SELECT
:
556 acpi_ivhdp
->ach_deviceid_start
= deventry
->idev_deviceid
;
557 acpi_ivhdp
->ach_deviceid_end
= deventry
->idev_deviceid
;
558 acpi_ivhdp
->ach_src_deviceid
= deventry
->idev_src_deviceid
;
559 acpi_ivhdp
->ach_dev_type
= DEVENTRY_ALIAS_SELECT
;
560 idx
= deviceid_hashfn(deventry
->idev_deviceid
);
562 case DEVENTRY_ALIAS_RANGE
:
563 acpi_ivhdp
->ach_deviceid_start
= deventry
->idev_deviceid
;
564 acpi_ivhdp
->ach_deviceid_end
= -1;
565 acpi_ivhdp
->ach_src_deviceid
= deventry
->idev_src_deviceid
;
566 acpi_ivhdp
->ach_dev_type
= DEVENTRY_ALIAS_RANGE
;
567 idx
= AMD_IOMMU_ACPI_INFO_HASH_SZ
;
570 case DEVENTRY_EXTENDED_SELECT
:
571 acpi_ivhdp
->ach_deviceid_start
= deventry
->idev_deviceid
;
572 acpi_ivhdp
->ach_deviceid_end
= deventry
->idev_deviceid
;
573 acpi_ivhdp
->ach_dev_type
= DEVENTRY_EXTENDED_SELECT
;
574 idx
= deviceid_hashfn(deventry
->idev_deviceid
);
576 case DEVENTRY_EXTENDED_RANGE
:
577 acpi_ivhdp
->ach_deviceid_start
= deventry
->idev_deviceid
;
578 acpi_ivhdp
->ach_deviceid_end
= -1;
579 acpi_ivhdp
->ach_dev_type
= DEVENTRY_EXTENDED_RANGE
;
580 idx
= AMD_IOMMU_ACPI_INFO_HASH_SZ
;
583 case DEVENTRY_SPECIAL_DEVICE
:
584 acpi_ivhdp
->ach_deviceid_start
= -1;
585 acpi_ivhdp
->ach_deviceid_end
= -1;
586 acpi_ivhdp
->ach_src_deviceid
= deventry
->idev_src_deviceid
;
587 acpi_ivhdp
->ach_special_handle
= deventry
->idev_handle
;
588 acpi_ivhdp
->ach_special_variety
= deventry
->idev_variety
;
589 idx
= AMD_IOMMU_ACPI_INFO_HASH_SZ
;
592 cmn_err(CE_PANIC
, "%s: Unsupported deventry type",
597 acpi_ivhdp
->ach_Lint1Pass
= deventry
->idev_Lint1Pass
;
598 acpi_ivhdp
->ach_Lint0Pass
= deventry
->idev_Lint0Pass
;
599 acpi_ivhdp
->ach_SysMgt
= deventry
->idev_SysMgt
;
600 acpi_ivhdp
->ach_NMIPass
= deventry
->idev_NMIPass
;
601 acpi_ivhdp
->ach_ExtIntPass
= deventry
->idev_ExtIntPass
;
602 acpi_ivhdp
->ach_INITPass
= deventry
->idev_INITPass
;
606 if (acpi_ivhdp
->ach_dev_type
== DEVENTRY_EXTENDED_SELECT
||
607 acpi_ivhdp
->ach_dev_type
== DEVENTRY_EXTENDED_RANGE
) {
608 acpi_ivhdp
->ach_AtsDisabled
= deventry
->idev_AtsDisabled
;
612 * Now add it to the hash
614 ASSERT(hash
[idx
] != acpi_ivhdp
);
615 acpi_ivhdp
->ach_next
= hash
[idx
];
616 hash
[idx
] = acpi_ivhdp
;
620 * A device entry may be declared implicitly as a source device ID
621 * in an alias entry. This routine adds it to the hash
624 add_implicit_deventry(ivhd_container_t
*ivhdcp
, amd_iommu_acpi_ivhd_t
**hash
)
629 for (d
= ivhdcp
->ivhdc_first_deventry
; d
; d
= d
->idev_next
) {
631 if ((d
->idev_type
!= DEVENTRY_ALIAS_SELECT
) &&
632 (d
->idev_type
!= DEVENTRY_ALIAS_RANGE
))
635 deviceid
= d
->idev_src_deviceid
;
637 if (amd_iommu_lookup_ivhd(deviceid
) == NULL
) {
638 ivhd_deventry_t deventry
;
640 /* Fake a SELECT entry */
641 deventry
.idev_type
= DEVENTRY_SELECT
;
642 deventry
.idev_len
= 4;
643 deventry
.idev_deviceid
= deviceid
;
644 deventry
.idev_src_deviceid
= -1;
646 deventry
.idev_Lint1Pass
= d
->idev_Lint1Pass
;
647 deventry
.idev_Lint0Pass
= d
->idev_Lint0Pass
;
648 deventry
.idev_SysMgt
= d
->idev_SysMgt
;
649 deventry
.idev_NMIPass
= d
->idev_NMIPass
;
650 deventry
.idev_ExtIntPass
= d
->idev_ExtIntPass
;
651 deventry
.idev_INITPass
= d
->idev_INITPass
;
653 add_deventry_info(ivhdcp
->ivhdc_ivhd
, &deventry
, hash
);
655 if (amd_iommu_debug
& AMD_IOMMU_DEBUG_ACPI
) {
656 cmn_err(CE_NOTE
, "Added implicit IVHD entry "
657 "for: deviceid = %u", deviceid
);
664 add_ivhdc_info(ivhd_container_t
*ivhdcp
, amd_iommu_acpi_ivhd_t
**hash
)
666 ivhd_deventry_t
*deventry
;
667 ivhd_t
*ivhdp
= ivhdcp
->ivhdc_ivhd
;
669 for (deventry
= ivhdcp
->ivhdc_first_deventry
; deventry
;
670 deventry
= deventry
->idev_next
) {
671 add_deventry_info(ivhdp
, deventry
, hash
);
674 add_implicit_deventry(ivhdcp
, hash
);
679 add_ivhd_info(amd_iommu_acpi_t
*acpi
, amd_iommu_acpi_ivhd_t
**hash
)
681 ivhd_container_t
*ivhdcp
;
683 for (ivhdcp
= acpi
->acp_first_ivhdc
; ivhdcp
;
684 ivhdcp
= ivhdcp
->ivhdc_next
) {
685 add_ivhdc_info(ivhdcp
, hash
);
690 set_ivmd_info(ivmd_t
*ivmdp
, amd_iommu_acpi_ivmd_t
**hash
)
692 amd_iommu_acpi_ivmd_t
*acpi_ivmdp
;
696 uint8_flags
= ivmdp
->ivmd_flags
;
698 acpi_ivmdp
= kmem_zalloc(sizeof (*acpi_ivmdp
), KM_SLEEP
);
700 switch (ivmdp
->ivmd_type
) {
702 acpi_ivmdp
->acm_deviceid_start
= 0;
703 acpi_ivmdp
->acm_deviceid_end
= (uint16_t)-1;
704 acpi_ivmdp
->acm_dev_type
= IVMD_DEVICEID_ALL
;
705 idx
= AMD_IOMMU_ACPI_INFO_HASH_SZ
;
708 acpi_ivmdp
->acm_deviceid_start
= ivmdp
->ivmd_deviceid
;
709 acpi_ivmdp
->acm_deviceid_end
= ivmdp
->ivmd_deviceid
;
710 acpi_ivmdp
->acm_dev_type
= IVMD_DEVICEID_SELECT
;
711 idx
= deviceid_hashfn(ivmdp
->ivmd_deviceid
);
714 acpi_ivmdp
->acm_deviceid_start
= ivmdp
->ivmd_deviceid
;
715 acpi_ivmdp
->acm_deviceid_end
= ivmdp
->ivmd_auxdata
;
716 acpi_ivmdp
->acm_dev_type
= IVMD_DEVICEID_RANGE
;
717 idx
= AMD_IOMMU_ACPI_INFO_HASH_SZ
;
720 cmn_err(CE_PANIC
, "Unknown AMD IOMMU ACPI IVMD deviceid type: "
721 "%x", ivmdp
->ivmd_type
);
725 ASSERT(AMD_IOMMU_REG_GET8(&uint8_flags
,
726 AMD_IOMMU_ACPI_IVMD_RSV
) == 0);
728 acpi_ivmdp
->acm_ExclRange
= AMD_IOMMU_REG_GET8(&uint8_flags
,
729 AMD_IOMMU_ACPI_IVMD_EXCL_RANGE
);
730 acpi_ivmdp
->acm_IW
= AMD_IOMMU_REG_GET8(&uint8_flags
,
731 AMD_IOMMU_ACPI_IVMD_IW
);
732 acpi_ivmdp
->acm_IR
= AMD_IOMMU_REG_GET8(&uint8_flags
,
733 AMD_IOMMU_ACPI_IVMD_IR
);
734 acpi_ivmdp
->acm_Unity
= AMD_IOMMU_REG_GET8(&uint8_flags
,
735 AMD_IOMMU_ACPI_IVMD_UNITY
);
737 acpi_ivmdp
->acm_ivmd_phys_start
= ivmdp
->ivmd_phys_start
;
738 acpi_ivmdp
->acm_ivmd_phys_len
= ivmdp
->ivmd_phys_len
;
740 acpi_ivmdp
->acm_next
= hash
[idx
];
741 hash
[idx
] = acpi_ivmdp
;
745 add_ivmdc_info(ivmd_container_t
*ivmdcp
, amd_iommu_acpi_ivmd_t
**hash
)
747 set_ivmd_info(ivmdcp
->ivmdc_ivmd
, hash
);
751 add_ivmd_info(amd_iommu_acpi_t
*acpi
, amd_iommu_acpi_ivmd_t
**hash
)
753 ivmd_container_t
*ivmdcp
;
755 for (ivmdcp
= acpi
->acp_first_ivmdc
; ivmdcp
;
756 ivmdcp
= ivmdcp
->ivmdc_next
) {
757 add_ivmdc_info(ivmdcp
, hash
);
762 add_global_info(amd_iommu_acpi_t
*acpi
, amd_iommu_acpi_global_t
*global
)
764 uint32_t ivrs_ivinfo
= acpi
->acp_ivrs
->ivrs_ivinfo
;
766 global
->acg_HtAtsResv
=
767 AMD_IOMMU_REG_GET32(&ivrs_ivinfo
, AMD_IOMMU_ACPI_HT_ATSRSV
);
769 AMD_IOMMU_REG_GET32(&ivrs_ivinfo
, AMD_IOMMU_ACPI_VA_SIZE
);
771 AMD_IOMMU_REG_GET32(&ivrs_ivinfo
, AMD_IOMMU_ACPI_PA_SIZE
);
775 create_acpi_hash(amd_iommu_acpi_t
*acpi
)
777 /* Last hash entry is for deviceid ranges including "all" */
779 amd_iommu_acpi_global
= kmem_zalloc(sizeof (amd_iommu_acpi_global_t
),
782 amd_iommu_acpi_ivhd_hash
= kmem_zalloc(sizeof (amd_iommu_acpi_ivhd_t
*)
783 * (AMD_IOMMU_ACPI_INFO_HASH_SZ
+ 1), KM_SLEEP
);
785 amd_iommu_acpi_ivmd_hash
= kmem_zalloc(sizeof (amd_iommu_acpi_ivmd_t
*)
786 * (AMD_IOMMU_ACPI_INFO_HASH_SZ
+ 1), KM_SLEEP
);
788 add_global_info(acpi
, amd_iommu_acpi_global
);
790 add_ivhd_info(acpi
, amd_iommu_acpi_ivhd_hash
);
792 add_ivmd_info(acpi
, amd_iommu_acpi_ivmd_hash
);
794 return (DDI_SUCCESS
);
798 set_deventry(amd_iommu_t
*iommu
, int entry
, amd_iommu_acpi_ivhd_t
*hinfop
)
802 dentry
= (uint64_t *)(intptr_t)
803 &iommu
->aiomt_devtbl
[entry
* AMD_IOMMU_DEVTBL_ENTRY_SZ
];
805 AMD_IOMMU_REG_SET64(&(dentry
[1]), AMD_IOMMU_DEVTBL_SYSMGT
,
809 /* Initialize device table according to IVHD */
811 amd_iommu_acpi_init_devtbl(amd_iommu_t
*iommu
)
814 amd_iommu_acpi_ivhd_t
*hinfop
;
816 for (i
= 0; i
<= AMD_IOMMU_ACPI_INFO_HASH_SZ
; i
++) {
817 for (hinfop
= amd_iommu_acpi_ivhd_hash
[i
];
818 hinfop
; hinfop
= hinfop
->ach_next
) {
820 if (hinfop
->ach_IOMMU_deviceid
!= iommu
->aiomt_bdf
)
823 switch (hinfop
->ach_dev_type
) {
825 for (j
= 0; j
< AMD_IOMMU_MAX_DEVICEID
; j
++)
826 set_deventry(iommu
, j
, hinfop
);
828 case DEVENTRY_SELECT
:
829 case DEVENTRY_EXTENDED_SELECT
:
831 hinfop
->ach_deviceid_start
,
835 case DEVENTRY_EXTENDED_RANGE
:
836 for (j
= hinfop
->ach_deviceid_start
;
837 j
<= hinfop
->ach_deviceid_end
;
839 set_deventry(iommu
, j
, hinfop
);
841 case DEVENTRY_ALIAS_SELECT
:
842 case DEVENTRY_ALIAS_RANGE
:
843 case DEVENTRY_SPECIAL_DEVICE
:
845 hinfop
->ach_src_deviceid
,
850 "%s: Unknown deventry type",
852 return (DDI_FAILURE
);
857 return (DDI_SUCCESS
);
860 amd_iommu_acpi_global_t
*
861 amd_iommu_lookup_acpi_global(void)
863 ASSERT(amd_iommu_acpi_global
);
865 return (amd_iommu_acpi_global
);
868 amd_iommu_acpi_ivhd_t
*
869 amd_iommu_lookup_all_ivhd(void)
871 amd_iommu_acpi_ivhd_t
*hinfop
;
873 hinfop
= amd_iommu_acpi_ivhd_hash
[AMD_IOMMU_ACPI_INFO_HASH_SZ
];
874 for (; hinfop
; hinfop
= hinfop
->ach_next
) {
875 if (hinfop
->ach_deviceid_start
== 0 &&
876 hinfop
->ach_deviceid_end
== (uint16_t)-1) {
884 amd_iommu_acpi_ivmd_t
*
885 amd_iommu_lookup_all_ivmd(void)
887 amd_iommu_acpi_ivmd_t
*minfop
;
889 minfop
= amd_iommu_acpi_ivmd_hash
[AMD_IOMMU_ACPI_INFO_HASH_SZ
];
890 for (; minfop
; minfop
= minfop
->acm_next
) {
891 if (minfop
->acm_deviceid_start
== 0 &&
892 minfop
->acm_deviceid_end
== (uint16_t)-1) {
900 amd_iommu_acpi_ivhd_t
*
901 amd_iommu_lookup_any_ivhd(amd_iommu_t
*iommu
)
904 amd_iommu_acpi_ivhd_t
*hinfop
;
906 for (i
= AMD_IOMMU_ACPI_INFO_HASH_SZ
; i
>= 0; i
--) {
907 hinfop
= amd_iommu_acpi_ivhd_hash
[i
];
908 if ((hinfop
!= NULL
) &&
909 hinfop
->ach_IOMMU_deviceid
== iommu
->aiomt_bdf
)
916 amd_iommu_acpi_ivmd_t
*
917 amd_iommu_lookup_any_ivmd(void)
920 amd_iommu_acpi_ivmd_t
*minfop
;
922 for (i
= AMD_IOMMU_ACPI_INFO_HASH_SZ
; i
>= 0; i
--) {
923 if ((minfop
= amd_iommu_acpi_ivmd_hash
[i
]) != NULL
)
931 dump_acpi_aliases(void)
933 amd_iommu_acpi_ivhd_t
*hinfop
;
936 for (idx
= 0; idx
<= AMD_IOMMU_ACPI_INFO_HASH_SZ
; idx
++) {
937 hinfop
= amd_iommu_acpi_ivhd_hash
[idx
];
938 for (; hinfop
; hinfop
= hinfop
->ach_next
) {
939 cmn_err(CE_NOTE
, "start=%d, end=%d, src_bdf=%d",
940 hinfop
->ach_deviceid_start
,
941 hinfop
->ach_deviceid_end
,
942 hinfop
->ach_src_deviceid
);
947 amd_iommu_acpi_ivhd_t
*
948 amd_iommu_lookup_ivhd(int32_t deviceid
)
950 amd_iommu_acpi_ivhd_t
*hinfop
;
953 if (amd_iommu_debug
& AMD_IOMMU_DEBUG_ACPI
) {
954 cmn_err(CE_NOTE
, "Attempting to get ACPI IVHD info "
955 "for deviceid: %d", deviceid
);
958 ASSERT(amd_iommu_acpi_ivhd_hash
);
960 /* check if special device */
961 if (deviceid
== -1) {
962 hinfop
= amd_iommu_acpi_ivhd_hash
[AMD_IOMMU_ACPI_INFO_HASH_SZ
];
963 for (; hinfop
; hinfop
= hinfop
->ach_next
) {
964 if (hinfop
->ach_deviceid_start
== -1 &&
965 hinfop
->ach_deviceid_end
== -1) {
972 /* First search for an exact match */
974 idx
= deviceid_hashfn(deviceid
);
978 hinfop
= amd_iommu_acpi_ivhd_hash
[idx
];
980 for (; hinfop
; hinfop
= hinfop
->ach_next
) {
981 if (deviceid
< hinfop
->ach_deviceid_start
||
982 deviceid
> hinfop
->ach_deviceid_end
)
985 if (amd_iommu_debug
& AMD_IOMMU_DEBUG_ACPI
) {
986 cmn_err(CE_NOTE
, "Found ACPI IVHD match: %p, "
987 "actual deviceid = %u, start = %u, end = %u",
988 (void *)hinfop
, deviceid
,
989 hinfop
->ach_deviceid_start
,
990 hinfop
->ach_deviceid_end
);
995 if (idx
!= AMD_IOMMU_ACPI_INFO_HASH_SZ
) {
996 idx
= AMD_IOMMU_ACPI_INFO_HASH_SZ
;
1001 if (amd_iommu_debug
& AMD_IOMMU_DEBUG_ACPI
) {
1002 cmn_err(CE_NOTE
, "%u: %s ACPI IVHD %p", deviceid
,
1003 hinfop
? "GOT" : "Did NOT get", (void *)hinfop
);
1009 amd_iommu_acpi_ivmd_t
*
1010 amd_iommu_lookup_ivmd(int32_t deviceid
)
1012 amd_iommu_acpi_ivmd_t
*minfop
;
1015 if (amd_iommu_debug
& AMD_IOMMU_DEBUG_ACPI
) {
1016 cmn_err(CE_NOTE
, "Attempting to get ACPI IVMD info "
1017 "for deviceid: %u", deviceid
);
1020 ASSERT(amd_iommu_acpi_ivmd_hash
);
1022 /* First search for an exact match */
1024 idx
= deviceid_hashfn(deviceid
);
1027 minfop
= amd_iommu_acpi_ivmd_hash
[idx
];
1029 for (; minfop
; minfop
= minfop
->acm_next
) {
1030 if (deviceid
< minfop
->acm_deviceid_start
&&
1031 deviceid
> minfop
->acm_deviceid_end
)
1034 if (amd_iommu_debug
& AMD_IOMMU_DEBUG_ACPI
) {
1035 cmn_err(CE_NOTE
, "Found ACPI IVMD match: %p, "
1036 "actual deviceid = %u, start = %u, end = %u",
1037 (void *)minfop
, deviceid
,
1038 minfop
->acm_deviceid_start
,
1039 minfop
->acm_deviceid_end
);
1045 if (idx
!= AMD_IOMMU_ACPI_INFO_HASH_SZ
) {
1046 idx
= AMD_IOMMU_ACPI_INFO_HASH_SZ
;
1051 if (amd_iommu_debug
& AMD_IOMMU_DEBUG_ACPI
) {
1052 cmn_err(CE_NOTE
, "%u: %s ACPI IVMD info %p", deviceid
,
1053 minfop
? "GOT" : "Did NOT get", (void *)minfop
);