9271 uts/i86pc: this statement may fall through
[unleashed.git] / usr / src / uts / i86pc / io / amd_iommu / amd_iommu_acpi.c
blob825c661c2dfc7f237dab47d1c0bf3457cd61427a
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
26 #include "amd_iommu_acpi.h"
27 #include "amd_iommu_impl.h"
/* Forward declarations for file-local helpers */
static int create_acpi_hash(amd_iommu_acpi_t *acpi);
static void amd_iommu_acpi_table_fini(amd_iommu_acpi_t **acpipp);

static void dump_acpi_aliases(void);

/*
 * Globals
 *
 * The parsed IVRS information: one global (IVRS-level) record, plus
 * two hashes keyed by deviceid holding the IVHD and IVMD entries.
 */
static amd_iommu_acpi_global_t *amd_iommu_acpi_global;
static amd_iommu_acpi_ivhd_t **amd_iommu_acpi_ivhd_hash;
static amd_iommu_acpi_ivmd_t **amd_iommu_acpi_ivmd_hash;
/*
 * Return the size in bytes of the IVHD device entry at 'cp', decoded
 * from the 2-bit length field of the entry's type byte:
 * 0 -> 4, 1 -> 8, 2 -> 16, 3 -> 32.
 */
static int
type_byte_size(char *cp)
{
	uint8_t type8 = *((uint8_t *)cp);
	uint8_t len_bits;

	len_bits = AMD_IOMMU_REG_GET8(&type8, AMD_IOMMU_ACPI_DEVENTRY_LEN);

	switch (len_bits) {
	case 0:
		return (4);
	case 1:
		return (8);
	case 2:
		return (16);
	case 3:
		return (32);
	default:
		/*
		 * len_bits is extracted from a 2-bit field, so this
		 * should be unreachable; warn just in case.
		 */
		cmn_err(CE_WARN, "%s: Invalid deventry len: %d",
		    amd_iommu_modname, len_bits);
		return (len_bits);
	}
	/*NOTREACHED*/
}
/*
 * Parse one 4-byte IVHD device entry at 'cp' and append the decoded
 * entry to container 'c'. Type 0 (padding) entries and unknown entry
 * types are silently skipped.
 */
static void
process_4byte_deventry(ivhd_container_t *c, char *cp)
{
	int entry_type = *((uint8_t *)cp);
	ivhd_deventry_t deventry = {0};
	ivhd_deventry_t *devp;
	uint8_t datsetting8;
	align_16_t al = {0};
	int i;

	/* 4 byte entry */
	deventry.idev_len = 4;
	deventry.idev_deviceid = -1;
	deventry.idev_src_deviceid = -1;

	/* bytes 1-2 hold the (possibly unaligned) deviceid */
	for (i = 0; i < 2; i++) {
		al.ent8[i] = *((uint8_t *)&cp[i + 1]);
	}

	switch (entry_type) {
	case 1:
		deventry.idev_type = DEVENTRY_ALL;
		break;
	case 2:
		deventry.idev_type = DEVENTRY_SELECT;
		deventry.idev_deviceid = al.ent16;
		break;
	case 3:
		deventry.idev_type = DEVENTRY_RANGE;
		deventry.idev_deviceid = al.ent16;
		break;
	case 4:
		deventry.idev_type = DEVENTRY_RANGE_END;
		deventry.idev_deviceid = al.ent16;
		ASSERT(cp[3] == 0);
		break;
	case 0:
		ASSERT(al.ent16 == 0);
		ASSERT(cp[3] == 0);
		/* padding entry: deliberately ignored */
		/*FALLTHROUGH*/
	default:
		return;
	}

	devp = kmem_alloc(sizeof (ivhd_deventry_t), KM_SLEEP);
	*devp = deventry;

	/* append to the container's entry list */
	if (c->ivhdc_first_deventry == NULL)
		c->ivhdc_first_deventry = devp;
	else
		c->ivhdc_last_deventry->idev_next = devp;

	c->ivhdc_last_deventry = devp;

	/* range-end entries carry no data/settings byte */
	if (entry_type == 4)
		return;

	/* byte 3 carries the interrupt pass-through data/settings */
	datsetting8 = (*((uint8_t *)&cp[3]));

	devp->idev_Lint1Pass = AMD_IOMMU_REG_GET8(&datsetting8,
	    AMD_IOMMU_ACPI_LINT1PASS);

	devp->idev_Lint0Pass = AMD_IOMMU_REG_GET8(&datsetting8,
	    AMD_IOMMU_ACPI_LINT0PASS);

	devp->idev_SysMgt = AMD_IOMMU_REG_GET8(&datsetting8,
	    AMD_IOMMU_ACPI_SYSMGT);

	ASSERT(AMD_IOMMU_REG_GET8(&datsetting8,
	    AMD_IOMMU_ACPI_DATRSV) == 0);

	devp->idev_NMIPass = AMD_IOMMU_REG_GET8(&datsetting8,
	    AMD_IOMMU_ACPI_NMIPASS);

	devp->idev_ExtIntPass = AMD_IOMMU_REG_GET8(&datsetting8,
	    AMD_IOMMU_ACPI_EXTINTPASS);

	devp->idev_INITPass = AMD_IOMMU_REG_GET8(&datsetting8,
	    AMD_IOMMU_ACPI_INITPASS);
}
148 static void
149 process_8byte_deventry(ivhd_container_t *c, char *cp)
151 uint8_t datsetting8;
152 int entry_type = (uint8_t)*cp;
153 ivhd_deventry_t deventry = {0};
154 ivhd_deventry_t *devp;
155 align_16_t al1 = {0};
156 align_16_t al2 = {0};
157 align_32_t al3 = {0};
158 int i;
160 /* Length is 8 bytes */
161 deventry.idev_len = 8;
162 deventry.idev_deviceid = -1;
163 deventry.idev_src_deviceid = -1;
165 for (i = 0; i < 2; i++) {
166 al1.ent8[i] = *((uint8_t *)&cp[i+1]);
167 al2.ent8[i] = *((uint8_t *)&cp[i+5]);
170 datsetting8 = *((uint8_t *)&cp[3]);
172 switch (entry_type) {
173 case 66:
174 deventry.idev_type = DEVENTRY_ALIAS_SELECT;
175 deventry.idev_deviceid = al1.ent16;
176 deventry.idev_src_deviceid = al2.ent16;
177 ASSERT(cp[4] == 0);
178 ASSERT(cp[7] == 0);
179 break;
180 case 67:
181 deventry.idev_type = DEVENTRY_ALIAS_RANGE;
182 deventry.idev_deviceid = al1.ent16;
183 deventry.idev_src_deviceid = al2.ent16;
184 ASSERT(cp[4] == 0);
185 ASSERT(cp[7] == 0);
186 break;
187 case 70:
188 deventry.idev_type = DEVENTRY_EXTENDED_SELECT;
189 deventry.idev_deviceid = al1.ent16;
190 break;
191 case 71:
192 deventry.idev_type = DEVENTRY_EXTENDED_RANGE;
193 deventry.idev_deviceid = al1.ent16;
194 break;
195 case 72:
196 deventry.idev_type = DEVENTRY_SPECIAL_DEVICE;
197 ASSERT(al1.ent16 == 0);
198 deventry.idev_deviceid = -1;
199 deventry.idev_handle = cp[4];
200 deventry.idev_variety = cp[7];
201 deventry.idev_src_deviceid = al2.ent16;
202 default:
203 #ifdef BROKEN_ASSERT
204 for (i = 0; i < 7; i++) {
205 ASSERT(cp[i] == 0);
207 #endif
208 return;
212 devp = kmem_alloc(sizeof (ivhd_deventry_t), KM_SLEEP);
213 *devp = deventry;
215 if (c->ivhdc_first_deventry == NULL)
216 c->ivhdc_first_deventry = devp;
217 else
218 c->ivhdc_last_deventry->idev_next = devp;
220 c->ivhdc_last_deventry = devp;
222 devp->idev_Lint1Pass = AMD_IOMMU_REG_GET8(&datsetting8,
223 AMD_IOMMU_ACPI_LINT1PASS);
225 devp->idev_Lint0Pass = AMD_IOMMU_REG_GET8(&datsetting8,
226 AMD_IOMMU_ACPI_LINT0PASS);
228 devp->idev_SysMgt = AMD_IOMMU_REG_GET8(&datsetting8,
229 AMD_IOMMU_ACPI_SYSMGT);
231 ASSERT(AMD_IOMMU_REG_GET8(&datsetting8,
232 AMD_IOMMU_ACPI_DATRSV) == 0);
234 devp->idev_NMIPass = AMD_IOMMU_REG_GET8(&datsetting8,
235 AMD_IOMMU_ACPI_NMIPASS);
237 devp->idev_ExtIntPass = AMD_IOMMU_REG_GET8(&datsetting8,
238 AMD_IOMMU_ACPI_EXTINTPASS);
240 devp->idev_INITPass = AMD_IOMMU_REG_GET8(&datsetting8,
241 AMD_IOMMU_ACPI_INITPASS);
243 if (entry_type != 70 && entry_type != 71) {
244 return;
247 /* Type 70 and 71 */
248 for (i = 0; i < 4; i++) {
249 al3.ent8[i] = *((uint8_t *)&cp[i+4]);
252 devp->idev_AtsDisabled = AMD_IOMMU_REG_GET8(&al3.ent32,
253 AMD_IOMMU_ACPI_ATSDISABLED);
255 ASSERT(AMD_IOMMU_REG_GET8(&al3.ent32, AMD_IOMMU_ACPI_EXTDATRSV) == 0);
258 static void
259 process_ivhd(amd_iommu_acpi_t *acpi, ivhd_t *ivhdp)
261 ivhd_container_t *c;
262 caddr_t ivhd_end;
263 caddr_t ivhd_tot_end;
264 caddr_t cp;
266 ASSERT(ivhdp->ivhd_type == 0x10);
268 c = kmem_zalloc(sizeof (ivhd_container_t), KM_SLEEP);
269 c->ivhdc_ivhd = kmem_alloc(sizeof (ivhd_t), KM_SLEEP);
270 *(c->ivhdc_ivhd) = *ivhdp;
272 if (acpi->acp_first_ivhdc == NULL)
273 acpi->acp_first_ivhdc = c;
274 else
275 acpi->acp_last_ivhdc->ivhdc_next = c;
277 acpi->acp_last_ivhdc = c;
279 ivhd_end = (caddr_t)ivhdp + sizeof (ivhd_t);
280 ivhd_tot_end = (caddr_t)ivhdp + ivhdp->ivhd_len;
282 for (cp = ivhd_end; cp < ivhd_tot_end; cp += type_byte_size(cp)) {
283 /* 16 byte and 32 byte size are currently reserved */
284 switch (type_byte_size(cp)) {
285 case 4:
286 process_4byte_deventry(c, cp);
287 break;
288 case 8:
289 process_8byte_deventry(c, cp);
290 break;
291 case 16:
292 case 32:
293 /* Reserved */
294 break;
295 default:
296 cmn_err(CE_WARN, "%s: unsupported length for device "
297 "entry in ACPI IVRS table's IVHD entry",
298 amd_iommu_modname);
299 break;
304 static void
305 process_ivmd(amd_iommu_acpi_t *acpi, ivmd_t *ivmdp)
307 ivmd_container_t *c;
309 ASSERT(ivmdp->ivmd_type != 0x10);
311 c = kmem_zalloc(sizeof (ivmd_container_t), KM_SLEEP);
312 c->ivmdc_ivmd = kmem_alloc(sizeof (ivmd_t), KM_SLEEP);
313 *(c->ivmdc_ivmd) = *ivmdp;
315 if (acpi->acp_first_ivmdc == NULL)
316 acpi->acp_first_ivmdc = c;
317 else
318 acpi->acp_last_ivmdc->ivmdc_next = c;
320 acpi->acp_last_ivmdc = c;
/*
 * Parse the ACPI IVRS table: collect the IVHD (hardware definition)
 * and IVMD (memory definition) sub-tables into containers, build the
 * lookup hashes from them, then free the containers.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
amd_iommu_acpi_init(void)
{
	ivrs_t *ivrsp;
	caddr_t ivrsp_end;
	caddr_t table_end;
	caddr_t cp;
	uint8_t type8;
	amd_iommu_acpi_t *acpi;
	align_ivhd_t al_vhd = {0};
	align_ivmd_t al_vmd = {0};

	if (AcpiGetTable(IVRS_SIG, 1, (ACPI_TABLE_HEADER **)&ivrsp) != AE_OK) {
		cmn_err(CE_NOTE, "!amd_iommu: No AMD IOMMU ACPI IVRS table");
		return (DDI_FAILURE);
	}

	/*
	 * Reserved field must be 0
	 */
	ASSERT(ivrsp->ivrs_resv == 0);

	ASSERT(AMD_IOMMU_REG_GET32(&ivrsp->ivrs_ivinfo,
	    AMD_IOMMU_ACPI_IVINFO_RSV1) == 0);
	ASSERT(AMD_IOMMU_REG_GET32(&ivrsp->ivrs_ivinfo,
	    AMD_IOMMU_ACPI_IVINFO_RSV2) == 0);

	ivrsp_end = (caddr_t)ivrsp + sizeof (struct ivrs);
	table_end = (caddr_t)ivrsp + ivrsp->ivrs_hdr.Length;

	acpi = kmem_zalloc(sizeof (amd_iommu_acpi_t), KM_SLEEP);
	acpi->acp_ivrs = kmem_alloc(sizeof (ivrs_t), KM_SLEEP);
	*(acpi->acp_ivrs) = *ivrsp;

	/* Pass 1: collect the IVHD sub-tables (type 0x10) */
	for (cp = ivrsp_end; cp < table_end; cp += (al_vhd.ivhdp)->ivhd_len) {
		al_vhd.cp = cp;
		if (al_vhd.ivhdp->ivhd_type == 0x10)
			process_ivhd(acpi, al_vhd.ivhdp);
	}

	/* Pass 2: collect the IVMD sub-tables (types 0x20-0x22) */
	for (cp = ivrsp_end; cp < table_end; cp += (al_vmd.ivmdp)->ivmd_len) {
		al_vmd.cp = cp;
		type8 = al_vmd.ivmdp->ivmd_type;
		if (type8 == 0x20 || type8 == 0x21 || type8 == 0x22)
			process_ivmd(acpi, al_vmd.ivmdp);
	}

	if (create_acpi_hash(acpi) != DDI_SUCCESS) {
		/*
		 * NOTE(review): 'acpi' and its containers appear to be
		 * leaked on this path (no table_fini before returning);
		 * create_acpi_hash currently never fails — confirm.
		 */
		return (DDI_FAILURE);
	}

	/* containers were only needed to build the hashes; free them */
	amd_iommu_acpi_table_fini(&acpi);

	ASSERT(acpi == NULL);

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_ACPI) {
		dump_acpi_aliases();
		debug_enter("dump");
	}

	return (DDI_SUCCESS);
}
386 static ivhd_deventry_t *
387 free_ivhd_deventry(ivhd_deventry_t *devp)
389 ivhd_deventry_t *next = devp->idev_next;
391 kmem_free(devp, sizeof (ivhd_deventry_t));
393 return (next);
396 static ivhd_container_t *
397 free_ivhd_container(ivhd_container_t *ivhdcp)
399 ivhd_container_t *next = ivhdcp->ivhdc_next;
400 ivhd_deventry_t *devp;
402 for (devp = ivhdcp->ivhdc_first_deventry; devp; ) {
403 devp = free_ivhd_deventry(devp);
406 kmem_free(ivhdcp->ivhdc_ivhd, sizeof (ivhd_t));
407 kmem_free(ivhdcp, sizeof (ivhd_container_t));
409 return (next);
412 static ivmd_container_t *
413 free_ivmd_container(ivmd_container_t *ivmdcp)
415 ivmd_container_t *next = ivmdcp->ivmdc_next;
417 kmem_free(ivmdcp->ivmdc_ivmd, sizeof (ivmd_t));
418 kmem_free(ivmdcp, sizeof (ivmd_container_t));
420 return (next);
/*
 * Module teardown hook. Currently empty: the hashes and global info
 * built by create_acpi_hash() persist for the life of the module.
 */
void
amd_iommu_acpi_fini(void)
{
}
/*
 * TODO: Do we need to free the ACPI table from GetFirmwareTable()?
 */
/*
 * Free the parsed table representation: every IVHD and IVMD container,
 * the cached IVRS copy, and the amd_iommu_acpi_t itself. Clears the
 * caller's pointer on return.
 */
static void
amd_iommu_acpi_table_fini(amd_iommu_acpi_t **acpipp)
{
	amd_iommu_acpi_t *acpi = *acpipp;
	ivhd_container_t *ivhdcp;
	ivmd_container_t *ivmdcp;

	ASSERT(acpi);

	for (ivhdcp = acpi->acp_first_ivhdc; ivhdcp; ) {
		ivhdcp = free_ivhd_container(ivhdcp);
	}

	for (ivmdcp = acpi->acp_first_ivmdc; ivmdcp; ) {
		ivmdcp = free_ivmd_container(ivmdcp);
	}

	kmem_free(acpi->acp_ivrs, sizeof (struct ivrs));
	kmem_free(acpi, sizeof (amd_iommu_acpi_t));

	*acpipp = NULL;
}
/*
 * Hash a deviceid into one of the AMD_IOMMU_ACPI_INFO_HASH_SZ regular
 * buckets; the extra bucket one past the end is reserved for range
 * entries (including "all") and is not produced by this function.
 */
static uint16_t
deviceid_hashfn(uint16_t deviceid)
{
	return (deviceid % AMD_IOMMU_ACPI_INFO_HASH_SZ);
}
/*
 * Convert one decoded device entry plus its parent IVHD's fields into
 * an amd_iommu_acpi_ivhd_t record and insert it into 'hash'.
 *
 * Range entries arrive as a pair: the opening entry is remembered in
 * the function-local static 'last' with ach_deviceid_end == -1, and
 * the following DEVENTRY_RANGE_END entry completes it. Callers must
 * therefore present entries in table order (single-threaded).
 */
static void
add_deventry_info(ivhd_t *ivhdp, ivhd_deventry_t *deventry,
    amd_iommu_acpi_ivhd_t **hash)
{
	/* open range entry awaiting its RANGE_END partner */
	static amd_iommu_acpi_ivhd_t *last;
	amd_iommu_acpi_ivhd_t *acpi_ivhdp;
	uint8_t uint8_flags;
	uint16_t uint16_info;
	uint16_t idx;

	if (deventry->idev_type == DEVENTRY_RANGE_END) {
		/* close the pending range entry; it is already hashed */
		ASSERT(last);
		acpi_ivhdp = last;
		last = NULL;
		ASSERT(acpi_ivhdp->ach_dev_type == DEVENTRY_RANGE ||
		    acpi_ivhdp->ach_dev_type == DEVENTRY_ALIAS_RANGE ||
		    acpi_ivhdp->ach_dev_type == DEVENTRY_EXTENDED_RANGE);
		ASSERT(acpi_ivhdp->ach_deviceid_end == -1);
		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
		/* TODO ASSERT data is 0 */
		return;
	}

	ASSERT(last == NULL);
	acpi_ivhdp = kmem_zalloc(sizeof (*acpi_ivhdp), KM_SLEEP);

	/* IVHD flag bits */
	uint8_flags = ivhdp->ivhd_flags;

#ifdef BROKEN_ASSERT
	ASSERT(AMD_IOMMU_REG_GET8(&uint8_flags,
	    AMD_IOMMU_ACPI_IVHD_FLAGS_RSV) == 0);
#endif

	acpi_ivhdp->ach_IotlbSup = AMD_IOMMU_REG_GET8(&uint8_flags,
	    AMD_IOMMU_ACPI_IVHD_FLAGS_IOTLBSUP);
	acpi_ivhdp->ach_Isoc = AMD_IOMMU_REG_GET8(&uint8_flags,
	    AMD_IOMMU_ACPI_IVHD_FLAGS_ISOC);
	acpi_ivhdp->ach_ResPassPW = AMD_IOMMU_REG_GET8(&uint8_flags,
	    AMD_IOMMU_ACPI_IVHD_FLAGS_RESPASSPW);
	acpi_ivhdp->ach_PassPW = AMD_IOMMU_REG_GET8(&uint8_flags,
	    AMD_IOMMU_ACPI_IVHD_FLAGS_PASSPW);
	acpi_ivhdp->ach_HtTunEn = AMD_IOMMU_REG_GET8(&uint8_flags,
	    AMD_IOMMU_ACPI_IVHD_FLAGS_HTTUNEN);

	/* IVHD fields */
	acpi_ivhdp->ach_IOMMU_deviceid = ivhdp->ivhd_deviceid;
	acpi_ivhdp->ach_IOMMU_cap_off = ivhdp->ivhd_cap_off;
	acpi_ivhdp->ach_IOMMU_reg_base = ivhdp->ivhd_reg_base;
	acpi_ivhdp->ach_IOMMU_pci_seg = ivhdp->ivhd_pci_seg;

	/* IVHD IOMMU info fields */
	uint16_info = ivhdp->ivhd_iommu_info;

#ifdef BROKEN_ASSERT
	ASSERT(AMD_IOMMU_REG_GET16(&uint16_info,
	    AMD_IOMMU_ACPI_IOMMU_INFO_RSV1) == 0);
#endif

	acpi_ivhdp->ach_IOMMU_UnitID = AMD_IOMMU_REG_GET16(&uint16_info,
	    AMD_IOMMU_ACPI_IOMMU_INFO_UNITID);
	ASSERT(AMD_IOMMU_REG_GET16(&uint16_info,
	    AMD_IOMMU_ACPI_IOMMU_INFO_RSV2) == 0);
	acpi_ivhdp->ach_IOMMU_MSInum = AMD_IOMMU_REG_GET16(&uint16_info,
	    AMD_IOMMU_ACPI_IOMMU_INFO_MSINUM);

	/* Initialize deviceids to -1 */
	acpi_ivhdp->ach_deviceid_start = -1;
	acpi_ivhdp->ach_deviceid_end = -1;
	acpi_ivhdp->ach_src_deviceid = -1;

	/* All range type entries are put on hash entry 0 */
	switch (deventry->idev_type) {
	case DEVENTRY_ALL:
		acpi_ivhdp->ach_deviceid_start = 0;
		acpi_ivhdp->ach_deviceid_end = (uint16_t)-1;
		acpi_ivhdp->ach_dev_type = DEVENTRY_ALL;
		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
		break;
	case DEVENTRY_SELECT:
		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
		acpi_ivhdp->ach_dev_type = DEVENTRY_SELECT;
		idx = deviceid_hashfn(deventry->idev_deviceid);
		break;
	case DEVENTRY_RANGE:
		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
		acpi_ivhdp->ach_deviceid_end = -1;
		acpi_ivhdp->ach_dev_type = DEVENTRY_RANGE;
		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
		last = acpi_ivhdp;
		break;
	case DEVENTRY_RANGE_END:
		cmn_err(CE_PANIC, "%s: Unexpected Range End Deventry",
		    amd_iommu_modname);
		/*NOTREACHED*/
		break;
	case DEVENTRY_ALIAS_SELECT:
		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
		acpi_ivhdp->ach_src_deviceid = deventry->idev_src_deviceid;
		acpi_ivhdp->ach_dev_type = DEVENTRY_ALIAS_SELECT;
		idx = deviceid_hashfn(deventry->idev_deviceid);
		break;
	case DEVENTRY_ALIAS_RANGE:
		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
		acpi_ivhdp->ach_deviceid_end = -1;
		acpi_ivhdp->ach_src_deviceid = deventry->idev_src_deviceid;
		acpi_ivhdp->ach_dev_type = DEVENTRY_ALIAS_RANGE;
		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
		last = acpi_ivhdp;
		break;
	case DEVENTRY_EXTENDED_SELECT:
		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
		acpi_ivhdp->ach_dev_type = DEVENTRY_EXTENDED_SELECT;
		idx = deviceid_hashfn(deventry->idev_deviceid);
		break;
	case DEVENTRY_EXTENDED_RANGE:
		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
		acpi_ivhdp->ach_deviceid_end = -1;
		acpi_ivhdp->ach_dev_type = DEVENTRY_EXTENDED_RANGE;
		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
		last = acpi_ivhdp;
		break;
	case DEVENTRY_SPECIAL_DEVICE:
		/*
		 * NOTE(review): ach_dev_type is not set here, so it
		 * keeps the zero from kmem_zalloc() — confirm that is
		 * intended (amd_iommu_acpi_init_devtbl switches on it).
		 */
		acpi_ivhdp->ach_deviceid_start = -1;
		acpi_ivhdp->ach_deviceid_end = -1;
		acpi_ivhdp->ach_src_deviceid = deventry->idev_src_deviceid;
		acpi_ivhdp->ach_special_handle = deventry->idev_handle;
		acpi_ivhdp->ach_special_variety = deventry->idev_variety;
		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
		break;
	default:
		cmn_err(CE_PANIC, "%s: Unsupported deventry type",
		    amd_iommu_modname);
		/* FALLTHROUGH */
	}

	/* per-entry interrupt pass-through settings */
	acpi_ivhdp->ach_Lint1Pass = deventry->idev_Lint1Pass;
	acpi_ivhdp->ach_Lint0Pass = deventry->idev_Lint0Pass;
	acpi_ivhdp->ach_SysMgt = deventry->idev_SysMgt;
	acpi_ivhdp->ach_NMIPass = deventry->idev_NMIPass;
	acpi_ivhdp->ach_ExtIntPass = deventry->idev_ExtIntPass;
	acpi_ivhdp->ach_INITPass = deventry->idev_INITPass;

	/* extended data */
	if (acpi_ivhdp->ach_dev_type == DEVENTRY_EXTENDED_SELECT ||
	    acpi_ivhdp->ach_dev_type == DEVENTRY_EXTENDED_RANGE) {
		acpi_ivhdp->ach_AtsDisabled = deventry->idev_AtsDisabled;
	}

	/*
	 * Now add it to the hash
	 */
	ASSERT(hash[idx] != acpi_ivhdp);
	acpi_ivhdp->ach_next = hash[idx];
	hash[idx] = acpi_ivhdp;
}
/*
 * A device entry may be declared implicitly as a source device ID
 * in an alias entry. This routine adds it to the hash
 */
static void
add_implicit_deventry(ivhd_container_t *ivhdcp, amd_iommu_acpi_ivhd_t **hash)
{
	ivhd_deventry_t *d;
	int deviceid;

	for (d = ivhdcp->ivhdc_first_deventry; d; d = d->idev_next) {

		if ((d->idev_type != DEVENTRY_ALIAS_SELECT) &&
		    (d->idev_type != DEVENTRY_ALIAS_RANGE))
			continue;

		deviceid = d->idev_src_deviceid;

		/* only add the source id if it has no explicit entry */
		if (amd_iommu_lookup_ivhd(deviceid) == NULL) {
			ivhd_deventry_t deventry;

			/* Fake a SELECT entry */
			deventry.idev_type = DEVENTRY_SELECT;
			deventry.idev_len = 4;
			deventry.idev_deviceid = deviceid;
			deventry.idev_src_deviceid = -1;

			/* inherit the alias entry's settings */
			deventry.idev_Lint1Pass = d->idev_Lint1Pass;
			deventry.idev_Lint0Pass = d->idev_Lint0Pass;
			deventry.idev_SysMgt = d->idev_SysMgt;
			deventry.idev_NMIPass = d->idev_NMIPass;
			deventry.idev_ExtIntPass = d->idev_ExtIntPass;
			deventry.idev_INITPass = d->idev_INITPass;

			add_deventry_info(ivhdcp->ivhdc_ivhd, &deventry, hash);

			if (amd_iommu_debug & AMD_IOMMU_DEBUG_ACPI) {
				cmn_err(CE_NOTE, "Added implicit IVHD entry "
				    "for: deviceid = %u", deviceid);
			}
		}
	}
}
663 static void
664 add_ivhdc_info(ivhd_container_t *ivhdcp, amd_iommu_acpi_ivhd_t **hash)
666 ivhd_deventry_t *deventry;
667 ivhd_t *ivhdp = ivhdcp->ivhdc_ivhd;
669 for (deventry = ivhdcp->ivhdc_first_deventry; deventry;
670 deventry = deventry->idev_next) {
671 add_deventry_info(ivhdp, deventry, hash);
674 add_implicit_deventry(ivhdcp, hash);
678 static void
679 add_ivhd_info(amd_iommu_acpi_t *acpi, amd_iommu_acpi_ivhd_t **hash)
681 ivhd_container_t *ivhdcp;
683 for (ivhdcp = acpi->acp_first_ivhdc; ivhdcp;
684 ivhdcp = ivhdcp->ivhdc_next) {
685 add_ivhdc_info(ivhdcp, hash);
/*
 * Convert one IVMD sub-table into an amd_iommu_acpi_ivmd_t record and
 * insert it into 'hash'. Type 0x20 covers all devices, 0x21 a single
 * deviceid, 0x22 a deviceid range (end deviceid in ivmd_auxdata).
 * Panics on an unknown type.
 */
static void
set_ivmd_info(ivmd_t *ivmdp, amd_iommu_acpi_ivmd_t **hash)
{
	amd_iommu_acpi_ivmd_t *acpi_ivmdp;
	uint8_t uint8_flags;
	uint16_t idx;

	uint8_flags = ivmdp->ivmd_flags;

	acpi_ivmdp = kmem_zalloc(sizeof (*acpi_ivmdp), KM_SLEEP);

	/* range-style entries go in the extra bucket past the end */
	switch (ivmdp->ivmd_type) {
	case 0x20:
		acpi_ivmdp->acm_deviceid_start = 0;
		acpi_ivmdp->acm_deviceid_end = (uint16_t)-1;
		acpi_ivmdp->acm_dev_type = IVMD_DEVICEID_ALL;
		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
		break;
	case 0x21:
		acpi_ivmdp->acm_deviceid_start = ivmdp->ivmd_deviceid;
		acpi_ivmdp->acm_deviceid_end = ivmdp->ivmd_deviceid;
		acpi_ivmdp->acm_dev_type = IVMD_DEVICEID_SELECT;
		idx = deviceid_hashfn(ivmdp->ivmd_deviceid);
		break;
	case 0x22:
		acpi_ivmdp->acm_deviceid_start = ivmdp->ivmd_deviceid;
		acpi_ivmdp->acm_deviceid_end = ivmdp->ivmd_auxdata;
		acpi_ivmdp->acm_dev_type = IVMD_DEVICEID_RANGE;
		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
		break;
	default:
		cmn_err(CE_PANIC, "Unknown AMD IOMMU ACPI IVMD deviceid type: "
		    "%x", ivmdp->ivmd_type);
		/*NOTREACHED*/
	}

	ASSERT(AMD_IOMMU_REG_GET8(&uint8_flags,
	    AMD_IOMMU_ACPI_IVMD_RSV) == 0);

	/* memory-region attribute flags */
	acpi_ivmdp->acm_ExclRange = AMD_IOMMU_REG_GET8(&uint8_flags,
	    AMD_IOMMU_ACPI_IVMD_EXCL_RANGE);
	acpi_ivmdp->acm_IW = AMD_IOMMU_REG_GET8(&uint8_flags,
	    AMD_IOMMU_ACPI_IVMD_IW);
	acpi_ivmdp->acm_IR = AMD_IOMMU_REG_GET8(&uint8_flags,
	    AMD_IOMMU_ACPI_IVMD_IR);
	acpi_ivmdp->acm_Unity = AMD_IOMMU_REG_GET8(&uint8_flags,
	    AMD_IOMMU_ACPI_IVMD_UNITY);

	/* physical region covered by this entry */
	acpi_ivmdp->acm_ivmd_phys_start = ivmdp->ivmd_phys_start;
	acpi_ivmdp->acm_ivmd_phys_len = ivmdp->ivmd_phys_len;

	acpi_ivmdp->acm_next = hash[idx];
	hash[idx] = acpi_ivmdp;
}
/* Hash the IVMD sub-table held by a single container. */
static void
add_ivmdc_info(ivmd_container_t *ivmdcp, amd_iommu_acpi_ivmd_t **hash)
{
	set_ivmd_info(ivmdcp->ivmdc_ivmd, hash);
}
750 static void
751 add_ivmd_info(amd_iommu_acpi_t *acpi, amd_iommu_acpi_ivmd_t **hash)
753 ivmd_container_t *ivmdcp;
755 for (ivmdcp = acpi->acp_first_ivmdc; ivmdcp;
756 ivmdcp = ivmdcp->ivmdc_next) {
757 add_ivmdc_info(ivmdcp, hash);
761 static void
762 add_global_info(amd_iommu_acpi_t *acpi, amd_iommu_acpi_global_t *global)
764 uint32_t ivrs_ivinfo = acpi->acp_ivrs->ivrs_ivinfo;
766 global->acg_HtAtsResv =
767 AMD_IOMMU_REG_GET32(&ivrs_ivinfo, AMD_IOMMU_ACPI_HT_ATSRSV);
768 global->acg_VAsize =
769 AMD_IOMMU_REG_GET32(&ivrs_ivinfo, AMD_IOMMU_ACPI_VA_SIZE);
770 global->acg_PAsize =
771 AMD_IOMMU_REG_GET32(&ivrs_ivinfo, AMD_IOMMU_ACPI_PA_SIZE);
/*
 * Allocate the global info record and both lookup hashes, then fill
 * them from the parsed containers. Always returns DDI_SUCCESS today
 * (allocations sleep rather than fail).
 */
static int
create_acpi_hash(amd_iommu_acpi_t *acpi)
{
	/* Last hash entry is for deviceid ranges including "all" */
	amd_iommu_acpi_global = kmem_zalloc(sizeof (amd_iommu_acpi_global_t),
	    KM_SLEEP);

	amd_iommu_acpi_ivhd_hash = kmem_zalloc(sizeof (amd_iommu_acpi_ivhd_t *)
	    * (AMD_IOMMU_ACPI_INFO_HASH_SZ + 1), KM_SLEEP);

	amd_iommu_acpi_ivmd_hash = kmem_zalloc(sizeof (amd_iommu_acpi_ivmd_t *)
	    * (AMD_IOMMU_ACPI_INFO_HASH_SZ + 1), KM_SLEEP);

	add_global_info(acpi, amd_iommu_acpi_global);

	add_ivhd_info(acpi, amd_iommu_acpi_ivhd_hash);

	add_ivmd_info(acpi, amd_iommu_acpi_ivmd_hash);

	return (DDI_SUCCESS);
}
/*
 * Apply one IVHD entry's SysMgt setting to device-table entry 'entry'
 * of the given IOMMU.
 */
static void
set_deventry(amd_iommu_t *iommu, int entry, amd_iommu_acpi_ivhd_t *hinfop)
{
	uint64_t *dentry;

	dentry = (uint64_t *)(intptr_t)
	    &iommu->aiomt_devtbl[entry * AMD_IOMMU_DEVTBL_ENTRY_SZ];

	AMD_IOMMU_REG_SET64(&(dentry[1]), AMD_IOMMU_DEVTBL_SYSMGT,
	    hinfop->ach_SysMgt);
}
/* Initialize device table according to IVHD */
int
amd_iommu_acpi_init_devtbl(amd_iommu_t *iommu)
{
	int i, j;
	amd_iommu_acpi_ivhd_t *hinfop;

	/* walk every bucket, including the range bucket at the end */
	for (i = 0; i <= AMD_IOMMU_ACPI_INFO_HASH_SZ; i++) {
		for (hinfop = amd_iommu_acpi_ivhd_hash[i];
		    hinfop; hinfop = hinfop->ach_next) {

			/* only entries belonging to this IOMMU unit */
			if (hinfop->ach_IOMMU_deviceid != iommu->aiomt_bdf)
				continue;

			switch (hinfop->ach_dev_type) {
			case DEVENTRY_ALL:
				for (j = 0; j < AMD_IOMMU_MAX_DEVICEID; j++)
					set_deventry(iommu, j, hinfop);
				break;
			case DEVENTRY_SELECT:
			case DEVENTRY_EXTENDED_SELECT:
				set_deventry(iommu,
				    hinfop->ach_deviceid_start,
				    hinfop);
				break;
			case DEVENTRY_RANGE:
			case DEVENTRY_EXTENDED_RANGE:
				for (j = hinfop->ach_deviceid_start;
				    j <= hinfop->ach_deviceid_end;
				    j++)
					set_deventry(iommu, j, hinfop);
				break;
			case DEVENTRY_ALIAS_SELECT:
			case DEVENTRY_ALIAS_RANGE:
			case DEVENTRY_SPECIAL_DEVICE:
				/* alias/special: set the source deviceid */
				set_deventry(iommu,
				    hinfop->ach_src_deviceid,
				    hinfop);
				break;
			default:
				cmn_err(CE_WARN,
				    "%s: Unknown deventry type",
				    amd_iommu_modname);
				return (DDI_FAILURE);
			}
		}
	}

	return (DDI_SUCCESS);
}
/*
 * Return the global (IVRS-level) ACPI info record; only valid after
 * amd_iommu_acpi_init() has succeeded.
 */
amd_iommu_acpi_global_t *
amd_iommu_lookup_acpi_global(void)
{
	ASSERT(amd_iommu_acpi_global);

	return (amd_iommu_acpi_global);
}
868 amd_iommu_acpi_ivhd_t *
869 amd_iommu_lookup_all_ivhd(void)
871 amd_iommu_acpi_ivhd_t *hinfop;
873 hinfop = amd_iommu_acpi_ivhd_hash[AMD_IOMMU_ACPI_INFO_HASH_SZ];
874 for (; hinfop; hinfop = hinfop->ach_next) {
875 if (hinfop->ach_deviceid_start == 0 &&
876 hinfop->ach_deviceid_end == (uint16_t)-1) {
877 break;
881 return (hinfop);
884 amd_iommu_acpi_ivmd_t *
885 amd_iommu_lookup_all_ivmd(void)
887 amd_iommu_acpi_ivmd_t *minfop;
889 minfop = amd_iommu_acpi_ivmd_hash[AMD_IOMMU_ACPI_INFO_HASH_SZ];
890 for (; minfop; minfop = minfop->acm_next) {
891 if (minfop->acm_deviceid_start == 0 &&
892 minfop->acm_deviceid_end == (uint16_t)-1) {
893 break;
897 return (minfop);
/*
 * Return an IVHD info entry belonging to the given IOMMU unit, walking
 * the buckets from the range bucket downward.
 *
 * NOTE(review): only the head of each bucket chain is examined, and
 * if no head matches, the final value of hash[0] (matching or not,
 * possibly NULL) is returned — confirm this is the intended "any"
 * semantics.
 */
amd_iommu_acpi_ivhd_t *
amd_iommu_lookup_any_ivhd(amd_iommu_t *iommu)
{
	int i;
	amd_iommu_acpi_ivhd_t *hinfop;

	for (i = AMD_IOMMU_ACPI_INFO_HASH_SZ; i >= 0; i--) {
		hinfop = amd_iommu_acpi_ivhd_hash[i];
		if ((hinfop != NULL) &&
		    hinfop->ach_IOMMU_deviceid == iommu->aiomt_bdf)
			break;
	}

	return (hinfop);
}
916 amd_iommu_acpi_ivmd_t *
917 amd_iommu_lookup_any_ivmd(void)
919 int i;
920 amd_iommu_acpi_ivmd_t *minfop;
922 for (i = AMD_IOMMU_ACPI_INFO_HASH_SZ; i >= 0; i--) {
923 if ((minfop = amd_iommu_acpi_ivmd_hash[i]) != NULL)
924 break;
927 return (minfop);
930 static void
931 dump_acpi_aliases(void)
933 amd_iommu_acpi_ivhd_t *hinfop;
934 uint16_t idx;
936 for (idx = 0; idx <= AMD_IOMMU_ACPI_INFO_HASH_SZ; idx++) {
937 hinfop = amd_iommu_acpi_ivhd_hash[idx];
938 for (; hinfop; hinfop = hinfop->ach_next) {
939 cmn_err(CE_NOTE, "start=%d, end=%d, src_bdf=%d",
940 hinfop->ach_deviceid_start,
941 hinfop->ach_deviceid_end,
942 hinfop->ach_src_deviceid);
/*
 * Look up the IVHD info entry covering 'deviceid'. A deviceid of -1
 * requests the special-device entry. Otherwise the deviceid's own
 * hash bucket is searched first, then the range bucket (which holds
 * all range entries including "all"). Returns NULL when no entry
 * covers the deviceid.
 */
amd_iommu_acpi_ivhd_t *
amd_iommu_lookup_ivhd(int32_t deviceid)
{
	amd_iommu_acpi_ivhd_t *hinfop;
	uint16_t idx;

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_ACPI) {
		cmn_err(CE_NOTE, "Attempting to get ACPI IVHD info "
		    "for deviceid: %d", deviceid);
	}

	ASSERT(amd_iommu_acpi_ivhd_hash);

	/* check if special device */
	if (deviceid == -1) {
		hinfop = amd_iommu_acpi_ivhd_hash[AMD_IOMMU_ACPI_INFO_HASH_SZ];
		for (; hinfop; hinfop = hinfop->ach_next) {
			if (hinfop->ach_deviceid_start == -1 &&
			    hinfop->ach_deviceid_end == -1) {
				break;
			}
		}
		return (hinfop);
	}

	/* First search for an exact match */

	idx = deviceid_hashfn(deviceid);

range:
	hinfop = amd_iommu_acpi_ivhd_hash[idx];

	for (; hinfop; hinfop = hinfop->ach_next) {
		/* skip entries whose [start, end] range misses deviceid */
		if (deviceid < hinfop->ach_deviceid_start ||
		    deviceid > hinfop->ach_deviceid_end)
			continue;

		if (amd_iommu_debug & AMD_IOMMU_DEBUG_ACPI) {
			cmn_err(CE_NOTE, "Found ACPI IVHD match: %p, "
			    "actual deviceid = %u, start = %u, end = %u",
			    (void *)hinfop, deviceid,
			    hinfop->ach_deviceid_start,
			    hinfop->ach_deviceid_end);
		}
		goto out;
	}

	/* no exact match; retry against the range bucket */
	if (idx != AMD_IOMMU_ACPI_INFO_HASH_SZ) {
		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
		goto range;
	}

out:
	if (amd_iommu_debug & AMD_IOMMU_DEBUG_ACPI) {
		cmn_err(CE_NOTE, "%u: %s ACPI IVHD %p", deviceid,
		    hinfop ? "GOT" : "Did NOT get", (void *)hinfop);
	}

	return (hinfop);
}
1009 amd_iommu_acpi_ivmd_t *
1010 amd_iommu_lookup_ivmd(int32_t deviceid)
1012 amd_iommu_acpi_ivmd_t *minfop;
1013 uint16_t idx;
1015 if (amd_iommu_debug & AMD_IOMMU_DEBUG_ACPI) {
1016 cmn_err(CE_NOTE, "Attempting to get ACPI IVMD info "
1017 "for deviceid: %u", deviceid);
1020 ASSERT(amd_iommu_acpi_ivmd_hash);
1022 /* First search for an exact match */
1024 idx = deviceid_hashfn(deviceid);
1026 range:
1027 minfop = amd_iommu_acpi_ivmd_hash[idx];
1029 for (; minfop; minfop = minfop->acm_next) {
1030 if (deviceid < minfop->acm_deviceid_start &&
1031 deviceid > minfop->acm_deviceid_end)
1032 continue;
1034 if (amd_iommu_debug & AMD_IOMMU_DEBUG_ACPI) {
1035 cmn_err(CE_NOTE, "Found ACPI IVMD match: %p, "
1036 "actual deviceid = %u, start = %u, end = %u",
1037 (void *)minfop, deviceid,
1038 minfop->acm_deviceid_start,
1039 minfop->acm_deviceid_end);
1042 goto out;
1045 if (idx != AMD_IOMMU_ACPI_INFO_HASH_SZ) {
1046 idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
1047 goto range;
1050 out:
1051 if (amd_iommu_debug & AMD_IOMMU_DEBUG_ACPI) {
1052 cmn_err(CE_NOTE, "%u: %s ACPI IVMD info %p", deviceid,
1053 minfop ? "GOT" : "Did NOT get", (void *)minfop);
1056 return (minfop);