/* usr/src/uts/i86pc/io/immu_dmar.c */

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright (c) 2009, Intel Corporation.
 * All rights reserved.
 */

#include <sys/debug.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/sunddi.h>
#include <sys/list.h>
#include <sys/pci.h>
#include <sys/pci_cfgspace.h>
#include <sys/pci_impl.h>
#include <sys/sunndi.h>
#include <sys/ksynch.h>
#include <sys/cmn_err.h>
#include <sys/bootconf.h>
#include <sys/int_fmtio.h>
#include <sys/smbios.h>
#include <sys/apic.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/immu.h>
#include <sys/smp_impldefs.h>

static void dmar_table_destroy(dmar_table_t *tbl);

/*
 * internal global variables
 */
static char *dmar_raw;                  /* raw DMAR ACPI table */
static dmar_table_t *dmar_table;        /* converted form of DMAR table */

/*
 * global variables exported outside this file
 */
boolean_t dmar_print = B_FALSE;
kmutex_t ioapic_drhd_lock;
list_t ioapic_drhd_list;

/* ######################################################################### */

/*
 * helper functions to read the "raw" DMAR table
 */
static uint8_t
get_uint8(char *cp)
{
    uint8_t val = *((uint8_t *)cp);
    return (val);
}

static uint16_t
get_uint16(char *cp)
{
    uint16_t val = *((uint16_t *)cp);
    return (val);
}

static uint32_t
get_uint32(char *cp)
{
    uint32_t val = *((uint32_t *)cp);
    return (val);
}

static uint64_t
get_uint64(char *cp)
{
    uint64_t val = *((uint64_t *)cp);
    return (val);
}

static char *
get_str(char *cp, uint_t len)
{
    char *str = kmem_alloc(len + 1, KM_SLEEP);

    (void) strlcpy(str, cp, len + 1);

    return (str);
}
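
/*
 * Editorial note: the accessors above read fixed-width little-endian fields
 * straight out of the byte-packed ACPI table image.  On x86 this matches the
 * native byte order, so a plain dereference is sufficient; the casts exist
 * only to select the field width.  For example, the DMAR table length at
 * offset 4 is fetched below with get_uint32(&raw[4]).
 */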

static void
scope_list_free(list_t *scope_list)
{
    scope_t *scope;

    if (list_is_empty(scope_list)) {
        list_destroy(scope_list);
        return;
    }

    while ((scope = list_remove_head(scope_list)) != NULL) {
        kmem_free(scope, sizeof (scope_t));
    }

    ASSERT(list_is_empty(scope_list));
    list_destroy(scope_list);
}

static void
drhd_list_destroy(list_t *drhd_list)
{
    drhd_t *drhd;

    ASSERT(drhd_list);

    if (list_is_empty(drhd_list)) {
        list_destroy(drhd_list);
        return;
    }

    while ((drhd = list_remove_head(drhd_list)) != NULL) {
        scope_list_free(&(drhd->dr_scope_list));
        kmem_free(drhd, sizeof (drhd_t));
    }

    ASSERT(list_is_empty(drhd_list));
    list_destroy(drhd_list);
}

static void
rmrr_list_destroy(list_t *rmrr_list)
{
    rmrr_t *rmrr;

    ASSERT(rmrr_list);

    if (list_is_empty(rmrr_list)) {
        list_destroy(rmrr_list);
        return;
    }

    while ((rmrr = list_remove_head(rmrr_list)) != NULL) {
        scope_list_free(&(rmrr->rm_scope_list));
        kmem_free(rmrr, sizeof (rmrr_t));
    }

    ASSERT(list_is_empty(rmrr_list));
    list_destroy(rmrr_list);
}

/*
 * parse_scope()
 *      parse a scope structure in the "raw" table
 */
static scope_t *
parse_scope(char *shead)
{
    scope_t *scope;
    char *phead;
    int bus, dev, func;
    uint8_t startbus;
    uint8_t len;
    int depth;

    ASSERT(shead);

    scope = kmem_zalloc(sizeof (scope_t), KM_SLEEP);
    scope->scp_type = get_uint8(&shead[0]);
    scope->scp_enumid = get_uint8(&shead[4]);

    len = get_uint8(&shead[1]);
    startbus = get_uint8(&shead[5]);
    depth = (len - 6)/2;
    ASSERT(depth >= 1);

    phead = &shead[6];

    bus = startbus;
    dev = get_uint8(phead++);
    func = get_uint8(phead++);

    for (depth--; depth > 0; depth--) {
        bus = pci_getb_func(bus, dev, func, PCI_BCNF_SECBUS);
        dev = get_uint8(phead++);
        func = get_uint8(phead++);
    }

    ASSERT(bus >= 0 && bus < 256);
    ASSERT(dev >= 0 && dev < 32);
    ASSERT(func >= 0 && func < 8);

    /* ok we got the device BDF */
    scope->scp_bus = bus;
    scope->scp_dev = dev;
    scope->scp_func = func;

    return (scope);
}
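
/*
 * Illustration (editorial, based on the offsets used in parse_scope()):
 * a DMAR device-scope entry is laid out as
 *
 *      byte 0    type (DMAR_ENDPOINT, DMAR_SUBTREE, DMAR_IOAPIC, DMAR_HPET)
 *      byte 1    length of this scope entry
 *      byte 4    enumeration id (e.g. the I/O APIC id for DMAR_IOAPIC)
 *      byte 5    start bus number
 *      byte 6+   path: one (device, function) pair per level
 *
 * An entry of length 8 carries a single (dev, func) pair; each additional
 * pair descends one bridge level, which is why the loop above chases
 * PCI_BCNF_SECBUS to arrive at the final bus number.
 */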

/* setup the ioapic_drhd structure */
static void
ioapic_drhd_setup(void)
{
    mutex_init(&(ioapic_drhd_lock), NULL, MUTEX_DEFAULT, NULL);

    mutex_enter(&(ioapic_drhd_lock));
    list_create(&(ioapic_drhd_list), sizeof (ioapic_drhd_t),
        offsetof(ioapic_drhd_t, ioapic_node));
    mutex_exit(&(ioapic_drhd_lock));
}

/* get ioapic source id for interrupt remapping */
static void
ioapic_drhd_insert(scope_t *scope, drhd_t *drhd)
{
    ioapic_drhd_t *idt;

    idt = kmem_zalloc(sizeof (ioapic_drhd_t), KM_SLEEP);
    idt->ioapic_ioapicid = scope->scp_enumid;
    idt->ioapic_sid = ((scope->scp_bus << 8) | (scope->scp_dev << 3) |
        (scope->scp_func));
    idt->ioapic_drhd = drhd;

    mutex_enter(&ioapic_drhd_lock);
    list_insert_tail(&ioapic_drhd_list, idt);
    mutex_exit(&ioapic_drhd_lock);
}
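
/*
 * Illustration (editorial): the source-id computed above is the standard PCI
 * requester-id encoding, bus in bits 15:8, device in bits 7:3, function in
 * bits 2:0.  For example, an I/O APIC whose scope reports (hypothetically)
 * bus 0, device 0x1f, function 7 gets sid = (0 << 8) | (0x1f << 3) | 7 = 0xff.
 */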

static ioapic_drhd_t *
ioapic_drhd_lookup(int ioapicid)
{
    ioapic_drhd_t *idt;

    mutex_enter(&ioapic_drhd_lock);
    idt = list_head(&ioapic_drhd_list);
    for (; idt; idt = list_next(&ioapic_drhd_list, idt)) {
        if (idt->ioapic_ioapicid == ioapicid) {
            break;
        }
    }
    mutex_exit(&ioapic_drhd_lock);

    return (idt);
}

static void
ioapic_drhd_destroy(void)
{
    ioapic_drhd_t *idt;

    mutex_enter(&ioapic_drhd_lock);
    while ((idt = list_remove_head(&ioapic_drhd_list)) != NULL) {
        kmem_free(idt, sizeof (ioapic_drhd_t));
    }
    list_destroy(&ioapic_drhd_list);
    mutex_exit(&(ioapic_drhd_lock));

    mutex_destroy(&(ioapic_drhd_lock));
}

/*
 * parse_drhd()
 *      parse the drhd units in the dmar table
 */
static int
parse_drhd(char *uhead, dmar_table_t *tbl)
{
    drhd_t *drhd;
    int seg;
    int len;
    char *shead;
    scope_t *scope;

    ASSERT(uhead);
    ASSERT(tbl);
    ASSERT(get_uint16(&uhead[0]) == DMAR_DRHD);

    seg = get_uint16(&uhead[6]);
    if (seg < 0 || seg >= IMMU_MAXSEG) {
        ddi_err(DER_WARN, NULL, "invalid segment# <%d> "
            "in DRHD unit in ACPI DMAR table", seg);
        return (DDI_FAILURE);
    }

    drhd = kmem_zalloc(sizeof (drhd_t), KM_SLEEP);
    mutex_init(&(drhd->dr_lock), NULL, MUTEX_DEFAULT, NULL);
    list_create(&(drhd->dr_scope_list), sizeof (scope_t),
        offsetof(scope_t, scp_node));

    len = get_uint16(&uhead[2]);
    drhd->dr_include_all =
        (get_uint8(&uhead[4]) & DMAR_INCLUDE_ALL) ? B_TRUE : B_FALSE;
    drhd->dr_seg = seg;
    drhd->dr_regs = get_uint64(&uhead[8]);

    /*
     * parse each scope.
     */
    shead = &uhead[16];
    while (shead < &uhead[len - 1]) {
        scope = parse_scope(shead);
        if (scope == NULL) {
            return (DDI_FAILURE);
        }

        if (scope->scp_type == DMAR_IOAPIC) {
            ioapic_drhd_insert(scope, drhd);
        }

        list_insert_tail(&(drhd->dr_scope_list), scope);
        shead += get_uint8(&shead[1]);
    }

    list_insert_tail(&(tbl->tbl_drhd_list[drhd->dr_seg]), drhd);

    return (DDI_SUCCESS);
}
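
/*
 * Illustration (editorial, from the offsets used in parse_drhd()): a DRHD
 * remapping structure is laid out as
 *
 *      bytes 0-1     type (DMAR_DRHD)
 *      bytes 2-3     length of the unit, including its device scopes
 *      byte 4        flags (DMAR_INCLUDE_ALL marks a catchall unit)
 *      bytes 6-7     PCI segment number
 *      bytes 8-15    register base address of the remapping hardware
 *      byte 16+      device scope entries
 */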

/*
 * parse_rmrr()
 *      parse the rmrr units in the dmar table
 */
static int
parse_rmrr(char *uhead, dmar_table_t *tbl)
{
    rmrr_t *rmrr;
    int seg;
    int len;
    char *shead;
    scope_t *scope;

    ASSERT(uhead);
    ASSERT(tbl);
    ASSERT(get_uint16(&uhead[0]) == DMAR_RMRR);

    seg = get_uint16(&uhead[6]);
    if (seg < 0 || seg >= IMMU_MAXSEG) {
        ddi_err(DER_WARN, NULL, "invalid segment# <%d> "
            "in RMRR unit in ACPI DMAR table", seg);
        return (DDI_FAILURE);
    }

    rmrr = kmem_zalloc(sizeof (rmrr_t), KM_SLEEP);
    mutex_init(&(rmrr->rm_lock), NULL, MUTEX_DEFAULT, NULL);
    list_create(&(rmrr->rm_scope_list), sizeof (scope_t),
        offsetof(scope_t, scp_node));

    /* RMRR region is [base,limit] */
    len = get_uint16(&uhead[2]);
    rmrr->rm_seg = get_uint16(&uhead[6]);
    rmrr->rm_base = get_uint64(&uhead[8]);
    rmrr->rm_limit = get_uint64(&uhead[16]);

    if (rmrr->rm_base > rmrr->rm_limit) {
        ddi_err(DER_WARN, NULL, "IMMU: BIOS bug detected: "
            "RMRR: base (%lx) > limit (%lx)",
            rmrr->rm_base, rmrr->rm_limit);
        list_destroy(&(rmrr->rm_scope_list));
        mutex_destroy(&(rmrr->rm_lock));
        kmem_free(rmrr, sizeof (rmrr_t));
        return (DDI_SUCCESS);
    }

    /*
     * parse each scope in RMRR
     */
    shead = &uhead[24];
    while (shead < &uhead[len - 1]) {
        scope = parse_scope(shead);
        if (scope == NULL) {
            return (DDI_FAILURE);
        }
        list_insert_tail(&(rmrr->rm_scope_list), scope);
        shead += get_uint8(&shead[1]);
    }

    list_insert_tail(&(tbl->tbl_rmrr_list[rmrr->rm_seg]), rmrr);

    return (DDI_SUCCESS);
}
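
/*
 * Illustration (editorial, from the offsets used in parse_rmrr()): an RMRR
 * remapping structure is laid out as
 *
 *      bytes 0-1     type (DMAR_RMRR)
 *      bytes 2-3     length of the unit, including its device scopes
 *      bytes 6-7     PCI segment number
 *      bytes 8-15    reserved region base address
 *      bytes 16-23   reserved region limit address (inclusive)
 *      byte 24+      device scope entries
 */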

#define TBL_OEM_ID_SZ       (6)
#define TBL_OEM_TBLID_SZ    (8)

/*
 * parse the "raw" DMAR table and convert it
 * into a useful form.
 */
static int
dmar_parse(dmar_table_t **tblpp, char *raw)
{
    char *uhead;
    dmar_table_t *tbl;
    int i;
    char *unmstr;

    ASSERT(raw);
    ASSERT(tblpp);

    *tblpp = NULL;

    /*
     * do a sanity check. make sure the raw table
     * has the right signature
     */
    if (raw[0] != 'D' || raw[1] != 'M' ||
        raw[2] != 'A' || raw[3] != 'R') {
        ddi_err(DER_WARN, NULL, "IOMMU ACPI "
            "signature != \"DMAR\"");
        return (DDI_FAILURE);
    }

    /*
     * the platform has intel iommu, create processed ACPI struct
     */
    tbl = kmem_zalloc(sizeof (dmar_table_t), KM_SLEEP);
    mutex_init(&(tbl->tbl_lock), NULL, MUTEX_DEFAULT, NULL);

    tbl->tbl_raw = raw;

    /*
     * Note we explicitly show offsets for clarity
     */
    tbl->tbl_rawlen = get_uint32(&raw[4]);

    /* XXX TO DO verify checksum of table */
    tbl->tbl_oem_id = get_str(&raw[10], TBL_OEM_ID_SZ);
    tbl->tbl_oem_tblid = get_str(&raw[16], TBL_OEM_TBLID_SZ);
    tbl->tbl_oem_rev = get_uint32(&raw[24]);
    tbl->tbl_haw = get_uint8(&raw[36]) + 1;
    tbl->tbl_intrmap = (get_uint8(&raw[37]) & DMAR_INTRMAP_SUPPORT)
        ? B_TRUE : B_FALSE;
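
    /*
     * Illustration (editorial): the offsets above follow the standard ACPI
     * table header plus the DMAR-specific fields that come after it:
     *
     *      bytes 0-3     signature ("DMAR")
     *      bytes 4-7     total table length
     *      bytes 10-15   OEM ID
     *      bytes 16-23   OEM table ID
     *      bytes 24-27   OEM revision
     *      byte 36       host address width (HAW), encoded as width - 1
     *      byte 37       flags (interrupt remapping support bit)
     *      byte 48+      remapping structures (DRHD, RMRR, ATSR, RHSA, ...)
     */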

    /* create lists for DRHD and RMRR */
    for (i = 0; i < IMMU_MAXSEG; i++) {
        list_create(&(tbl->tbl_drhd_list[i]), sizeof (drhd_t),
            offsetof(drhd_t, dr_node));
        list_create(&(tbl->tbl_rmrr_list[i]), sizeof (rmrr_t),
            offsetof(rmrr_t, rm_node));
    }

    ioapic_drhd_setup();

    /*
     * parse each unit. Currently only DRHD and RMRR types
     * are parsed. We ignore all other types of units.
     */
    uhead = &raw[48];
    while (uhead < &raw[tbl->tbl_rawlen - 1]) {
        unmstr = NULL;
        switch (get_uint16(uhead)) {
        case DMAR_DRHD:
            if (parse_drhd(uhead, tbl) != DDI_SUCCESS) {
                goto failed;
            }
            break;
        case DMAR_RMRR:
            if (parse_rmrr(uhead, tbl) != DDI_SUCCESS) {
                goto failed;
            }
            break;
        case DMAR_ATSR:
            unmstr = "ATSR";
            break;
        case DMAR_RHSA:
            unmstr = "RHSA";
            break;
        default:
            unmstr = "unknown unit type";
            break;
        }

        if (unmstr) {
            ddi_err(DER_NOTE, NULL, "DMAR ACPI table: "
                "skipping unsupported unit type %s", unmstr);
        }

        uhead += get_uint16(&uhead[2]);
    }

    *tblpp = tbl;
    return (DDI_SUCCESS);

failed:
    dmar_table_destroy(tbl);
    return (DDI_FAILURE);
}

static char *
scope_type(int devtype)
{
    char *typestr;

    switch (devtype) {
    case DMAR_ENDPOINT:
        typestr = "endpoint-device";
        break;
    case DMAR_SUBTREE:
        typestr = "subtree-device";
        break;
    case DMAR_IOAPIC:
        typestr = "IOAPIC";
        break;
    case DMAR_HPET:
        typestr = "HPET";
        break;
    default:
        typestr = "Unknown device";
        break;
    }

    return (typestr);
}

static void
print_scope_list(list_t *scope_list)
{
    scope_t *scope;

    if (list_is_empty(scope_list))
        return;

    ddi_err(DER_CONT, NULL, "\tdevice list:\n");

    for (scope = list_head(scope_list); scope;
        scope = list_next(scope_list, scope)) {
        ddi_err(DER_CONT, NULL, "\t\ttype = %s\n",
            scope_type(scope->scp_type));
        ddi_err(DER_CONT, NULL, "\n\t\tbus = %d\n",
            scope->scp_bus);
        ddi_err(DER_CONT, NULL, "\t\tdev = %d\n",
            scope->scp_dev);
        ddi_err(DER_CONT, NULL, "\t\tfunc = %d\n",
            scope->scp_func);
    }
}

static void
print_drhd_list(list_t *drhd_list)
{
    drhd_t *drhd;

    if (list_is_empty(drhd_list))
        return;

    ddi_err(DER_CONT, NULL, "\ndrhd list:\n");

    for (drhd = list_head(drhd_list); drhd;
        drhd = list_next(drhd_list, drhd)) {

        ddi_err(DER_CONT, NULL, "\n\tsegment = %d\n",
            drhd->dr_seg);
        ddi_err(DER_CONT, NULL, "\treg_base = 0x%" PRIx64 "\n",
            drhd->dr_regs);
        ddi_err(DER_CONT, NULL, "\tinclude_all = %s\n",
            drhd->dr_include_all == B_TRUE ? "TRUE" : "FALSE");
        ddi_err(DER_CONT, NULL, "\tdip = 0x%p\n",
            (void *)drhd->dr_dip);

        print_scope_list(&(drhd->dr_scope_list));
    }
}

static void
print_rmrr_list(list_t *rmrr_list)
{
    rmrr_t *rmrr;

    if (list_is_empty(rmrr_list))
        return;

    ddi_err(DER_CONT, NULL, "\nrmrr list:\n");

    for (rmrr = list_head(rmrr_list); rmrr;
        rmrr = list_next(rmrr_list, rmrr)) {

        ddi_err(DER_CONT, NULL, "\n\tsegment = %d\n",
            rmrr->rm_seg);
        ddi_err(DER_CONT, NULL, "\tbase = 0x%lx\n",
            rmrr->rm_base);
        ddi_err(DER_CONT, NULL, "\tlimit = 0x%lx\n",
            rmrr->rm_limit);

        print_scope_list(&(rmrr->rm_scope_list));
    }
}

/*
 * print DMAR table
 */
static void
dmar_table_print(dmar_table_t *tbl)
{
    int i;

    if (dmar_print == B_FALSE) {
        return;
    }

    /* print the title */
    ddi_err(DER_CONT, NULL, "#### Start of dmar_table ####\n");
    ddi_err(DER_CONT, NULL, "\thaw = %d\n", tbl->tbl_haw);
    ddi_err(DER_CONT, NULL, "\tintr_remap = %s\n",
        tbl->tbl_intrmap == B_TRUE ? "<true>" : "<false>");

    /* print drhd list */
    for (i = 0; i < IMMU_MAXSEG; i++) {
        print_drhd_list(&(tbl->tbl_drhd_list[i]));
    }

    /* print rmrr list */
    for (i = 0; i < IMMU_MAXSEG; i++) {
        print_rmrr_list(&(tbl->tbl_rmrr_list[i]));
    }

    ddi_err(DER_CONT, NULL, "#### END of dmar_table ####\n");
}

static void
drhd_devi_create(drhd_t *drhd, int unit)
{
    struct ddi_parent_private_data *pdptr;
    struct regspec reg;
    dev_info_t *dip;

    dip = ddi_add_child(root_devinfo, IMMU_UNIT_NAME,
        DEVI_SID_NODEID, unit);

    drhd->dr_dip = dip;

    reg.regspec_bustype = 0;
    reg.regspec_addr = drhd->dr_regs;
    reg.regspec_size = IMMU_REGSZ;

    /*
     * update the reg properties
     *
     *   reg property will be used for register
     *   set access
     *
     * refer to the bus_map of root nexus driver
     * I/O or memory mapping:
     *
     * <bustype=0, addr=x, len=x>: memory
     * <bustype=1, addr=x, len=x>: i/o
     * <bustype>1, addr=0, len=x>: x86-compatibility i/o
     */
    (void) ndi_prop_update_int_array(DDI_DEV_T_NONE,
        dip, "reg", (int *)&reg,
        sizeof (struct regspec) / sizeof (int));
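
    /*
     * Illustration (editorial): for a DRHD whose register base is, say,
     * 0xfed90000, the property created above reads
     * <bustype=0, addr=0xfed90000, len=IMMU_REGSZ>, i.e. a plain
     * memory-mapped register range.
     */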

    /*
     * This is an artificially constructed dev_info, and we
     * need to set a few more things to be able to use it
     * for ddi_dma_alloc_handle/free_handle.
     */
    ddi_set_driver(dip, ddi_get_driver(ddi_root_node()));
    DEVI(dip)->devi_bus_dma_allochdl =
        DEVI(ddi_get_driver((ddi_root_node())));

    pdptr = kmem_zalloc(sizeof (struct ddi_parent_private_data)
        + sizeof (struct regspec), KM_SLEEP);
    pdptr->par_nreg = 1;
    pdptr->par_reg = (struct regspec *)(pdptr + 1);
    pdptr->par_reg->regspec_bustype = 0;
    pdptr->par_reg->regspec_addr = drhd->dr_regs;
    pdptr->par_reg->regspec_size = IMMU_REGSZ;
    ddi_set_parent_data(dip, pdptr);
}

/*
 * dmar_devinfos_create()
 *
 *      create the dev_info node in the device tree,
 *      the info node is a nexus child of the root
 *      nexus
 */
static void
dmar_devinfos_create(dmar_table_t *tbl)
{
    list_t *drhd_list;
    drhd_t *drhd;
    int i, unit;

    for (i = 0; i < IMMU_MAXSEG; i++) {

        drhd_list = &(tbl->tbl_drhd_list[i]);

        if (list_is_empty(drhd_list))
            continue;

        drhd = list_head(drhd_list);
        for (unit = 0; drhd;
            drhd = list_next(drhd_list, drhd), unit++) {
            drhd_devi_create(drhd, unit);
        }
    }
}

static void
drhd_devi_destroy(drhd_t *drhd)
{
    dev_info_t *dip;
    int count;

    dip = drhd->dr_dip;
    ASSERT(dip);

    ndi_devi_enter(root_devinfo, &count);
    if (ndi_devi_offline(dip, NDI_DEVI_REMOVE) != DDI_SUCCESS) {
        ddi_err(DER_WARN, dip, "Failed to destroy");
    }
    ndi_devi_exit(root_devinfo, count);
    drhd->dr_dip = NULL;
}

/*
 * dmar_devi_destroy()
 *
 * destroy dev_info nodes for all drhd units
 */
static void
dmar_devi_destroy(dmar_table_t *tbl)
{
    drhd_t *drhd;
    list_t *drhd_list;
    int i;

    for (i = 0; i < IMMU_MAXSEG; i++) {
        drhd_list = &(tbl->tbl_drhd_list[i]);
        if (list_is_empty(drhd_list))
            continue;

        drhd = list_head(drhd_list);
        for (; drhd; drhd = list_next(drhd_list, drhd)) {
            drhd_devi_destroy(drhd);
        }
    }
}

static int
match_bdf(dev_info_t *ddip, void *arg)
{
    immu_arg_t *imarg = (immu_arg_t *)arg;
    immu_devi_t *immu_devi;

    ASSERT(ddip);
    ASSERT(imarg);
    ASSERT(imarg->ima_seg == 0);
    ASSERT(imarg->ima_bus >= 0);
    ASSERT(imarg->ima_devfunc >= 0);
    ASSERT(imarg->ima_ddip == NULL);

    /* rdip can be NULL */

    mutex_enter(&(DEVI(ddip)->devi_lock));

    immu_devi = IMMU_DEVI(ddip);
    ASSERT(immu_devi);

    if (immu_devi->imd_seg == imarg->ima_seg &&
        immu_devi->imd_bus == imarg->ima_bus &&
        immu_devi->imd_devfunc == imarg->ima_devfunc) {
        imarg->ima_ddip = ddip;
    }

    mutex_exit(&(DEVI(ddip)->devi_lock));

    return (imarg->ima_ddip ? DDI_WALK_TERMINATE : DDI_WALK_CONTINUE);
}

static void
dmar_table_destroy(dmar_table_t *tbl)
{
    int i;

    ASSERT(tbl);

    /* destroy lists for DRHD and RMRR */
    for (i = 0; i < IMMU_MAXSEG; i++) {
        rmrr_list_destroy(&(tbl->tbl_rmrr_list[i]));
        drhd_list_destroy(&(tbl->tbl_drhd_list[i]));
    }

    /* free strings */
    kmem_free(tbl->tbl_oem_tblid, TBL_OEM_TBLID_SZ + 1);
    kmem_free(tbl->tbl_oem_id, TBL_OEM_ID_SZ + 1);
    tbl->tbl_raw = NULL; /* raw ACPI table doesn't have to be freed */
    mutex_destroy(&(tbl->tbl_lock));
    kmem_free(tbl, sizeof (dmar_table_t));
}

/*
 * #########################################################################
 * Functions exported by dmar.c
 * This file deals with reading and processing the DMAR ACPI table
 * #########################################################################
 */

/*
 * immu_dmar_setup()
 *      Check if the system has a DMAR ACPI table. If yes, the system
 *      has Intel IOMMU hardware
 */
int
immu_dmar_setup(void)
{
    if (AcpiGetTable("DMAR", 1, (ACPI_TABLE_HEADER **)&dmar_raw) != AE_OK) {
        ddi_err(DER_LOG, NULL,
            "No DMAR ACPI table. No Intel IOMMU present\n");
        dmar_raw = NULL;
        return (DDI_FAILURE);
    }
    ASSERT(dmar_raw);
    return (DDI_SUCCESS);
}

/*
 * immu_dmar_parse()
 *      Called by immu.c to parse and convert the "raw" ACPI DMAR table
 */
int
immu_dmar_parse(void)
{
    dmar_table_t *tbl = NULL;

    /* we should already have found the "raw" table */
    ASSERT(dmar_raw);

    ddi_err(DER_CONT, NULL, "?Processing DMAR ACPI table\n");

    dmar_table = NULL;

    /*
     * parse DMAR ACPI table
     */
    if (dmar_parse(&tbl, dmar_raw) != DDI_SUCCESS) {
        ASSERT(tbl == NULL);
        return (DDI_FAILURE);
    }

    ASSERT(tbl);

    /*
     * create one devinfo for every drhd unit
     * in the DMAR table
     */
    dmar_devinfos_create(tbl);

    /*
     * print the dmar table if the debug option is set
     */
    dmar_table_print(tbl);

    dmar_table = tbl;

    return (DDI_SUCCESS);
}

void
immu_dmar_startup(void)
{
    /* nothing to do */
}

void
immu_dmar_shutdown(void)
{
    /* nothing to do */
}

void
immu_dmar_destroy(void)
{
    dmar_devi_destroy(dmar_table);
    dmar_table_destroy(dmar_table);
    ioapic_drhd_destroy();
    dmar_table = NULL;
    dmar_raw = NULL;
}

boolean_t
immu_dmar_blacklisted(char **strptr, uint_t nstrs)
{
    dmar_table_t *tbl = dmar_table;
    int i;
    char oem_rev[IMMU_MAXNAMELEN];

    ASSERT(tbl);

    ASSERT((strptr == NULL) ^ (nstrs != 0));

    /*
     * Must be a minimum of 4
     */
    if (nstrs < 4) {
        return (B_FALSE);
    }

    ddi_err(DER_CONT, NULL, "?System DMAR ACPI table information:\n");
    ddi_err(DER_CONT, NULL, "?OEM-ID = <%s>\n", tbl->tbl_oem_id);
    ddi_err(DER_CONT, NULL, "?Table-ID = <%s>\n", tbl->tbl_oem_tblid);
    (void) snprintf(oem_rev, sizeof (oem_rev), "%d", tbl->tbl_oem_rev);
    ddi_err(DER_CONT, NULL, "?Revision = <%s>\n", oem_rev);

    for (i = 0; nstrs - i >= 4; i++) {
        if (strcmp(*strptr++, "DMAR") == 0) {
            if (strcmp(*strptr++, tbl->tbl_oem_id) == 0 &&
                (*strptr[0] == '\0' ||
                strcmp(*strptr++, tbl->tbl_oem_tblid) == 0) &&
                (*strptr[0] == '\0' ||
                strcmp(*strptr++, oem_rev) == 0)) {
                return (B_TRUE);
            }
            i += 3; /* the for loop adds 1 as well, so only 3 here */
        }
    }

    return (B_FALSE);
}
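
/*
 * Illustration (editorial): the blacklist strings are consumed above in
 * quadruples of the form { "DMAR", <oem-id>, <oem-table-id>, <revision> },
 * where an empty string in the table-id or revision position acts as a
 * wildcard.  For example, the (hypothetical) quadruple
 * { "DMAR", "ACME", "", "" } would match any DMAR table whose OEM ID is
 * "ACME", regardless of its table id or revision.
 */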

void
immu_dmar_rmrr_map(void)
{
    int seg;
    int count;
    dev_info_t *rdip;
    scope_t *scope;
    rmrr_t *rmrr;
    dmar_table_t *tbl;

    ASSERT(dmar_table);

    tbl = dmar_table;

    /* called during boot, when kernel is single threaded. No lock */

    /*
     * for each segment, walk the rmrr list looking for an exact match
     */
    for (seg = 0; seg < IMMU_MAXSEG; seg++) {
        rmrr = list_head(&(tbl->tbl_rmrr_list)[seg]);
        for (; rmrr; rmrr = list_next(&(tbl->tbl_rmrr_list)[seg],
            rmrr)) {

            /*
             * try to match BDF *exactly* to a device scope.
             */
            scope = list_head(&(rmrr->rm_scope_list));
            for (; scope;
                scope = list_next(&(rmrr->rm_scope_list), scope)) {
                immu_arg_t imarg = {0};
                memrng_t mrng = {0};

                /* PCI endpoint devices only */
                if (scope->scp_type != DMAR_ENDPOINT)
                    continue;

                imarg.ima_seg = seg;
                imarg.ima_bus = scope->scp_bus;
                imarg.ima_devfunc =
                    IMMU_PCI_DEVFUNC(scope->scp_dev,
                    scope->scp_func);
                imarg.ima_ddip = NULL;
                imarg.ima_rdip = NULL;

                ASSERT(root_devinfo);
                /* XXX should be optimized */
                ndi_devi_enter(root_devinfo, &count);
                ddi_walk_devs(ddi_get_child(root_devinfo),
                    match_bdf, &imarg);
                ndi_devi_exit(root_devinfo, count);

                if (imarg.ima_ddip == NULL) {
                    ddi_err(DER_WARN, NULL,
                        "No dip found for "
                        "bus=0x%x, dev=0x%x, func=0x%x",
                        scope->scp_bus, scope->scp_dev,
                        scope->scp_func);
                    continue;
                }

                rdip = imarg.ima_ddip;

                /*
                 * This address must be in the BIOS reserved
                 * map
                 */
                if (!address_in_memlist(bios_rsvd,
                    (uint64_t)rmrr->rm_base, rmrr->rm_limit -
                    rmrr->rm_base + 1)) {
                    ddi_err(DER_WARN, rdip, "RMRR range "
                        " [0x%" PRIx64 " - 0x%" PRIx64 "]"
                        " not in BIOS reserved map",
                        rmrr->rm_base, rmrr->rm_limit);
                }

                /* XXX could be more efficient */
                memlist_read_lock();
                if (address_in_memlist(phys_install,
                    (uint64_t)rmrr->rm_base, rmrr->rm_limit -
                    rmrr->rm_base + 1)) {
                    ddi_err(DER_WARN, rdip, "RMRR range "
                        " [0x%" PRIx64 " - 0x%" PRIx64 "]"
                        " is in physinstall map",
                        rmrr->rm_base, rmrr->rm_limit);
                }
                memlist_read_unlock();

                (void) immu_dvma_device_setup(rdip, 0);

                ddi_err(DER_LOG, rdip,
                    "IMMU: Mapping RMRR range "
                    "[0x%" PRIx64 " - 0x%"PRIx64 "]",
                    rmrr->rm_base, rmrr->rm_limit);

                mrng.mrng_start =
                    IMMU_ROUNDOWN((uintptr_t)rmrr->rm_base);
                mrng.mrng_npages =
                    IMMU_ROUNDUP((uintptr_t)rmrr->rm_limit -
                    (uintptr_t)rmrr->rm_base + 1) /
                    IMMU_PAGESIZE;

                (void) immu_map_memrange(rdip, &mrng);
            }
        }
    }
}
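
/*
 * Illustration (editorial): the rounding at the end of immu_dmar_rmrr_map()
 * expands an RMRR to whole IMMU pages.  For a (hypothetical) region with
 * base = 0xdb000 and limit = 0xdefff and 4K pages, mrng_start = 0xdb000 and
 * mrng_npages = IMMU_ROUNDUP(0x4000) / IMMU_PAGESIZE = 4.
 */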

immu_t *
immu_dmar_get_immu(dev_info_t *rdip)
{
    int seg;
    int tlevel;
    int level;
    drhd_t *drhd;
    drhd_t *tdrhd;
    scope_t *scope;
    dmar_table_t *tbl;

    ASSERT(dmar_table);

    tbl = dmar_table;

    mutex_enter(&(tbl->tbl_lock));

    /*
     * The search below proceeds in three passes: an exact BDF match
     * against endpoint device scopes, then the lowest matching subtree
     * scope, and finally any "include all" (catchall) unit.
     */

    /*
     * for each segment, walk the drhd list looking for an exact match
     */
    for (seg = 0; seg < IMMU_MAXSEG; seg++) {
        drhd = list_head(&(tbl->tbl_drhd_list)[seg]);
        for (; drhd; drhd = list_next(&(tbl->tbl_drhd_list)[seg],
            drhd)) {

            /*
             * we are currently searching for exact matches so
             * skip "include all" (catchall) and subtree matches
             */
            if (drhd->dr_include_all == B_TRUE)
                continue;

            /*
             * try to match BDF *exactly* to a device scope.
             */
            scope = list_head(&(drhd->dr_scope_list));
            for (; scope;
                scope = list_next(&(drhd->dr_scope_list), scope)) {
                immu_arg_t imarg = {0};

                /* PCI endpoint devices only */
                if (scope->scp_type != DMAR_ENDPOINT)
                    continue;

                imarg.ima_seg = seg;
                imarg.ima_bus = scope->scp_bus;
                imarg.ima_devfunc =
                    IMMU_PCI_DEVFUNC(scope->scp_dev,
                    scope->scp_func);
                imarg.ima_ddip = NULL;
                imarg.ima_rdip = rdip;
                level = 0;
                if (immu_walk_ancestor(rdip, NULL, match_bdf,
                    &imarg, &level, IMMU_FLAGS_DONTPASS)
                    != DDI_SUCCESS) {
                    /* skip - nothing else we can do */
                    continue;
                }

                /* Should have walked only 1 level i.e. rdip */
                ASSERT(level == 1);

                if (imarg.ima_ddip) {
                    ASSERT(imarg.ima_ddip == rdip);
                    goto found;
                }
            }
        }
    }

    /*
     * walk the drhd list looking for subtree match
     * i.e. is the device a descendant of a devscope BDF.
     * We want the lowest subtree.
     */
    tdrhd = NULL;
    tlevel = 0;
    for (seg = 0; seg < IMMU_MAXSEG; seg++) {
        drhd = list_head(&(tbl->tbl_drhd_list)[seg]);
        for (; drhd; drhd = list_next(&(tbl->tbl_drhd_list)[seg],
            drhd)) {

            /* looking for subtree match */
            if (drhd->dr_include_all == B_TRUE)
                continue;

            /*
             * try to match the device scope
             */
            scope = list_head(&(drhd->dr_scope_list));
            for (; scope;
                scope = list_next(&(drhd->dr_scope_list), scope)) {
                immu_arg_t imarg = {0};

                /* PCI subtree only */
                if (scope->scp_type != DMAR_SUBTREE)
                    continue;

                imarg.ima_seg = seg;
                imarg.ima_bus = scope->scp_bus;
                imarg.ima_devfunc =
                    IMMU_PCI_DEVFUNC(scope->scp_dev,
                    scope->scp_func);

                imarg.ima_ddip = NULL;
                imarg.ima_rdip = rdip;
                level = 0;
                if (immu_walk_ancestor(rdip, NULL, match_bdf,
                    &imarg, &level, 0) != DDI_SUCCESS) {
                    /* skip - nothing else we can do */
                    continue;
                }

                /* should have walked at least 1 level */
                ASSERT(level > 0);

                /* look for lowest ancestor matching drhd */
                if (imarg.ima_ddip && (tdrhd == NULL ||
                    level < tlevel)) {
                    tdrhd = drhd;
                    tlevel = level;
                }
            }
        }
    }

    if ((drhd = tdrhd) != NULL) {
        goto found;
    }

    for (seg = 0; seg < IMMU_MAXSEG; seg++) {
        drhd = list_head(&(tbl->tbl_drhd_list[seg]));
        for (; drhd; drhd = list_next(&(tbl->tbl_drhd_list)[seg],
            drhd)) {
            /* Look for include all */
            if (drhd->dr_include_all == B_TRUE) {
                break;
            }
        }
    }

    /*FALLTHRU*/

found:
    mutex_exit(&(tbl->tbl_lock));

    /*
     * No drhd (dmar unit) found for this device in the ACPI DMAR tables.
     * This may happen with buggy versions of BIOSes. Just warn instead
     * of panic as we don't want the whole system to go down because of one
     * device.
     */
    if (drhd == NULL) {
        ddi_err(DER_WARN, rdip, "can't find Intel IOMMU unit for "
            "device in ACPI DMAR table.");
        return (NULL);
    }

    return (drhd->dr_immu);
}

dev_info_t *
immu_dmar_unit_dip(void *dmar_unit)
{
    drhd_t *drhd = (drhd_t *)dmar_unit;
    return (drhd->dr_dip);
}

void *
immu_dmar_walk_units(int seg, void *dmar_unit)
{
    list_t *drhd_list;
    drhd_t *drhd = (drhd_t *)dmar_unit;

    drhd_list = &(dmar_table->tbl_drhd_list[seg]);

    if (drhd == NULL) {
        return ((void *)list_head(drhd_list));
    } else {
        return ((void *)list_next(drhd_list, drhd));
    }
}
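
/*
 * Illustration (editorial): immu_dmar_walk_units() is an iterator.  Passing
 * NULL starts at the head of the per-segment DRHD list and each subsequent
 * call returns the next unit, e.g.
 *
 *      void *unit;
 *      for (unit = immu_dmar_walk_units(seg, NULL); unit != NULL;
 *          unit = immu_dmar_walk_units(seg, unit)) {
 *              ... one DRHD (dmar unit) per iteration ...
 *      }
 */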

void
immu_dmar_set_immu(void *dmar_unit, immu_t *immu)
{
    drhd_t *drhd = (drhd_t *)dmar_unit;

    ASSERT(drhd);
    ASSERT(immu);

    drhd->dr_immu = immu;
}

boolean_t
immu_dmar_intrmap_supported(void)
{
    ASSERT(dmar_table);
    return (dmar_table->tbl_intrmap);
}

/* for a given ioapicid, find the source id and immu */
uint16_t
immu_dmar_ioapic_sid(int ioapic_ix)
{
    ioapic_drhd_t *idt;

    idt = ioapic_drhd_lookup(psm_get_ioapicid(ioapic_ix));
    if (idt == NULL) {
        ddi_err(DER_PANIC, NULL, "cannot determine source-id for "
            "IOAPIC (index = %d)", ioapic_ix);
        /*NOTREACHED*/
    }

    return (idt->ioapic_sid);
}

/* for a given ioapicid, find the source id and immu */
immu_t *
immu_dmar_ioapic_immu(int ioapic_ix)
{
    ioapic_drhd_t *idt;

    idt = ioapic_drhd_lookup(psm_get_ioapicid(ioapic_ix));
    if (idt) {
        return (idt->ioapic_drhd ? idt->ioapic_drhd->dr_immu : NULL);
    }
    return (NULL);
}