intel-iommu: Apply BIOS sanity checks for interrupt remapping too.
drivers/pci/dmar.c

/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>

#define PREFIX "DMAR: "

/* No locks are needed as DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scanning the list will find it at
	 * the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}
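
/*
 * Illustrative note: dmar_find_matched_drhd_unit() below walks this list
 * front to back, so keeping any INCLUDE_ALL unit at the tail guarantees a
 * device is matched against explicitly-scoped units before falling back to
 * the catch-all unit.
 */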

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
			       PREFIX "Device scope bus [%d] not found\n",
			       scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
			       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
			       segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
		       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
		       segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
		       "Device scope type does not match for %s\n",
		       pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
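
/*
 * For reference, a hypothetical device-scope entry for an endpoint at
 * 0000:00:1f.2 sitting directly on bus 0 would decode as (field values are
 * illustrative, not taken from a real BIOS):
 *
 *	struct acpi_dmar_device_scope scope = {
 *		.entry_type = ACPI_DMAR_SCOPE_TYPE_ENDPOINT,
 *		.length     = sizeof(scope) + sizeof(struct acpi_dmar_pci_path),
 *		.bus        = 0,
 *	};
 *	struct acpi_dmar_pci_path path = { .dev = 0x1f, .fn = 2 };
 *
 * dmar_parse_one_dev_scope() starts at scope->bus and follows one (dev, fn)
 * path element per bridge hop, descending through each bridge's subordinate
 * bus until the final element names the device itself.
 */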

static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
			       "Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *)rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				  (void *)atsr + atsr->header.length,
				  &atsru->devices_cnt, &atsru->devices,
				  atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !bridge->is_pcie ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
#endif

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
	     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
	     (unsigned long long)rhsa->base_address,
	     dmi_get_system_info(DMI_BIOS_VENDOR),
	     dmi_get_system_info(DMI_BIOS_VERSION),
	     dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
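
/*
 * Note that the walk above climbs the bus hierarchy via dev->bus->self, so a
 * device also matches when any bridge above it appears in the scope array;
 * e.g. an endpoint behind a listed PCIe-to-PCI bridge is covered by that
 * bridge's entry.
 */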

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

	return 0;
}

static int bios_warned;

int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				/* Promote an attitude of violence to a BIOS engineer today */
				WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
				     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
				     dmi_get_system_info(DMI_BIOS_VENDOR),
				     dmi_get_system_info(DMI_BIOS_VERSION),
				     dmi_get_system_info(DMI_PRODUCT_VERSION));
				bios_warned = 1;
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk(KERN_ERR "IOMMU: can't validate: %llx\n",
				       drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				/* Promote an attitude of violence to a BIOS engineer today */
				WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
				     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
				     drhd->address,
				     dmi_get_system_info(DMI_BIOS_VENDOR),
				     dmi_get_system_info(DMI_BIOS_VERSION),
				     dmi_get_system_info(DMI_PRODUCT_VERSION));
				bios_warned = 1;
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
#ifdef CONFIG_DMAR
	dmar_disabled = 1;
#endif
	return 0;
}

void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * for now we will disable dma-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !dmar_disabled)
			iommu_detected = 1;
#endif
#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	if (!drhd->reg_base_addr) {
		if (!bios_warned) {
			WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
			     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			     dmi_get_system_info(DMI_BIOS_VENDOR),
			     dmi_get_system_info(DMI_BIOS_VERSION),
			     dmi_get_system_info(DMI_PRODUCT_VERSION));
			bios_warned = 1;
		}
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		if (!bios_warned) {
			/* Promote an attitude of violence to a BIOS engineer today */
			WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
			     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			     drhd->reg_base_addr,
			     dmi_get_system_info(DMI_BIOS_VENDOR),
			     dmi_get_system_info(DMI_BIOS_VERSION),
			     dmi_get_system_info(DMI_PRODUCT_VERSION));
			bios_warned = 1;
		}
		goto err_unmap;
	}

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	iounmap(iommu->reg);
error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
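
/*
 * The queue is a fixed ring of QI_LENGTH descriptors: free_head is where the
 * next descriptor is queued, free_tail is the oldest slot not yet reclaimed,
 * and free_cnt tracks the slots in between. Reclaim only advances past
 * completed (QI_DONE) or aborted (QI_ABORT) entries, so an in-flight
 * descriptor pins every later submission's slots until it finishes.
 */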

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
					sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
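
		/*
		 * Descriptors are queued in pairs (work descriptor at an even
		 * index, its wait descriptor at the following odd index), so
		 * "head |= 1" rounds the fetch position up to the wait slot
		 * of its pair, and the loop below steps backwards two slots
		 * at a time, aborting only the wait descriptors between head
		 * and tail.
		 */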
		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
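
	/*
	 * Each submission occupies two consecutive ring slots: the caller's
	 * descriptor at 'index' and a wait descriptor at 'wait_index' whose
	 * status write (into qi->desc_status[wait_index]) is what the polling
	 * loop below observes to detect completion.
	 */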
	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}
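
/*
 * Hedged usage sketch: to invalidate the IOTLB entries for a 16KB (four
 * 4KB-page) region of domain 'did', a caller might issue a page-selective
 * flush with size_order = 2 (i.e. 2^2 pages) and a suitably aligned address:
 *
 *	qi_flush_iotlb(iommu, did, addr, 2, DMA_TLB_PSI_FLUSH);
 *
 * The address-mask field QI_IOTLB_AM encodes 2^size_order pages.
 */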

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;
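
	/*
	 * For a non-zero mask the invalidation size is encoded in the address
	 * itself: the target must be aligned to 2^(VTD_PAGE_SHIFT + mask)
	 * bytes, and setting the low (mask - 1) page-address bits to 1 tells
	 * the device how many pages the flush covers.
	 */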
	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
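
/*
 * Note: the interrupt-remapping setup path is expected to call
 * dmar_enable_qi() on each unit before programming its remapping table,
 * since interrupt-entry-cache flushes (see qi_global_iec() above) can only
 * be issued through this queued invalidation interface.
 */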

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);
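
	/*
	 * source_id is the PCI requester ID of the faulting device: bus
	 * number in the high byte, devfn in the low byte, hence the
	 * decoding below.
	 */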
	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}