/* release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/pci/dmar.c */
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#define PREFIX "DMAR: "

/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add the INCLUDE_ALL unit at the tail, so a scan of the list
	 * will find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

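/*
 * Illustrative walk-through (not from the original source): if units are
 * registered in the order A (INCLUDE_ALL), B, C, the list ends up as
 * C, B, A, so a walker tries the specific units first and falls back to
 * the INCLUDE_ALL unit only when nothing else matches.
 */
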
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR
		 * table; just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
				PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) || (scope->entry_type ==
	     ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		/* Drop the reference only after the name has been printed. */
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			printk(KERN_WARNING PREFIX
			       "Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

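/*
 * Worked example of the count arithmetic above (sizes per the ACPICA
 * definitions, stated here as an assumption): struct
 * acpi_dmar_device_scope is a 6-byte header and each struct
 * acpi_dmar_pci_path entry is 2 bytes (device, function), so a scope
 * entry with length 10 carries (10 - 6) / 2 = 2 path elements: the
 * device on the start bus plus one hop behind a bridge.
 */
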
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				&dmaru->devices_cnt, &dmaru->devices,
				drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt, &atsru->devices,
				atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !pci_is_pcie(bridge) ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}

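/*
 * Illustrative sketch of a caller-side check (an assumption, not code
 * from this file): ATS should only be enabled for a device when it
 * advertises the ATS extended capability and an ATSR unit covers it.
 * pci_find_ext_capability() and PCI_EXT_CAP_ID_ATS are standard PCI
 * core facilities in this tree.
 */
static inline int example_dev_may_use_ats(struct pci_dev *pdev)
{
	return pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
	       dmar_find_matched_atsr_unit(pdev);
}
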
#endif /* CONFIG_DMAR */

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		/* drhd is stale after the loop; report the RHSA's own address */
		(unsigned long long)rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif /* CONFIG_ACPI_NUMA */

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* If we can find the DMAR table, then there are DMAR devices. */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping could have been made
	 * with the fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy that SINIT saved in SinitMleData in the TXT heap (which is
	 * DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

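/*
 * Sketch of the layout walked by parse_dmar_table(), per the reads above:
 *
 *   struct acpi_table_dmar     fixed header (width, flags, ...)
 *   struct acpi_dmar_header    subtable: type + length
 *     (type-specific payload: DRHD, RMRR, ATSR or RHSA)
 *   struct acpi_dmar_header    next subtable
 *     ...
 *
 * Each iteration advances by entry_header->length, which is why a
 * zero-length entry has to be rejected first: otherwise the walk would
 * spin forever on a malformed BIOS table.
 */
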
static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

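/*
 * Illustrative usage sketch (an assumption, not part of the original
 * file): mapping a struct pci_dev to the IOMMU that covers it. Valid
 * once dmar_table_init() has parsed the DRHD entries, since
 * alloc_iommu() sets drhd->iommu at that point.
 */
static inline struct intel_iommu *example_iommu_for_dev(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd = dmar_find_matched_drhd_unit(pdev);

	return drhd ? drhd->iommu : NULL;
}
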
int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "Failed to parse DMAR table\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk(KERN_ERR "IOMMU: can't validate: %llx\n",
				       drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
#ifdef CONFIG_DMAR
	dmar_disabled = 1;
#endif
	return 0;
}

void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * For now we will disable DMA-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}
#endif
#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
		goto err_unmap;
	}

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	/* The registers might span more than one page. */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	iounmap(iommu->reg);
error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

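/*
 * Worked example (illustrative): with free_tail = 4 and statuses
 * { [4] = QI_DONE, [5] = QI_DONE, [6] = QI_IN_USE }, the loop above
 * frees slots 4 and 5, leaves free_tail at 6, and bumps free_cnt by
 * exactly the number of completed (or aborted) descriptors.
 */
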
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
				sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
				sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;	/* wait descriptors occupy the odd slots */
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * Update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

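/*
 * Worked example for the mask handling above (illustrative, assuming
 * VTD_PAGE_SHIFT == 12): mask = 2 requests a 4-page (16KB) range, so
 * the BUG_ON demands 16KB alignment, and addr |= (1 << 13) - 1 writes
 * the trailing-ones pattern the hardware decodes as the size of the
 * device-IOTLB invalidation.
 */
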
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give the HW a chance to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * Queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	/* Zero the status array so every slot starts out as QI_FREE. */
	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

1173 enum faulttype {
1174 DMA_REMAP,
1175 INTR_REMAP,
1176 UNKNOWN,
1179 static const char *dma_remap_fault_reasons[] =
1181 "Software",
1182 "Present bit in root entry is clear",
1183 "Present bit in context entry is clear",
1184 "Invalid context entry",
1185 "Access beyond MGAW",
1186 "PTE Write access is not set",
1187 "PTE Read access is not set",
1188 "Next page table ptr is invalid",
1189 "Root table address invalid",
1190 "Context table ptr is invalid",
1191 "non-zero reserved fields in RTP",
1192 "non-zero reserved fields in CTP",
1193 "non-zero reserved fields in PTE",
1196 static const char *intr_remap_fault_reasons[] =
1198 "Detected reserved fields in the decoded interrupt-remapped request",
1199 "Interrupt index exceeded the interrupt-remapping table size",
1200 "Present field in the IRTE entry is clear",
1201 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1202 "Detected reserved fields in the IRTE entry",
1203 "Blocked a compatibility format interrupt request",
1204 "Blocked an interrupt request due to source-id verification failure",
1207 #define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	/* A strict upper bound keeps the lookup inside the table. */
	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

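/*
 * Example decode using the tables above: fault_reason 0x25 maps to
 * intr_remap_fault_reasons[5] ("Blocked a compatibility format
 * interrupt request") with *fault_type = INTR_REMAP, while reason 2
 * stays in the DMA-remapping table ("Present bit in context entry is
 * clear").
 */
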
void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

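/*
 * Fault record layout assumed by the reads in dmar_fault() above (one
 * 16-byte record per fault, starting at cap_fault_reg_offset()):
 *
 *   offset  0: 64-bit faulting page address (dma_frcd_page_addr)
 *   offset  8: 32 bits holding the source id (dma_frcd_source_id)
 *   offset 12: F bit, request type and fault reason
 *
 * Writing DMA_FRCD_F back to offset 12 clears that record.
 */
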
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}