drivers/iommu/dmar.c

/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/*
 * No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scan the list will find it at
	 * the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

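/*
 * Resolve a single ACPI device-scope entry to a pci_dev.  The scope
 * carries a start bus number plus a path of (device, function) hops;
 * each hop is looked up with pci_get_slot() and followed through the
 * bridge's subordinate bus until the final device is reached.
 */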
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			pr_warn("Device scope bus [%d] not found\n", scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			/* warning will be printed below */
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		pr_warn("Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

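/*
 * Parse a whole device-scope list in two passes: first count the
 * ENDPOINT/BRIDGE entries, then allocate the pci_dev array and resolve
 * each entry via dmar_parse_one_dev_scope().  IOAPIC and HPET entries
 * are silently skipped here; anything else is reported as unsupported.
 */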
int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

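/*
 * Resolve the device scope of one DRHD.  INCLUDE_ALL units cover every
 * device on their segment and carry no explicit scope; for the rest, a
 * failed scope parse drops the unit from dmar_drhd_units and frees it.
 */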
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;
	int drhd_count = 0;

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			drhd_count++;
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	if (drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
	return ret;
}

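/*
 * Return 1 if @dev, or any bridge upstream of it, appears in the
 * device array of a DRHD unit.
 */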
static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

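/*
 * One-shot pass that turns the device scopes recorded while parsing
 * the DMAR table into pci_dev lists for the DRHD, RMRR and ATSR units.
 * The result (0 or a negative errno) is cached in a static flag so
 * repeated callers get the same answer.
 */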
int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			pr_info("parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		pr_info("No DMAR devices found\n");
		return -ENODEV;
	}

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

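/*
 * Sanity-check every DRHD in the table before trusting it: a register
 * base of zero, or capability registers that read back as all ones,
 * indicates broken firmware and disables VT-d detection.
 */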
int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk("IOMMU: can't validate: %llx\n", drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}

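/*
 * Early detection entry point, run from the IOMMU init table (see
 * IOMMU_INIT_POST at the end of this file).  Returns 1 when a sane
 * DMAR table is present, -ENODEV otherwise.  The early table mapping
 * is dropped here and re-established later by parse_dmar_table().
 */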
int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
		struct acpi_table_dmar *dmar;

		dmar = (struct acpi_table_dmar *) dmar_tbl;

		if (ret && irq_remapping_enabled && cpu_has_x2apic &&
		    dmar->flags & 0x1)
			pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}

#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);

release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);

out:
	return err;
}

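/*
 * Allocate and initialize an intel_iommu for one DRHD: map its
 * registers, compute the (maximum) supported AGAW, and snapshot the
 * already-enabled hardware features (translation, interrupt remapping,
 * queued invalidation) into iommu->gcmd so later updates preserve them.
 */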
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
			iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
			iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	unmap_iommu(iommu);
error:
	kfree(iommu);
	return err;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		unmap_iommu(iommu);

	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

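/*
 * Inspect the fault status register while a submission is pending.
 * An Invalidation Queue Error on our own descriptor is fatal for this
 * submission (-EINVAL; the bad slot is overwritten with the wait
 * descriptor); an Invalidation Time-out Error aborts all pending wait
 * descriptors and asks the caller to retry (-EAGAIN); an Invalidation
 * Completion Error is simply cleared.
 */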
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
					sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave the interrupts disabled to prevent the interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

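/*
 * Illustrative use only (not a call site added here): a caller builds a
 * qi_desc and submits it synchronously, e.g. roughly
 *
 *	struct qi_desc desc;
 *
 *	desc.low  = QI_IEC_TYPE;	(invalidate interrupt entry cache)
 *	desc.high = 0;
 *	qi_submit_sync(&desc, iommu);
 *
 * which is exactly what qi_global_iec() below does.  The wrappers that
 * follow build the context-cache, IOTLB and device-IOTLB variants.
 */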
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

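/*
 * Queue a device-IOTLB (ATS) invalidation.  A non-zero @mask requests
 * a 2^mask-page range: the address must be aligned to that range
 * (BUG_ON otherwise) and the size is encoded in the low address bits.
 * Queue depths of QI_DEV_IOTLB_MAX_INVS or more are encoded as 0.
 */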
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)

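/*
 * Map a hardware fault reason code to a human-readable string and a
 * fault type: codes 0x20 and up index irq_remap_fault_reasons[],
 * smaller codes index dma_remap_fault_reasons[], anything else is
 * reported as UNKNOWN.
 */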
const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

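/*
 * Log a single decoded fault record.  For interrupt-remapping faults
 * the reported address field carries the interrupt index in its upper
 * bits; for DMA faults it is the faulting page address.
 */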
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

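/*
 * Allocate an interrupt vector for fault reporting on @iommu and wire
 * it up to dmar_fault().  Safe to call more than once; an already
 * initialized iommu->irq is left untouched.
 */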
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		u32 fault_status;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}

IOMMU_INIT_POST(detect_intel_iommu);