/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>

#define PREFIX "DMAR: "
/*
 * No locks are needed: the DMA remapping hardware unit list is
 * constructed at boot time, and hotplug of these units is not
 * supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
        /*
         * Add INCLUDE_ALL at the tail, so a scan of the list will find
         * it at the very end.
         */
        if (drhd->include_all)
                list_add_tail(&drhd->list, &dmar_drhd_units);
        else
                list_add(&drhd->list, &dmar_drhd_units);
}
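/*
 * Illustrative sketch (not part of the original driver): because the
 * INCLUDE_ALL unit is kept at the tail, a first-match walk over
 * dmar_drhd_units naturally prefers a specific unit and only falls
 * back to the catch-all one. The helper below is hypothetical.
 */
#if 0
static struct dmar_drhd_unit *example_first_match(u16 segment)
{
        struct dmar_drhd_unit *dmaru;

        list_for_each_entry(dmaru, &dmar_drhd_units, list) {
                /* specific units are visited first; INCLUDE_ALL is last */
                if (dmaru->segment == segment || dmaru->include_all)
                        return dmaru;
        }
        return NULL;
}
#endif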
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
                                           struct pci_dev **dev, u16 segment)
{
        struct pci_bus *bus;
        struct pci_dev *pdev = NULL;
        struct acpi_dmar_pci_path *path;
        int count;

        bus = pci_find_bus(segment, scope->bus);
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (count) {
                if (pdev)
                        pci_dev_put(pdev);
                /*
                 * Some BIOSes list non-existent devices in the DMAR
                 * table; just ignore them.
                 */
                if (!bus) {
                        printk(KERN_WARNING
                        PREFIX "Device scope bus [%d] not found\n",
                        scope->bus);
                        break;
                }
                pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
                if (!pdev) {
                        printk(KERN_WARNING PREFIX
                        "Device scope device [%04x:%02x:%02x.%02x] not found\n",
                                segment, bus->number, path->dev, path->fn);
                        break;
                }
                path++;
                count--;
                bus = pdev->subordinate;
        }
        if (!pdev) {
                printk(KERN_WARNING PREFIX
                "Device scope device [%04x:%02x:%02x.%02x] not found\n",
                segment, scope->bus, path->dev, path->fn);
                *dev = NULL;
                return 0;
        }
        if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
                        pdev->subordinate) || (scope->entry_type ==
                        ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
                printk(KERN_WARNING PREFIX
                        "Device scope type does not match for %s\n",
                        pci_name(pdev));
                pci_dev_put(pdev);
                return -EINVAL;
        }
        *dev = pdev;
        return 0;
}
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
                                       struct pci_dev ***devices, u16 segment)
{
        struct acpi_dmar_device_scope *scope;
        void *tmp = start;
        int index;
        int ret;

        *cnt = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        (*cnt)++;
                else
                        printk(KERN_WARNING PREFIX
                               "Unsupported device scope\n");
                start += scope->length;
        }
        if (*cnt == 0)
                return 0;

        *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
        if (!*devices)
                return -ENOMEM;

        start = tmp;
        index = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
                        ret = dmar_parse_one_dev_scope(scope,
                                &(*devices)[index], segment);
                        if (ret) {
                                kfree(*devices);
                                return ret;
                        }
                        index++;
                }
                start += scope->length;
        }

        return 0;
}
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct dmar_drhd_unit *dmaru;
        int ret = 0;

        drhd = (struct acpi_dmar_hardware_unit *)header;
        dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
        if (!dmaru)
                return -ENOMEM;

        dmaru->hdr = header;
        dmaru->reg_base_addr = drhd->address;
        dmaru->segment = drhd->segment;
        dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

        ret = alloc_iommu(dmaru);
        if (ret) {
                kfree(dmaru);
                return ret;
        }
        dmar_register_drhd_unit(dmaru);
        return 0;
}
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
        struct acpi_dmar_hardware_unit *drhd;
        int ret = 0;

        drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

        if (dmaru->include_all)
                return 0;

        ret = dmar_parse_dev_scope((void *)(drhd + 1),
                                   ((void *)drhd) + drhd->header.length,
                                   &dmaru->devices_cnt, &dmaru->devices,
                                   drhd->segment);
        if (ret) {
                list_del(&dmaru->list);
                kfree(dmaru);
        }
        return ret;
}
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
        list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
        struct acpi_dmar_reserved_memory *rmrr;
        struct dmar_rmrr_unit *rmrru;

        rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
        if (!rmrru)
                return -ENOMEM;

        rmrru->hdr = header;
        rmrr = (struct acpi_dmar_reserved_memory *)header;
        rmrru->base_address = rmrr->base_address;
        rmrru->end_address = rmrr->end_address;

        dmar_register_rmrr_unit(rmrru);
        return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
        struct acpi_dmar_reserved_memory *rmrr;
        int ret;

        rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
        ret = dmar_parse_dev_scope((void *)(rmrr + 1),
                                   ((void *)rmrr) + rmrr->header.length,
                                   &rmrru->devices_cnt, &rmrru->devices,
                                   rmrr->segment);

        if (ret || (rmrru->devices_cnt == 0)) {
                list_del(&rmrru->list);
                kfree(rmrru);
        }
        return ret;
}
static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
        struct acpi_dmar_atsr *atsr;
        struct dmar_atsr_unit *atsru;

        atsr = container_of(hdr, struct acpi_dmar_atsr, header);
        atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
        if (!atsru)
                return -ENOMEM;

        atsru->hdr = hdr;
        atsru->include_all = atsr->flags & 0x1;

        list_add(&atsru->list, &dmar_atsr_units);

        return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
        int rc;
        struct acpi_dmar_atsr *atsr;

        if (atsru->include_all)
                return 0;

        atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
        rc = dmar_parse_dev_scope((void *)(atsr + 1),
                                  (void *)atsr + atsr->header.length,
                                  &atsru->devices_cnt, &atsru->devices,
                                  atsr->segment);
        if (rc || !atsru->devices_cnt) {
                list_del(&atsru->list);
                kfree(atsru);
        }

        return rc;
}
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
        int i;
        struct pci_bus *bus;
        struct acpi_dmar_atsr *atsr;
        struct dmar_atsr_unit *atsru;

        list_for_each_entry(atsru, &dmar_atsr_units, list) {
                atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
                if (atsr->segment == pci_domain_nr(dev->bus))
                        goto found;
        }

        return 0;

found:
        for (bus = dev->bus; bus; bus = bus->parent) {
                struct pci_dev *bridge = bus->self;

                if (!bridge || !pci_is_pcie(bridge) ||
                    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
                        return 0;

                if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
                        for (i = 0; i < atsru->devices_cnt; i++)
                                if (atsru->devices[i] == bridge)
                                        return 1;
                        break;
                }
        }

        if (atsru->include_all)
                return 1;

        return 0;
}
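/*
 * Illustrative sketch (hypothetical helper and topology): the upstream
 * walk above follows bus->parent and inspects bus->self, e.g. from an
 * endpoint at 0000:20:00.0 up to root port 0000:00:1c.0; bus->parent
 * of the root bus is NULL, which terminates the walk.
 */
#if 0
static bool example_is_behind_root_port(struct pci_dev *dev,
                                        struct pci_dev *root_port)
{
        struct pci_bus *bus;

        for (bus = dev->bus; bus; bus = bus->parent)
                if (bus->self == root_port)
                        return true;
        return false;
}
#endif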
#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
        struct acpi_dmar_rhsa *rhsa;
        struct dmar_drhd_unit *drhd;

        rhsa = (struct acpi_dmar_rhsa *)header;
        for_each_drhd_unit(drhd) {
                if (drhd->reg_base_addr == rhsa->base_address) {
                        int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

                        if (!node_online(node))
                                node = -1;
                        drhd->iommu->node = node;
                        return 0;
                }
        }
        WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
             "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
             rhsa->base_address,
             dmi_get_system_info(DMI_BIOS_VENDOR),
             dmi_get_system_info(DMI_BIOS_VERSION),
             dmi_get_system_info(DMI_PRODUCT_VERSION));

        return 0;
}
#endif
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_reserved_memory *rmrr;
        struct acpi_dmar_atsr *atsr;
        struct acpi_dmar_rhsa *rhsa;

        switch (header->type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                drhd = container_of(header, struct acpi_dmar_hardware_unit,
                                    header);
                printk(KERN_INFO PREFIX
                       "DRHD base: %#016Lx flags: %#x\n",
                       (unsigned long long)drhd->address, drhd->flags);
                break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                rmrr = container_of(header, struct acpi_dmar_reserved_memory,
                                    header);
                printk(KERN_INFO PREFIX
                       "RMRR base: %#016Lx end: %#016Lx\n",
                       (unsigned long long)rmrr->base_address,
                       (unsigned long long)rmrr->end_address);
                break;
        case ACPI_DMAR_TYPE_ATSR:
                atsr = container_of(header, struct acpi_dmar_atsr, header);
                printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
                break;
        case ACPI_DMAR_HARDWARE_AFFINITY:
                rhsa = container_of(header, struct acpi_dmar_rhsa, header);
                printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
                       (unsigned long long)rhsa->base_address,
                       rhsa->proximity_domain);
                break;
        }
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
        acpi_status status = AE_OK;

        /* if we could find the DMAR table, then there are DMAR devices */
        status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
                                (struct acpi_table_header **)&dmar_tbl,
                                &dmar_tbl_size);

        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
                status = AE_NOT_FOUND;
        }

        return (ACPI_SUCCESS(status) ? 1 : 0);
}
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        int ret = 0;

        /*
         * Do it again; the earlier dmar_tbl mapping could have been
         * done with a fixed map.
         */
        dmar_table_detect();

        /*
         * ACPI tables may not be DMA protected by tboot, so use the DMAR
         * copy SINIT saved in SinitMleData in the TXT heap (which is DMA
         * protected).
         */
        dmar_tbl = tboot_get_dmar_table(dmar_tbl);

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return -ENODEV;

        if (dmar->width < PAGE_SHIFT - 1) {
                printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
                return -EINVAL;
        }

        printk(KERN_INFO PREFIX "Host address width %d\n",
               dmar->width + 1);

        entry_header = (struct acpi_dmar_header *)(dmar + 1);
        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        printk(KERN_WARNING PREFIX
                                "Invalid 0-length structure\n");
                        ret = -EINVAL;
                        break;
                }

                dmar_table_print_dmar_entry(entry_header);

                switch (entry_header->type) {
                case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                        ret = dmar_parse_one_drhd(entry_header);
                        break;
                case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                        ret = dmar_parse_one_rmrr(entry_header);
                        break;
                case ACPI_DMAR_TYPE_ATSR:
                        ret = dmar_parse_one_atsr(entry_header);
                        break;
                case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
                        ret = dmar_parse_one_rhsa(entry_header);
#endif
                        break;
                default:
                        printk(KERN_WARNING PREFIX
                                "Unknown DMAR structure type %d\n",
                                entry_header->type);
                        ret = 0; /* for forward compatibility */
                        break;
                }
                if (ret)
                        break;

                entry_header = ((void *)entry_header + entry_header->length);
        }
        return ret;
}
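/*
 * Illustrative sketch (hypothetical helper) of the subtable walk used
 * above: every DMAR remapping structure begins with an acpi_dmar_header
 * whose length field advances the cursor, and a zero length must abort
 * the walk to avoid spinning forever on a bad table.
 */
#if 0
static void __init example_walk_dmar(struct acpi_table_dmar *dmar)
{
        struct acpi_dmar_header *h = (struct acpi_dmar_header *)(dmar + 1);
        void *end = (void *)dmar + dmar->header.length;

        while ((void *)h < end && h->length)
                h = (void *)h + h->length;
}
#endif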
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
                          struct pci_dev *dev)
{
        int index;

        while (dev) {
                for (index = 0; index < cnt; index++)
                        if (dev == devices[index])
                                return 1;

                /* Check our parent */
                dev = dev->bus->self;
        }

        return 0;
}
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
        struct dmar_drhd_unit *dmaru = NULL;
        struct acpi_dmar_hardware_unit *drhd;

        list_for_each_entry(dmaru, &dmar_drhd_units, list) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                if (dmaru->include_all &&
                    drhd->segment == pci_domain_nr(dev->bus))
                        return dmaru;

                if (dmar_pci_device_match(dmaru->devices,
                                          dmaru->devices_cnt, dev))
                        return dmaru;
        }

        return NULL;
}
int __init dmar_dev_scope_init(void)
{
        struct dmar_drhd_unit *drhd, *drhd_n;
        struct dmar_rmrr_unit *rmrr, *rmrr_n;
        struct dmar_atsr_unit *atsr, *atsr_n;
        int ret = -ENODEV;

        list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
                ret = dmar_parse_dev(drhd);
                if (ret)
                        return ret;
        }

        list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
                ret = rmrr_parse_dev(rmrr);
                if (ret)
                        return ret;
        }

        list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
                ret = atsr_parse_dev(atsr);
                if (ret)
                        return ret;
        }

        return ret;
}
int __init dmar_table_init(void)
{
        static int dmar_table_initialized;
        int ret;

        if (dmar_table_initialized)
                return 0;

        dmar_table_initialized = 1;

        ret = parse_dmar_table();
        if (ret) {
                if (ret != -ENODEV)
                        printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
                return ret;
        }

        if (list_empty(&dmar_drhd_units)) {
                printk(KERN_INFO PREFIX "No DMAR devices found\n");
                return -ENODEV;
        }

        if (list_empty(&dmar_rmrr_units))
                printk(KERN_INFO PREFIX "No RMRR found\n");

        if (list_empty(&dmar_atsr_units))
                printk(KERN_INFO PREFIX "No ATSR found\n");

        return 0;
}
static int bios_warned;

int __init check_zero_address(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        struct acpi_dmar_hardware_unit *drhd;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        entry_header = (struct acpi_dmar_header *)(dmar + 1);

        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        printk(KERN_WARNING PREFIX
                                "Invalid 0-length structure\n");
                        return 0;
                }

                if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
                        void __iomem *addr;
                        u64 cap, ecap;

                        drhd = (void *)entry_header;
                        if (!drhd->address) {
                                /* Promote an attitude of violence to a BIOS engineer today */
                                WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
                                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                                     dmi_get_system_info(DMI_BIOS_VENDOR),
                                     dmi_get_system_info(DMI_BIOS_VERSION),
                                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                                bios_warned = 1;
                                goto failed;
                        }

                        addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
                        if (!addr) {
                                printk(KERN_ERR "IOMMU: can't validate: %llx\n", drhd->address);
                                goto failed;
                        }
                        cap = dmar_readq(addr + DMAR_CAP_REG);
                        ecap = dmar_readq(addr + DMAR_ECAP_REG);
                        early_iounmap(addr, VTD_PAGE_SIZE);
                        if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
                                /* Promote an attitude of violence to a BIOS engineer today */
                                WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
                                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                                     drhd->address,
                                     dmi_get_system_info(DMI_BIOS_VENDOR),
                                     dmi_get_system_info(DMI_BIOS_VERSION),
                                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                                bios_warned = 1;
                                goto failed;
                        }
                }

                entry_header = ((void *)entry_header + entry_header->length);
        }
        return 1;

failed:
        dmar_disabled = 1;
        return 0;
}
void __init detect_intel_iommu(void)
{
        int ret;

        ret = dmar_table_detect();
        if (ret)
                ret = check_zero_address();
        {
#ifdef CONFIG_INTR_REMAP
                struct acpi_table_dmar *dmar;
                /*
                 * For now we will disable DMA-remapping when interrupt
                 * remapping is enabled.
                 * When support for queued invalidation for IOTLB invalidation
                 * is added, we will not need this any more.
                 */
                dmar = (struct acpi_table_dmar *) dmar_tbl;
                if (ret && cpu_has_x2apic && dmar->flags & 0x1)
                        printk(KERN_INFO
                               "Queued invalidation will be enabled to support "
                               "x2apic and Intr-remapping.\n");
#endif
                if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
                        iommu_detected = 1;
                        /* Make sure ACS will be enabled */
                        pci_request_acs();
                }

                if (ret)
                        x86_init.iommu.iommu_init = intel_iommu_init;
        }
        early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
        dmar_tbl = NULL;
}
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
        struct intel_iommu *iommu;
        int map_size;
        u32 ver;
        static int iommu_allocated = 0;
        int agaw = 0;
        int msagaw = 0;

        if (!drhd->reg_base_addr) {
                if (!bios_warned) {
                        WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
                             "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                             dmi_get_system_info(DMI_BIOS_VENDOR),
                             dmi_get_system_info(DMI_BIOS_VERSION),
                             dmi_get_system_info(DMI_PRODUCT_VERSION));
                        bios_warned = 1;
                }
                return -EINVAL;
        }

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->seq_id = iommu_allocated++;
        sprintf(iommu->name, "dmar%d", iommu->seq_id);

        iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
        if (!iommu->reg) {
                printk(KERN_ERR "IOMMU: can't map the region\n");
                goto error;
        }
        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

        if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
                if (!bios_warned) {
                        /* Promote an attitude of violence to a BIOS engineer today */
                        WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
                             "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                             drhd->reg_base_addr,
                             dmi_get_system_info(DMI_BIOS_VENDOR),
                             dmi_get_system_info(DMI_BIOS_VERSION),
                             dmi_get_system_info(DMI_PRODUCT_VERSION));
                        bios_warned = 1;
                }
                goto err_unmap;
        }

        agaw = iommu_calculate_agaw(iommu);
        if (agaw < 0) {
                printk(KERN_ERR
                       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        msagaw = iommu_calculate_max_sagaw(iommu);
        if (msagaw < 0) {
                printk(KERN_ERR
                       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        iommu->agaw = agaw;
        iommu->msagaw = msagaw;

        iommu->node = -1;

        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                         cap_max_fault_reg_offset(iommu->cap));
        map_size = VTD_PAGE_ALIGN(map_size);
        if (map_size > VTD_PAGE_SIZE) {
                iounmap(iommu->reg);
                iommu->reg = ioremap(drhd->reg_base_addr, map_size);
                if (!iommu->reg) {
                        printk(KERN_ERR "IOMMU: can't map the region\n");
                        goto error;
                }
        }

        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
                (unsigned long long)drhd->reg_base_addr,
                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                (unsigned long long)iommu->cap,
                (unsigned long long)iommu->ecap);

        spin_lock_init(&iommu->register_lock);

        drhd->iommu = iommu;
        return 0;

err_unmap:
        iounmap(iommu->reg);
error:
        kfree(iommu);
        return -1;
}
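/*
 * Worked example (hypothetical register values): if the fault recording
 * registers or the IOTLB registers live beyond the first 4KiB of the
 * register space, the initial VTD_PAGE_SIZE mapping is too small and
 * alloc_iommu() remaps with the larger, page-aligned size.
 */
#if 0
static unsigned long example_map_size(struct intel_iommu *iommu)
{
        unsigned long size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                                   cap_max_fault_reg_offset(iommu->cap));

        return VTD_PAGE_ALIGN(size);    /* e.g. 0x1200 rounds up to 0x2000 */
}
#endif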
void free_iommu(struct intel_iommu *iommu)
{
        if (!iommu)
                return;

        free_dmar_iommu(iommu);

        if (iommu->reg)
                iounmap(iommu->reg);
        kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
        while (qi->desc_status[qi->free_tail] == QI_DONE ||
               qi->desc_status[qi->free_tail] == QI_ABORT) {
                qi->desc_status[qi->free_tail] = QI_FREE;
                qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
                qi->free_cnt++;
        }
}
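/*
 * Illustrative note: the invalidation queue is a fixed ring of
 * QI_LENGTH slots, so indices advance with modulo arithmetic. A
 * hypothetical helper for the in-use slot count:
 */
#if 0
static int example_used_slots(struct q_inval *qi)
{
        /* distance from free_tail to free_head, wrapping at QI_LENGTH */
        return (qi->free_head - qi->free_tail + QI_LENGTH) % QI_LENGTH;
}
#endif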
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
        u32 fault;
        int head, tail;
        struct q_inval *qi = iommu->qi;
        int wait_index = (index + 1) % QI_LENGTH;

        if (qi->desc_status[wait_index] == QI_ABORT)
                return -EAGAIN;

        fault = readl(iommu->reg + DMAR_FSTS_REG);

        /*
         * If IQE happens, the head points to the descriptor associated
         * with the error. No new descriptors are fetched until the IQE
         * is cleared.
         */
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                if ((head >> DMAR_IQ_SHIFT) == index) {
                        printk(KERN_ERR "VT-d detected invalid descriptor: "
                                "low=%llx, high=%llx\n",
                                (unsigned long long)qi->desc[index].low,
                                (unsigned long long)qi->desc[index].high);
                        memcpy(&qi->desc[index], &qi->desc[wait_index],
                                sizeof(struct qi_desc));
                        __iommu_flush_cache(iommu, &qi->desc[index],
                                sizeof(struct qi_desc));
                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
                        return -EINVAL;
                }
        }

        /*
         * If ITE happens, all pending wait_desc commands are aborted.
         * No new descriptors are fetched until the ITE is cleared.
         */
        if (fault & DMA_FSTS_ITE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
                head |= 1;
                tail = readl(iommu->reg + DMAR_IQT_REG);
                tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

                writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

                do {
                        if (qi->desc_status[head] == QI_IN_USE)
                                qi->desc_status[head] = QI_ABORT;
                        head = (head - 2 + QI_LENGTH) % QI_LENGTH;
                } while (head != tail);

                if (qi->desc_status[wait_index] == QI_ABORT)
                        return -EAGAIN;
        }

        if (fault & DMA_FSTS_ICE)
                writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

        return 0;
}
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
        int rc;
        struct q_inval *qi = iommu->qi;
        struct qi_desc *hw, wait_desc;
        int wait_index, index;
        unsigned long flags;

        if (!qi)
                return 0;

        hw = qi->desc;

restart:
        rc = 0;

        spin_lock_irqsave(&qi->q_lock, flags);
        while (qi->free_cnt < 3) {
                spin_unlock_irqrestore(&qi->q_lock, flags);
                cpu_relax();
                spin_lock_irqsave(&qi->q_lock, flags);
        }

        index = qi->free_head;
        wait_index = (index + 1) % QI_LENGTH;

        qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

        hw[index] = *desc;

        wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

        hw[wait_index] = wait_desc;

        __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
        __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

        qi->free_head = (qi->free_head + 2) % QI_LENGTH;
        qi->free_cnt -= 2;

        /*
         * Update the HW tail register indicating the presence of
         * new descriptors.
         */
        writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

        while (qi->desc_status[wait_index] != QI_DONE) {
                /*
                 * We will leave the interrupts disabled, to prevent the
                 * interrupt context from queueing another cmd while a cmd
                 * is already submitted and waiting for completion on this
                 * cpu. This is to avoid a deadlock where the interrupt
                 * context can wait indefinitely for free slots in the queue.
                 */
                rc = qi_check_fault(iommu, index);
                if (rc)
                        break;

                spin_unlock(&qi->q_lock);
                cpu_relax();
                spin_lock(&qi->q_lock);
        }

        qi->desc_status[index] = QI_DONE;

        reclaim_free_desc(qi);
        spin_unlock_irqrestore(&qi->q_lock, flags);

        if (rc == -EAGAIN)
                goto restart;

        return rc;
}
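/*
 * Usage sketch (hypothetical caller; mirrors qi_global_iec() below):
 * callers build a single qi_desc and submit it. qi_submit_sync()
 * itself appends the wait descriptor, which is why each submission
 * consumes two ring slots and free_head advances by 2.
 */
#if 0
static void example_submit(struct intel_iommu *iommu)
{
        struct qi_desc desc;

        desc.low = QI_IEC_TYPE;         /* invalidate interrupt entry cache */
        desc.high = 0;
        qi_submit_sync(&desc, iommu);
}
#endif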
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
        struct qi_desc desc;

        desc.low = QI_IEC_TYPE;
        desc.high = 0;

        /* should never fail */
        qi_submit_sync(&desc, iommu);
}
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                      u64 type)
{
        struct qi_desc desc;

        desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
                        | QI_CC_GRAN(type) | QI_CC_TYPE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                    unsigned int size_order, u64 type)
{
        u8 dw = 0, dr = 0;

        struct qi_desc desc;
        int ih = 0;

        if (cap_write_drain(iommu->cap))
                dw = 1;

        if (cap_read_drain(iommu->cap))
                dr = 1;

        desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
                | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);

        qi_submit_sync(&desc, iommu);
}
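/*
 * Usage sketch (hypothetical values): size_order is log2 of the number
 * of 4KiB pages covered, so flushing 16 pages at a given address uses
 * size_order = 4, which becomes the AM field via QI_IOTLB_AM(4).
 */
#if 0
static void example_flush_16_pages(struct intel_iommu *iommu, u16 did)
{
        /* 16 pages = 2^4, hence size_order 4 */
        qi_flush_iotlb(iommu, did, 0x100000, 4, DMA_TLB_PSI_FLUSH);
}
#endif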
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
                        u64 addr, unsigned mask)
{
        struct qi_desc desc;

        if (mask) {
                BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
                addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
                desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
        } else
                desc.high = QI_DEV_IOTLB_ADDR(addr);

        if (qdep >= QI_DEV_IOTLB_MAX_INVS)
                qdep = 0;

        desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
                   QI_DEV_IOTLB_TYPE;

        qi_submit_sync(&desc, iommu);
}
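/*
 * Worked example (hypothetical values) of the address encoding above:
 * for mask = 2 (a 16KiB, four-page span) the base address must be
 * 16KiB aligned, and OR-ing in (1 << (VTD_PAGE_SHIFT + 1)) - 1 sets
 * bits 0-12, leaving bit 13 as the lowest clear bit -- which is how
 * the device-IOTLB invalidate descriptor encodes the span size.
 */
#if 0
static u64 example_ats_addr(void)
{
        u64 addr = 0x20000000;                  /* 16KiB aligned */
        unsigned mask = 2;                      /* 2^2 pages */

        addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
        return addr;                            /* 0x20001fff */
}
#endif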
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;
        cycles_t start_time = get_cycles();

        if (!ecap_qis(iommu->ecap))
                return;

        spin_lock_irqsave(&iommu->register_lock, flags);

        sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_QIES))
                goto end;

        /*
         * Give a chance to HW to complete the pending invalidation requests.
         */
        while ((readl(iommu->reg + DMAR_IQT_REG) !=
                readl(iommu->reg + DMAR_IQH_REG)) &&
                (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
                cpu_relax();

        iommu->gcmd &= ~DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
                      !(sts & DMA_GSTS_QIES), sts);
end:
        spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;
        struct q_inval *qi = iommu->qi;

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* write zero to the tail reg */
        writel(0, iommu->reg + DMAR_IQT_REG);

        dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

        iommu->gcmd |= DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
        struct q_inval *qi;
        struct page *desc_page;

        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        /*
         * Queued invalidation is already set up and enabled.
         */
        if (iommu->qi)
                return 0;

        iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
        if (!iommu->qi)
                return -ENOMEM;

        qi = iommu->qi;

        desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (!desc_page) {
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->desc = page_address(desc_page);

        qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        spin_lock_init(&qi->q_lock);

        __dmar_enable_qi(iommu);

        return 0;
}
/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
        DMA_REMAP,
        INTR_REMAP,
        UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
        "Software",
        "Present bit in root entry is clear",
        "Present bit in context entry is clear",
        "Invalid context entry",
        "Access beyond MGAW",
        "PTE Write access is not set",
        "PTE Read access is not set",
        "Next page table ptr is invalid",
        "Root table address invalid",
        "Context table ptr is invalid",
        "non-zero reserved fields in RTP",
        "non-zero reserved fields in CTP",
        "non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
        "Detected reserved fields in the decoded interrupt-remapped request",
        "Interrupt index exceeded the interrupt-remapping table size",
        "Present field in the IRTE entry is clear",
        "Error accessing interrupt-remapping table pointed by IRTA_REG",
        "Detected reserved fields in the IRTE entry",
        "Blocked a compatibility format interrupt request",
        "Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
        if (fault_reason >= 0x20 && (fault_reason < 0x20 +
                                     ARRAY_SIZE(intr_remap_fault_reasons))) {
                *fault_type = INTR_REMAP;
                return intr_remap_fault_reasons[fault_reason - 0x20];
        } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
                *fault_type = DMA_REMAP;
                return dma_remap_fault_reasons[fault_reason];
        } else {
                *fault_type = UNKNOWN;
                return "Unknown";
        }
}
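/*
 * Usage sketch (hypothetical caller): translate a raw fault reason code
 * from a fault recording register into a string and its fault class.
 */
#if 0
static void example_report(u8 reason_code)
{
        int fault_type;
        const char *reason = dmar_get_fault_reason(reason_code, &fault_type);

        printk(KERN_DEBUG PREFIX "%s fault: %s\n",
               fault_type == INTR_REMAP ? "interrupt" : "DMA", reason);
}
#endif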
void dmar_msi_unmask(unsigned int irq)
{
        struct intel_iommu *iommu = get_irq_data(irq);
        unsigned long flag;

        /* unmask it */
        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(0, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + DMAR_FECTL_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
        unsigned long flag;
        struct intel_iommu *iommu = get_irq_data(irq);

        /* mask it */
        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + DMAR_FECTL_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = get_irq_data(irq);
        unsigned long flag;

        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
        writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
        writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = get_irq_data(irq);
        unsigned long flag;

        spin_lock_irqsave(&iommu->register_lock, flag);
        msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
        msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
        msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
                u8 fault_reason, u16 source_id, unsigned long long addr)
{
        const char *reason;
        int fault_type;

        reason = dmar_get_fault_reason(fault_reason, &fault_type);

        if (fault_type == INTR_REMAP)
                printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
                       "fault index %llx\n"
                       "INTR-REMAP:[fault reason %02d] %s\n",
                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr >> 48,
                       fault_reason, reason);
        else
                printk(KERN_ERR
                       "DMAR:[%s] Request device [%02x:%02x.%d] "
                       "fault addr %llx\n"
                       "DMAR:[fault reason %02d] %s\n",
                       (type ? "DMA Read" : "DMA Write"),
                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
        return 0;
}
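/*
 * Worked example (hypothetical value): a source_id of 0x1a08 decodes as
 * bus 0x1a, PCI_SLOT(0x08) = device 1, PCI_FUNC(0x08) = function 0,
 * i.e. requester 1a:01.0.
 */
#if 0
static void example_decode_source_id(u16 source_id)
{
        printk(KERN_DEBUG PREFIX "requester %02x:%02x.%d\n",
               source_id >> 8, PCI_SLOT(source_id & 0xFF),
               PCI_FUNC(source_id & 0xFF));
}
#endif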
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
        struct intel_iommu *iommu = dev_id;
        int reg, fault_index;
        u32 fault_status;
        unsigned long flag;

        spin_lock_irqsave(&iommu->register_lock, flag);
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        if (fault_status)
                printk(KERN_ERR "DRHD: handling fault status reg %x\n",
                       fault_status);

        /* TBD: ignore advanced fault log currently */
        if (!(fault_status & DMA_FSTS_PPF))
                goto clear_rest;

        fault_index = dma_fsts_fault_record_index(fault_status);
        reg = cap_fault_reg_offset(iommu->cap);
        while (1) {
                u8 fault_reason;
                u16 source_id;
                u64 guest_addr;
                int type;
                u32 data;

                /* highest 32 bits */
                data = readl(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN + 12);
                if (!(data & DMA_FRCD_F))
                        break;

                fault_reason = dma_frcd_fault_reason(data);
                type = dma_frcd_type(data);

                data = readl(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN + 8);
                source_id = dma_frcd_source_id(data);

                guest_addr = dmar_readq(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN);
                guest_addr = dma_frcd_page_addr(guest_addr);
                /* clear the fault */
                writel(DMA_FRCD_F, iommu->reg + reg +
                        fault_index * PRIMARY_FAULT_REG_LEN + 12);

                spin_unlock_irqrestore(&iommu->register_lock, flag);

                dmar_fault_do_one(iommu, type, fault_reason,
                                  source_id, guest_addr);

                fault_index++;
                if (fault_index >= cap_num_fault_regs(iommu->cap))
                        fault_index = 0;
                spin_lock_irqsave(&iommu->register_lock, flag);
        }
clear_rest:
        /* clear all the other faults */
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        writel(fault_status, iommu->reg + DMAR_FSTS_REG);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
        return IRQ_HANDLED;
}
int dmar_set_interrupt(struct intel_iommu *iommu)
{
        int irq, ret;

        /*
         * Check if the fault interrupt is already initialized.
         */
        if (iommu->irq)
                return 0;

        irq = create_irq();
        if (!irq) {
                printk(KERN_ERR "IOMMU: no free vectors\n");
                return -EINVAL;
        }

        set_irq_data(irq, iommu);
        iommu->irq = irq;

        ret = arch_setup_dmar_msi(irq);
        if (ret) {
                set_irq_data(irq, NULL);
                iommu->irq = 0;
                destroy_irq(irq);
                return ret;
        }

        ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
        if (ret)
                printk(KERN_ERR "IOMMU: can't request irq\n");
        return ret;
}
int __init enable_drhd_fault_handling(void)
{
        struct dmar_drhd_unit *drhd;

        /*
         * Enable fault control interrupt.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;

                ret = dmar_set_interrupt(iommu);
                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable fault "
                               "interrupt, ret %d\n",
                               (unsigned long long)drhd->reg_base_addr, ret);
                        return -1;
                }

                /*
                 * Clear any previous faults.
                 */
                dmar_fault(iommu->irq, iommu);
        }

        return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        if (!iommu->qi)
                return -ENOENT;

        /*
         * First disable queued invalidation.
         */
        dmar_disable_qi(iommu);
        /*
         * Then enable queued invalidation again. Since there are no
         * pending invalidation requests now, it's safe to re-enable
         * queued invalidation.
         */
        __dmar_enable_qi(iommu);

        return 0;
}
/*
 * Check interrupt remapping support in the DMAR table description.
 */
int dmar_ir_support(void)
{
        struct acpi_table_dmar *dmar;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return 0;
        return dmar->flags & 0x1;
}