/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */
#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"
/* No locks are needed as DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
        /*
         * add INCLUDE_ALL at the tail, so a scan of the list will find it at
         */
        if (drhd->include_all)
                list_add_tail(&drhd->list, &dmar_drhd_units);
        else
                list_add(&drhd->list, &dmar_drhd_units);
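        /*
         * As a result, units with an explicit device scope stay at the front
         * of dmar_drhd_units and an INCLUDE_ALL unit ends up at the tail, so
         * a linear walk matches specific units before the catch-all one.
         */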
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
                                           struct pci_dev **dev, u16 segment)
        struct pci_dev *pdev = NULL;
        struct acpi_dmar_pci_path *path;

        bus = pci_find_bus(segment, scope->bus);
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);
        /*
         * Some BIOSes list non-existent devices in the DMAR table, just
         */
        pr_warn("Device scope bus [%d] not found\n", scope->bus);
        pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
        /* warning will be printed below */
        bus = pdev->subordinate;
        pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
                segment, scope->bus, path->dev, path->fn);
        if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
             pdev->subordinate) || (scope->entry_type ==
             ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
                pr_warn("Device scope type does not match for %s\n",
int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
                                struct pci_dev ***devices, u16 segment)
        struct acpi_dmar_device_scope *scope;

        while (start < end) {
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
                         scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
                        pr_warn("Unsupported device scope\n");
                start += scope->length;

        *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);

        while (start < end) {
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
                        ret = dmar_parse_one_dev_scope(scope,
                                                       &(*devices)[index], segment);
                start += scope->length;
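        /*
         * Layout assumed by the two passes above: each acpi_dmar_device_scope
         * header is immediately followed by its acpi_dmar_pci_path entries
         * ((scope + 1) in dmar_parse_one_dev_scope()), and the number of path
         * entries is derived from scope->length.  The first pass only counts
         * endpoint/bridge entries; the second fills the devices[] array.
         */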
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
dmar_parse_one_drhd(struct acpi_dmar_header *header)
        struct acpi_dmar_hardware_unit *drhd;
        struct dmar_drhd_unit *dmaru;

        drhd = (struct acpi_dmar_hardware_unit *)header;
        dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);

        dmaru->reg_base_addr = drhd->address;
        dmaru->segment = drhd->segment;
        dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

        ret = alloc_iommu(dmaru);
        dmar_register_drhd_unit(dmaru);
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
        struct acpi_dmar_hardware_unit *drhd;

        drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

        if (dmaru->include_all)

        ret = dmar_parse_dev_scope((void *)(drhd + 1),
                                   ((void *)drhd) + drhd->header.length,
                                   &dmaru->devices_cnt, &dmaru->devices,
        list_del(&dmaru->list);
#ifdef CONFIG_ACPI_NUMA
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
        struct acpi_dmar_rhsa *rhsa;
        struct dmar_drhd_unit *drhd;

        rhsa = (struct acpi_dmar_rhsa *)header;
        for_each_drhd_unit(drhd) {
                if (drhd->reg_base_addr == rhsa->base_address) {
                        int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

                        if (!node_online(node))
                        drhd->iommu->node = node;
                  1, TAINT_FIRMWARE_WORKAROUND,
                  "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
                  "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                  dmi_get_system_info(DMI_BIOS_VENDOR),
                  dmi_get_system_info(DMI_BIOS_VERSION),
                  dmi_get_system_info(DMI_PRODUCT_VERSION));
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_reserved_memory *rmrr;
        struct acpi_dmar_atsr *atsr;
        struct acpi_dmar_rhsa *rhsa;

        switch (header->type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                drhd = container_of(header, struct acpi_dmar_hardware_unit,
                pr_info("DRHD base: %#016Lx flags: %#x\n",
                        (unsigned long long)drhd->address, drhd->flags);
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                rmrr = container_of(header, struct acpi_dmar_reserved_memory,
                pr_info("RMRR base: %#016Lx end: %#016Lx\n",
                        (unsigned long long)rmrr->base_address,
                        (unsigned long long)rmrr->end_address);
        case ACPI_DMAR_TYPE_ATSR:
                atsr = container_of(header, struct acpi_dmar_atsr, header);
                pr_info("ATSR flags: %#x\n", atsr->flags);
        case ACPI_DMAR_HARDWARE_AFFINITY:
                rhsa = container_of(header, struct acpi_dmar_rhsa, header);
                pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
                        (unsigned long long)rhsa->base_address,
                        rhsa->proximity_domain);
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
        acpi_status status = AE_OK;

        /* if we can find the DMAR table, then there are DMAR devices */
        status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
                                          (struct acpi_table_header **)&dmar_tbl,

        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                pr_warn("Unable to map DMAR\n");
                status = AE_NOT_FOUND;

        return (ACPI_SUCCESS(status) ? 1 : 0);
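/*
 * Note on the return convention above: dmar_table_detect() reports 1 when a
 * DMAR table was found and mapped, 0 otherwise; detect_intel_iommu() relies
 * on this below.
 */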
/**
 * parse_dmar_table - parses the DMA reporting table
 */
parse_dmar_table(void)
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;

        /*
         * Do it again; the earlier dmar_tbl mapping could have been mapped with
         */
        /*
         * ACPI tables may not be DMA protected by tboot, so use DMAR copy
         * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
         */
        dmar_tbl = tboot_get_dmar_table(dmar_tbl);

        dmar = (struct acpi_table_dmar *)dmar_tbl;

        if (dmar->width < PAGE_SHIFT - 1) {
                pr_warn("Invalid DMAR haw\n");

        pr_info("Host address width %d\n", dmar->width + 1);

        entry_header = (struct acpi_dmar_header *)(dmar + 1);
        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        pr_warn("Invalid 0-length structure\n");
                dmar_table_print_dmar_entry(entry_header);

                switch (entry_header->type) {
                case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                        ret = dmar_parse_one_drhd(entry_header);
                case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                        ret = dmar_parse_one_rmrr(entry_header);
                case ACPI_DMAR_TYPE_ATSR:
                        ret = dmar_parse_one_atsr(entry_header);
                case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
                        ret = dmar_parse_one_rhsa(entry_header);
                        pr_warn("Unknown DMAR structure type %d\n",
                        ret = 0; /* for forward compatibility */

                entry_header = ((void *)entry_header + entry_header->length);

        pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
        for (index = 0; index < cnt; index++)
                if (dev == devices[index])

        /* Check our parent */
        dev = dev->bus->self;
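        /*
         * Walking up via dev->bus->self means a device also matches when one
         * of its parent bridges is listed in the scope, i.e. a bridge entry
         * covers every device behind it.
         */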
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
        struct dmar_drhd_unit *dmaru = NULL;
        struct acpi_dmar_hardware_unit *drhd;

        dev = pci_physfn(dev);

        list_for_each_entry(dmaru, &dmar_drhd_units, list) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,

                if (dmaru->include_all &&
                    drhd->segment == pci_domain_nr(dev->bus))

                if (dmar_pci_device_match(dmaru->devices,
                                          dmaru->devices_cnt, dev))
int __init dmar_dev_scope_init(void)
        static int dmar_dev_scope_initialized;
        struct dmar_drhd_unit *drhd, *drhd_n;

        if (dmar_dev_scope_initialized)
                return dmar_dev_scope_initialized;

        if (list_empty(&dmar_drhd_units))

        list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
                ret = dmar_parse_dev(drhd);

        ret = dmar_parse_rmrr_atsr_dev();

        dmar_dev_scope_initialized = 1;

        dmar_dev_scope_initialized = ret;
int __init dmar_table_init(void)
        static int dmar_table_initialized;

        if (dmar_table_initialized)
        dmar_table_initialized = 1;

        ret = parse_dmar_table();
                pr_info("parse DMAR table failure.\n");

        if (list_empty(&dmar_drhd_units)) {
                pr_info("No DMAR devices found\n");
static void warn_invalid_dmar(u64 addr, const char *message)
                  1, TAINT_FIRMWARE_WORKAROUND,
                  "Your BIOS is broken; DMAR reported at address %llx%s!\n"
                  "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                  dmi_get_system_info(DMI_BIOS_VENDOR),
                  dmi_get_system_info(DMI_BIOS_VERSION),
                  dmi_get_system_info(DMI_PRODUCT_VERSION));
int __init check_zero_address(void)
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        struct acpi_dmar_hardware_unit *drhd;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        entry_header = (struct acpi_dmar_header *)(dmar + 1);

        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        pr_warn("Invalid 0-length structure\n");

                if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
                        drhd = (void *)entry_header;
                        if (!drhd->address) {
                                warn_invalid_dmar(0, "");

                        addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
                                printk("IOMMU: can't validate: %llx\n", drhd->address);

                        cap = dmar_readq(addr + DMAR_CAP_REG);
                        ecap = dmar_readq(addr + DMAR_ECAP_REG);
                        early_iounmap(addr, VTD_PAGE_SIZE);
                        if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
                                warn_invalid_dmar(drhd->address,
                                                  " returns all ones");

                entry_header = ((void *)entry_header + entry_header->length);
int __init detect_intel_iommu(void)
        ret = dmar_table_detect();
                ret = check_zero_address();
        struct acpi_table_dmar *dmar;

        dmar = (struct acpi_table_dmar *) dmar_tbl;

        if (ret && irq_remapping_enabled && cpu_has_x2apic &&
                pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

        if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
                /* Make sure ACS will be enabled */
                x86_init.iommu.iommu_init = intel_iommu_init;

        early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);

        return ret ? 1 : -ENODEV;
static void unmap_iommu(struct intel_iommu *iommu)
        release_mem_region(iommu->reg_phys, iommu->reg_size);
/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start with a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
        iommu->reg_phys = phys_addr;
        iommu->reg_size = VTD_PAGE_SIZE;

        if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
                pr_err("IOMMU: can't reserve memory\n");

        iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
                pr_err("IOMMU: can't map the region\n");

        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

        if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
                warn_invalid_dmar(phys_addr, " returns all ones");

        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                         cap_max_fault_reg_offset(iommu->cap));
        map_size = VTD_PAGE_ALIGN(map_size);
        if (map_size > iommu->reg_size) {
                release_mem_region(iommu->reg_phys, iommu->reg_size);
                iommu->reg_size = map_size;
                if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
                        pr_err("IOMMU: can't reserve memory\n");
                iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
                        pr_err("IOMMU: can't map the region\n");

        release_mem_region(iommu->reg_phys, iommu->reg_size);
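/*
 * map_iommu() therefore maps in two steps: a single page is enough to read
 * the CAP/ECAP registers, and only if the IOTLB or fault-record registers
 * live beyond that page is the region released and remapped at the larger,
 * page-aligned size computed from those capability fields.
 */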
int alloc_iommu(struct dmar_drhd_unit *drhd)
        struct intel_iommu *iommu;
        static int iommu_allocated = 0;

        if (!drhd->reg_base_addr) {
                warn_invalid_dmar(0, "");

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);

        iommu->seq_id = iommu_allocated++;
        sprintf(iommu->name, "dmar%d", iommu->seq_id);

        err = map_iommu(iommu, drhd->reg_base_addr);
                pr_err("IOMMU: failed to map %s\n", iommu->name);

        agaw = iommu_calculate_agaw(iommu);
                pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
        msagaw = iommu_calculate_max_sagaw(iommu);
                pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
        iommu->msagaw = msagaw;

        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
                (unsigned long long)drhd->reg_base_addr,
                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                (unsigned long long)iommu->cap,
                (unsigned long long)iommu->ecap);

        /* Reflect status in gcmd */
        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (sts & DMA_GSTS_IRES)
                iommu->gcmd |= DMA_GCMD_IRE;
        if (sts & DMA_GSTS_TES)
                iommu->gcmd |= DMA_GCMD_TE;
        if (sts & DMA_GSTS_QIES)
                iommu->gcmd |= DMA_GCMD_QIE;

        raw_spin_lock_init(&iommu->register_lock);
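        /*
         * Seeding gcmd from the status register means later read-modify-write
         * updates of DMAR_GCMD_REG preserve whatever translation, interrupt
         * remapping or queued invalidation state was already enabled.
         */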
void free_iommu(struct intel_iommu *iommu)
        free_dmar_iommu(iommu);
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
        while (qi->desc_status[qi->free_tail] == QI_DONE ||
               qi->desc_status[qi->free_tail] == QI_ABORT) {
                qi->desc_status[qi->free_tail] = QI_FREE;
                qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
static int qi_check_fault(struct intel_iommu *iommu, int index)
        struct q_inval *qi = iommu->qi;
        int wait_index = (index + 1) % QI_LENGTH;

        if (qi->desc_status[wait_index] == QI_ABORT)

        fault = readl(iommu->reg + DMAR_FSTS_REG);

        /*
         * If IQE happens, the head points to the descriptor associated
         * with the error. No new descriptors are fetched until the IQE
         */
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                if ((head >> DMAR_IQ_SHIFT) == index) {
                        pr_err("VT-d detected invalid descriptor: "
                                "low=%llx, high=%llx\n",
                                (unsigned long long)qi->desc[index].low,
                                (unsigned long long)qi->desc[index].high);
                        memcpy(&qi->desc[index], &qi->desc[wait_index],
                                sizeof(struct qi_desc));
                        __iommu_flush_cache(iommu, &qi->desc[index],
                                sizeof(struct qi_desc));
                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);

        /*
         * If ITE happens, all pending wait_desc commands are aborted.
         * No new descriptors are fetched until the ITE is cleared.
         */
        if (fault & DMA_FSTS_ITE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
                tail = readl(iommu->reg + DMAR_IQT_REG);
                tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

                writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

                do {
                        if (qi->desc_status[head] == QI_IN_USE)
                                qi->desc_status[head] = QI_ABORT;
                        head = (head - 2 + QI_LENGTH) % QI_LENGTH;
                } while (head != tail);

        if (qi->desc_status[wait_index] == QI_ABORT)

        if (fault & DMA_FSTS_ICE)
                writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
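/*
 * Summary of the fault handling above: an Invalidation Queue Error (IQE) on
 * the descriptor we just queued is patched by overwriting it with the wait
 * descriptor and clearing the IQE bit; an Invalidation Time-out Error (ITE)
 * aborts every pending wait descriptor between head and tail; an Invalidation
 * Completion Error (ICE) is simply acknowledged.
 */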
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
        struct q_inval *qi = iommu->qi;
        struct qi_desc *hw, wait_desc;
        int wait_index, index;

        raw_spin_lock_irqsave(&qi->q_lock, flags);
        while (qi->free_cnt < 3) {
                raw_spin_unlock_irqrestore(&qi->q_lock, flags);
                raw_spin_lock_irqsave(&qi->q_lock, flags);

        index = qi->free_head;
        wait_index = (index + 1) % QI_LENGTH;

        qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

        wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

        hw[wait_index] = wait_desc;

        __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
        __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

        qi->free_head = (qi->free_head + 2) % QI_LENGTH;

        /*
         * update the HW tail register indicating the presence of
         */
        writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

        while (qi->desc_status[wait_index] != QI_DONE) {
                /*
                 * We will leave the interrupts disabled, to prevent interrupt
                 * context from queueing another cmd while a cmd is already
                 * submitted and waiting for completion on this cpu. This is to
                 * avoid a deadlock where the interrupt context can wait
                 * indefinitely for free slots in the queue.
                 */
                rc = qi_check_fault(iommu, index);

                raw_spin_unlock(&qi->q_lock);
                raw_spin_lock(&qi->q_lock);

        qi->desc_status[index] = QI_DONE;

        reclaim_free_desc(qi);
        raw_spin_unlock_irqrestore(&qi->q_lock, flags);
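/*
 * Typical caller pattern (sketch only; qi_global_iec() below is the simplest
 * in-tree user): build a struct qi_desc and pass it to qi_submit_sync(),
 * which pairs it with a wait descriptor and spins until hardware signals
 * completion, e.g.:
 *
 *      struct qi_desc desc;
 *
 *      desc.low = QI_IEC_TYPE;
 *      desc.high = 0;
 *      qi_submit_sync(&desc, iommu);
 */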
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
        desc.low = QI_IEC_TYPE;

        /* should never fail */
        qi_submit_sync(&desc, iommu);
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
        desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
                        | QI_CC_GRAN(type) | QI_CC_TYPE;

        qi_submit_sync(&desc, iommu);
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                    unsigned int size_order, u64 type)
        if (cap_write_drain(iommu->cap))

        if (cap_read_drain(iommu->cap))

        desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
                | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);

        qi_submit_sync(&desc, iommu);
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
                        u64 addr, unsigned mask)
        BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
        addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
        desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;

        desc.high = QI_DEV_IOTLB_ADDR(addr);

        if (qdep >= QI_DEV_IOTLB_MAX_INVS)

        desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |

        qi_submit_sync(&desc, iommu);
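/*
 * The invalidation size above is encoded in the address itself: for a
 * non-zero mask the address must be 2^mask-page aligned (the BUG_ON), its
 * low-order bits are then filled with a 0 followed by ones, and the S bit
 * (QI_DEV_IOTLB_SIZE) is set, which hardware decodes as "invalidate 2^mask
 * pages"; a zero mask leaves the address untouched and clears the S bit.
 */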
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
        cycles_t start_time = get_cycles();

        if (!ecap_qis(iommu->ecap))

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_QIES))

        /*
         * Give the HW a chance to complete the pending invalidation requests.
         */
        while ((readl(iommu->reg + DMAR_IQT_REG) !=
                readl(iommu->reg + DMAR_IQH_REG)) &&
                (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))

        iommu->gcmd &= ~DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
                      !(sts & DMA_GSTS_QIES), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
        unsigned long flags;
        struct q_inval *qi = iommu->qi;

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        /* write zero to the tail reg */
        writel(0, iommu->reg + DMAR_IQT_REG);

        dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

        iommu->gcmd |= DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
        struct page *desc_page;

        if (!ecap_qis(iommu->ecap))

        /*
         * queued invalidation is already set up and enabled.
         */
        iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);

        desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);

        qi->desc = page_address(desc_page);

        qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        raw_spin_lock_init(&qi->q_lock);

        __dmar_enable_qi(iommu);
/* iommu interrupt handling. Most stuff is MSI-like. */

static const char *dma_remap_fault_reasons[] =
        "Present bit in root entry is clear",
        "Present bit in context entry is clear",
        "Invalid context entry",
        "Access beyond MGAW",
        "PTE Write access is not set",
        "PTE Read access is not set",
        "Next page table ptr is invalid",
        "Root table address invalid",
        "Context table ptr is invalid",
        "non-zero reserved fields in RTP",
        "non-zero reserved fields in CTP",
        "non-zero reserved fields in PTE",
        "PCE for translation request specifies blocking",

static const char *irq_remap_fault_reasons[] =
        "Detected reserved fields in the decoded interrupt-remapped request",
        "Interrupt index exceeded the interrupt-remapping table size",
        "Present field in the IRTE entry is clear",
        "Error accessing interrupt-remapping table pointed by IRTA_REG",
        "Detected reserved fields in the IRTE entry",
        "Blocked a compatibility format interrupt request",
        "Blocked an interrupt request due to source-id verification failure",

#define MAX_FAULT_REASON_IDX    (ARRAY_SIZE(fault_reason_strings) - 1)
const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
        if (fault_reason >= 0x20 && (fault_reason - 0x20 <
                                     ARRAY_SIZE(irq_remap_fault_reasons))) {
                *fault_type = INTR_REMAP;
                return irq_remap_fault_reasons[fault_reason - 0x20];
        } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
                *fault_type = DMA_REMAP;
                return dma_remap_fault_reasons[fault_reason];
                *fault_type = UNKNOWN;
void dmar_msi_unmask(struct irq_data *data)
        struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(0, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + DMAR_FECTL_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
void dmar_msi_mask(struct irq_data *data)
        struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + DMAR_FECTL_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
void dmar_msi_write(int irq, struct msi_msg *msg)
        struct intel_iommu *iommu = irq_get_handler_data(irq);

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
        writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
        writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
void dmar_msi_read(int irq, struct msi_msg *msg)
        struct intel_iommu *iommu = irq_get_handler_data(irq);

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
        msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
        msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
                             u8 fault_reason, u16 source_id, unsigned long long addr)
        reason = dmar_get_fault_reason(fault_reason, &fault_type);

        if (fault_type == INTR_REMAP)
                pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
                       "fault index %llx\n"
                       "INTR-REMAP:[fault reason %02d] %s\n",
                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr >> 48,
                       fault_reason, reason);
        else
                pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
                       "fault addr %llx\n"
                       "DMAR:[fault reason %02d] %s\n",
                       (type ? "DMA Read" : "DMA Write"),
                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
#define PRIMARY_FAULT_REG_LEN (16)
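/*
 * Each primary fault record is PRIMARY_FAULT_REG_LEN (16) bytes: the faulting
 * address is read from offset 0, the source-id from the dword at offset 8,
 * and the fault reason/type together with the F (fault) bit from the dword at
 * offset 12, matching the register accesses in dmar_fault() below.
 */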
irqreturn_t dmar_fault(int irq, void *dev_id)
        struct intel_iommu *iommu = dev_id;
        int reg, fault_index;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
                pr_err("DRHD: handling fault status reg %x\n", fault_status);

        /* TBD: ignore advanced fault log currently */
        if (!(fault_status & DMA_FSTS_PPF))

        fault_index = dma_fsts_fault_record_index(fault_status);
        reg = cap_fault_reg_offset(iommu->cap);

                /* highest 32 bits */
                data = readl(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN + 12);
                if (!(data & DMA_FRCD_F))

                fault_reason = dma_frcd_fault_reason(data);
                type = dma_frcd_type(data);

                data = readl(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN + 8);
                source_id = dma_frcd_source_id(data);

                guest_addr = dmar_readq(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN);
                guest_addr = dma_frcd_page_addr(guest_addr);
                /* clear the fault */
                writel(DMA_FRCD_F, iommu->reg + reg +
                        fault_index * PRIMARY_FAULT_REG_LEN + 12);

                raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

                dmar_fault_do_one(iommu, type, fault_reason,
                                  source_id, guest_addr);

                if (fault_index >= cap_num_fault_regs(iommu->cap))
                raw_spin_lock_irqsave(&iommu->register_lock, flag);

        writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
int dmar_set_interrupt(struct intel_iommu *iommu)
        /*
         * Check if the fault interrupt is already initialized.
         */
                pr_err("IOMMU: no free vectors\n");

        irq_set_handler_data(irq, iommu);

        ret = arch_setup_dmar_msi(irq);
                irq_set_handler_data(irq, NULL);

        ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
                pr_err("IOMMU: can't request irq\n");
int __init enable_drhd_fault_handling(void)
        struct dmar_drhd_unit *drhd;

        /*
         * Enable fault control interrupt.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                ret = dmar_set_interrupt(iommu);
                        pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
                               (unsigned long long)drhd->reg_base_addr, ret);

                /*
                 * Clear any previous faults.
                 */
                dmar_fault(iommu->irq, iommu);
                fault_status = readl(iommu->reg + DMAR_FSTS_REG);
                writel(fault_status, iommu->reg + DMAR_FSTS_REG);
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
        if (!ecap_qis(iommu->ecap))

        /*
         * First disable queued invalidation.
         */
        dmar_disable_qi(iommu);
        /*
         * Then enable queued invalidation again. Since there are no pending
         * invalidation requests now, it's safe to re-enable queued
         */
        __dmar_enable_qi(iommu);
/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
        struct acpi_table_dmar *dmar;
        dmar = (struct acpi_table_dmar *)dmar_tbl;
        return dmar->flags & 0x1;
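/* Bit 0 of the DMAR table flags advertises interrupt remapping support. */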
IOMMU_INIT_POST(detect_intel_iommu);