/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>

#define PREFIX "DMAR:"

/*
 * No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so a scan of the list will find it
	 * at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}
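
/*
 * A DMAR device scope entry names a device by its start bus number plus a
 * chain of (device, function) path elements that is walked hop by hop from
 * that bus down to the target endpoint or bridge. The helper below resolves
 * such an entry to a struct pci_dev, warning rather than failing hard when
 * the BIOS describes a device that does not exist.
 */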

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
			       PREFIX "Device scope bus [%d] not found\n",
			       scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		pci_dev_put(pdev);
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	if (!drhd->address) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -ENODEV;
	}

	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices,
				   rmrr->segment);
	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				  (void *)atsr + atsr->header.length,
				  &atsru->devices_cnt, &atsru->devices,
				  atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}
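
/*
 * dmar_find_matched_atsr_unit - check whether ATS may be used for @dev
 *
 * Returns 1 if the root port leading to @dev is listed in an ATSR device
 * scope on the device's segment (or the matching ATSR has INCLUDE_ALL set),
 * 0 otherwise. The walk goes from the device's bus upwards until a root
 * port or a legacy PCI bridge is reached.
 */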

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !bridge->is_pcie ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* If we can find the DMAR table, then there are DMAR devices. */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
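
/*
 * The ACPI DMAR table is a fixed header (struct acpi_table_dmar) followed by
 * a sequence of variable-length remapping structures, each beginning with a
 * struct acpi_dmar_header carrying its type and length. parse_dmar_table()
 * walks that sequence and dispatches DRHD, RMRR and ATSR entries to their
 * respective parsers.
 */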

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again, the earlier dmar_tbl mapping could have been made
	 * with a fixed map.
	 */
	dmar_table_detect();

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type\n");
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");

#ifdef CONFIG_INTR_REMAP
	parse_ioapics_under_ir();
#endif
	return 0;
}

void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();

	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * For now we will disable DMA remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
		    !dmar_disabled)
			iommu_detected = 1;
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}
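
/*
 * alloc_iommu - set up a struct intel_iommu for one DRHD
 *
 * Maps the unit's register range, caches the capability and extended
 * capability registers, and remaps with a larger size if the IOTLB or fault
 * recording registers live beyond the first page.
 */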

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     drhd->reg_base_addr,
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		goto err_unmap;
	}

	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	iounmap(iommu->reg);
error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}
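
/*
 * Queued invalidation bookkeeping: qi->desc is a page-sized ring of
 * invalidation descriptors, qi->desc_status tracks per-slot state
 * (QI_FREE/QI_IN_USE/QI_DONE/QI_ABORT), and free_head/free_tail/free_cnt
 * describe the free region of the ring.
 */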

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
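
/*
 * Each submission occupies two consecutive ring slots: the caller's
 * descriptor followed by an invalidation-wait descriptor whose status write
 * flips desc_status[wait_index] to QI_DONE once the hardware has finished;
 * that is what qi_submit_sync() below spins on.
 */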

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave interrupts disabled to prevent the interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this CPU. This
		 * avoids a deadlock where the interrupt context could wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
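
/*
 * The helpers below build individual invalidation descriptors (interrupt
 * entry cache, context cache, IOTLB and device-IOTLB) and submit them
 * synchronously through qi_submit_sync().
 */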

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DEV_IOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
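
/*
 * Note: disabling the queue first gives the hardware a bounded window
 * (DMAR_OPERATION_TIMEOUT) to drain outstanding requests before
 * DMA_GCMD_QIE is cleared and the QIES status bit is polled until it drops.
 */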

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give the HW a chance to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * Queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of this is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)
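
/*
 * Hardware fault reason codes below 0x20 index dma_remap_fault_reasons[];
 * codes from 0x20 upwards are interrupt-remapping faults and index
 * intr_remap_fault_reasons[]. Anything else is reported as UNKNOWN.
 */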

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason <= 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
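
/*
 * Primary fault handling: each fault recording register is
 * PRIMARY_FAULT_REG_LEN (16) bytes wide. The handler below walks the
 * registers starting at the index reported in the fault status register,
 * decodes and logs each pending fault, clears it, and finally write-clears
 * the remaining status bits.
 */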

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index > cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
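
/*
 * dmar_set_interrupt - wire up fault reporting for @iommu
 *
 * Allocates an irq, binds it to the iommu via the irq data pointer, lets the
 * architecture program the DMAR MSI registers, and installs dmar_fault() as
 * the handler.
 */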

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;

		ret = dmar_set_interrupt(iommu);
		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}