/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */
#define pr_fmt(fmt) "DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
	void			*arg[ACPI_DMAR_TYPE_RESERVED];
	bool			ignore_unhandled;
	bool			print_entry;
};
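
/*
 * Illustrative sketch (not part of the driver): a dmar_res_callback is a
 * per-entry-type dispatch table. A caller installs one handler per ACPI
 * DMAR structure type it cares about and hands the table to
 * dmar_walk_remapping_entries(); parse_dmar_table() below is the real
 * user. The example_* names are hypothetical and the code is kept
 * compiled out.
 */
#if 0
static int example_count_drhd(struct acpi_dmar_header *header, void *arg)
{
	(*(int *)arg)++;	/* called once per DRHD entry in the table */
	return 0;
}

static int example_walk(struct acpi_dmar_header *start, size_t len)
{
	int count = 0;
	struct dmar_res_callback cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = example_count_drhd,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &count,
		.ignore_unhandled = true,
	};

	return dmar_walk_remapping_entries(start, len, &cb) ? : count;
}
#endif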
/*
 * 1) The hotplug framework guarantees that the DMAR unit will be hot-added
 *    before the IO devices managed by that unit.
 * 2) The hotplug framework guarantees that the DMAR unit will be hot-removed
 *    after the IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);
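
/*
 * Illustrative sketch (not part of the driver) of the two access patterns
 * implied by the locking rules above: configuration paths mutate
 * dmar_drhd_units under dmar_global_lock, while readers that may run in
 * interrupt context walk it under RCU. The example_* name is hypothetical
 * and the code is kept compiled out.
 */
#if 0
static int example_count_drhd_units(void)
{
	struct dmar_drhd_unit *dmaru;
	int count = 0;

	rcu_read_lock();	/* safe in interrupt context */
	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
		count++;
	rcu_read_unlock();

	return count;
}
#endif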
struct acpi_table_header * __initdata dmar_tbl;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

extern const struct iommu_ops intel_iommu_ops;
static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scanning the list will find it
	 * at the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}
void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}
void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}
/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];
static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info for %s.\n",
				pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].bus = tmp->bus->number;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}
static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}
static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		goto fallback;
	if (info->level != count)
		goto fallback;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			goto fallback;
	}

	return true;

fallback:

	if (count != 1)
		return false;

	i = info->level - 1;
	if (bus              == info->path[i].bus &&
	    path[0].device   == info->path[i].device &&
	    path[0].function == info->path[i].function) {
		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
			bus, path[0].device, path[0].function);
		return true;
	}

	return false;
}
/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		/*
		 * We expect devices with endpoint scope to have normal PCI
		 * headers, and devices with bridge scope to have bridge PCI
		 * headers.  However PCI NTB devices may be listed in the
		 * DMAR table with bridge scope, even though they have a
		 * normal PCI header.  NTB devices are identified by class
		 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
		 * for this special case.
		 */
		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
		    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
		     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
		      info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}
int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			RCU_INIT_POINTER(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}
static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				dmaru->segment,
				dmaru->devices, dmaru->devices_cnt);
		if (ret)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	return ret;
}
static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
			dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}
static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/* Only care about add/remove events for physical functions.
	 * For VFs we actually do the lookup based on the corresponding
	 * PF in device_to_iommu() anyway. */
	if (pdev->is_virtfn)
		return NOTIFY_DONE;
	if (action != BUS_NOTIFY_ADD_DEVICE &&
	    action != BUS_NOTIFY_REMOVED_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
	.priority = INT_MIN,
};
static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
	struct dmar_drhd_unit *dmaru;

	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
		if (dmaru->segment == drhd->segment &&
		    dmaru->reg_base_addr == drhd->address)
			return dmaru;

	return NULL;
}
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = dmar_find_dmaru(drhd);
	if (dmaru)
		goto out;

	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	/*
	 * If header is allocated from slab by ACPI _DSM method, we need to
	 * copy the content because the memory buffer will be freed on return.
	 */
	dmaru->hdr = (void *)(dmaru + 1);
	memcpy(dmaru->hdr, header, header->length);
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);

out:
	if (arg)
		(*(int *)arg)++;

	return 0;
}
static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}
static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
				      void *arg)
{
	struct acpi_dmar_andd *andd = (void *)header;

	/* Check for NUL termination within the designated length */
	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
			   "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			   dmi_get_system_info(DMI_BIOS_VENDOR),
			   dmi_get_system_info(DMI_BIOS_VERSION),
			   dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->device_name);

	return 0;
}
#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#else
#define	dmar_parse_one_rhsa		dmar_res_noop
#endif
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ROOT_ATS:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_NAMESPACE:
		/* We don't print this here because we need to sanity-check
		   it first. So print it in dmar_parse_one_andd() instead. */
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return ACPI_SUCCESS(status) ? 0 : -ENOENT;
}
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
{
	int ret = 0;
	struct acpi_dmar_header *iter, *next;
	struct acpi_dmar_header *end = ((void *)start) + len;

	for (iter = start; iter < end; iter = next) {
		next = (void *)iter + iter->length;
		if (iter->length == 0) {
			/* Avoid looping forever on bad ACPI tables */
			pr_debug(FW_BUG "Invalid 0-length structure\n");
			break;
		} else if (next > end) {
			/* Avoid passing table end */
			pr_warn(FW_BUG "Record passes table end\n");
			ret = -EINVAL;
			break;
		}

		if (cb->print_entry)
			dmar_table_print_dmar_entry(iter);

		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
			/* continue for forward compatibility */
			pr_debug("Unknown DMAR structure type %d\n",
				 iter->type);
		} else if (cb->cb[iter->type]) {
			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
			if (ret)
				break;
		} else if (!cb->ignore_unhandled) {
			pr_warn("No handler for DMAR structure type %d\n",
				iter->type);
			ret = -EINVAL;
			break;
		}
	}

	return ret;
}

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
				       struct dmar_res_callback *cb)
{
	return dmar_walk_remapping_entries((void *)(dmar + 1),
			dmar->header.length - sizeof(*dmar), cb);
}
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	int drhd_count = 0;
	int ret;
	struct dmar_res_callback cb = {
		.print_entry = true,
		.ignore_unhandled = true,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
	};

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);
	ret = dmar_walk_dmar_table(dmar, &cb);
	if (ret == 0 && drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

	return ret;
}
static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}
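
/*
 * Illustrative sketch (not part of the driver): mapping a PCI device to
 * the register base of the DRHD unit that covers it. The example_* name
 * is hypothetical and the code is kept compiled out;
 * dmar_find_matched_drhd_unit() does its own RCU locking and VF-to-PF
 * translation internally.
 */
#if 0
static u64 example_drhd_base_for(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *dmaru = dmar_find_matched_drhd_unit(pdev);

	return dmaru ? dmaru->reg_base_addr : 0;
}
#endif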
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}
static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			acpi_handle h;
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->device_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			if (acpi_bus_get_device(h, &adev)) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}
int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				return dmar_dev_scope_status;
			} else {
				dmar_pci_bus_add_dev(info);
				dmar_free_pci_notify_info(info);
			}
		}
	}

	return dmar_dev_scope_status;
}
void __init dmar_register_bus_notifier(void)
{
	bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
}
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Parse DMAR table failure.\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}
static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}
static int __ref
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	void __iomem *addr;
	u64 cap, ecap;

	drhd = (void *)entry;
	if (!drhd->address) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	if (arg)
		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
	else
		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
	if (!addr) {
		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
		return -EINVAL;
	}

	cap = dmar_readq(addr + DMAR_CAP_REG);
	ecap = dmar_readq(addr + DMAR_ECAP_REG);

	if (arg)
		iounmap(addr);
	else
		early_iounmap(addr, VTD_PAGE_SIZE);

	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->address, " returns all ones");
		return -EINVAL;
	}

	return 0;
}
int __init detect_intel_iommu(void)
{
	int ret;
	struct dmar_res_callback validate_drhd_cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
		.ignore_unhandled = true,
	};

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (!ret)
		ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
					   &validate_drhd_cb);
	if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (!ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	if (dmar_tbl) {
		acpi_put_table(dmar_tbl);
		dmar_tbl = NULL;
	}
	up_write(&dmar_global_lock);

	return ret ? ret : 1;
}
static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}
/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("Can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("Can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("Can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("Can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}
static int dmar_alloc_seq_id(struct intel_iommu *iommu)
{
	iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
					    DMAR_UNITS_SUPPORTED);
	if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
		iommu->seq_id = -1;
	} else {
		set_bit(iommu->seq_id, dmar_seq_ids);
		sprintf(iommu->name, "dmar%d", iommu->seq_id);
	}

	return iommu->seq_id;
}

static void dmar_free_seq_id(struct intel_iommu *iommu)
{
	if (iommu->seq_id >= 0) {
		clear_bit(iommu->seq_id, dmar_seq_ids);
		iommu->seq_id = -1;
	}
}
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	if (dmar_alloc_seq_id(iommu) < 0) {
		pr_err("Failed to allocate seq_id\n");
		err = -ENOSPC;
		goto error;
	}

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("Failed to map %s\n", iommu->name);
		goto error_free_seq_id;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->name,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	if (intel_iommu_enabled) {
		err = iommu_device_sysfs_add(&iommu->iommu, NULL,
					     intel_iommu_groups,
					     "%s", iommu->name);
		if (err)
			goto err_unmap;

		iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);

		err = iommu_device_register(&iommu->iommu);
		if (err)
			goto err_unmap;
	}

	drhd->iommu = iommu;

	return 0;

err_unmap:
	unmap_iommu(iommu);
error_free_seq_id:
	dmar_free_seq_id(iommu);
error:
	kfree(iommu);
	return err;
}
static void free_iommu(struct intel_iommu *iommu)
{
	if (intel_iommu_enabled) {
		iommu_device_unregister(&iommu->iommu);
		iommu_device_sysfs_remove(&iommu->iommu);
	}

	if (iommu->irq) {
		if (iommu->pr_irq) {
			free_irq(iommu->pr_irq, iommu);
			dmar_free_hwirq(iommu->pr_irq);
			iommu->pr_irq = 0;
		}
		free_irq(iommu->irq, iommu);
		dmar_free_hwirq(iommu->irq);
		iommu->irq = 0;
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	dmar_free_seq_id(iommu);
	kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;
	int shift = qi_shift(iommu);

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> shift) == index) {
			struct qi_desc *desc = qi->desc + head;

			/*
			 * desc->qw2 and desc->qw3 are either reserved or
			 * used by software as private data. We won't print
			 * out these two qw's for security consideration.
			 */
			pr_err("VT-d detected invalid descriptor: qw0 = %llx, qw1 = %llx\n",
			       (unsigned long long)desc->qw0,
			       (unsigned long long)desc->qw1);
			memcpy(desc, qi->desc + (wait_index << shift),
			       1 << shift);
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	int offset, shift, length;
	struct qi_desc wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;
	shift = qi_shift(iommu);
	length = 1 << shift;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	offset = index << shift;
	memcpy(qi->desc + offset, desc, length);
	wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
	wait_desc.qw2 = 0;
	wait_desc.qw3 = 0;

	offset = wait_index << shift;
	memcpy(qi->desc + offset, &wait_desc, length);

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context to queue another cmd while a cmd is already submitted
		 * and waiting for completion on this cpu. This is to avoid
		 * a deadlock where the interrupt context can wait indefinitely
		 * for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.qw0 = QI_IEC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}
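
/*
 * Illustrative sketch (not part of the driver): any invalidation request
 * is just a qi_desc whose qw0/qw1 encode the command; qw2/qw3 must be
 * zeroed because they are part of the 256-bit descriptor when the
 * hardware runs in scalable mode. The example rebuilds the flush done by
 * qi_global_iec() above by hand and is kept compiled out.
 */
#if 0
static void example_flush_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.qw0 = QI_IEC_TYPE;		/* command type lives in qw0 */
	desc.qw1 = 0;
	desc.qw2 = 0;			/* reserved/private, keep zero */
	desc.qw3 = 0;

	qi_submit_sync(&desc, iommu);	/* blocks until hardware completes */
}
#endif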
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.qw0 = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(&desc, iommu);
}
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(&desc, iommu);
}
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			u16 qdep, u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.qw1 = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(&desc, iommu);
}
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;
	u64 val = virt_to_phys(qi->desc);

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	/*
	 * Set DW=1 and QS=1 in IQA_REG when Scalable Mode capability
	 * is present.
	 */
	if (ecap_smts(iommu->ecap))
		val |= (1 << 11) | 1;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, val);

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	/*
	 * Need two pages to accommodate 256 descriptors of 256 bits each
	 * if the remapping hardware supports scalable mode translation.
	 */
	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				     !!ecap_smts(iommu->ecap));
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};
static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};
static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
{
	if (iommu->irq == irq)
		return DMAR_FECTL_REG;
	else if (iommu->pr_irq == irq)
		return DMAR_PECTL_REG;
	else
		BUG();
}
void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + reg + 4);
	writel(msg->address_lo, iommu->reg + reg + 8);
	writel(msg->address_hi, iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + reg + 4);
	msg->address_lo = readl(iommu->reg + reg + 8);
	msg->address_hi = readl(iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
			source_id >> 8, PCI_SLOT(source_id & 0xFF),
			PCI_FUNC(source_id & 0xFF), addr >> 48,
			fault_reason, reason);
	else
		pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
		       type ? "DMA Read" : "DMA Write",
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;
	static DEFINE_RATELIMIT_STATE(rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status && __ratelimit(&rs))
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		/* Disable printing, simply clear the fault when ratelimited */
		bool ratelimited = !__ratelimit(&rs);
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		if (!ratelimited) {
			fault_reason = dma_frcd_fault_reason(data);
			type = dma_frcd_type(data);

			data = readl(iommu->reg + reg +
				     fault_index * PRIMARY_FAULT_REG_LEN + 8);
			source_id = dma_frcd_source_id(data);

			guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
			guest_addr = dma_frcd_page_addr(guest_addr);
		}

		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		if (!ratelimited)
			dmar_fault_do_one(iommu, type, fault_reason,
					  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
	       iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
	if (irq > 0) {
		iommu->irq = irq;
	} else {
		pr_err("No free IRQ vectors\n");
		return -EINVAL;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("Can't request irq\n");
	return ret;
}
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there is no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}
/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}
/* Check whether DMAR units are in use */
static inline bool dmar_in_use(void)
{
	return irq_remapping_enabled || intel_iommu_enabled;
}
static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	if (dmar_in_use())
		return 0;

	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
	up_write(&dmar_global_lock);

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);
/*
 * DMAR Hotplug Support
 * For more details, please refer to Intel(R) Virtualization Technology
 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
 * "Remapping Hardware Unit Hot Plug".
 */
static guid_t dmar_hp_guid =
	GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
		  0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);

/*
 * Currently there's only one revision and BIOS will not check the revision id,
 * so use 0 for safety.
 */
#define	DMAR_DSM_REV_ID			0
#define	DMAR_DSM_FUNC_DRHD		1
#define	DMAR_DSM_FUNC_ATSR		2
#define	DMAR_DSM_FUNC_RHSA		3
static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
	return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
}
static int dmar_walk_dsm_resource(acpi_handle handle, int func,
				  dmar_res_handler_t handler, void *arg)
{
	int ret = -ENODEV;
	union acpi_object *obj;
	struct acpi_dmar_header *start;
	struct dmar_res_callback callback;
	static int res_type[] = {
		[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
		[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
		[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
	};

	if (!dmar_detect_dsm(handle, func))
		return 0;

	obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
				      func, NULL, ACPI_TYPE_BUFFER);
	if (!obj)
		return -ENODEV;

	memset(&callback, 0, sizeof(callback));
	callback.cb[res_type[func]] = handler;
	callback.arg[res_type[func]] = arg;
	start = (struct acpi_dmar_header *)obj->buffer.pointer;
	ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);

	ACPI_FREE(obj);

	return ret;
}
static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
{
	int ret;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return -ENODEV;

	ret = dmar_ir_hotplug(dmaru, true);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, true);

	return ret;
}
static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
{
	int i, ret;
	struct device *dev;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return 0;

	/*
	 * All PCI devices managed by this unit should have been destroyed.
	 */
	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
		for_each_active_dev_scope(dmaru->devices,
					  dmaru->devices_cnt, i, dev)
			return -EBUSY;
	}

	ret = dmar_ir_hotplug(dmaru, false);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, false);

	return ret;
}
static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (dmaru) {
		list_del_rcu(&dmaru->list);
		synchronize_rcu();
		dmar_free_drhd(dmaru);
	}

	return 0;
}
static int dmar_hotplug_insert(acpi_handle handle)
{
	int ret;
	int drhd_count = 0;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_validate_one_drhd, (void *)1);
	if (ret)
		goto out;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_parse_one_drhd, (void *)&drhd_count);
	if (ret == 0 && drhd_count == 0) {
		pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
		goto out;
	} else if (ret) {
		goto release_drhd;
	}

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
				     &dmar_parse_one_rhsa, NULL);
	if (ret)
		goto release_drhd;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_parse_one_atsr, NULL);
	if (ret)
		goto release_atsr;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_add_drhd, NULL);
	if (!ret)
		return 0;

	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_remove_drhd, NULL);
release_atsr:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
			       &dmar_release_one_atsr, NULL);
release_drhd:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_release_drhd, NULL);
out:
	return ret;
}
static int dmar_hotplug_remove(acpi_handle handle)
{
	int ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_check_one_atsr, NULL);
	if (ret)
		return ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_remove_drhd, NULL);
	if (ret == 0) {
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
					       &dmar_release_one_atsr, NULL));
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
					       &dmar_hp_release_drhd, NULL));
	} else {
		dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				       &dmar_hp_add_drhd, NULL);
	}

	return ret;
}
static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
				       void *context, void **retval)
{
	acpi_handle *phdl = retval;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		*phdl = handle;
		return AE_CTRL_TERMINATE;
	}

	return AE_OK;
}
static int dmar_device_hotplug(acpi_handle handle, bool insert)
{
	int ret;
	acpi_handle tmp = NULL;
	acpi_status status;

	if (!dmar_in_use())
		return 0;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		tmp = handle;
	} else {
		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
					     ACPI_UINT32_MAX,
					     dmar_get_dsm_handle,
					     NULL, NULL, &tmp);
		if (ACPI_FAILURE(status)) {
			pr_warn("Failed to locate _DSM method.\n");
			return -ENXIO;
		}
	}
	if (tmp == NULL)
		return 0;

	down_write(&dmar_global_lock);
	if (insert)
		ret = dmar_hotplug_insert(tmp);
	else
		ret = dmar_hotplug_remove(tmp);
	up_write(&dmar_global_lock);

	return ret;
}
int dmar_device_add(acpi_handle handle)
{
	return dmar_device_hotplug(handle, true);
}

int dmar_device_remove(acpi_handle handle)
{
	return dmar_device_hotplug(handle, false);
}
/*
 * dmar_platform_optin - Is %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in DMAR table
 *
 * Returns true if the platform has %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in
 * the ACPI DMAR table. This means that the platform boot firmware has made
 * sure no device can issue DMA outside of RMRR regions.
 */
bool dmar_platform_optin(void)
{
	struct acpi_table_dmar *dmar;
	acpi_status status;
	bool ret;

	status = acpi_get_table(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar);
	if (ACPI_FAILURE(status))
		return false;

	ret = !!(dmar->flags & DMAR_PLATFORM_OPT_IN);
	acpi_put_table((struct acpi_table_header *)dmar);

	return ret;
}
EXPORT_SYMBOL_GPL(dmar_platform_optin);
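
/*
 * Illustrative sketch (not part of the driver): a policy helper might
 * consult dmar_platform_optin() to decide whether DMA protection should
 * default to on. The example_* name is hypothetical and the code is kept
 * compiled out; the real consumer of this flag lives in the Intel IOMMU
 * driver proper.
 */
#if 0
static bool example_default_to_dma_protection(void)
{
	/* Firmware guarantees no DMA outside RMRR regions before handoff. */
	return dmar_platform_optin();
}
#endif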