/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI table.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>

#define PREFIX "DMAR:"

/*
 * No locks are needed: the DMA remapping hardware unit list is
 * constructed at boot time, and hotplug of these units is not
 * supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL units at the tail, so a scan of the list will
	 * find them at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);
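
	/*
	 * Illustrative arithmetic (assuming ACPICA's layouts: a 6-byte
	 * struct acpi_dmar_device_scope header followed by 2-byte PCI
	 * path entries): a scope entry of length 8 gives count = 1, a
	 * device directly on scope->bus; length 10 gives count = 2, one
	 * bridge hop and then the device.
	 */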
	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list nonexistent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
				PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) || (scope->entry_type ==
	     ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}
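
/*
 * Note on the two-pass structure of dmar_parse_dev_scope(): the first
 * walk over [start, end) only counts endpoint and bridge entries so
 * that the devices array can be sized with a single kcalloc(); the
 * second walk re-parses the same entries and fills the array through
 * dmar_parse_one_dev_scope(). Unsupported entry types (e.g. IOAPIC or
 * HPET scopes) are warned about and skipped in both passes.
 */
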
/**
 * dmar_parse_one_drhd - parse exactly one DMA remapping hardware
 * definition structure, which uniquely represents one DMA remapping
 * hardware unit present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret;

	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}
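
/*
 * Per the VT-d specification, bit 0 of the DRHD flags is
 * INCLUDE_PCI_ALL: a unit with this flag set claims every device on
 * its segment that is not listed in another unit's device scope. That
 * is why dmar_register_drhd_unit() keeps INCLUDE_ALL units at the tail
 * of the list, so that more specific units are matched first.
 */
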
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	static int include_all;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (!dmaru->include_all)
		ret = dmar_parse_dev_scope((void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				&dmaru->devices_cnt, &dmaru->devices,
				drhd->segment);
	else {
		/* Only allow one INCLUDE_ALL */
		if (include_all) {
			printk(KERN_WARNING PREFIX "Only one INCLUDE_ALL "
				"device scope is allowed\n");
			ret = -EINVAL;
		}
		include_all = 1;
	}

	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = (struct acpi_dmar_hardware_unit *)header;
		printk(KERN_INFO PREFIX
			"DRHD (flags: 0x%08x) base: 0x%016Lx\n",
			drhd->flags, (unsigned long long)drhd->address);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = (struct acpi_dmar_reserved_memory *)header;
		printk(KERN_INFO PREFIX
			"RMRR base: 0x%016Lx end: 0x%016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
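
/*
 * Return convention for dmar_table_detect(): 1 if a DMAR table was
 * found and mapped, 0 otherwise. A table that exists but cannot be
 * mapped is treated the same as an absent one (AE_NOT_FOUND).
 */
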
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again here: the earlier dmar_tbl mapping could have been
	 * done with a fixed map.
	 */
	dmar_table_detect();

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
		dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type\n");
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}
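
/*
 * Layout of the ACPI DMAR table walked above (sketch):
 *
 *	struct acpi_table_dmar		fixed header: signature "DMAR",
 *					width (host address width - 1),
 *					flags
 *	struct acpi_dmar_header		variable-length remapping
 *	  ...type-specific body		structures (DRHD, RMRR, ...),
 *	struct acpi_dmar_header		each beginning with a common
 *	  ...type-specific body		{type, length} header
 *
 * Because the loop advances by entry_header->length, structure types
 * introduced by newer firmware are skipped without failing the parse.
 */
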
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd = NULL;

	list_for_each_entry(drhd, &dmar_drhd_units, list) {
		if (drhd->include_all || dmar_pci_device_match(drhd->devices,
						drhd->devices_cnt, dev))
			return drhd;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}
	}

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

#ifdef CONFIG_INTR_REMAP
	parse_ioapics_under_ir();
#endif
	return 0;
}

void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();

#ifdef CONFIG_INTR_REMAP
	{
		struct acpi_table_dmar *dmar;
		/*
		 * For now we will disable DMA-remapping when interrupt
		 * remapping is enabled.  Once support for queued
		 * invalidation of the IOTLB is added, we will not need
		 * this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
	}
#endif
	if (ret && !no_iommu && !iommu_detected && !swiotlb)
		iommu_detected = 1;

	dmar_tbl = NULL;
}

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	/* the registers might span more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
		cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;
error:
	kfree(iommu);
	return -1;
}
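
/*
 * Illustrative numbers for the remapping logic above: if, say, the
 * extended capability register reported a maximum IOTLB register
 * offset of 0x1008, VTD_PAGE_ALIGN() would round map_size up to
 * 0x2000 and the register window would be remapped as two pages
 * instead of one.
 */
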
void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);
	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
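
/*
 * The invalidation queue is a ring of QI_LENGTH slots, so advancing
 * free_tail from QI_LENGTH - 1 wraps back to slot 0 via the modulo,
 * e.g. with QI_LENGTH == 256: (255 + 1) % 256 == 0.
 */
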
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return;

	hw = qi->desc;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	spin_lock(&iommu->register_lock);
	/*
	 * Update the HW tail register to indicate the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
	spin_unlock(&iommu->register_lock);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave interrupts disabled to prevent interrupt
		 * context from queueing another command while one is
		 * already submitted and waiting for completion on this
		 * CPU. This avoids a deadlock where the interrupt
		 * context could wait indefinitely for free slots in
		 * the queue.
		 */
		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);
}
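
/*
 * Each qi_submit_sync() call consumes two consecutive ring slots
 * (sketch):
 *
 *	hw[index]	the caller's invalidation descriptor
 *	hw[wait_index]	a wait descriptor whose status write targets
 *			qi->desc_status[wait_index]
 *
 * When hardware completes the wait descriptor it stores the programmed
 * status value (QI_IWD_STATUS_DATA(2), i.e. QI_DONE) into that status
 * slot, which is what the polling loop watches. The tail register is
 * written as free_head << 4 because descriptors are 16 bytes each and
 * DMAR_IQT_REG takes a byte offset into the queue.
 */
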
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		     u64 type, int non_present_entry_flush)
{
	struct qi_desc desc;

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);

	return 0;
}

int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		   unsigned int size_order, u64 type,
		   int non_present_entry_flush)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);

	return 0;
}
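
/*
 * size_order in qi_flush_iotlb() is log2 of the number of 4KiB pages
 * covered by the invalidation: e.g. to invalidate 8 contiguous pages
 * starting at addr, pass size_order = 3. QI_IOTLB_AM() places it in
 * the descriptor's address-mask field.
 */
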
/*
 * Enable the Queued Invalidation interface. This is a must to support
 * interrupt-remapping. It is also used by DMA-remapping, which replaces
 * register-based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	unsigned long flags;
	u32 cmd, sts;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * Queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_KERNEL));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	/* kzalloc so every slot starts out as QI_FREE (== 0) */
	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_KERNEL);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	spin_lock_irqsave(&iommu->register_lock, flags);
	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	cmd = iommu->gcmd | DMA_GCMD_QIE;
	iommu->gcmd |= DMA_GCMD_QIE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware has completed it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	return 0;
}
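
/*
 * Typical boot-time call sequence (an illustrative sketch only; the
 * real call sites live in the arch setup and DMA/interrupt-remapping
 * init paths, and for_each_drhd_unit() is the list helper from
 * <linux/dmar.h>):
 *
 *	detect_intel_iommu();
 *	if (dmar_table_init() == 0) {
 *		struct dmar_drhd_unit *drhd;
 *
 *		dmar_dev_scope_init();
 *		for_each_drhd_unit(drhd)
 *			dmar_enable_qi(drhd->iommu);
 *	}
 */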