[linux-2.6/btrfs-unstable.git] / drivers / iommu / intel-iommu.c
blob 5619f264862d9073a587fd1a2b919f2c61a27523
1 /*
2 * Copyright © 2006-2014 Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
13 * Authors: David Woodhouse <dwmw2@infradead.org>,
14 * Ashok Raj <ashok.raj@intel.com>,
15 * Shaohua Li <shaohua.li@intel.com>,
16 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17 * Fenghua Yu <fenghua.yu@intel.com>
20 #include <linux/init.h>
21 #include <linux/bitmap.h>
22 #include <linux/debugfs.h>
23 #include <linux/export.h>
24 #include <linux/slab.h>
25 #include <linux/irq.h>
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
28 #include <linux/pci.h>
29 #include <linux/dmar.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/mempool.h>
32 #include <linux/memory.h>
33 #include <linux/timer.h>
34 #include <linux/iova.h>
35 #include <linux/iommu.h>
36 #include <linux/intel-iommu.h>
37 #include <linux/syscore_ops.h>
38 #include <linux/tboot.h>
39 #include <linux/dmi.h>
40 #include <linux/pci-ats.h>
41 #include <linux/memblock.h>
42 #include <linux/dma-contiguous.h>
43 #include <asm/irq_remapping.h>
44 #include <asm/cacheflush.h>
45 #include <asm/iommu.h>
47 #include "irq_remapping.h"
49 #define ROOT_SIZE VTD_PAGE_SIZE
50 #define CONTEXT_SIZE VTD_PAGE_SIZE
52 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
53 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
54 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
56 #define IOAPIC_RANGE_START (0xfee00000)
57 #define IOAPIC_RANGE_END (0xfeefffff)
58 #define IOVA_START_ADDR (0x1000)
60 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
62 #define MAX_AGAW_WIDTH 64
63 #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
65 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
66 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
68 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
69 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
70 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
71 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
72 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
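/*
 * Worked example: with the default 48-bit guest address width and
 * VTD_PAGE_SHIFT == 12, __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1 and
 * DOMAIN_MAX_ADDR(48) == 0xFFFFFFFFF000, i.e. 256TiB minus one 4KiB
 * page.  DOMAIN_MAX_PFN() additionally clamps the PFN to ULONG_MAX so
 * that 32-bit kernels can keep using 'unsigned long' for PFNs.
 */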
74 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
75 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
76 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
78 /* page table handling */
79 #define LEVEL_STRIDE (9)
80 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
83 * This bitmap is used to advertise the page sizes our hardware supports
84 * to the IOMMU core, which will then use this information to split
85 * physically contiguous memory regions it is mapping into page sizes
86 * that we support.
88 * Traditionally the IOMMU core just handed us the mappings directly,
89 * after making sure the size is an order of a 4KiB page and that the
90 * mapping has natural alignment.
92 * To retain this behavior, we currently advertise that we support
93 * all page sizes that are an order of 4KiB.
95 * If at some point we'd like to utilize the IOMMU core's new behavior,
96 * we could change this to advertise the real page sizes we support.
98 #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
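/*
 * Worked example: ~0xFFFUL has every bit from 12 upwards set, so the
 * IOMMU core sees "support" for 4KiB, 8KiB, 16KiB, ... i.e. any
 * power-of-two multiple of 4KiB, and keeps handing us naturally
 * aligned chunks of those sizes instead of splitting them down to the
 * real hardware page sizes (4KiB/2MiB/1GiB).
 */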
100 static inline int agaw_to_level(int agaw)
102 return agaw + 2;
105 static inline int agaw_to_width(int agaw)
107 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
110 static inline int width_to_agaw(int width)
112 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
115 static inline unsigned int level_to_offset_bits(int level)
117 return (level - 1) * LEVEL_STRIDE;
120 static inline int pfn_level_offset(unsigned long pfn, int level)
122 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
125 static inline unsigned long level_mask(int level)
127 return -1UL << level_to_offset_bits(level);
130 static inline unsigned long level_size(int level)
132 return 1UL << level_to_offset_bits(level);
135 static inline unsigned long align_to_level(unsigned long pfn, int level)
137 return (pfn + level_size(level) - 1) & level_mask(level);
140 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
142 return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
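/*
 * Illustrative sketch (not compiled; the example function below is
 * hypothetical): how the level helpers above combine for the default
 * 48-bit address width.  The comments give the values each call
 * evaluates to.
 */
#if 0
static void __maybe_unused pgtable_math_example(void)
{
	int agaw  = width_to_agaw(48);            /* DIV_ROUND_UP(48 - 30, 9) == 2  */
	int level = agaw_to_level(agaw);          /* 2 + 2 == 4, a 4-level table    */
	int idx1  = pfn_level_offset(0x12345, 1); /* 0x12345 & 0x1ff == 0x145       */
	int idx2  = pfn_level_offset(0x12345, 2); /* (0x12345 >> 9) & 0x1ff == 0x91 */

	/* a level-2 entry spans level_size(2) == 512 4KiB pages, i.e. 2MiB */
}
#endif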
145 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
146 are never going to work. */
147 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
149 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
152 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
154 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
156 static inline unsigned long page_to_dma_pfn(struct page *pg)
158 return mm_to_dma_pfn(page_to_pfn(pg));
160 static inline unsigned long virt_to_dma_pfn(void *p)
162 return page_to_dma_pfn(virt_to_page(p));
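/*
 * Worked example: VT-d PFNs are always in 4KiB units while MM PFNs are
 * in PAGE_SIZE units, so the shifts above differ by
 * (PAGE_SHIFT - VTD_PAGE_SHIFT).  With 4KiB kernel pages the
 * conversions are the identity; on a (hypothetical) 16KiB-page kernel
 * one MM PFN would correspond to four consecutive VT-d PFNs.
 */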
165 /* global iommu list, set NULL for ignored DMAR units */
166 static struct intel_iommu **g_iommus;
168 static void __init check_tylersburg_isoch(void);
169 static int rwbf_quirk;
172 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
173 * (used when kernel is launched w/ TXT)
175 static int force_on = 0;
178 * 0: Present
179 * 1-11: Reserved
180 * 12-63: Context Ptr (12 - (haw-1))
181 * 64-127: Reserved
183 struct root_entry {
184 u64 val;
185 u64 rsvd1;
187 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
188 static inline bool root_present(struct root_entry *root)
190 return (root->val & 1);
192 static inline void set_root_present(struct root_entry *root)
194 root->val |= 1;
196 static inline void set_root_value(struct root_entry *root, unsigned long value)
198 root->val |= value & VTD_PAGE_MASK;
201 static inline struct context_entry *
202 get_context_addr_from_root(struct root_entry *root)
204 return (struct context_entry *)
205 (root_present(root)?phys_to_virt(
206 root->val & VTD_PAGE_MASK) :
207 NULL);
211 * low 64 bits:
212 * 0: present
213 * 1: fault processing disable
214 * 2-3: translation type
215 * 12-63: address space root
216 * high 64 bits:
217 * 0-2: address width
218 * 3-6: aval
219 * 8-23: domain id
221 struct context_entry {
222 u64 lo;
223 u64 hi;
226 static inline bool context_present(struct context_entry *context)
228 return (context->lo & 1);
230 static inline void context_set_present(struct context_entry *context)
232 context->lo |= 1;
235 static inline void context_set_fault_enable(struct context_entry *context)
237 context->lo &= (((u64)-1) << 2) | 1;
240 static inline void context_set_translation_type(struct context_entry *context,
241 unsigned long value)
243 context->lo &= (((u64)-1) << 4) | 3;
244 context->lo |= (value & 3) << 2;
247 static inline void context_set_address_root(struct context_entry *context,
248 unsigned long value)
250 context->lo |= value & VTD_PAGE_MASK;
253 static inline void context_set_address_width(struct context_entry *context,
254 unsigned long value)
256 context->hi |= value & 7;
259 static inline void context_set_domain_id(struct context_entry *context,
260 unsigned long value)
262 context->hi |= (value & ((1 << 16) - 1)) << 8;
265 static inline void context_clear_entry(struct context_entry *context)
267 context->lo = 0;
268 context->hi = 0;
272 * 0: readable
273 * 1: writable
274 * 2-6: reserved
275 * 7: super page
276 * 8-10: available
277 * 11: snoop behavior
278 * 12-63: Host physical address
280 struct dma_pte {
281 u64 val;
284 static inline void dma_clear_pte(struct dma_pte *pte)
286 pte->val = 0;
289 static inline u64 dma_pte_addr(struct dma_pte *pte)
291 #ifdef CONFIG_64BIT
292 return pte->val & VTD_PAGE_MASK;
293 #else
294 /* Must have a full atomic 64-bit read */
295 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
296 #endif
299 static inline bool dma_pte_present(struct dma_pte *pte)
301 return (pte->val & 3) != 0;
304 static inline bool dma_pte_superpage(struct dma_pte *pte)
306 return (pte->val & DMA_PTE_LARGE_PAGE);
309 static inline int first_pte_in_page(struct dma_pte *pte)
311 return !((unsigned long)pte & ~VTD_PAGE_MASK);
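/*
 * Worked example: each struct dma_pte is 8 bytes, so one 4KiB table
 * page holds 512 entries.  A pte pointer is "first in its page" when
 * its low 12 bits (~VTD_PAGE_MASK) are zero, which is how the mapping
 * and clearing loops below notice that they have stepped onto a new
 * table page.
 */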
315 * This domain is a static identity mapping domain.
316 * 1. This domain creates a static 1:1 mapping to all usable memory.
317 * 2. It maps to each iommu if successful.
318 * 3. Each iommu maps to this domain if successful.
320 static struct dmar_domain *si_domain;
321 static int hw_pass_through = 1;
323 /* domain represents a virtual machine, more than one devices
324 * across iommus may be owned in one domain, e.g. kvm guest.
326 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
328 /* si_domain contains multiple devices */
329 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
331 /* define the limit of IOMMUs supported in each domain */
332 #ifdef CONFIG_X86
333 # define IOMMU_UNITS_SUPPORTED MAX_IO_APICS
334 #else
335 # define IOMMU_UNITS_SUPPORTED 64
336 #endif
338 struct dmar_domain {
339 int id; /* domain id */
340 int nid; /* node id */
341 DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
342 /* bitmap of iommus this domain uses*/
344 struct list_head devices; /* all devices' list */
345 struct iova_domain iovad; /* iova's that belong to this domain */
347 struct dma_pte *pgd; /* virtual address */
348 int gaw; /* max guest address width */
350 /* adjusted guest address width, 0 is level 2 30-bit */
351 int agaw;
353 int flags; /* flags to find out type of domain */
355 int iommu_coherency;/* indicate coherency of iommu access */
356 int iommu_snooping; /* indicate snooping control feature*/
357 int iommu_count; /* reference count of iommu */
358 int iommu_superpage;/* Level of superpages supported:
359 0 == 4KiB (no superpages), 1 == 2MiB,
360 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
361 spinlock_t iommu_lock; /* protect iommu set in domain */
362 u64 max_addr; /* maximum mapped address */
365 /* PCI domain-device relationship */
366 struct device_domain_info {
367 struct list_head link; /* link to domain siblings */
368 struct list_head global; /* link to global list */
369 u8 bus; /* PCI bus number */
370 u8 devfn; /* PCI devfn number */
371 struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
372 struct intel_iommu *iommu; /* IOMMU used by this device */
373 struct dmar_domain *domain; /* pointer to domain */
376 struct dmar_rmrr_unit {
377 struct list_head list; /* list of rmrr units */
378 struct acpi_dmar_header *hdr; /* ACPI header */
379 u64 base_address; /* reserved base address*/
380 u64 end_address; /* reserved end address */
381 struct dmar_dev_scope *devices; /* target devices */
382 int devices_cnt; /* target device count */
385 struct dmar_atsr_unit {
386 struct list_head list; /* list of ATSR units */
387 struct acpi_dmar_header *hdr; /* ACPI header */
388 struct dmar_dev_scope *devices; /* target devices */
389 int devices_cnt; /* target device count */
390 u8 include_all:1; /* include all ports */
393 static LIST_HEAD(dmar_atsr_units);
394 static LIST_HEAD(dmar_rmrr_units);
396 #define for_each_rmrr_units(rmrr) \
397 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
399 static void flush_unmaps_timeout(unsigned long data);
401 static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
403 #define HIGH_WATER_MARK 250
404 struct deferred_flush_tables {
405 int next;
406 struct iova *iova[HIGH_WATER_MARK];
407 struct dmar_domain *domain[HIGH_WATER_MARK];
408 struct page *freelist[HIGH_WATER_MARK];
411 static struct deferred_flush_tables *deferred_flush;
413 /* bitmap for indexing intel_iommus */
414 static int g_num_of_iommus;
416 static DEFINE_SPINLOCK(async_umap_flush_lock);
417 static LIST_HEAD(unmaps_to_do);
419 static int timer_on;
420 static long list_size;
422 static void domain_exit(struct dmar_domain *domain);
423 static void domain_remove_dev_info(struct dmar_domain *domain);
424 static void domain_remove_one_dev_info(struct dmar_domain *domain,
425 struct device *dev);
426 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
427 struct device *dev);
428 static int domain_detach_iommu(struct dmar_domain *domain,
429 struct intel_iommu *iommu);
431 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
432 int dmar_disabled = 0;
433 #else
434 int dmar_disabled = 1;
435 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
437 int intel_iommu_enabled = 0;
438 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
440 static int dmar_map_gfx = 1;
441 static int dmar_forcedac;
442 static int intel_iommu_strict;
443 static int intel_iommu_superpage = 1;
445 int intel_iommu_gfx_mapped;
446 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
448 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
449 static DEFINE_SPINLOCK(device_domain_lock);
450 static LIST_HEAD(device_domain_list);
452 static const struct iommu_ops intel_iommu_ops;
454 static int __init intel_iommu_setup(char *str)
456 if (!str)
457 return -EINVAL;
458 while (*str) {
459 if (!strncmp(str, "on", 2)) {
460 dmar_disabled = 0;
461 printk(KERN_INFO "Intel-IOMMU: enabled\n");
462 } else if (!strncmp(str, "off", 3)) {
463 dmar_disabled = 1;
464 printk(KERN_INFO "Intel-IOMMU: disabled\n");
465 } else if (!strncmp(str, "igfx_off", 8)) {
466 dmar_map_gfx = 0;
467 printk(KERN_INFO
468 "Intel-IOMMU: disable GFX device mapping\n");
469 } else if (!strncmp(str, "forcedac", 8)) {
470 printk(KERN_INFO
471 "Intel-IOMMU: Forcing DAC for PCI devices\n");
472 dmar_forcedac = 1;
473 } else if (!strncmp(str, "strict", 6)) {
474 printk(KERN_INFO
475 "Intel-IOMMU: disable batched IOTLB flush\n");
476 intel_iommu_strict = 1;
477 } else if (!strncmp(str, "sp_off", 6)) {
478 printk(KERN_INFO
479 "Intel-IOMMU: disable supported super page\n");
480 intel_iommu_superpage = 0;
483 str += strcspn(str, ",");
484 while (*str == ',')
485 str++;
487 return 0;
489 __setup("intel_iommu=", intel_iommu_setup);
491 static struct kmem_cache *iommu_domain_cache;
492 static struct kmem_cache *iommu_devinfo_cache;
493 static struct kmem_cache *iommu_iova_cache;
495 static inline void *alloc_pgtable_page(int node)
497 struct page *page;
498 void *vaddr = NULL;
500 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
501 if (page)
502 vaddr = page_address(page);
503 return vaddr;
506 static inline void free_pgtable_page(void *vaddr)
508 free_page((unsigned long)vaddr);
511 static inline void *alloc_domain_mem(void)
513 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
516 static void free_domain_mem(void *vaddr)
518 kmem_cache_free(iommu_domain_cache, vaddr);
521 static inline void * alloc_devinfo_mem(void)
523 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
526 static inline void free_devinfo_mem(void *vaddr)
528 kmem_cache_free(iommu_devinfo_cache, vaddr);
531 struct iova *alloc_iova_mem(void)
533 return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
536 void free_iova_mem(struct iova *iova)
538 kmem_cache_free(iommu_iova_cache, iova);
541 static inline int domain_type_is_vm(struct dmar_domain *domain)
543 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
546 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
548 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
549 DOMAIN_FLAG_STATIC_IDENTITY);
552 static inline int domain_pfn_supported(struct dmar_domain *domain,
553 unsigned long pfn)
555 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
557 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
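/*
 * Worked example: a domain with the default agaw of 2 has
 * agaw_to_width(2) == 48, so addr_width == 36 and any pfn needing more
 * than 36 bits (an address at or above 256TiB) is rejected.  The
 * BITS_PER_LONG check avoids an undefined shift on 32-bit kernels,
 * where every representable pfn fits anyway.
 */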
560 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
562 unsigned long sagaw;
563 int agaw = -1;
565 sagaw = cap_sagaw(iommu->cap);
566 for (agaw = width_to_agaw(max_gaw);
567 agaw >= 0; agaw--) {
568 if (test_bit(agaw, &sagaw))
569 break;
572 return agaw;
576 * Calculate max SAGAW for each iommu.
578 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
580 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
584 * calculate agaw for each iommu.
585 * "SAGAW" may be different across iommus, use a default agaw, and
586 * fall back to a smaller supported agaw for iommus that don't support the default.
588 int iommu_calculate_agaw(struct intel_iommu *iommu)
590 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
593 /* This function only returns a single iommu in a domain */
594 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
596 int iommu_id;
598 /* si_domain and vm domain should not get here. */
599 BUG_ON(domain_type_is_vm_or_si(domain));
600 iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
601 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
602 return NULL;
604 return g_iommus[iommu_id];
607 static void domain_update_iommu_coherency(struct dmar_domain *domain)
609 struct dmar_drhd_unit *drhd;
610 struct intel_iommu *iommu;
611 int i, found = 0;
613 domain->iommu_coherency = 1;
615 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
616 found = 1;
617 if (!ecap_coherent(g_iommus[i]->ecap)) {
618 domain->iommu_coherency = 0;
619 break;
622 if (found)
623 return;
625 /* No hardware attached; use lowest common denominator */
626 rcu_read_lock();
627 for_each_active_iommu(iommu, drhd) {
628 if (!ecap_coherent(iommu->ecap)) {
629 domain->iommu_coherency = 0;
630 break;
633 rcu_read_unlock();
636 static int domain_update_iommu_snooping(struct intel_iommu *skip)
638 struct dmar_drhd_unit *drhd;
639 struct intel_iommu *iommu;
640 int ret = 1;
642 rcu_read_lock();
643 for_each_active_iommu(iommu, drhd) {
644 if (iommu != skip) {
645 if (!ecap_sc_support(iommu->ecap)) {
646 ret = 0;
647 break;
651 rcu_read_unlock();
653 return ret;
656 static int domain_update_iommu_superpage(struct intel_iommu *skip)
658 struct dmar_drhd_unit *drhd;
659 struct intel_iommu *iommu;
660 int mask = 0xf;
662 if (!intel_iommu_superpage) {
663 return 0;
666 /* set iommu_superpage to the smallest common denominator */
667 rcu_read_lock();
668 for_each_active_iommu(iommu, drhd) {
669 if (iommu != skip) {
670 mask &= cap_super_page_val(iommu->cap);
671 if (!mask)
672 break;
675 rcu_read_unlock();
677 return fls(mask);
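/*
 * Worked example: the 4-bit mask mirrors the iommu_superpage levels
 * documented in struct dmar_domain (1 == 2MiB, 2 == 1GiB, ...).  If
 * one IOMMU advertises 2MiB+1GiB (0x3) and another only 2MiB (0x1),
 * the intersection is 0x1 and fls(0x1) == 1, so the domain is limited
 * to 2MiB superpages; with superpages disabled the function returns 0
 * (4KiB only).
 */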
680 /* Some capabilities may be different across iommus */
681 static void domain_update_iommu_cap(struct dmar_domain *domain)
683 domain_update_iommu_coherency(domain);
684 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
685 domain->iommu_superpage = domain_update_iommu_superpage(NULL);
688 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
690 struct dmar_drhd_unit *drhd = NULL;
691 struct intel_iommu *iommu;
692 struct device *tmp;
693 struct pci_dev *ptmp, *pdev = NULL;
694 u16 segment = 0;
695 int i;
697 if (dev_is_pci(dev)) {
698 pdev = to_pci_dev(dev);
699 segment = pci_domain_nr(pdev->bus);
700 } else if (ACPI_COMPANION(dev))
701 dev = &ACPI_COMPANION(dev)->dev;
703 rcu_read_lock();
704 for_each_active_iommu(iommu, drhd) {
705 if (pdev && segment != drhd->segment)
706 continue;
708 for_each_active_dev_scope(drhd->devices,
709 drhd->devices_cnt, i, tmp) {
710 if (tmp == dev) {
711 *bus = drhd->devices[i].bus;
712 *devfn = drhd->devices[i].devfn;
713 goto out;
716 if (!pdev || !dev_is_pci(tmp))
717 continue;
719 ptmp = to_pci_dev(tmp);
720 if (ptmp->subordinate &&
721 ptmp->subordinate->number <= pdev->bus->number &&
722 ptmp->subordinate->busn_res.end >= pdev->bus->number)
723 goto got_pdev;
726 if (pdev && drhd->include_all) {
727 got_pdev:
728 *bus = pdev->bus->number;
729 *devfn = pdev->devfn;
730 goto out;
733 iommu = NULL;
734 out:
735 rcu_read_unlock();
737 return iommu;
740 static void domain_flush_cache(struct dmar_domain *domain,
741 void *addr, int size)
743 if (!domain->iommu_coherency)
744 clflush_cache_range(addr, size);
747 /* Gets context entry for a given bus and devfn */
748 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
749 u8 bus, u8 devfn)
751 struct root_entry *root;
752 struct context_entry *context;
753 unsigned long phy_addr;
754 unsigned long flags;
756 spin_lock_irqsave(&iommu->lock, flags);
757 root = &iommu->root_entry[bus];
758 context = get_context_addr_from_root(root);
759 if (!context) {
760 context = (struct context_entry *)
761 alloc_pgtable_page(iommu->node);
762 if (!context) {
763 spin_unlock_irqrestore(&iommu->lock, flags);
764 return NULL;
766 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
767 phy_addr = virt_to_phys((void *)context);
768 set_root_value(root, phy_addr);
769 set_root_present(root);
770 __iommu_flush_cache(iommu, root, sizeof(*root));
772 spin_unlock_irqrestore(&iommu->lock, flags);
773 return &context[devfn];
776 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
778 struct root_entry *root;
779 struct context_entry *context;
780 int ret;
781 unsigned long flags;
783 spin_lock_irqsave(&iommu->lock, flags);
784 root = &iommu->root_entry[bus];
785 context = get_context_addr_from_root(root);
786 if (!context) {
787 ret = 0;
788 goto out;
790 ret = context_present(&context[devfn]);
791 out:
792 spin_unlock_irqrestore(&iommu->lock, flags);
793 return ret;
796 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
798 struct root_entry *root;
799 struct context_entry *context;
800 unsigned long flags;
802 spin_lock_irqsave(&iommu->lock, flags);
803 root = &iommu->root_entry[bus];
804 context = get_context_addr_from_root(root);
805 if (context) {
806 context_clear_entry(&context[devfn]);
807 __iommu_flush_cache(iommu, &context[devfn], \
808 sizeof(*context));
810 spin_unlock_irqrestore(&iommu->lock, flags);
813 static void free_context_table(struct intel_iommu *iommu)
815 struct root_entry *root;
816 int i;
817 unsigned long flags;
818 struct context_entry *context;
820 spin_lock_irqsave(&iommu->lock, flags);
821 if (!iommu->root_entry) {
822 goto out;
824 for (i = 0; i < ROOT_ENTRY_NR; i++) {
825 root = &iommu->root_entry[i];
826 context = get_context_addr_from_root(root);
827 if (context)
828 free_pgtable_page(context);
830 free_pgtable_page(iommu->root_entry);
831 iommu->root_entry = NULL;
832 out:
833 spin_unlock_irqrestore(&iommu->lock, flags);
836 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
837 unsigned long pfn, int *target_level)
839 struct dma_pte *parent, *pte = NULL;
840 int level = agaw_to_level(domain->agaw);
841 int offset;
843 BUG_ON(!domain->pgd);
845 if (!domain_pfn_supported(domain, pfn))
846 /* Address beyond IOMMU's addressing capabilities. */
847 return NULL;
849 parent = domain->pgd;
851 while (1) {
852 void *tmp_page;
854 offset = pfn_level_offset(pfn, level);
855 pte = &parent[offset];
856 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
857 break;
858 if (level == *target_level)
859 break;
861 if (!dma_pte_present(pte)) {
862 uint64_t pteval;
864 tmp_page = alloc_pgtable_page(domain->nid);
866 if (!tmp_page)
867 return NULL;
869 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
870 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
871 if (cmpxchg64(&pte->val, 0ULL, pteval))
872 /* Someone else set it while we were thinking; use theirs. */
873 free_pgtable_page(tmp_page);
874 else
875 domain_flush_cache(domain, pte, sizeof(*pte));
877 if (level == 1)
878 break;
880 parent = phys_to_virt(dma_pte_addr(pte));
881 level--;
884 if (!*target_level)
885 *target_level = level;
887 return pte;
891 /* return address's pte at specific level */
892 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
893 unsigned long pfn,
894 int level, int *large_page)
896 struct dma_pte *parent, *pte = NULL;
897 int total = agaw_to_level(domain->agaw);
898 int offset;
900 parent = domain->pgd;
901 while (level <= total) {
902 offset = pfn_level_offset(pfn, total);
903 pte = &parent[offset];
904 if (level == total)
905 return pte;
907 if (!dma_pte_present(pte)) {
908 *large_page = total;
909 break;
912 if (dma_pte_superpage(pte)) {
913 *large_page = total;
914 return pte;
917 parent = phys_to_virt(dma_pte_addr(pte));
918 total--;
920 return NULL;
923 /* clear last level pte, a tlb flush should follow */
924 static void dma_pte_clear_range(struct dmar_domain *domain,
925 unsigned long start_pfn,
926 unsigned long last_pfn)
928 unsigned int large_page = 1;
929 struct dma_pte *first_pte, *pte;
931 BUG_ON(!domain_pfn_supported(domain, start_pfn));
932 BUG_ON(!domain_pfn_supported(domain, last_pfn));
933 BUG_ON(start_pfn > last_pfn);
935 /* we don't need lock here; nobody else touches the iova range */
936 do {
937 large_page = 1;
938 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
939 if (!pte) {
940 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
941 continue;
943 do {
944 dma_clear_pte(pte);
945 start_pfn += lvl_to_nr_pages(large_page);
946 pte++;
947 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
949 domain_flush_cache(domain, first_pte,
950 (void *)pte - (void *)first_pte);
952 } while (start_pfn && start_pfn <= last_pfn);
955 static void dma_pte_free_level(struct dmar_domain *domain, int level,
956 struct dma_pte *pte, unsigned long pfn,
957 unsigned long start_pfn, unsigned long last_pfn)
959 pfn = max(start_pfn, pfn);
960 pte = &pte[pfn_level_offset(pfn, level)];
962 do {
963 unsigned long level_pfn;
964 struct dma_pte *level_pte;
966 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
967 goto next;
969 level_pfn = pfn & level_mask(level - 1);
970 level_pte = phys_to_virt(dma_pte_addr(pte));
972 if (level > 2)
973 dma_pte_free_level(domain, level - 1, level_pte,
974 level_pfn, start_pfn, last_pfn);
976 /* If range covers entire pagetable, free it */
977 if (!(start_pfn > level_pfn ||
978 last_pfn < level_pfn + level_size(level) - 1)) {
979 dma_clear_pte(pte);
980 domain_flush_cache(domain, pte, sizeof(*pte));
981 free_pgtable_page(level_pte);
983 next:
984 pfn += level_size(level);
985 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
988 /* free page table pages. last level pte should already be cleared */
989 static void dma_pte_free_pagetable(struct dmar_domain *domain,
990 unsigned long start_pfn,
991 unsigned long last_pfn)
993 BUG_ON(!domain_pfn_supported(domain, start_pfn));
994 BUG_ON(!domain_pfn_supported(domain, last_pfn));
995 BUG_ON(start_pfn > last_pfn);
997 dma_pte_clear_range(domain, start_pfn, last_pfn);
999 /* We don't need lock here; nobody else touches the iova range */
1000 dma_pte_free_level(domain, agaw_to_level(domain->agaw),
1001 domain->pgd, 0, start_pfn, last_pfn);
1003 /* free pgd */
1004 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1005 free_pgtable_page(domain->pgd);
1006 domain->pgd = NULL;
1010 /* When a page at a given level is being unlinked from its parent, we don't
1011 need to *modify* it at all. All we need to do is make a list of all the
1012 pages which can be freed just as soon as we've flushed the IOTLB and we
1013 know the hardware page-walk will no longer touch them.
1014 The 'pte' argument is the *parent* PTE, pointing to the page that is to
1015 be freed. */
1016 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1017 int level, struct dma_pte *pte,
1018 struct page *freelist)
1020 struct page *pg;
1022 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1023 pg->freelist = freelist;
1024 freelist = pg;
1026 if (level == 1)
1027 return freelist;
1029 pte = page_address(pg);
1030 do {
1031 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1032 freelist = dma_pte_list_pagetables(domain, level - 1,
1033 pte, freelist);
1034 pte++;
1035 } while (!first_pte_in_page(pte));
1037 return freelist;
1040 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1041 struct dma_pte *pte, unsigned long pfn,
1042 unsigned long start_pfn,
1043 unsigned long last_pfn,
1044 struct page *freelist)
1046 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1048 pfn = max(start_pfn, pfn);
1049 pte = &pte[pfn_level_offset(pfn, level)];
1051 do {
1052 unsigned long level_pfn;
1054 if (!dma_pte_present(pte))
1055 goto next;
1057 level_pfn = pfn & level_mask(level);
1059 /* If range covers entire pagetable, free it */
1060 if (start_pfn <= level_pfn &&
1061 last_pfn >= level_pfn + level_size(level) - 1) {
1062 /* These subordinate page tables are going away entirely. Don't
1063 bother to clear them; we're just going to *free* them. */
1064 if (level > 1 && !dma_pte_superpage(pte))
1065 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1067 dma_clear_pte(pte);
1068 if (!first_pte)
1069 first_pte = pte;
1070 last_pte = pte;
1071 } else if (level > 1) {
1072 /* Recurse down into a level that isn't *entirely* obsolete */
1073 freelist = dma_pte_clear_level(domain, level - 1,
1074 phys_to_virt(dma_pte_addr(pte)),
1075 level_pfn, start_pfn, last_pfn,
1076 freelist);
1078 next:
1079 pfn += level_size(level);
1080 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1082 if (first_pte)
1083 domain_flush_cache(domain, first_pte,
1084 (void *)++last_pte - (void *)first_pte);
1086 return freelist;
1089 /* We can't just free the pages because the IOMMU may still be walking
1090 the page tables, and may have cached the intermediate levels. The
1091 pages can only be freed after the IOTLB flush has been done. */
1092 struct page *domain_unmap(struct dmar_domain *domain,
1093 unsigned long start_pfn,
1094 unsigned long last_pfn)
1096 struct page *freelist = NULL;
1098 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1099 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1100 BUG_ON(start_pfn > last_pfn);
1102 /* we don't need lock here; nobody else touches the iova range */
1103 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1104 domain->pgd, 0, start_pfn, last_pfn, NULL);
1106 /* free pgd */
1107 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1108 struct page *pgd_page = virt_to_page(domain->pgd);
1109 pgd_page->freelist = freelist;
1110 freelist = pgd_page;
1112 domain->pgd = NULL;
1115 return freelist;
1118 void dma_free_pagelist(struct page *freelist)
1120 struct page *pg;
1122 while ((pg = freelist)) {
1123 freelist = pg->freelist;
1124 free_pgtable_page(page_address(pg));
1128 /* iommu handling */
1129 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1131 struct root_entry *root;
1132 unsigned long flags;
1134 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1135 if (!root)
1136 return -ENOMEM;
1138 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1140 spin_lock_irqsave(&iommu->lock, flags);
1141 iommu->root_entry = root;
1142 spin_unlock_irqrestore(&iommu->lock, flags);
1144 return 0;
1147 static void iommu_set_root_entry(struct intel_iommu *iommu)
1149 void *addr;
1150 u32 sts;
1151 unsigned long flag;
1153 addr = iommu->root_entry;
1155 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1156 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
1158 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1160 /* Make sure hardware complete it */
1161 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1162 readl, (sts & DMA_GSTS_RTPS), sts);
1164 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1167 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1169 u32 val;
1170 unsigned long flag;
1172 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1173 return;
1175 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1176 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1178 /* Make sure hardware complete it */
1179 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1180 readl, (!(val & DMA_GSTS_WBFS)), val);
1182 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1185 /* return value determines if we need a write buffer flush */
1186 static void __iommu_flush_context(struct intel_iommu *iommu,
1187 u16 did, u16 source_id, u8 function_mask,
1188 u64 type)
1190 u64 val = 0;
1191 unsigned long flag;
1193 switch (type) {
1194 case DMA_CCMD_GLOBAL_INVL:
1195 val = DMA_CCMD_GLOBAL_INVL;
1196 break;
1197 case DMA_CCMD_DOMAIN_INVL:
1198 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1199 break;
1200 case DMA_CCMD_DEVICE_INVL:
1201 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1202 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1203 break;
1204 default:
1205 BUG();
1207 val |= DMA_CCMD_ICC;
1209 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1210 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1212 /* Make sure hardware complete it */
1213 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1214 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1216 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1219 /* return value determines if we need a write buffer flush */
1220 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1221 u64 addr, unsigned int size_order, u64 type)
1223 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1224 u64 val = 0, val_iva = 0;
1225 unsigned long flag;
1227 switch (type) {
1228 case DMA_TLB_GLOBAL_FLUSH:
1229 /* global flush doesn't need set IVA_REG */
1230 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1231 break;
1232 case DMA_TLB_DSI_FLUSH:
1233 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1234 break;
1235 case DMA_TLB_PSI_FLUSH:
1236 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1237 /* IH bit is passed in as part of address */
1238 val_iva = size_order | addr;
1239 break;
1240 default:
1241 BUG();
1243 /* Note: set drain read/write */
1244 #if 0
1246 * This is probably to be super secure.. Looks like we can
1247 * ignore it without any impact.
1249 if (cap_read_drain(iommu->cap))
1250 val |= DMA_TLB_READ_DRAIN;
1251 #endif
1252 if (cap_write_drain(iommu->cap))
1253 val |= DMA_TLB_WRITE_DRAIN;
1255 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1256 /* Note: Only uses first TLB reg currently */
1257 if (val_iva)
1258 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1259 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1261 /* Make sure hardware complete it */
1262 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1263 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1265 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1267 /* check IOTLB invalidation granularity */
1268 if (DMA_TLB_IAIG(val) == 0)
1269 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
1270 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1271 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
1272 (unsigned long long)DMA_TLB_IIRG(type),
1273 (unsigned long long)DMA_TLB_IAIG(val));
1276 static struct device_domain_info *
1277 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1278 u8 bus, u8 devfn)
1280 int found = 0;
1281 unsigned long flags;
1282 struct device_domain_info *info;
1283 struct pci_dev *pdev;
1285 if (!ecap_dev_iotlb_support(iommu->ecap))
1286 return NULL;
1288 if (!iommu->qi)
1289 return NULL;
1291 spin_lock_irqsave(&device_domain_lock, flags);
1292 list_for_each_entry(info, &domain->devices, link)
1293 if (info->iommu == iommu && info->bus == bus &&
1294 info->devfn == devfn) {
1295 found = 1;
1296 break;
1298 spin_unlock_irqrestore(&device_domain_lock, flags);
1300 if (!found || !info->dev || !dev_is_pci(info->dev))
1301 return NULL;
1303 pdev = to_pci_dev(info->dev);
1305 if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
1306 return NULL;
1308 if (!dmar_find_matched_atsr_unit(pdev))
1309 return NULL;
1311 return info;
1314 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1316 if (!info || !dev_is_pci(info->dev))
1317 return;
1319 pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
1322 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1324 if (!info->dev || !dev_is_pci(info->dev) ||
1325 !pci_ats_enabled(to_pci_dev(info->dev)))
1326 return;
1328 pci_disable_ats(to_pci_dev(info->dev));
1331 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1332 u64 addr, unsigned mask)
1334 u16 sid, qdep;
1335 unsigned long flags;
1336 struct device_domain_info *info;
1338 spin_lock_irqsave(&device_domain_lock, flags);
1339 list_for_each_entry(info, &domain->devices, link) {
1340 struct pci_dev *pdev;
1341 if (!info->dev || !dev_is_pci(info->dev))
1342 continue;
1344 pdev = to_pci_dev(info->dev);
1345 if (!pci_ats_enabled(pdev))
1346 continue;
1348 sid = info->bus << 8 | info->devfn;
1349 qdep = pci_ats_queue_depth(pdev);
1350 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1352 spin_unlock_irqrestore(&device_domain_lock, flags);
1355 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1356 unsigned long pfn, unsigned int pages, int ih, int map)
1358 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1359 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1361 BUG_ON(pages == 0);
1363 if (ih)
1364 ih = 1 << 6;
1366 * Fallback to domain selective flush if no PSI support or the size is
1367 * too big.
1368 * PSI requires the page size to be a power of two (2 ^ x), and the base
1369 * address to be naturally aligned to that size.
1371 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1372 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1373 DMA_TLB_DSI_FLUSH);
1374 else
1375 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1376 DMA_TLB_PSI_FLUSH);
1379 * In caching mode, changes of pages from non-present to present require
1380 * flush. However, device IOTLB doesn't need to be flushed in this case.
1382 if (!cap_caching_mode(iommu->cap) || !map)
1383 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
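/*
 * Worked example: for pages == 300 the mask computed above is
 * ilog2(__roundup_pow_of_two(300)) == ilog2(512) == 9, i.e. the PSI
 * invalidation covers 512 4KiB pages (2MiB) starting at addr.  If that
 * mask exceeds cap_max_amask_val() (or PSI is unsupported) the code
 * falls back to a domain-selective flush instead.
 */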
1386 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1388 u32 pmen;
1389 unsigned long flags;
1391 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1392 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1393 pmen &= ~DMA_PMEN_EPM;
1394 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1396 /* wait for the protected region status bit to clear */
1397 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1398 readl, !(pmen & DMA_PMEN_PRS), pmen);
1400 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1403 static void iommu_enable_translation(struct intel_iommu *iommu)
1405 u32 sts;
1406 unsigned long flags;
1408 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1409 iommu->gcmd |= DMA_GCMD_TE;
1410 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1412 /* Make sure hardware complete it */
1413 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1414 readl, (sts & DMA_GSTS_TES), sts);
1416 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1419 static void iommu_disable_translation(struct intel_iommu *iommu)
1421 u32 sts;
1422 unsigned long flag;
1424 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1425 iommu->gcmd &= ~DMA_GCMD_TE;
1426 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1428 /* Make sure hardware complete it */
1429 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1430 readl, (!(sts & DMA_GSTS_TES)), sts);
1432 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1436 static int iommu_init_domains(struct intel_iommu *iommu)
1438 unsigned long ndomains;
1439 unsigned long nlongs;
1441 ndomains = cap_ndoms(iommu->cap);
1442 pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
1443 iommu->seq_id, ndomains);
1444 nlongs = BITS_TO_LONGS(ndomains);
1446 spin_lock_init(&iommu->lock);
1448 /* TBD: there might be 64K domains,
1449 * consider other allocation for future chip
1451 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1452 if (!iommu->domain_ids) {
1453 pr_err("IOMMU%d: allocating domain id array failed\n",
1454 iommu->seq_id);
1455 return -ENOMEM;
1457 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1458 GFP_KERNEL);
1459 if (!iommu->domains) {
1460 pr_err("IOMMU%d: allocating domain array failed\n",
1461 iommu->seq_id);
1462 kfree(iommu->domain_ids);
1463 iommu->domain_ids = NULL;
1464 return -ENOMEM;
1468 * if Caching mode is set, then invalid translations are tagged
1469 * with domain id 0. Hence we need to pre-allocate it.
1471 if (cap_caching_mode(iommu->cap))
1472 set_bit(0, iommu->domain_ids);
1473 return 0;
1476 static void free_dmar_iommu(struct intel_iommu *iommu)
1478 struct dmar_domain *domain;
1479 int i;
1481 if ((iommu->domains) && (iommu->domain_ids)) {
1482 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
1484 * Domain id 0 is reserved for invalid translation
1485 * if hardware supports caching mode.
1487 if (cap_caching_mode(iommu->cap) && i == 0)
1488 continue;
1490 domain = iommu->domains[i];
1491 clear_bit(i, iommu->domain_ids);
1492 if (domain_detach_iommu(domain, iommu) == 0 &&
1493 !domain_type_is_vm(domain))
1494 domain_exit(domain);
1498 if (iommu->gcmd & DMA_GCMD_TE)
1499 iommu_disable_translation(iommu);
1501 kfree(iommu->domains);
1502 kfree(iommu->domain_ids);
1503 iommu->domains = NULL;
1504 iommu->domain_ids = NULL;
1506 g_iommus[iommu->seq_id] = NULL;
1508 /* free context mapping */
1509 free_context_table(iommu);
1512 static struct dmar_domain *alloc_domain(int flags)
1514 /* domain id for virtual machine, it won't be set in context */
1515 static atomic_t vm_domid = ATOMIC_INIT(0);
1516 struct dmar_domain *domain;
1518 domain = alloc_domain_mem();
1519 if (!domain)
1520 return NULL;
1522 memset(domain, 0, sizeof(*domain));
1523 domain->nid = -1;
1524 domain->flags = flags;
1525 spin_lock_init(&domain->iommu_lock);
1526 INIT_LIST_HEAD(&domain->devices);
1527 if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1528 domain->id = atomic_inc_return(&vm_domid);
1530 return domain;
1533 static int __iommu_attach_domain(struct dmar_domain *domain,
1534 struct intel_iommu *iommu)
1536 int num;
1537 unsigned long ndomains;
1539 ndomains = cap_ndoms(iommu->cap);
1540 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1541 if (num < ndomains) {
1542 set_bit(num, iommu->domain_ids);
1543 iommu->domains[num] = domain;
1544 } else {
1545 num = -ENOSPC;
1548 return num;
1551 static int iommu_attach_domain(struct dmar_domain *domain,
1552 struct intel_iommu *iommu)
1554 int num;
1555 unsigned long flags;
1557 spin_lock_irqsave(&iommu->lock, flags);
1558 num = __iommu_attach_domain(domain, iommu);
1559 spin_unlock_irqrestore(&iommu->lock, flags);
1560 if (num < 0)
1561 pr_err("IOMMU: no free domain ids\n");
1563 return num;
1566 static int iommu_attach_vm_domain(struct dmar_domain *domain,
1567 struct intel_iommu *iommu)
1569 int num;
1570 unsigned long ndomains;
1572 ndomains = cap_ndoms(iommu->cap);
1573 for_each_set_bit(num, iommu->domain_ids, ndomains)
1574 if (iommu->domains[num] == domain)
1575 return num;
1577 return __iommu_attach_domain(domain, iommu);
1580 static void iommu_detach_domain(struct dmar_domain *domain,
1581 struct intel_iommu *iommu)
1583 unsigned long flags;
1584 int num, ndomains;
1586 spin_lock_irqsave(&iommu->lock, flags);
1587 if (domain_type_is_vm_or_si(domain)) {
1588 ndomains = cap_ndoms(iommu->cap);
1589 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1590 if (iommu->domains[num] == domain) {
1591 clear_bit(num, iommu->domain_ids);
1592 iommu->domains[num] = NULL;
1593 break;
1596 } else {
1597 clear_bit(domain->id, iommu->domain_ids);
1598 iommu->domains[domain->id] = NULL;
1600 spin_unlock_irqrestore(&iommu->lock, flags);
1603 static void domain_attach_iommu(struct dmar_domain *domain,
1604 struct intel_iommu *iommu)
1606 unsigned long flags;
1608 spin_lock_irqsave(&domain->iommu_lock, flags);
1609 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1610 domain->iommu_count++;
1611 if (domain->iommu_count == 1)
1612 domain->nid = iommu->node;
1613 domain_update_iommu_cap(domain);
1615 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1618 static int domain_detach_iommu(struct dmar_domain *domain,
1619 struct intel_iommu *iommu)
1621 unsigned long flags;
1622 int count = INT_MAX;
1624 spin_lock_irqsave(&domain->iommu_lock, flags);
1625 if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
1626 count = --domain->iommu_count;
1627 domain_update_iommu_cap(domain);
1629 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1631 return count;
1634 static struct iova_domain reserved_iova_list;
1635 static struct lock_class_key reserved_rbtree_key;
1637 static int dmar_init_reserved_ranges(void)
1639 struct pci_dev *pdev = NULL;
1640 struct iova *iova;
1641 int i;
1643 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1645 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1646 &reserved_rbtree_key);
1648 /* IOAPIC ranges shouldn't be accessed by DMA */
1649 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1650 IOVA_PFN(IOAPIC_RANGE_END));
1651 if (!iova) {
1652 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1653 return -ENODEV;
1656 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1657 for_each_pci_dev(pdev) {
1658 struct resource *r;
1660 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1661 r = &pdev->resource[i];
1662 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1663 continue;
1664 iova = reserve_iova(&reserved_iova_list,
1665 IOVA_PFN(r->start),
1666 IOVA_PFN(r->end));
1667 if (!iova) {
1668 printk(KERN_ERR "Reserve iova failed\n");
1669 return -ENODEV;
1673 return 0;
1676 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1678 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1681 static inline int guestwidth_to_adjustwidth(int gaw)
1683 int agaw;
1684 int r = (gaw - 12) % 9;
1686 if (r == 0)
1687 agaw = gaw;
1688 else
1689 agaw = gaw + 9 - r;
1690 if (agaw > 64)
1691 agaw = 64;
1692 return agaw;
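/*
 * Illustrative sketch (not compiled; the example function below is
 * hypothetical): a few sample guest widths and what they adjust to.
 */
#if 0
static void __maybe_unused adjustwidth_example(void)
{
	int a = guestwidth_to_adjustwidth(36); /* r == 6, so 36 + 9 - 6 == 39 */
	int b = guestwidth_to_adjustwidth(48); /* r == 0, stays 48            */
	int c = guestwidth_to_adjustwidth(58); /* rounds to 66, capped at 64  */
}
#endif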
1695 static int domain_init(struct dmar_domain *domain, int guest_width)
1697 struct intel_iommu *iommu;
1698 int adjust_width, agaw;
1699 unsigned long sagaw;
1701 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1702 domain_reserve_special_ranges(domain);
1704 /* calculate AGAW */
1705 iommu = domain_get_iommu(domain);
1706 if (guest_width > cap_mgaw(iommu->cap))
1707 guest_width = cap_mgaw(iommu->cap);
1708 domain->gaw = guest_width;
1709 adjust_width = guestwidth_to_adjustwidth(guest_width);
1710 agaw = width_to_agaw(adjust_width);
1711 sagaw = cap_sagaw(iommu->cap);
1712 if (!test_bit(agaw, &sagaw)) {
1713 /* hardware doesn't support it, choose a bigger one */
1714 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1715 agaw = find_next_bit(&sagaw, 5, agaw);
1716 if (agaw >= 5)
1717 return -ENODEV;
1719 domain->agaw = agaw;
1721 if (ecap_coherent(iommu->ecap))
1722 domain->iommu_coherency = 1;
1723 else
1724 domain->iommu_coherency = 0;
1726 if (ecap_sc_support(iommu->ecap))
1727 domain->iommu_snooping = 1;
1728 else
1729 domain->iommu_snooping = 0;
1731 if (intel_iommu_superpage)
1732 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1733 else
1734 domain->iommu_superpage = 0;
1736 domain->nid = iommu->node;
1738 /* always allocate the top pgd */
1739 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1740 if (!domain->pgd)
1741 return -ENOMEM;
1742 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1743 return 0;
1746 static void domain_exit(struct dmar_domain *domain)
1748 struct dmar_drhd_unit *drhd;
1749 struct intel_iommu *iommu;
1750 struct page *freelist = NULL;
1752 /* Domain 0 is reserved, so don't process it */
1753 if (!domain)
1754 return;
1756 /* Flush any lazy unmaps that may reference this domain */
1757 if (!intel_iommu_strict)
1758 flush_unmaps_timeout(0);
1760 /* remove associated devices */
1761 domain_remove_dev_info(domain);
1763 /* destroy iovas */
1764 put_iova_domain(&domain->iovad);
1766 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1768 /* clear attached or cached domains */
1769 rcu_read_lock();
1770 for_each_active_iommu(iommu, drhd)
1771 iommu_detach_domain(domain, iommu);
1772 rcu_read_unlock();
1774 dma_free_pagelist(freelist);
1776 free_domain_mem(domain);
1779 static int domain_context_mapping_one(struct dmar_domain *domain,
1780 struct intel_iommu *iommu,
1781 u8 bus, u8 devfn, int translation)
1783 struct context_entry *context;
1784 unsigned long flags;
1785 struct dma_pte *pgd;
1786 int id;
1787 int agaw;
1788 struct device_domain_info *info = NULL;
1790 pr_debug("Set context mapping for %02x:%02x.%d\n",
1791 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1793 BUG_ON(!domain->pgd);
1794 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1795 translation != CONTEXT_TT_MULTI_LEVEL);
1797 context = device_to_context_entry(iommu, bus, devfn);
1798 if (!context)
1799 return -ENOMEM;
1800 spin_lock_irqsave(&iommu->lock, flags);
1801 if (context_present(context)) {
1802 spin_unlock_irqrestore(&iommu->lock, flags);
1803 return 0;
1806 id = domain->id;
1807 pgd = domain->pgd;
1809 if (domain_type_is_vm_or_si(domain)) {
1810 if (domain_type_is_vm(domain)) {
1811 id = iommu_attach_vm_domain(domain, iommu);
1812 if (id < 0) {
1813 spin_unlock_irqrestore(&iommu->lock, flags);
1814 pr_err("IOMMU: no free domain ids\n");
1815 return -EFAULT;
1819 /* Skip top levels of page tables for
1820 * iommu which has less agaw than default.
1821 * Unnecessary for PT mode.
1823 if (translation != CONTEXT_TT_PASS_THROUGH) {
1824 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1825 pgd = phys_to_virt(dma_pte_addr(pgd));
1826 if (!dma_pte_present(pgd)) {
1827 spin_unlock_irqrestore(&iommu->lock, flags);
1828 return -ENOMEM;
1834 context_set_domain_id(context, id);
1836 if (translation != CONTEXT_TT_PASS_THROUGH) {
1837 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
1838 translation = info ? CONTEXT_TT_DEV_IOTLB :
1839 CONTEXT_TT_MULTI_LEVEL;
1842 * In pass through mode, AW must be programmed to indicate the largest
1843 * AGAW value supported by hardware. And ASR is ignored by hardware.
1845 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1846 context_set_address_width(context, iommu->msagaw);
1847 else {
1848 context_set_address_root(context, virt_to_phys(pgd));
1849 context_set_address_width(context, iommu->agaw);
1852 context_set_translation_type(context, translation);
1853 context_set_fault_enable(context);
1854 context_set_present(context);
1855 domain_flush_cache(domain, context, sizeof(*context));
1858 * It's a non-present to present mapping. If hardware doesn't cache
1859 * non-present entries we only need to flush the write-buffer. If it
1860 * _does_ cache non-present entries, then it does so in the special
1861 * domain #0, which we have to flush:
1863 if (cap_caching_mode(iommu->cap)) {
1864 iommu->flush.flush_context(iommu, 0,
1865 (((u16)bus) << 8) | devfn,
1866 DMA_CCMD_MASK_NOBIT,
1867 DMA_CCMD_DEVICE_INVL);
1868 iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
1869 } else {
1870 iommu_flush_write_buffer(iommu);
1872 iommu_enable_dev_iotlb(info);
1873 spin_unlock_irqrestore(&iommu->lock, flags);
1875 domain_attach_iommu(domain, iommu);
1877 return 0;
1880 struct domain_context_mapping_data {
1881 struct dmar_domain *domain;
1882 struct intel_iommu *iommu;
1883 int translation;
1886 static int domain_context_mapping_cb(struct pci_dev *pdev,
1887 u16 alias, void *opaque)
1889 struct domain_context_mapping_data *data = opaque;
1891 return domain_context_mapping_one(data->domain, data->iommu,
1892 PCI_BUS_NUM(alias), alias & 0xff,
1893 data->translation);
1896 static int
1897 domain_context_mapping(struct dmar_domain *domain, struct device *dev,
1898 int translation)
1900 struct intel_iommu *iommu;
1901 u8 bus, devfn;
1902 struct domain_context_mapping_data data;
1904 iommu = device_to_iommu(dev, &bus, &devfn);
1905 if (!iommu)
1906 return -ENODEV;
1908 if (!dev_is_pci(dev))
1909 return domain_context_mapping_one(domain, iommu, bus, devfn,
1910 translation);
1912 data.domain = domain;
1913 data.iommu = iommu;
1914 data.translation = translation;
1916 return pci_for_each_dma_alias(to_pci_dev(dev),
1917 &domain_context_mapping_cb, &data);
1920 static int domain_context_mapped_cb(struct pci_dev *pdev,
1921 u16 alias, void *opaque)
1923 struct intel_iommu *iommu = opaque;
1925 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
1928 static int domain_context_mapped(struct device *dev)
1930 struct intel_iommu *iommu;
1931 u8 bus, devfn;
1933 iommu = device_to_iommu(dev, &bus, &devfn);
1934 if (!iommu)
1935 return -ENODEV;
1937 if (!dev_is_pci(dev))
1938 return device_context_mapped(iommu, bus, devfn);
1940 return !pci_for_each_dma_alias(to_pci_dev(dev),
1941 domain_context_mapped_cb, iommu);
1944 /* Returns a number of VTD pages, but aligned to MM page size */
1945 static inline unsigned long aligned_nrpages(unsigned long host_addr,
1946 size_t size)
1948 host_addr &= ~PAGE_MASK;
1949 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
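/*
 * Worked example (4KiB kernel pages): host_addr == 0x1234 and
 * size == 0x2000 leave an in-page offset of 0x234, and
 * PAGE_ALIGN(0x234 + 0x2000) >> VTD_PAGE_SHIFT == 0x3000 >> 12 == 3,
 * so three VT-d pages are reserved even though the length alone would
 * only need two.
 */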
1952 /* Return largest possible superpage level for a given mapping */
1953 static inline int hardware_largepage_caps(struct dmar_domain *domain,
1954 unsigned long iov_pfn,
1955 unsigned long phy_pfn,
1956 unsigned long pages)
1958 int support, level = 1;
1959 unsigned long pfnmerge;
1961 support = domain->iommu_superpage;
1963 /* To use a large page, the virtual *and* physical addresses
1964 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1965 of them will mean we have to use smaller pages. So just
1966 merge them and check both at once. */
1967 pfnmerge = iov_pfn | phy_pfn;
1969 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1970 pages >>= VTD_STRIDE_SHIFT;
1971 if (!pages)
1972 break;
1973 pfnmerge >>= VTD_STRIDE_SHIFT;
1974 level++;
1975 support--;
1977 return level;
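/*
 * Illustrative sketch (not compiled; the example function below is
 * hypothetical): a 512-page request whose IOVA and physical PFNs are
 * both 2MiB aligned can be mapped with one level-2 superpage PTE.
 */
#if 0
static void __maybe_unused largepage_example(struct dmar_domain *domain)
{
	/* assumes domain->iommu_superpage >= 1, i.e. 2MiB is supported */
	int level = hardware_largepage_caps(domain, 0x200, 0x145200, 512);

	/*
	 * Both PFNs have their low nine bits clear and 512 pages fill a
	 * whole level-2 entry, so level == 2 here and __domain_mapping()
	 * would set DMA_PTE_LARGE_PAGE in the PTE it writes.
	 */
}
#endif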
1980 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1981 struct scatterlist *sg, unsigned long phys_pfn,
1982 unsigned long nr_pages, int prot)
1984 struct dma_pte *first_pte = NULL, *pte = NULL;
1985 phys_addr_t uninitialized_var(pteval);
1986 unsigned long sg_res;
1987 unsigned int largepage_lvl = 0;
1988 unsigned long lvl_pages = 0;
1990 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
1992 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1993 return -EINVAL;
1995 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1997 if (sg)
1998 sg_res = 0;
1999 else {
2000 sg_res = nr_pages + 1;
2001 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2004 while (nr_pages > 0) {
2005 uint64_t tmp;
2007 if (!sg_res) {
2008 sg_res = aligned_nrpages(sg->offset, sg->length);
2009 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2010 sg->dma_length = sg->length;
2011 pteval = page_to_phys(sg_page(sg)) | prot;
2012 phys_pfn = pteval >> VTD_PAGE_SHIFT;
2015 if (!pte) {
2016 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2018 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2019 if (!pte)
2020 return -ENOMEM;
2021 /* It is a large page */
2022 if (largepage_lvl > 1) {
2023 pteval |= DMA_PTE_LARGE_PAGE;
2024 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2026 * Ensure that old small page tables are
2027 * removed to make room for superpage,
2028 * if they exist.
2030 dma_pte_free_pagetable(domain, iov_pfn,
2031 iov_pfn + lvl_pages - 1);
2032 } else {
2033 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2037 /* We don't need lock here, nobody else
2038 * touches the iova range
2040 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2041 if (tmp) {
2042 static int dumps = 5;
2043 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2044 iov_pfn, tmp, (unsigned long long)pteval);
2045 if (dumps) {
2046 dumps--;
2047 debug_dma_dump_mappings(NULL);
2049 WARN_ON(1);
2052 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2054 BUG_ON(nr_pages < lvl_pages);
2055 BUG_ON(sg_res < lvl_pages);
2057 nr_pages -= lvl_pages;
2058 iov_pfn += lvl_pages;
2059 phys_pfn += lvl_pages;
2060 pteval += lvl_pages * VTD_PAGE_SIZE;
2061 sg_res -= lvl_pages;
2063 /* If the next PTE would be the first in a new page, then we
2064 need to flush the cache on the entries we've just written.
2065 And then we'll need to recalculate 'pte', so clear it and
2066 let it get set again in the if (!pte) block above.
2068 If we're done (!nr_pages) we need to flush the cache too.
2070 Also if we've been setting superpages, we may need to
2071 recalculate 'pte' and switch back to smaller pages for the
2072 end of the mapping, if the trailing size is not enough to
2073 use another superpage (i.e. sg_res < lvl_pages). */
2074 pte++;
2075 if (!nr_pages || first_pte_in_page(pte) ||
2076 (largepage_lvl > 1 && sg_res < lvl_pages)) {
2077 domain_flush_cache(domain, first_pte,
2078 (void *)pte - (void *)first_pte);
2079 pte = NULL;
2082 if (!sg_res && nr_pages)
2083 sg = sg_next(sg);
2085 return 0;
2088 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2089 struct scatterlist *sg, unsigned long nr_pages,
2090 int prot)
2092 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2095 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2096 unsigned long phys_pfn, unsigned long nr_pages,
2097 int prot)
2099 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
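/*
 * Illustrative sketch only (not part of the upstream driver): how a caller
 * would use domain_pfn_mapping() above to map a physically contiguous
 * buffer read/write at a chosen IOVA. The helper name and parameters are
 * hypothetical; aligned_nrpages() is the page-count helper used elsewhere
 * in this file.
 */
static int __maybe_unused example_map_contiguous(struct dmar_domain *domain,
						 unsigned long iov_pfn,
						 phys_addr_t paddr,
						 size_t size)
{
	unsigned long phys_pfn = paddr >> VTD_PAGE_SHIFT;
	unsigned long nr_pages = aligned_nrpages(paddr, size);

	/* prot is masked to READ|WRITE|SNP inside __domain_mapping() */
	return domain_pfn_mapping(domain, iov_pfn, phys_pfn, nr_pages,
				  DMA_PTE_READ | DMA_PTE_WRITE);
}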
2102 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
2104 if (!iommu)
2105 return;
2107 clear_context_table(iommu, bus, devfn);
2108 iommu->flush.flush_context(iommu, 0, 0, 0,
2109 DMA_CCMD_GLOBAL_INVL);
2110 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2113 static inline void unlink_domain_info(struct device_domain_info *info)
2115 assert_spin_locked(&device_domain_lock);
2116 list_del(&info->link);
2117 list_del(&info->global);
2118 if (info->dev)
2119 info->dev->archdata.iommu = NULL;
2122 static void domain_remove_dev_info(struct dmar_domain *domain)
2124 struct device_domain_info *info, *tmp;
2125 unsigned long flags;
2127 spin_lock_irqsave(&device_domain_lock, flags);
2128 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
2129 unlink_domain_info(info);
2130 spin_unlock_irqrestore(&device_domain_lock, flags);
2132 iommu_disable_dev_iotlb(info);
2133 iommu_detach_dev(info->iommu, info->bus, info->devfn);
2135 if (domain_type_is_vm(domain)) {
2136 iommu_detach_dependent_devices(info->iommu, info->dev);
2137 domain_detach_iommu(domain, info->iommu);
2140 free_devinfo_mem(info);
2141 spin_lock_irqsave(&device_domain_lock, flags);
2143 spin_unlock_irqrestore(&device_domain_lock, flags);
2147 /* find_domain
2148 * Note: struct device->archdata.iommu stores the device_domain_info */
2150 static struct dmar_domain *find_domain(struct device *dev)
2152 struct device_domain_info *info;
2154 /* No lock here, assumes no domain exit in normal case */
2155 info = dev->archdata.iommu;
2156 if (info)
2157 return info->domain;
2158 return NULL;
2161 static inline struct device_domain_info *
2162 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2164 struct device_domain_info *info;
2166 list_for_each_entry(info, &device_domain_list, global)
2167 if (info->iommu->segment == segment && info->bus == bus &&
2168 info->devfn == devfn)
2169 return info;
2171 return NULL;
2174 static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
2175 int bus, int devfn,
2176 struct device *dev,
2177 struct dmar_domain *domain)
2179 struct dmar_domain *found = NULL;
2180 struct device_domain_info *info;
2181 unsigned long flags;
2183 info = alloc_devinfo_mem();
2184 if (!info)
2185 return NULL;
2187 info->bus = bus;
2188 info->devfn = devfn;
2189 info->dev = dev;
2190 info->domain = domain;
2191 info->iommu = iommu;
2193 spin_lock_irqsave(&device_domain_lock, flags);
2194 if (dev)
2195 found = find_domain(dev);
2196 else {
2197 struct device_domain_info *info2;
2198 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2199 if (info2)
2200 found = info2->domain;
2202 if (found) {
2203 spin_unlock_irqrestore(&device_domain_lock, flags);
2204 free_devinfo_mem(info);
2205 /* Caller must free the original domain */
2206 return found;
2209 list_add(&info->link, &domain->devices);
2210 list_add(&info->global, &device_domain_list);
2211 if (dev)
2212 dev->archdata.iommu = info;
2213 spin_unlock_irqrestore(&device_domain_lock, flags);
2215 return domain;
2218 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2220 *(u16 *)opaque = alias;
2221 return 0;
2224 /* domain is initialized */
2225 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2227 struct dmar_domain *domain, *tmp;
2228 struct intel_iommu *iommu;
2229 struct device_domain_info *info;
2230 u16 dma_alias;
2231 unsigned long flags;
2232 u8 bus, devfn;
2234 domain = find_domain(dev);
2235 if (domain)
2236 return domain;
2238 iommu = device_to_iommu(dev, &bus, &devfn);
2239 if (!iommu)
2240 return NULL;
2242 if (dev_is_pci(dev)) {
2243 struct pci_dev *pdev = to_pci_dev(dev);
2245 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2247 spin_lock_irqsave(&device_domain_lock, flags);
2248 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2249 PCI_BUS_NUM(dma_alias),
2250 dma_alias & 0xff);
2251 if (info) {
2252 iommu = info->iommu;
2253 domain = info->domain;
2255 spin_unlock_irqrestore(&device_domain_lock, flags);
2257 /* DMA alias already has a domain, use it */
2258 if (info)
2259 goto found_domain;
2262 /* Allocate and initialize new domain for the device */
2263 domain = alloc_domain(0);
2264 if (!domain)
2265 return NULL;
2266 domain->id = iommu_attach_domain(domain, iommu);
2267 if (domain->id < 0) {
2268 free_domain_mem(domain);
2269 return NULL;
2271 domain_attach_iommu(domain, iommu);
2272 if (domain_init(domain, gaw)) {
2273 domain_exit(domain);
2274 return NULL;
2277 /* register PCI DMA alias device */
2278 if (dev_is_pci(dev)) {
2279 tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2280 dma_alias & 0xff, NULL, domain);
2282 if (!tmp || tmp != domain) {
2283 domain_exit(domain);
2284 domain = tmp;
2287 if (!domain)
2288 return NULL;
2291 found_domain:
2292 tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2294 if (!tmp || tmp != domain) {
2295 domain_exit(domain);
2296 domain = tmp;
2299 return domain;
2302 static int iommu_identity_mapping;
2303 #define IDENTMAP_ALL 1
2304 #define IDENTMAP_GFX 2
2305 #define IDENTMAP_AZALIA 4
2307 static int iommu_domain_identity_map(struct dmar_domain *domain,
2308 unsigned long long start,
2309 unsigned long long end)
2311 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2312 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2314 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2315 dma_to_mm_pfn(last_vpfn))) {
2316 printk(KERN_ERR "IOMMU: reserve iova failed\n");
2317 return -ENOMEM;
2320 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2321 start, end, domain->id);
2323 /* RMRR range might overlap with the physical memory range,
2324 * so clear it first */
2326 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2328 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2329 last_vpfn - first_vpfn + 1,
2330 DMA_PTE_READ|DMA_PTE_WRITE);
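/*
 * Worked example (illustrative, not from the source): identity-mapping the
 * range 0xa0000-0xbffff reserves vPFNs 0xa0-0xbf in the domain's iovad,
 * clears any PTEs a physical-memory map may have left there, and then maps
 * the 0x20 pages with vPFN == PFN, read/write.
 */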
2333 static int iommu_prepare_identity_map(struct device *dev,
2334 unsigned long long start,
2335 unsigned long long end)
2337 struct dmar_domain *domain;
2338 int ret;
2340 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2341 if (!domain)
2342 return -ENOMEM;
2344 /* For _hardware_ passthrough, don't bother. But for software
2345 passthrough, we do it anyway -- it may indicate a memory
2346 range which is reserved in E820, and so didn't get set
2347 up to start with in si_domain */
2348 if (domain == si_domain && hw_pass_through) {
2349 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2350 dev_name(dev), start, end);
2351 return 0;
2354 printk(KERN_INFO
2355 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2356 dev_name(dev), start, end);
2358 if (end < start) {
2359 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2360 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2361 dmi_get_system_info(DMI_BIOS_VENDOR),
2362 dmi_get_system_info(DMI_BIOS_VERSION),
2363 dmi_get_system_info(DMI_PRODUCT_VERSION));
2364 ret = -EIO;
2365 goto error;
2368 if (end >> agaw_to_width(domain->agaw)) {
2369 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2370 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2371 agaw_to_width(domain->agaw),
2372 dmi_get_system_info(DMI_BIOS_VENDOR),
2373 dmi_get_system_info(DMI_BIOS_VERSION),
2374 dmi_get_system_info(DMI_PRODUCT_VERSION));
2375 ret = -EIO;
2376 goto error;
2379 ret = iommu_domain_identity_map(domain, start, end);
2380 if (ret)
2381 goto error;
2383 /* context entry init */
2384 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
2385 if (ret)
2386 goto error;
2388 return 0;
2390 error:
2391 domain_exit(domain);
2392 return ret;
2395 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2396 struct device *dev)
2398 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2399 return 0;
2400 return iommu_prepare_identity_map(dev, rmrr->base_address,
2401 rmrr->end_address);
2404 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2405 static inline void iommu_prepare_isa(void)
2407 struct pci_dev *pdev;
2408 int ret;
2410 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2411 if (!pdev)
2412 return;
2414 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2415 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2417 if (ret)
2418 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2419 "floppy might not work\n");
2421 pci_dev_put(pdev);
2423 #else
2424 static inline void iommu_prepare_isa(void)
2426 return;
2428 #endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
2430 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2432 static int __init si_domain_init(int hw)
2434 struct dmar_drhd_unit *drhd;
2435 struct intel_iommu *iommu;
2436 int nid, ret = 0;
2437 bool first = true;
2439 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2440 if (!si_domain)
2441 return -EFAULT;
2443 for_each_active_iommu(iommu, drhd) {
2444 ret = iommu_attach_domain(si_domain, iommu);
2445 if (ret < 0) {
2446 domain_exit(si_domain);
2447 return -EFAULT;
2448 } else if (first) {
2449 si_domain->id = ret;
2450 first = false;
2451 } else if (si_domain->id != ret) {
2452 domain_exit(si_domain);
2453 return -EFAULT;
2455 domain_attach_iommu(si_domain, iommu);
2458 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2459 domain_exit(si_domain);
2460 return -EFAULT;
2463 pr_debug("IOMMU: identity mapping domain is domain %d\n",
2464 si_domain->id);
2466 if (hw)
2467 return 0;
2469 for_each_online_node(nid) {
2470 unsigned long start_pfn, end_pfn;
2471 int i;
2473 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2474 ret = iommu_domain_identity_map(si_domain,
2475 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2476 if (ret)
2477 return ret;
2481 return 0;
2484 static int identity_mapping(struct device *dev)
2486 struct device_domain_info *info;
2488 if (likely(!iommu_identity_mapping))
2489 return 0;
2491 info = dev->archdata.iommu;
2492 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2493 return (info->domain == si_domain);
2495 return 0;
2498 static int domain_add_dev_info(struct dmar_domain *domain,
2499 struct device *dev, int translation)
2501 struct dmar_domain *ndomain;
2502 struct intel_iommu *iommu;
2503 u8 bus, devfn;
2504 int ret;
2506 iommu = device_to_iommu(dev, &bus, &devfn);
2507 if (!iommu)
2508 return -ENODEV;
2510 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2511 if (ndomain != domain)
2512 return -EBUSY;
2514 ret = domain_context_mapping(domain, dev, translation);
2515 if (ret) {
2516 domain_remove_one_dev_info(domain, dev);
2517 return ret;
2520 return 0;
2523 static bool device_has_rmrr(struct device *dev)
2525 struct dmar_rmrr_unit *rmrr;
2526 struct device *tmp;
2527 int i;
2529 rcu_read_lock();
2530 for_each_rmrr_units(rmrr) {
2532 /* Return TRUE if this RMRR contains the device that
2533 * is passed in. */
2535 for_each_active_dev_scope(rmrr->devices,
2536 rmrr->devices_cnt, i, tmp)
2537 if (tmp == dev) {
2538 rcu_read_unlock();
2539 return true;
2542 rcu_read_unlock();
2543 return false;
2547 /* There are a couple of cases where we need to restrict the functionality of
2548 * devices associated with RMRRs. The first is when evaluating a device for
2549 * identity mapping because problems exist when devices are moved in and out
2550 * of domains and their respective RMRR information is lost. This means that
2551 * a device with associated RMRRs will never be in a "passthrough" domain.
2552 * The second is use of the device through the IOMMU API. This interface
2553 * expects to have full control of the IOVA space for the device. We cannot
2554 * satisfy both the requirement that RMRR access is maintained and have an
2555 * unencumbered IOVA space. We also have no ability to quiesce the device's
2556 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2557 * We therefore prevent devices associated with an RMRR from participating in
2558 * the IOMMU API, which eliminates them from device assignment.
2560 * In both cases we assume that PCI USB devices with RMRRs have them largely
2561 * for historical reasons and that the RMRR space is not actively used post
2562 * boot. This exclusion may change if vendors begin to abuse it. */
2564 static bool device_is_rmrr_locked(struct device *dev)
2566 if (!device_has_rmrr(dev))
2567 return false;
2569 if (dev_is_pci(dev)) {
2570 struct pci_dev *pdev = to_pci_dev(dev);
2572 if ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
2573 return false;
2576 return true;
2579 static int iommu_should_identity_map(struct device *dev, int startup)
2582 if (dev_is_pci(dev)) {
2583 struct pci_dev *pdev = to_pci_dev(dev);
2585 if (device_is_rmrr_locked(dev))
2586 return 0;
2588 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2589 return 1;
2591 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2592 return 1;
2594 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2595 return 0;
2598 /* We want to start off with all devices in the 1:1 domain, and
2599 * take them out later if we find they can't access all of memory.
2601 * However, we can't do this for PCI devices behind bridges,
2602 * because all PCI devices behind the same bridge will end up
2603 * with the same source-id on their transactions.
2605 * Practically speaking, we can't change things around for these
2606 * devices at run-time, because we can't be sure there'll be no
2607 * DMA transactions in flight for any of their siblings.
2609 * So PCI devices (unless they're on the root bus) as well as
2610 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2611 * the 1:1 domain, just in _case_ one of their siblings turns out
2612 * not to be able to map all of memory. */
2614 if (!pci_is_pcie(pdev)) {
2615 if (!pci_is_root_bus(pdev->bus))
2616 return 0;
2617 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2618 return 0;
2619 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2620 return 0;
2621 } else {
2622 if (device_has_rmrr(dev))
2623 return 0;
2627 /* At boot time, we don't yet know if devices will be 64-bit capable.
2628 * Assume that they will -- if they turn out not to be, then we can
2629 * take them out of the 1:1 domain later. */
2631 if (!startup) {
2633 /* If the device's dma_mask is less than the system's memory
2634 * size then this is not a candidate for identity mapping. */
2636 u64 dma_mask = *dev->dma_mask;
2638 if (dev->coherent_dma_mask &&
2639 dev->coherent_dma_mask < dma_mask)
2640 dma_mask = dev->coherent_dma_mask;
2642 return dma_mask >= dma_get_required_mask(dev);
2645 return 1;
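/*
 * Illustrative example (not from the source): at run time (startup == 0) a
 * device that only advertises a 32-bit dma_mask on a machine whose memory
 * needs more than 32 address bits fails the dma_get_required_mask() check
 * above and is kept out of the 1:1 domain, while a 64-bit capable device
 * on the same machine stays identity-mapped.
 */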
2648 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2650 int ret;
2652 if (!iommu_should_identity_map(dev, 1))
2653 return 0;
2655 ret = domain_add_dev_info(si_domain, dev,
2656 hw ? CONTEXT_TT_PASS_THROUGH :
2657 CONTEXT_TT_MULTI_LEVEL);
2658 if (!ret)
2659 pr_info("IOMMU: %s identity mapping for device %s\n",
2660 hw ? "hardware" : "software", dev_name(dev));
2661 else if (ret == -ENODEV)
2662 /* device not associated with an iommu */
2663 ret = 0;
2665 return ret;
2669 static int __init iommu_prepare_static_identity_mapping(int hw)
2671 struct pci_dev *pdev = NULL;
2672 struct dmar_drhd_unit *drhd;
2673 struct intel_iommu *iommu;
2674 struct device *dev;
2675 int i;
2676 int ret = 0;
2678 ret = si_domain_init(hw);
2679 if (ret)
2680 return -EFAULT;
2682 for_each_pci_dev(pdev) {
2683 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2684 if (ret)
2685 return ret;
2688 for_each_active_iommu(iommu, drhd)
2689 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2690 struct acpi_device_physical_node *pn;
2691 struct acpi_device *adev;
2693 if (dev->bus != &acpi_bus_type)
2694 continue;
2696 adev = to_acpi_device(dev);
2697 mutex_lock(&adev->physical_node_lock);
2698 list_for_each_entry(pn, &adev->physical_node_list, node) {
2699 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2700 if (ret)
2701 break;
2703 mutex_unlock(&adev->physical_node_lock);
2704 if (ret)
2705 return ret;
2708 return 0;
2711 static int __init init_dmars(void)
2713 struct dmar_drhd_unit *drhd;
2714 struct dmar_rmrr_unit *rmrr;
2715 struct device *dev;
2716 struct intel_iommu *iommu;
2717 int i, ret;
2720 /* for each drhd
2721 * allocate root
2722 * initialize and program root entry to not present
2723 * endfor */
2725 for_each_drhd_unit(drhd) {
2727 /* lock not needed as this is only incremented in the single
2728 * threaded kernel __init code path; all other accesses are
2729 * read-only */
2731 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2732 g_num_of_iommus++;
2733 continue;
2735 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2736 IOMMU_UNITS_SUPPORTED);
2739 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2740 GFP_KERNEL);
2741 if (!g_iommus) {
2742 printk(KERN_ERR "Allocating global iommu array failed\n");
2743 ret = -ENOMEM;
2744 goto error;
2747 deferred_flush = kzalloc(g_num_of_iommus *
2748 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2749 if (!deferred_flush) {
2750 ret = -ENOMEM;
2751 goto free_g_iommus;
2754 for_each_active_iommu(iommu, drhd) {
2755 g_iommus[iommu->seq_id] = iommu;
2757 ret = iommu_init_domains(iommu);
2758 if (ret)
2759 goto free_iommu;
2762 /* TBD:
2763 * we could share the same root & context tables
2764 * among all IOMMUs. Need to split it later. */
2766 ret = iommu_alloc_root_entry(iommu);
2767 if (ret) {
2768 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2769 goto free_iommu;
2771 if (!ecap_pass_through(iommu->ecap))
2772 hw_pass_through = 0;
2776 /* Start from a sane iommu hardware state. */
2778 for_each_active_iommu(iommu, drhd) {
2780 /* If the queued invalidation is already initialized by us
2781 * (for example, while enabling interrupt-remapping) then
2782 * things are already rolling from a sane state. */
2784 if (iommu->qi)
2785 continue;
2788 /* Clear any previous faults. */
2790 dmar_fault(-1, iommu);
2792 /* Disable queued invalidation if supported and already enabled
2793 * before OS handover. */
2795 dmar_disable_qi(iommu);
2798 for_each_active_iommu(iommu, drhd) {
2799 if (dmar_enable_qi(iommu)) {
2801 /* Queued Invalidation not enabled; use Register-Based
2802 * Invalidation */
2804 iommu->flush.flush_context = __iommu_flush_context;
2805 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2806 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
2807 "invalidation\n",
2808 iommu->seq_id,
2809 (unsigned long long)drhd->reg_base_addr);
2810 } else {
2811 iommu->flush.flush_context = qi_flush_context;
2812 iommu->flush.flush_iotlb = qi_flush_iotlb;
2813 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
2814 "invalidation\n",
2815 iommu->seq_id,
2816 (unsigned long long)drhd->reg_base_addr);
2820 if (iommu_pass_through)
2821 iommu_identity_mapping |= IDENTMAP_ALL;
2823 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
2824 iommu_identity_mapping |= IDENTMAP_GFX;
2825 #endif
2827 check_tylersburg_isoch();
2830 /* If pass-through is not set or not enabled, set up context entries for
2831 * identity mappings for RMRR, GFX and ISA, and possibly fall back to
2832 * static identity mapping if iommu_identity_mapping is set. */
2834 if (iommu_identity_mapping) {
2835 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2836 if (ret) {
2837 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2838 goto free_iommu;
2842 /* For each rmrr
2843 * for each dev attached to rmrr
2844 * do
2845 * locate drhd for dev, alloc domain for dev
2846 * allocate free domain
2847 * allocate page table entries for rmrr
2848 * if context not allocated for bus
2849 * allocate and init context
2850 * set present in root table for this bus
2851 * init context with domain, translation etc
2852 * endfor
2853 * endfor */
2855 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2856 for_each_rmrr_units(rmrr) {
2857 /* some BIOSes list non-existent devices in the DMAR table. */
2858 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
2859 i, dev) {
2860 ret = iommu_prepare_rmrr_dev(rmrr, dev);
2861 if (ret)
2862 printk(KERN_ERR
2863 "IOMMU: mapping reserved region failed\n");
2867 iommu_prepare_isa();
2870 /* for each drhd
2871 * enable fault log
2872 * global invalidate context cache
2873 * global invalidate iotlb
2874 * enable translation */
2876 for_each_iommu(iommu, drhd) {
2877 if (drhd->ignored) {
2879 /* we always have to disable PMRs or DMA may fail on
2880 * this device */
2882 if (force_on)
2883 iommu_disable_protect_mem_regions(iommu);
2884 continue;
2887 iommu_flush_write_buffer(iommu);
2889 ret = dmar_set_interrupt(iommu);
2890 if (ret)
2891 goto free_iommu;
2893 iommu_set_root_entry(iommu);
2895 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2896 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2897 iommu_enable_translation(iommu);
2898 iommu_disable_protect_mem_regions(iommu);
2901 return 0;
2903 free_iommu:
2904 for_each_active_iommu(iommu, drhd)
2905 free_dmar_iommu(iommu);
2906 kfree(deferred_flush);
2907 free_g_iommus:
2908 kfree(g_iommus);
2909 error:
2910 return ret;
2913 /* This takes a number of _MM_ pages, not VTD pages */
2914 static struct iova *intel_alloc_iova(struct device *dev,
2915 struct dmar_domain *domain,
2916 unsigned long nrpages, uint64_t dma_mask)
2918 struct iova *iova = NULL;
2920 /* Restrict dma_mask to the width that the iommu can handle */
2921 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2923 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2925 /* First try to allocate an io virtual address in
2926 * DMA_BIT_MASK(32) and if that fails then try allocating
2927 * from a higher range */
2929 iova = alloc_iova(&domain->iovad, nrpages,
2930 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2931 if (iova)
2932 return iova;
2934 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2935 if (unlikely(!iova)) {
2936 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2937 nrpages, dev_name(dev));
2938 return NULL;
2941 return iova;
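/*
 * Illustrative example (not from the source): for a device with a 64-bit
 * dma_mask the allocator above first tries the IOVA space below 4GiB and
 * only falls back to the full mask when that range is exhausted; with
 * dmar_forcedac set the low attempt is skipped and the allocation comes
 * straight from the full mask.
 */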
2944 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
2946 struct dmar_domain *domain;
2947 int ret;
2949 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2950 if (!domain) {
2951 printk(KERN_ERR "Allocating domain for %s failed",
2952 dev_name(dev));
2953 return NULL;
2956 /* make sure context mapping is ok */
2957 if (unlikely(!domain_context_mapped(dev))) {
2958 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
2959 if (ret) {
2960 printk(KERN_ERR "Domain context map for %s failed",
2961 dev_name(dev));
2962 return NULL;
2966 return domain;
2969 static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
2971 struct device_domain_info *info;
2973 /* No lock here, assumes no domain exit in normal case */
2974 info = dev->archdata.iommu;
2975 if (likely(info))
2976 return info->domain;
2978 return __get_valid_domain_for_dev(dev);
2981 static int iommu_dummy(struct device *dev)
2983 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2986 /* Check if the dev needs to go through non-identity map and unmap process. */
2987 static int iommu_no_mapping(struct device *dev)
2989 int found;
2991 if (iommu_dummy(dev))
2992 return 1;
2994 if (!iommu_identity_mapping)
2995 return 0;
2997 found = identity_mapping(dev);
2998 if (found) {
2999 if (iommu_should_identity_map(dev, 0))
3000 return 1;
3001 else {
3003 /* The 32-bit device is removed from si_domain and falls back
3004 * to non-identity mapping. */
3006 domain_remove_one_dev_info(si_domain, dev);
3007 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
3008 dev_name(dev));
3009 return 0;
3011 } else {
3013 /* In case of a 64-bit DMA device detached from a VM, the device
3014 * is put into si_domain for identity mapping. */
3016 if (iommu_should_identity_map(dev, 0)) {
3017 int ret;
3018 ret = domain_add_dev_info(si_domain, dev,
3019 hw_pass_through ?
3020 CONTEXT_TT_PASS_THROUGH :
3021 CONTEXT_TT_MULTI_LEVEL);
3022 if (!ret) {
3023 printk(KERN_INFO "64bit %s uses identity mapping\n",
3024 dev_name(dev));
3025 return 1;
3030 return 0;
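/*
 * Summary of the decision above (illustrative, not from the source):
 *  - dummy device (DUMMY_DEVICE_DOMAIN_INFO):                 bypass
 *  - identity-mapped and still suitable for 1:1:              bypass
 *  - identity-mapped but no longer suitable (e.g. 32-bit):    drop out of
 *    si_domain and translate
 *  - not mapped but now suitable for 1:1:                     move into
 *    si_domain and bypass
 *  - everything else:                                         translate
 */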
3033 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3034 size_t size, int dir, u64 dma_mask)
3036 struct dmar_domain *domain;
3037 phys_addr_t start_paddr;
3038 struct iova *iova;
3039 int prot = 0;
3040 int ret;
3041 struct intel_iommu *iommu;
3042 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3044 BUG_ON(dir == DMA_NONE);
3046 if (iommu_no_mapping(dev))
3047 return paddr;
3049 domain = get_valid_domain_for_dev(dev);
3050 if (!domain)
3051 return 0;
3053 iommu = domain_get_iommu(domain);
3054 size = aligned_nrpages(paddr, size);
3056 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3057 if (!iova)
3058 goto error;
3061 /* Check if DMAR supports zero-length reads on write-only
3062 * mappings. */
3064 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3065 !cap_zlr(iommu->cap))
3066 prot |= DMA_PTE_READ;
3067 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3068 prot |= DMA_PTE_WRITE;
3070 /* paddr - (paddr + size) might be a partial page; we should map the
3071 * whole page. Note: if two parts of one page are mapped separately, we
3072 * might have two guest addresses mapping to the same host paddr, but
3073 * this is not a big problem */
3075 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
3076 mm_to_dma_pfn(paddr_pfn), size, prot);
3077 if (ret)
3078 goto error;
3080 /* it's a non-present to present mapping. Only flush if caching mode */
3081 if (cap_caching_mode(iommu->cap))
3082 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
3083 else
3084 iommu_flush_write_buffer(iommu);
3086 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3087 start_paddr += paddr & ~PAGE_MASK;
3088 return start_paddr;
3090 error:
3091 if (iova)
3092 __free_iova(&domain->iovad, iova);
3093 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
3094 dev_name(dev), size, (unsigned long long)paddr, dir);
3095 return 0;
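/*
 * Worked example (illustrative, not from the source): mapping paddr
 * 0x12345678 with size 0x200 gives aligned_nrpages() == 1 VT-d page; if
 * the allocator hands back pfn_lo 0xfffff, the returned handle is
 * (0xfffff << PAGE_SHIFT) + 0x678, i.e. the page offset of the original
 * physical address is preserved in the DMA address.
 */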
3098 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3099 unsigned long offset, size_t size,
3100 enum dma_data_direction dir,
3101 struct dma_attrs *attrs)
3103 return __intel_map_single(dev, page_to_phys(page) + offset, size,
3104 dir, *dev->dma_mask);
3107 static void flush_unmaps(void)
3109 int i, j;
3111 timer_on = 0;
3113 /* just flush them all */
3114 for (i = 0; i < g_num_of_iommus; i++) {
3115 struct intel_iommu *iommu = g_iommus[i];
3116 if (!iommu)
3117 continue;
3119 if (!deferred_flush[i].next)
3120 continue;
3122 /* In caching mode, global flushes turn emulation expensive */
3123 if (!cap_caching_mode(iommu->cap))
3124 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3125 DMA_TLB_GLOBAL_FLUSH);
3126 for (j = 0; j < deferred_flush[i].next; j++) {
3127 unsigned long mask;
3128 struct iova *iova = deferred_flush[i].iova[j];
3129 struct dmar_domain *domain = deferred_flush[i].domain[j];
3131 /* On real hardware multiple invalidations are expensive */
3132 if (cap_caching_mode(iommu->cap))
3133 iommu_flush_iotlb_psi(iommu, domain->id,
3134 iova->pfn_lo, iova_size(iova),
3135 !deferred_flush[i].freelist[j], 0);
3136 else {
3137 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
3138 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3139 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3141 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
3142 if (deferred_flush[i].freelist[j])
3143 dma_free_pagelist(deferred_flush[i].freelist[j]);
3145 deferred_flush[i].next = 0;
3148 list_size = 0;
3151 static void flush_unmaps_timeout(unsigned long data)
3153 unsigned long flags;
3155 spin_lock_irqsave(&async_umap_flush_lock, flags);
3156 flush_unmaps();
3157 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3160 static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
3162 unsigned long flags;
3163 int next, iommu_id;
3164 struct intel_iommu *iommu;
3166 spin_lock_irqsave(&async_umap_flush_lock, flags);
3167 if (list_size == HIGH_WATER_MARK)
3168 flush_unmaps();
3170 iommu = domain_get_iommu(dom);
3171 iommu_id = iommu->seq_id;
3173 next = deferred_flush[iommu_id].next;
3174 deferred_flush[iommu_id].domain[next] = dom;
3175 deferred_flush[iommu_id].iova[next] = iova;
3176 deferred_flush[iommu_id].freelist[next] = freelist;
3177 deferred_flush[iommu_id].next++;
3179 if (!timer_on) {
3180 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3181 timer_on = 1;
3183 list_size++;
3184 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
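/*
 * Illustrative note (not from the source): in strict mode
 * (intel_iommu_strict) intel_unmap() below flushes the IOTLB and frees the
 * IOVA synchronously; otherwise unmaps are batched per IOMMU in
 * deferred_flush[] and drained either when list_size hits HIGH_WATER_MARK
 * or when the 10ms unmap_timer fires, trading a short window of stale
 * IOTLB entries for far fewer flushes.
 */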
3187 static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
3189 struct dmar_domain *domain;
3190 unsigned long start_pfn, last_pfn;
3191 struct iova *iova;
3192 struct intel_iommu *iommu;
3193 struct page *freelist;
3195 if (iommu_no_mapping(dev))
3196 return;
3198 domain = find_domain(dev);
3199 BUG_ON(!domain);
3201 iommu = domain_get_iommu(domain);
3203 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
3204 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3205 (unsigned long long)dev_addr))
3206 return;
3208 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3209 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3211 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3212 dev_name(dev), start_pfn, last_pfn);
3214 freelist = domain_unmap(domain, start_pfn, last_pfn);
3216 if (intel_iommu_strict) {
3217 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3218 last_pfn - start_pfn + 1, !freelist, 0);
3219 /* free iova */
3220 __free_iova(&domain->iovad, iova);
3221 dma_free_pagelist(freelist);
3222 } else {
3223 add_unmap(domain, iova, freelist);
3225 /* queue up the release of the unmap to save the 1/6th of the
3226 * cpu used up by the iotlb flush operation... */
3231 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3232 size_t size, enum dma_data_direction dir,
3233 struct dma_attrs *attrs)
3235 intel_unmap(dev, dev_addr);
3238 static void *intel_alloc_coherent(struct device *dev, size_t size,
3239 dma_addr_t *dma_handle, gfp_t flags,
3240 struct dma_attrs *attrs)
3242 struct page *page = NULL;
3243 int order;
3245 size = PAGE_ALIGN(size);
3246 order = get_order(size);
3248 if (!iommu_no_mapping(dev))
3249 flags &= ~(GFP_DMA | GFP_DMA32);
3250 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3251 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3252 flags |= GFP_DMA;
3253 else
3254 flags |= GFP_DMA32;
3257 if (flags & __GFP_WAIT) {
3258 unsigned int count = size >> PAGE_SHIFT;
3260 page = dma_alloc_from_contiguous(dev, count, order);
3261 if (page && iommu_no_mapping(dev) &&
3262 page_to_phys(page) + size > dev->coherent_dma_mask) {
3263 dma_release_from_contiguous(dev, page, count);
3264 page = NULL;
3268 if (!page)
3269 page = alloc_pages(flags, order);
3270 if (!page)
3271 return NULL;
3272 memset(page_address(page), 0, size);
3274 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3275 DMA_BIDIRECTIONAL,
3276 dev->coherent_dma_mask);
3277 if (*dma_handle)
3278 return page_address(page);
3279 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3280 __free_pages(page, order);
3282 return NULL;
3285 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3286 dma_addr_t dma_handle, struct dma_attrs *attrs)
3288 int order;
3289 struct page *page = virt_to_page(vaddr);
3291 size = PAGE_ALIGN(size);
3292 order = get_order(size);
3294 intel_unmap(dev, dma_handle);
3295 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3296 __free_pages(page, order);
3299 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3300 int nelems, enum dma_data_direction dir,
3301 struct dma_attrs *attrs)
3303 intel_unmap(dev, sglist[0].dma_address);
3306 static int intel_nontranslate_map_sg(struct device *hddev,
3307 struct scatterlist *sglist, int nelems, int dir)
3309 int i;
3310 struct scatterlist *sg;
3312 for_each_sg(sglist, sg, nelems, i) {
3313 BUG_ON(!sg_page(sg));
3314 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3315 sg->dma_length = sg->length;
3317 return nelems;
3320 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3321 enum dma_data_direction dir, struct dma_attrs *attrs)
3323 int i;
3324 struct dmar_domain *domain;
3325 size_t size = 0;
3326 int prot = 0;
3327 struct iova *iova = NULL;
3328 int ret;
3329 struct scatterlist *sg;
3330 unsigned long start_vpfn;
3331 struct intel_iommu *iommu;
3333 BUG_ON(dir == DMA_NONE);
3334 if (iommu_no_mapping(dev))
3335 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3337 domain = get_valid_domain_for_dev(dev);
3338 if (!domain)
3339 return 0;
3341 iommu = domain_get_iommu(domain);
3343 for_each_sg(sglist, sg, nelems, i)
3344 size += aligned_nrpages(sg->offset, sg->length);
3346 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3347 *dev->dma_mask);
3348 if (!iova) {
3349 sglist->dma_length = 0;
3350 return 0;
3354 /* Check if DMAR supports zero-length reads on write-only
3355 * mappings. */
3357 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3358 !cap_zlr(iommu->cap))
3359 prot |= DMA_PTE_READ;
3360 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3361 prot |= DMA_PTE_WRITE;
3363 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3365 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3366 if (unlikely(ret)) {
3367 dma_pte_free_pagetable(domain, start_vpfn,
3368 start_vpfn + size - 1);
3369 __free_iova(&domain->iovad, iova);
3370 return 0;
3373 /* it's a non-present to present mapping. Only flush if caching mode */
3374 if (cap_caching_mode(iommu->cap))
3375 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
3376 else
3377 iommu_flush_write_buffer(iommu);
3379 return nelems;
3382 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3384 return !dma_addr;
3387 struct dma_map_ops intel_dma_ops = {
3388 .alloc = intel_alloc_coherent,
3389 .free = intel_free_coherent,
3390 .map_sg = intel_map_sg,
3391 .unmap_sg = intel_unmap_sg,
3392 .map_page = intel_map_page,
3393 .unmap_page = intel_unmap_page,
3394 .mapping_error = intel_mapping_error,
3397 static inline int iommu_domain_cache_init(void)
3399 int ret = 0;
3401 iommu_domain_cache = kmem_cache_create("iommu_domain",
3402 sizeof(struct dmar_domain),
3404 SLAB_HWCACHE_ALIGN,
3406 NULL);
3407 if (!iommu_domain_cache) {
3408 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3409 ret = -ENOMEM;
3412 return ret;
3415 static inline int iommu_devinfo_cache_init(void)
3417 int ret = 0;
3419 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3420 sizeof(struct device_domain_info),
3422 SLAB_HWCACHE_ALIGN,
3423 NULL);
3424 if (!iommu_devinfo_cache) {
3425 printk(KERN_ERR "Couldn't create devinfo cache\n");
3426 ret = -ENOMEM;
3429 return ret;
3432 static inline int iommu_iova_cache_init(void)
3434 int ret = 0;
3436 iommu_iova_cache = kmem_cache_create("iommu_iova",
3437 sizeof(struct iova),
3439 SLAB_HWCACHE_ALIGN,
3440 NULL);
3441 if (!iommu_iova_cache) {
3442 printk(KERN_ERR "Couldn't create iova cache\n");
3443 ret = -ENOMEM;
3446 return ret;
3449 static int __init iommu_init_mempool(void)
3451 int ret;
3452 ret = iommu_iova_cache_init();
3453 if (ret)
3454 return ret;
3456 ret = iommu_domain_cache_init();
3457 if (ret)
3458 goto domain_error;
3460 ret = iommu_devinfo_cache_init();
3461 if (!ret)
3462 return ret;
3464 kmem_cache_destroy(iommu_domain_cache);
3465 domain_error:
3466 kmem_cache_destroy(iommu_iova_cache);
3468 return -ENOMEM;
3471 static void __init iommu_exit_mempool(void)
3473 kmem_cache_destroy(iommu_devinfo_cache);
3474 kmem_cache_destroy(iommu_domain_cache);
3475 kmem_cache_destroy(iommu_iova_cache);
3479 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3481 struct dmar_drhd_unit *drhd;
3482 u32 vtbar;
3483 int rc;
3485 /* We know that this device on this chipset has its own IOMMU.
3486 * If we find it under a different IOMMU, then the BIOS is lying
3487 * to us. Hope that the IOMMU for this device is actually
3488 * disabled, and it needs no translation... */
3490 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3491 if (rc) {
3492 /* "can't" happen */
3493 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3494 return;
3496 vtbar &= 0xffff0000;
3498 /* we know that this iommu should be at offset 0xa000 from vtbar */
3499 drhd = dmar_find_matched_drhd_unit(pdev);
3500 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3501 TAINT_FIRMWARE_WORKAROUND,
3502 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3503 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3505 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3507 static void __init init_no_remapping_devices(void)
3509 struct dmar_drhd_unit *drhd;
3510 struct device *dev;
3511 int i;
3513 for_each_drhd_unit(drhd) {
3514 if (!drhd->include_all) {
3515 for_each_active_dev_scope(drhd->devices,
3516 drhd->devices_cnt, i, dev)
3517 break;
3518 /* ignore DMAR unit if no devices exist */
3519 if (i == drhd->devices_cnt)
3520 drhd->ignored = 1;
3524 for_each_active_drhd_unit(drhd) {
3525 if (drhd->include_all)
3526 continue;
3528 for_each_active_dev_scope(drhd->devices,
3529 drhd->devices_cnt, i, dev)
3530 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3531 break;
3532 if (i < drhd->devices_cnt)
3533 continue;
3535 /* This IOMMU has *only* gfx devices. Either bypass it or
3536 set the gfx_mapped flag, as appropriate */
3537 if (dmar_map_gfx) {
3538 intel_iommu_gfx_mapped = 1;
3539 } else {
3540 drhd->ignored = 1;
3541 for_each_active_dev_scope(drhd->devices,
3542 drhd->devices_cnt, i, dev)
3543 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3548 #ifdef CONFIG_SUSPEND
3549 static int init_iommu_hw(void)
3551 struct dmar_drhd_unit *drhd;
3552 struct intel_iommu *iommu = NULL;
3554 for_each_active_iommu(iommu, drhd)
3555 if (iommu->qi)
3556 dmar_reenable_qi(iommu);
3558 for_each_iommu(iommu, drhd) {
3559 if (drhd->ignored) {
3561 /* we always have to disable PMRs or DMA may fail on
3562 * this device */
3564 if (force_on)
3565 iommu_disable_protect_mem_regions(iommu);
3566 continue;
3569 iommu_flush_write_buffer(iommu);
3571 iommu_set_root_entry(iommu);
3573 iommu->flush.flush_context(iommu, 0, 0, 0,
3574 DMA_CCMD_GLOBAL_INVL);
3575 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3576 iommu_enable_translation(iommu);
3577 iommu_disable_protect_mem_regions(iommu);
3580 return 0;
3583 static void iommu_flush_all(void)
3585 struct dmar_drhd_unit *drhd;
3586 struct intel_iommu *iommu;
3588 for_each_active_iommu(iommu, drhd) {
3589 iommu->flush.flush_context(iommu, 0, 0, 0,
3590 DMA_CCMD_GLOBAL_INVL);
3591 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3592 DMA_TLB_GLOBAL_FLUSH);
3596 static int iommu_suspend(void)
3598 struct dmar_drhd_unit *drhd;
3599 struct intel_iommu *iommu = NULL;
3600 unsigned long flag;
3602 for_each_active_iommu(iommu, drhd) {
3603 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3604 GFP_ATOMIC);
3605 if (!iommu->iommu_state)
3606 goto nomem;
3609 iommu_flush_all();
3611 for_each_active_iommu(iommu, drhd) {
3612 iommu_disable_translation(iommu);
3614 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3616 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3617 readl(iommu->reg + DMAR_FECTL_REG);
3618 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3619 readl(iommu->reg + DMAR_FEDATA_REG);
3620 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3621 readl(iommu->reg + DMAR_FEADDR_REG);
3622 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3623 readl(iommu->reg + DMAR_FEUADDR_REG);
3625 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3627 return 0;
3629 nomem:
3630 for_each_active_iommu(iommu, drhd)
3631 kfree(iommu->iommu_state);
3633 return -ENOMEM;
3636 static void iommu_resume(void)
3638 struct dmar_drhd_unit *drhd;
3639 struct intel_iommu *iommu = NULL;
3640 unsigned long flag;
3642 if (init_iommu_hw()) {
3643 if (force_on)
3644 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3645 else
3646 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3647 return;
3650 for_each_active_iommu(iommu, drhd) {
3652 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3654 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3655 iommu->reg + DMAR_FECTL_REG);
3656 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3657 iommu->reg + DMAR_FEDATA_REG);
3658 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3659 iommu->reg + DMAR_FEADDR_REG);
3660 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3661 iommu->reg + DMAR_FEUADDR_REG);
3663 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3666 for_each_active_iommu(iommu, drhd)
3667 kfree(iommu->iommu_state);
3670 static struct syscore_ops iommu_syscore_ops = {
3671 .resume = iommu_resume,
3672 .suspend = iommu_suspend,
3675 static void __init init_iommu_pm_ops(void)
3677 register_syscore_ops(&iommu_syscore_ops);
3680 #else
3681 static inline void init_iommu_pm_ops(void) {}
3682 #endif /* CONFIG_SUSPEND */
3685 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3687 struct acpi_dmar_reserved_memory *rmrr;
3688 struct dmar_rmrr_unit *rmrru;
3690 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3691 if (!rmrru)
3692 return -ENOMEM;
3694 rmrru->hdr = header;
3695 rmrr = (struct acpi_dmar_reserved_memory *)header;
3696 rmrru->base_address = rmrr->base_address;
3697 rmrru->end_address = rmrr->end_address;
3698 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3699 ((void *)rmrr) + rmrr->header.length,
3700 &rmrru->devices_cnt);
3701 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3702 kfree(rmrru);
3703 return -ENOMEM;
3706 list_add(&rmrru->list, &dmar_rmrr_units);
3708 return 0;
3711 int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3713 struct acpi_dmar_atsr *atsr;
3714 struct dmar_atsr_unit *atsru;
3716 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3717 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3718 if (!atsru)
3719 return -ENOMEM;
3721 atsru->hdr = hdr;
3722 atsru->include_all = atsr->flags & 0x1;
3723 if (!atsru->include_all) {
3724 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3725 (void *)atsr + atsr->header.length,
3726 &atsru->devices_cnt);
3727 if (atsru->devices_cnt && atsru->devices == NULL) {
3728 kfree(atsru);
3729 return -ENOMEM;
3733 list_add_rcu(&atsru->list, &dmar_atsr_units);
3735 return 0;
3738 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3740 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3741 kfree(atsru);
3744 static void intel_iommu_free_dmars(void)
3746 struct dmar_rmrr_unit *rmrru, *rmrr_n;
3747 struct dmar_atsr_unit *atsru, *atsr_n;
3749 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3750 list_del(&rmrru->list);
3751 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3752 kfree(rmrru);
3755 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3756 list_del(&atsru->list);
3757 intel_iommu_free_atsr(atsru);
3761 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3763 int i, ret = 1;
3764 struct pci_bus *bus;
3765 struct pci_dev *bridge = NULL;
3766 struct device *tmp;
3767 struct acpi_dmar_atsr *atsr;
3768 struct dmar_atsr_unit *atsru;
3770 dev = pci_physfn(dev);
3771 for (bus = dev->bus; bus; bus = bus->parent) {
3772 bridge = bus->self;
3773 if (!bridge || !pci_is_pcie(bridge) ||
3774 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
3775 return 0;
3776 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
3777 break;
3779 if (!bridge)
3780 return 0;
3782 rcu_read_lock();
3783 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3784 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3785 if (atsr->segment != pci_domain_nr(dev->bus))
3786 continue;
3788 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
3789 if (tmp == &bridge->dev)
3790 goto out;
3792 if (atsru->include_all)
3793 goto out;
3795 ret = 0;
3796 out:
3797 rcu_read_unlock();
3799 return ret;
3802 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3804 int ret = 0;
3805 struct dmar_rmrr_unit *rmrru;
3806 struct dmar_atsr_unit *atsru;
3807 struct acpi_dmar_atsr *atsr;
3808 struct acpi_dmar_reserved_memory *rmrr;
3810 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
3811 return 0;
3813 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3814 rmrr = container_of(rmrru->hdr,
3815 struct acpi_dmar_reserved_memory, header);
3816 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3817 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
3818 ((void *)rmrr) + rmrr->header.length,
3819 rmrr->segment, rmrru->devices,
3820 rmrru->devices_cnt);
3821 if (ret < 0)
3822 return ret;
3823 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3824 dmar_remove_dev_scope(info, rmrr->segment,
3825 rmrru->devices, rmrru->devices_cnt);
3829 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3830 if (atsru->include_all)
3831 continue;
3833 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3834 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3835 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
3836 (void *)atsr + atsr->header.length,
3837 atsr->segment, atsru->devices,
3838 atsru->devices_cnt);
3839 if (ret > 0)
3840 break;
3841 else if (ret < 0)
3842 return ret;
3843 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3844 if (dmar_remove_dev_scope(info, atsr->segment,
3845 atsru->devices, atsru->devices_cnt))
3846 break;
3850 return 0;
3854 /* Here we only respond to the action of unbinding a device from its driver.
3856 * An added device is not attached to its DMAR domain here yet. That will
3857 * happen when mapping the device to an iova. */
3859 static int device_notifier(struct notifier_block *nb,
3860 unsigned long action, void *data)
3862 struct device *dev = data;
3863 struct dmar_domain *domain;
3865 if (iommu_dummy(dev))
3866 return 0;
3868 if (action != BUS_NOTIFY_UNBOUND_DRIVER &&
3869 action != BUS_NOTIFY_DEL_DEVICE)
3870 return 0;
3873 * If the device is still attached to a device driver we can't
3874 * tear down the domain yet as DMA mappings may still be in use.
3875 * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that.
3877 if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL)
3878 return 0;
3880 domain = find_domain(dev);
3881 if (!domain)
3882 return 0;
3884 down_read(&dmar_global_lock);
3885 domain_remove_one_dev_info(domain, dev);
3886 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
3887 domain_exit(domain);
3888 up_read(&dmar_global_lock);
3890 return 0;
3893 static struct notifier_block device_nb = {
3894 .notifier_call = device_notifier,
3897 static int intel_iommu_memory_notifier(struct notifier_block *nb,
3898 unsigned long val, void *v)
3900 struct memory_notify *mhp = v;
3901 unsigned long long start, end;
3902 unsigned long start_vpfn, last_vpfn;
3904 switch (val) {
3905 case MEM_GOING_ONLINE:
3906 start = mhp->start_pfn << PAGE_SHIFT;
3907 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
3908 if (iommu_domain_identity_map(si_domain, start, end)) {
3909 pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
3910 start, end);
3911 return NOTIFY_BAD;
3913 break;
3915 case MEM_OFFLINE:
3916 case MEM_CANCEL_ONLINE:
3917 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
3918 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
3919 while (start_vpfn <= last_vpfn) {
3920 struct iova *iova;
3921 struct dmar_drhd_unit *drhd;
3922 struct intel_iommu *iommu;
3923 struct page *freelist;
3925 iova = find_iova(&si_domain->iovad, start_vpfn);
3926 if (iova == NULL) {
3927 pr_debug("dmar: failed get IOVA for PFN %lx\n",
3928 start_vpfn);
3929 break;
3932 iova = split_and_remove_iova(&si_domain->iovad, iova,
3933 start_vpfn, last_vpfn);
3934 if (iova == NULL) {
3935 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
3936 start_vpfn, last_vpfn);
3937 return NOTIFY_BAD;
3940 freelist = domain_unmap(si_domain, iova->pfn_lo,
3941 iova->pfn_hi);
3943 rcu_read_lock();
3944 for_each_active_iommu(iommu, drhd)
3945 iommu_flush_iotlb_psi(iommu, si_domain->id,
3946 iova->pfn_lo, iova_size(iova),
3947 !freelist, 0);
3948 rcu_read_unlock();
3949 dma_free_pagelist(freelist);
3951 start_vpfn = iova->pfn_hi + 1;
3952 free_iova_mem(iova);
3954 break;
3957 return NOTIFY_OK;
3960 static struct notifier_block intel_iommu_memory_nb = {
3961 .notifier_call = intel_iommu_memory_notifier,
3962 .priority = 0
3966 static ssize_t intel_iommu_show_version(struct device *dev,
3967 struct device_attribute *attr,
3968 char *buf)
3970 struct intel_iommu *iommu = dev_get_drvdata(dev);
3971 u32 ver = readl(iommu->reg + DMAR_VER_REG);
3972 return sprintf(buf, "%d:%d\n",
3973 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
3975 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
3977 static ssize_t intel_iommu_show_address(struct device *dev,
3978 struct device_attribute *attr,
3979 char *buf)
3981 struct intel_iommu *iommu = dev_get_drvdata(dev);
3982 return sprintf(buf, "%llx\n", iommu->reg_phys);
3984 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
3986 static ssize_t intel_iommu_show_cap(struct device *dev,
3987 struct device_attribute *attr,
3988 char *buf)
3990 struct intel_iommu *iommu = dev_get_drvdata(dev);
3991 return sprintf(buf, "%llx\n", iommu->cap);
3993 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
3995 static ssize_t intel_iommu_show_ecap(struct device *dev,
3996 struct device_attribute *attr,
3997 char *buf)
3999 struct intel_iommu *iommu = dev_get_drvdata(dev);
4000 return sprintf(buf, "%llx\n", iommu->ecap);
4002 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4004 static struct attribute *intel_iommu_attrs[] = {
4005 &dev_attr_version.attr,
4006 &dev_attr_address.attr,
4007 &dev_attr_cap.attr,
4008 &dev_attr_ecap.attr,
4009 NULL,
4012 static struct attribute_group intel_iommu_group = {
4013 .name = "intel-iommu",
4014 .attrs = intel_iommu_attrs,
4017 const struct attribute_group *intel_iommu_groups[] = {
4018 &intel_iommu_group,
4019 NULL,
4022 int __init intel_iommu_init(void)
4024 int ret = -ENODEV;
4025 struct dmar_drhd_unit *drhd;
4026 struct intel_iommu *iommu;
4028 /* VT-d is required for a TXT/tboot launch, so enforce that */
4029 force_on = tboot_force_iommu();
4031 if (iommu_init_mempool()) {
4032 if (force_on)
4033 panic("tboot: Failed to initialize iommu memory\n");
4034 return -ENOMEM;
4037 down_write(&dmar_global_lock);
4038 if (dmar_table_init()) {
4039 if (force_on)
4040 panic("tboot: Failed to initialize DMAR table\n");
4041 goto out_free_dmar;
4045 * Disable translation if already enabled prior to OS handover.
4047 for_each_active_iommu(iommu, drhd)
4048 if (iommu->gcmd & DMA_GCMD_TE)
4049 iommu_disable_translation(iommu);
4051 if (dmar_dev_scope_init() < 0) {
4052 if (force_on)
4053 panic("tboot: Failed to initialize DMAR device scope\n");
4054 goto out_free_dmar;
4057 if (no_iommu || dmar_disabled)
4058 goto out_free_dmar;
4060 if (list_empty(&dmar_rmrr_units))
4061 printk(KERN_INFO "DMAR: No RMRR found\n");
4063 if (list_empty(&dmar_atsr_units))
4064 printk(KERN_INFO "DMAR: No ATSR found\n");
4066 if (dmar_init_reserved_ranges()) {
4067 if (force_on)
4068 panic("tboot: Failed to reserve iommu ranges\n");
4069 goto out_free_reserved_range;
4072 init_no_remapping_devices();
4074 ret = init_dmars();
4075 if (ret) {
4076 if (force_on)
4077 panic("tboot: Failed to initialize DMARs\n");
4078 printk(KERN_ERR "IOMMU: dmar init failed\n");
4079 goto out_free_reserved_range;
4081 up_write(&dmar_global_lock);
4082 printk(KERN_INFO
4083 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
4085 init_timer(&unmap_timer);
4086 #ifdef CONFIG_SWIOTLB
4087 swiotlb = 0;
4088 #endif
4089 dma_ops = &intel_dma_ops;
4091 init_iommu_pm_ops();
4093 for_each_active_iommu(iommu, drhd)
4094 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4095 intel_iommu_groups,
4096 iommu->name);
4098 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4099 bus_register_notifier(&pci_bus_type, &device_nb);
4100 if (si_domain && !hw_pass_through)
4101 register_memory_notifier(&intel_iommu_memory_nb);
4103 intel_iommu_enabled = 1;
4105 return 0;
4107 out_free_reserved_range:
4108 put_iova_domain(&reserved_iova_list);
4109 out_free_dmar:
4110 intel_iommu_free_dmars();
4111 up_write(&dmar_global_lock);
4112 iommu_exit_mempool();
4113 return ret;
4116 static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4118 struct intel_iommu *iommu = opaque;
4120 iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4121 return 0;
4125 /* NB - intel-iommu lacks any sort of reference counting for the users of
4126 * dependent devices. If multiple endpoints have intersecting dependent
4127 * devices, unbinding the driver from any one of them will possibly leave
4128 * the others unable to operate. */
4130 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
4131 struct device *dev)
4133 if (!iommu || !dev || !dev_is_pci(dev))
4134 return;
4136 pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
4139 static void domain_remove_one_dev_info(struct dmar_domain *domain,
4140 struct device *dev)
4142 struct device_domain_info *info, *tmp;
4143 struct intel_iommu *iommu;
4144 unsigned long flags;
4145 int found = 0;
4146 u8 bus, devfn;
4148 iommu = device_to_iommu(dev, &bus, &devfn);
4149 if (!iommu)
4150 return;
4152 spin_lock_irqsave(&device_domain_lock, flags);
4153 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
4154 if (info->iommu == iommu && info->bus == bus &&
4155 info->devfn == devfn) {
4156 unlink_domain_info(info);
4157 spin_unlock_irqrestore(&device_domain_lock, flags);
4159 iommu_disable_dev_iotlb(info);
4160 iommu_detach_dev(iommu, info->bus, info->devfn);
4161 iommu_detach_dependent_devices(iommu, dev);
4162 free_devinfo_mem(info);
4164 spin_lock_irqsave(&device_domain_lock, flags);
4166 if (found)
4167 break;
4168 else
4169 continue;
4172 /* if there are no other devices under the same iommu
4173 * owned by this domain, clear this iommu in iommu_bmp and
4174 * update the iommu count and coherency */
4176 if (info->iommu == iommu)
4177 found = 1;
4180 spin_unlock_irqrestore(&device_domain_lock, flags);
4182 if (found == 0) {
4183 domain_detach_iommu(domain, iommu);
4184 if (!domain_type_is_vm_or_si(domain))
4185 iommu_detach_domain(domain, iommu);
4189 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4191 int adjust_width;
4193 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
4194 domain_reserve_special_ranges(domain);
4196 /* calculate AGAW */
4197 domain->gaw = guest_width;
4198 adjust_width = guestwidth_to_adjustwidth(guest_width);
4199 domain->agaw = width_to_agaw(adjust_width);
4201 domain->iommu_coherency = 0;
4202 domain->iommu_snooping = 0;
4203 domain->iommu_superpage = 0;
4204 domain->max_addr = 0;
4206 /* always allocate the top pgd */
4207 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4208 if (!domain->pgd)
4209 return -ENOMEM;
4210 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4211 return 0;
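/*
 * Worked example (illustrative, not from the source, assuming the usual
 * VT-d AGAW encoding): with the default 48-bit guest width,
 * guestwidth_to_adjustwidth() already lands on a supported AGAW width
 * (48 = 30 + 2 * 9), so the domain gets agaw 2 and a 4-level page table
 * rooted at the freshly allocated pgd.
 */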
4214 static int intel_iommu_domain_init(struct iommu_domain *domain)
4216 struct dmar_domain *dmar_domain;
4218 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
4219 if (!dmar_domain) {
4220 printk(KERN_ERR
4221 "intel_iommu_domain_init: dmar_domain == NULL\n");
4222 return -ENOMEM;
4224 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4225 printk(KERN_ERR
4226 "intel_iommu_domain_init() failed\n");
4227 domain_exit(dmar_domain);
4228 return -ENOMEM;
4230 domain_update_iommu_cap(dmar_domain);
4231 domain->priv = dmar_domain;
4233 domain->geometry.aperture_start = 0;
4234 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4235 domain->geometry.force_aperture = true;
4237 return 0;
4240 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
4242 struct dmar_domain *dmar_domain = domain->priv;
4244 domain->priv = NULL;
4245 domain_exit(dmar_domain);
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	if (device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
		return -EPERM;
	}

	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(dev);
		if (old_domain) {
			if (domain_type_is_vm_or_si(dmar_domain))
				domain_remove_one_dev_info(old_domain, dev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		printk(KERN_ERR "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain_remove_one_dev_info(dmar_domain, dev);
}
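/*
 * intel_iommu_map()/intel_iommu_unmap() implement the ->map/->unmap
 * callbacks.  The generic IOMMU_READ/IOMMU_WRITE/IOMMU_CACHE flags are
 * translated into DMA_PTE_READ/DMA_PTE_WRITE/DMA_PTE_SNP below; note that
 * DMA_PTE_SNP is only set when the hardware advertises snoop control
 * (iommu_snooping), otherwise the IOMMU_CACHE hint is silently ignored.
 */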
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}
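/*
 * Worked example for the large-page handling below (illustrative only):
 * if a 2MiB superpage is mapped at some IOVA and the caller asks to unmap
 * just 4KiB of it, pfn_to_dma_pte() reports the 2MiB level, 'size' is
 * rounded up to 2MiB, the whole superpage is torn down and the rounded-up
 * size is what gets returned to the caller.
 */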
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, num, ndomains, level = 0;

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
	if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
		BUG();

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
		iommu = g_iommus[iommu_id];

		/*
		 * find bit position of dmar_domain
		 */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == dmar_domain)
				iommu_flush_iotlb_psi(iommu, num, start_pfn,
						      npages, !freelist, 0);
		}
	}

	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}
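/*
 * intel_iommu_iova_to_phys() walks the domain's page table and returns the
 * base physical address recorded in the PTE that covers 'iova' (or 0 if
 * nothing is mapped there); the offset within the page is not added back
 * here.
 */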
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}
static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled;

	return 0;
}
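/*
 * intel_iommu_add_device()/intel_iommu_remove_device() are called by the
 * IOMMU core as devices are added to and removed from the bus.
 * iommu_group_get_for_dev() either joins the device to an existing group
 * (e.g. when PCI DMA aliasing or a lack of ACS isolation ties functions
 * together) or allocates a fresh one, returning a reference that is dropped
 * again via iommu_group_put() once membership is established.
 */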
static int intel_iommu_add_device(struct device *dev)
{
	struct intel_iommu *iommu;
	struct iommu_group *group;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	iommu_device_link(iommu->iommu_dev, dev);

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}
static void intel_iommu_remove_device(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	iommu_group_remove_device(dev);

	iommu_device_unlink(iommu->iommu_dev, dev);
}
static const struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};
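/*
 * These ops are registered for the PCI bus from intel_iommu_init()
 * (roughly: bus_set_iommu(&pci_bus_type, &intel_iommu_ops);), after which
 * the generic IOMMU API routes requests for PCI devices through the
 * callbacks above.
 */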
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
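/*
 * The DECLARE_PCI_FIXUP_HEADER() entries above run their quirk early, when
 * the matching device's config header is first parsed during PCI
 * enumeration, so dmar_map_gfx and rwbf_quirk should already be set by the
 * time the DMAR code consults them later in boot.
 */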
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)
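/*
 * GGC is a graphics control register in the host bridge's PCI config space
 * (offset 0x52 on the chipsets quirked below); bits 11:8
 * (GGC_MEMORY_SIZE_MASK) describe, per the names above, how much graphics
 * GTT memory the BIOS set aside and whether it is usable with VT.  The
 * quirk below treats a clear GGC_MEMORY_VT_ENABLED bit as "no VT-compatible
 * shadow GTT was allocated" and disables the graphics IOMMU in that case.
 * (Register layout as assumed by this quirk; the chipset datasheet is the
 * authoritative reference.)
 */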
static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}