/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
#define DOMAIN_MAX_PFN(gaw)  ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
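
/*
 * Worked example of the pfn conversions above: with 4KiB MM pages
 * (PAGE_SHIFT == 12) and 4KiB VT-d pages (VTD_PAGE_SHIFT == 12) the shift
 * is zero and the two pfn spaces are identical; on a hypothetical 64KiB
 * MM-page kernel (PAGE_SHIFT == 16) each MM pfn would correspond to a run
 * of 16 consecutive DMA (VT-d) pfns, so mm_to_dma_pfn(1) == 16.
 */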
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}
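
/*
 * Note on dma_pte_present(): a DMA PTE has no separate "present" bit; an
 * entry is considered present as soon as either of the low two permission
 * bits (DMA_PTE_READ, bit 0, or DMA_PTE_WRITE, bit 1) is set, which is why
 * the check above tests (pte->val & 3).
 */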
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
struct dmar_domain *si_domain;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine, more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIE-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}
static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, use a default agaw, and
 * get a supported less agaw for iommus that don't support the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
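
/*
 * The helpers above pick an adjusted guest address width (AGAW): starting
 * from the AGAW that corresponds to the requested width, they walk downward
 * through the SAGAW capability bits and return the largest page-table
 * geometry the hardware actually supports (or a negative value if none is
 * found).
 */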
/* This function only returns single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}
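
/*
 * device_to_iommu() resolves the DMAR unit responsible for a device in three
 * steps: an exact match in the DRHD's explicit device list, then a match
 * against the secondary bus range of any listed bridge, and finally the
 * catch-all INCLUDE_ALL unit for the segment if one exists.
 */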
static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}
/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}
static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	ret = 0;
	if (context)
		ret = context_present(&context[devfn]);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}
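
/*
 * Worked example of the AGAW arithmetic above: a 48-bit address width gives
 * width_to_agaw(48) = (48 - 30) / 9 = 2, agaw_to_level(2) = 4 page-table
 * levels, and each level consumes LEVEL_STRIDE = 9 bits of the DMA pfn
 * (512 entries per table page).
 */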
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;
	unsigned long flags;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	spin_lock_irqsave(&domain->mapping_lock, flags);
	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page();
			if (!tmp_page) {
				spin_unlock_irqrestore(&domain->mapping_lock,
						       flags);
				return NULL;
			}

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	spin_unlock_irqrestore(&domain->mapping_lock, flags);
	return pte;
}
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here; nobody else touches the iova range */
	while (start_pfn <= last_pfn) {
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, 2);
			continue;
		}
		while (start_pfn <= last_pfn &&
		       (unsigned long)pte >> VTD_PAGE_SHIFT ==
		       (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
			dma_clear_pte(pte);
			start_pfn++;
			pte++;
		}
		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);
	}
}
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		while (tmp + level_size(level) - 1 <= last_pfn) {
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			while (tmp + level_size(level) - 1 <= last_pfn &&
			       (unsigned long)pte >> VTD_PAGE_SHIFT ==
			       (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
				free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
				dma_clear_pte(pte);
				pte++;
				tmp += level_size(level);
			}
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
		}
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}
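
/*
 * dma_pte_free_pagetable() releases page-table pages bottom-up: for each
 * level starting at 2 it frees every fully-covered table page in the pfn
 * range, and only if the range spans the whole domain does it also release
 * the top-level pgd.
 */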
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
	/*
	 * Read drain is probably only needed to be super secure.  Looks like
	 * we can ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}
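
/*
 * A device qualifies for a device-IOTLB (ATS) enabled context entry only if
 * the IOMMU advertises device-IOTLB support in its extended capabilities,
 * the device exposes the PCIe ATS extended capability, and the DMAR tables
 * contain a matching ATSR unit for it; otherwise the caller falls back to a
 * plain multi-level translation type.
 */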
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info || !info->dev)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, domain ID 0 is reserved for non-present to present
	 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || did)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
				 GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}
static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);

		spin_lock_irqsave(&domain->iommu_lock, flags);
		if (--domain->iommu_count == 0) {
			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_exit(domain);
			else
				domain_exit(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags);

		i = find_next_bit(iommu->domain_ids,
				  cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}
	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}
static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	num = find_first_bit(iommu->domain_ids, ndomains);
	for (; num < ndomains; ) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
		num = find_next_bit(iommu->domain_ids,
				    cap_ndoms(iommu->cap), num+1);
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
			  &reserved_alloc_key);
	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
			  &reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
			    IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}
static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}
static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					    cap_ndoms(iommu->cap), num+1);
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			set_bit(iommu->seq_id, &domain->iommu_bmp);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->bus),
					tmp->bus->number, tmp->devfn,
					translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages--) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
		}
		if (!pte) {
			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
			if (!pte)
				return -ENOMEM;
		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}
		pte++;
		if (!nr_pages ||
		    (unsigned long)pte >> VTD_PAGE_SHIFT !=
		    (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}
		iov_pfn++;
		pteval += VTD_PAGE_SIZE;
		sg_res--;
		if (!sg_res)
			sg = sg_next(sg);
	}
	return 0;
}
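
/*
 * __domain_mapping() serves both callers below: when a scatterlist is given
 * it pulls the physical address and length of each sg entry as it goes
 * (tracking the remaining pages of the current entry in sg_res), and when a
 * contiguous phys_pfn is given it simply advances pteval page by page.  In
 * both cases PTEs are written with cmpxchg64 and the CPU cache is flushed
 * once per page-table page rather than per PTE.
 */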
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
/*
 * find_domain
 * Note: pci_dev->dev.archdata.iommu stores the device_domain_info for the
 * device.
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}
1788 static struct dmar_domain
*get_domain_for_dev(struct pci_dev
*pdev
, int gaw
)
1790 struct dmar_domain
*domain
, *found
= NULL
;
1791 struct intel_iommu
*iommu
;
1792 struct dmar_drhd_unit
*drhd
;
1793 struct device_domain_info
*info
, *tmp
;
1794 struct pci_dev
*dev_tmp
;
1795 unsigned long flags
;
1796 int bus
= 0, devfn
= 0;
1800 domain
= find_domain(pdev
);
1804 segment
= pci_domain_nr(pdev
->bus
);
1806 dev_tmp
= pci_find_upstream_pcie_bridge(pdev
);
1808 if (dev_tmp
->is_pcie
) {
1809 bus
= dev_tmp
->subordinate
->number
;
1812 bus
= dev_tmp
->bus
->number
;
1813 devfn
= dev_tmp
->devfn
;
1815 spin_lock_irqsave(&device_domain_lock
, flags
);
1816 list_for_each_entry(info
, &device_domain_list
, global
) {
1817 if (info
->segment
== segment
&&
1818 info
->bus
== bus
&& info
->devfn
== devfn
) {
1819 found
= info
->domain
;
1823 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1824 /* pcie-pci bridge already has a domain, uses it */
1831 domain
= alloc_domain();
1835 /* Allocate new domain for the device */
1836 drhd
= dmar_find_matched_drhd_unit(pdev
);
1838 printk(KERN_ERR
"IOMMU: can't find DMAR for device %s\n",
1842 iommu
= drhd
->iommu
;
1844 ret
= iommu_attach_domain(domain
, iommu
);
1846 domain_exit(domain
);
1850 if (domain_init(domain
, gaw
)) {
1851 domain_exit(domain
);
1855 /* register pcie-to-pci device */
1857 info
= alloc_devinfo_mem();
1859 domain_exit(domain
);
1862 info
->segment
= segment
;
1864 info
->devfn
= devfn
;
1866 info
->domain
= domain
;
1867 /* This domain is shared by devices under p2p bridge */
1868 domain
->flags
|= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES
;
1870 /* pcie-to-pci bridge already has a domain, uses it */
1872 spin_lock_irqsave(&device_domain_lock
, flags
);
1873 list_for_each_entry(tmp
, &device_domain_list
, global
) {
1874 if (tmp
->segment
== segment
&&
1875 tmp
->bus
== bus
&& tmp
->devfn
== devfn
) {
1876 found
= tmp
->domain
;
1881 free_devinfo_mem(info
);
1882 domain_exit(domain
);
1885 list_add(&info
->link
, &domain
->devices
);
1886 list_add(&info
->global
, &device_domain_list
);
1888 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1892 info
= alloc_devinfo_mem();
1895 info
->segment
= segment
;
1896 info
->bus
= pdev
->bus
->number
;
1897 info
->devfn
= pdev
->devfn
;
1899 info
->domain
= domain
;
1900 spin_lock_irqsave(&device_domain_lock
, flags
);
1901 /* somebody is fast */
1902 found
= find_domain(pdev
);
1903 if (found
!= NULL
) {
1904 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1905 if (found
!= domain
) {
1906 domain_exit(domain
);
1909 free_devinfo_mem(info
);
1912 list_add(&info
->link
, &domain
->devices
);
1913 list_add(&info
->global
, &device_domain_list
);
1914 pdev
->dev
.archdata
.iommu
= info
;
1915 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1918 /* recheck it here, maybe others set it */
1919 return find_domain(pdev
);
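
/*
 * get_domain_for_dev() first reuses an existing domain if the device (or the
 * PCIe-to-PCI bridge above it) already has one; otherwise it allocates a new
 * domain, attaches it to the device's IOMMU, initializes it with the
 * requested guest address width, and finally records the device_domain_info
 * under device_domain_lock, rechecking for a racing initializer before
 * publishing the result in dev.archdata.iommu.
 */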
static int iommu_identity_mapping;

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}

static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
					  rmrr->end_address + 1);
}
#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */

/* Initialize each context entry as pass through. */
static int __init init_context_pass_through(void)
{
	struct pci_dev *pdev = NULL;
	struct dmar_domain *domain;
	int ret;

	for_each_pci_dev(pdev) {
		domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_PASS_THROUGH);
		if (ret)
			return ret;
	}
	return 0;
}
static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_work_fn(unsigned long start_pfn,
				    unsigned long end_pfn, void *datax)
{
	int *ret = datax;

	*ret = iommu_domain_identity_map(si_domain,
					 (uint64_t)start_pfn << PAGE_SHIFT,
					 (uint64_t)end_pfn << PAGE_SHIFT);
	return *ret;
}

static int si_domain_init(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	for_each_online_node(nid) {
		work_with_active_regions(nid, si_domain_work_fn, &ret);
		if (ret)
			return ret;
	}

	return 0;
}
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	list_for_each_entry(info, &si_domain->devices, link)
		if (info->dev == pdev)
			return 1;
	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}

static int iommu_prepare_static_identity_mapping(void)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init();
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
		       pci_name(pdev));

		ret = domain_context_mapping(si_domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret)
			return ret;
		ret = domain_add_dev_info(si_domain, pdev);
		if (ret)
			return ret;
	}

	return 0;
}
int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;
	int pass_through = 1;

	/*
	 * In case pass through can not be enabled, iommu tries to use identity
	 * mapping.
	 */
	if (iommu_pass_through)
		iommu_identity_mapping = 1;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		g_num_of_iommus++;
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path all other access are read
		 * only
		 */
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			   GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		kfree(g_iommus);
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs. Need to Split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
		if (!ecap_pass_through(iommu->ecap))
			pass_through = 0;
	}
	if (iommu_pass_through)
		if (!pass_through) {
			printk(KERN_INFO
			       "Pass Through is not supported by hardware.\n");
			iommu_pass_through = 0;
		}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	/*
	 * If pass through is set and enabled, context entries of all pci
	 * devices are initialized by pass through translation type.
	 */
	if (iommu_pass_through) {
		ret = init_context_pass_through();
		if (ret) {
			printk(KERN_ERR "IOMMU: Pass through init failed.\n");
			iommu_pass_through = 0;
		}
	}

	/*
	 * If pass through is not set or not enabled, setup context entries for
	 * identity mappings for rmrr, gfx, and isa and may fall back to static
	 * identity mapping if iommu_identity_mapping is set.
	 */
	if (!iommu_pass_through) {
		if (iommu_identity_mapping)
			iommu_prepare_static_identity_mapping();
		/*
		 * For each rmrr
		 *   for each dev attached to rmrr
		 *   do
		 *     locate drhd for dev, alloc domain for dev
		 *     allocate free domain
		 *     allocate page table entries for rmrr
		 *     if context not allocated for bus
		 *           allocate and init context
		 *           set present in root table for this bus
		 *     init context with domain, translation etc
		 *    endfor
		 * endfor
		 */
		printk(KERN_INFO "IOMMU: Setting RMRR:\n");
		for_each_rmrr_units(rmrr) {
			for (i = 0; i < rmrr->devices_cnt; i++) {
				pdev = rmrr->devices[i];
				/*
				 * some BIOS lists non-existent devices in DMAR
				 * table.
				 */
				if (!pdev)
					continue;
				ret = iommu_prepare_rmrr_dev(rmrr, pdev);
				if (ret)
					printk(KERN_ERR
				"IOMMU: mapping reserved region failed\n");
			}
		}

		iommu_prepare_isa();
	}

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	host_addr += size + PAGE_SIZE - 1;

	return host_addr >> VTD_PAGE_SHIFT;
}
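
/*
 * Example of the rounding above: for a buffer starting at offset 0x800
 * within a 4KiB page with size 0x1000, host_addr becomes
 * 0x800 + 0x1000 + 0xfff = 0x27ff, and shifting by VTD_PAGE_SHIFT (12)
 * yields 2 VT-d pages, covering both partially-used pages.
 */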
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
		       nrpages, pci_name(pdev));
		return NULL;
	}

	return iova;
}
2414 static struct dmar_domain
*
2415 get_valid_domain_for_dev(struct pci_dev
*pdev
)
2417 struct dmar_domain
*domain
;
2420 domain
= get_domain_for_dev(pdev
,
2421 DEFAULT_DOMAIN_ADDRESS_WIDTH
);
2424 "Allocating domain for %s failed", pci_name(pdev
));
2428 /* make sure context mapping is ok */
2429 if (unlikely(!domain_context_mapped(pdev
))) {
2430 ret
= domain_context_mapping(domain
, pdev
,
2431 CONTEXT_TT_MULTI_LEVEL
);
2434 "Domain context map for %s failed",
static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through non-identity map and unmap process.*/
static int iommu_no_mapping(struct pci_dev *pdev)
{
	int found;

	if (!iommu_identity_mapping)
		return iommu_dummy(pdev);

	found = identity_mapping(pdev);
	if (found) {
		if (pdev->dma_mask > DMA_BIT_MASK(32))
			return 1;

		/*
		 * 32 bit DMA is removed from si_domain and fall back
		 * to non-identity mapping.
		 */
		domain_remove_one_dev_info(si_domain, pdev);
		printk(KERN_INFO "32bit %s uses non-identity mapping\n",
		       pci_name(pdev));
		return 0;
	}

	/*
	 * In case of a detached 64 bit DMA device from vm, the device
	 * is put into si_domain for identity mapping.
	 */
	if (pdev->dma_mask > DMA_BIT_MASK(32)) {
		int ret;

		ret = domain_add_dev_info(si_domain, pdev);
		if (!ret) {
			printk(KERN_INFO "64bit %s uses identity mapping\n",
			       pci_name(pdev));
			return 1;
		}
	}

	return iommu_dummy(pdev);
}
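/*
 * __intel_map_single() below is the common DMA-API mapping path: look up (or
 * lazily create) the device's domain, allocate an IOVA range covering the
 * request, program the page tables with the protection implied by @dir, and
 * return the bus address (IOVA plus the offset of @paddr within its page).
 * Returns 0 on failure.
 */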
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(pdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might be partial page, we should map the whole
	 * page.  Note: if two part of one page are separately mapped, we
	 * might have two guest_addr mapping to the same host paddr, but this
	 * is not a big problem
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 paddr >> VTD_PAGE_SHIFT, size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
	       pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}
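/*
 * Deferred-unmap machinery: rather than invalidating the IOTLB on every
 * unmap, freed IOVAs are queued per IOMMU in deferred_flush[] and released
 * in batches, either from unmap_timer or when list_size reaches
 * HIGH_WATER_MARK.  flush_unmaps() must be called with
 * async_umap_flush_lock held.
 */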
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];

			mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
			mask = ilog2(mask >> VTD_PAGE_SHIFT);
			iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
					      iova->pfn_lo << PAGE_SHIFT, mask);
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}
static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
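/*
 * add_unmap() below queues one IOVA for deferred freeing on the IOMMU that
 * owns @dom.  When the queue is already at HIGH_WATER_MARK the pending
 * entries are flushed synchronously first; otherwise unmap_timer (10ms)
 * drains them later.
 */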
static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
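/*
 * intel_unmap_page() below tears a mapping down: the PTEs are cleared, the
 * intermediate page-table pages freed, and the IOVA released.  With
 * intel_iommu_strict the IOTLB is invalidated immediately; otherwise the
 * IOVA is handed to add_unmap() for batched invalidation.
 */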
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(pdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (!iova)
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}
static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
			       int dir)
{
	intel_unmap_page(dev, dev_addr, size, dir, NULL);
}
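/*
 * Coherent allocations below are ordinary page allocations that are zeroed
 * and then mapped through __intel_map_single() against the device's
 * coherent_dma_mask; there is no special uncached handling here.
 */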
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	flags &= ~(GFP_DMA | GFP_DMA32);

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}
static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, order);
}
static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(pdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (!iova)
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
			      (last_pfn - start_pfn + 1));

	/* free iova */
	__free_iova(&domain->iovad, iova);
}
static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}
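/*
 * intel_map_sg() below maps a whole scatterlist at once: the VT-d page count
 * is summed over all entries, a single IOVA range of that size is allocated,
 * and domain_sg_mapping() fills in the PTEs.  Devices that bypass the IOMMU
 * take the nontranslate path above and simply use physical addresses.
 */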
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	size_t offset_pfn = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(pdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
	if (unlikely(ret)) {
		/* clear the page */
		dma_pte_clear_range(domain, start_vpfn,
				    start_vpfn + size - 1);
		/* free page tables */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}
struct dma_map_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain),
					       0, SLAB_HWCACHE_ALIGN, NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0, SLAB_HWCACHE_ALIGN, NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					     sizeof(struct iova),
					     0, SLAB_HWCACHE_ALIGN, NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}
static int __init iommu_init_mempool(void)
{
	int ret;

	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}
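/*
 * init_no_remapping_devices() below marks DMAR units that list no usable PCI
 * devices, and units that cover only graphics devices, so that the rest of
 * setup ignores them; their devices get DUMMY_DEVICE_DOMAIN_INFO and bypass
 * the IOMMU entirely.
 */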
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass IOMMU if it is just for gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_active_iommu(iommu, drhd) {
		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);
		iommu_enable_translation(iommu);
	}

	return 0;
}
static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}
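/*
 * Suspend/resume support: iommu_suspend() saves the fault-event control,
 * data and address registers of every active IOMMU into iommu_state[] and
 * disables translation; iommu_resume() re-initialises the hardware via
 * init_iommu_hw() and writes the saved registers back.
 */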
static int iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}
static int iommu_resume(struct sys_device *dev)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return -EIO;
	}

	for_each_active_iommu(iommu, drhd) {
		spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
		       iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
		       iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
		       iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
		       iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return 0;
}
static struct sysdev_class iommu_sysclass = {
	.name		= "iommu",
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};

static struct sys_device device_iommu = {
	.cls	= &iommu_sysclass,
};

static int __init init_iommu_sysfs(void)
{
	int error;

	error = sysdev_class_register(&iommu_sysclass);
	if (error)
		return error;

	error = sysdev_register(&device_iommu);
	if (error)
		sysdev_class_unregister(&iommu_sysclass);

	return error;
}

#else
static int __init init_iommu_sysfs(void)
{
	return 0;
}
#endif	/* CONFIG_PM */
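/*
 * intel_iommu_init() below is the driver entry point.  It bails out early
 * when DMAR is disabled or swiotlb already handles DMA (and pass-through was
 * not requested), then sets up the mempools, reserved IOVA ranges and
 * per-IOMMU state, and finally installs intel_dma_ops (unless running in
 * pass-through mode) and registers the generic iommu_ops.
 */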
int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now.
	 * Above initialization will also be used by Interrupt-remapping.
	 */
	if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);

	if (!iommu_pass_through) {
		printk(KERN_INFO
		       "Multi-level page-table translation for DMAR.\n");
		dma_ops = &intel_dma_ops;
	} else
		printk(KERN_INFO
		       "DMAR: Pass through translation for DMAR.\n");

	init_iommu_sysfs();

	register_iommu(&intel_iommu_ops);

	return 0;
}
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
			iommu_detach_dev(iommu,
					 tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}
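/*
 * domain_remove_one_dev_info() below unlinks a single device from a domain:
 * its device_domain_info is removed from both lists, its context entry and
 * device IOTLB are torn down, and if no other device behind the same IOMMU
 * remains in the domain, that IOMMU is cleared from iommu_bmp and the
 * domain's capabilities are recomputed.
 */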
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		/* No need to compare PCI domain; it has to be the same */
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there is no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp
		 * update iommu count and coherency
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}
/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;

static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}
static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}
static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}
static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so dont process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
		       "intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
		       "intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}
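/*
 * intel_iommu_attach_device() below is the iommu_ops attach callback (used,
 * for example, by KVM device assignment): any existing mapping of the device
 * is torn down first, the IOMMU's address width is checked against the
 * domain's current max_addr, and the device is then added to the domain and
 * given a context entry.
 */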
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;
	int ret;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	ret = domain_add_dev_info(dmar_domain, pdev);
	if (ret)
		return ret;

	ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	return ret;
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}
static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}
static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;

	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
			    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}
static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}
static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map_range,
	.unmap		= intel_iommu_unmap_range,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
};
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);