/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_32BIT_MASK)
#define DMA_64BIT_PFN		IOVA_PFN(DMA_64BIT_MASK)
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;
/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

#define CONTEXT_TT_MULTI_LEVEL 0

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
	return (pte->val & VTD_PAGE_MASK);
}

static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
{
	pte->val |= (addr & VTD_PAGE_MASK);
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}
/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/

	struct list_head devices; 	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};
/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
	struct dmar_domain *domain; /* pointer to domain */
};
static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
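/*
 * Example command lines accepted by the parser above (illustrative):
 *   intel_iommu=on          enable DMA remapping
 *   intel_iommu=off         disable DMA remapping
 *   intel_iommu=on,strict   enable, with unbatched (strict) IOTLB flushes
 *   intel_iommu=igfx_off    leave graphics devices untranslated
 * Options are comma separated, hence the strcspn(str, ",") skip above.
 */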
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}
static inline int width_to_agaw(int width);

/* calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}
/* in native case, each domain is related to only one iommu */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}
static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}
static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}
/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}
static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (12 + (level - 1) * LEVEL_STRIDE);
}

static inline int address_level_offset(u64 addr, int level)
{
	return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
}

static inline u64 level_mask(int level)
{
	return ((u64)-1 << level_to_offset_bits(level));
}

static inline u64 level_size(int level)
{
	return ((u64)1 << level_to_offset_bits(level));
}

static inline u64 align_to_level(u64 addr, int level)
{
	return ((addr + level_size(level) - 1) & level_mask(level));
}
static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;
	unsigned long flags;

	BUG_ON(!domain->pgd);

	addr &= (((u64)1) << addr_width) - 1;
	parent = domain->pgd;

	spin_lock_irqsave(&domain->mapping_lock, flags);
	while (level > 0) {
		void *tmp_page;

		offset = address_level_offset(addr, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			tmp_page = alloc_pgtable_page();

			if (!tmp_page) {
				spin_unlock_irqrestore(&domain->mapping_lock,
					flags);
				return NULL;
			}
			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
			dma_set_pte_addr(pte, virt_to_phys(tmp_page));
			/*
			 * higher-level tables always set r/w; the last-level
			 * page table controls read/write
			 */
			dma_set_pte_readable(pte);
			dma_set_pte_writable(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	spin_unlock_irqrestore(&domain->mapping_lock, flags);

	return pte;
}
/* return address's pte at specific level */
static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
		int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = address_level_offset(addr, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear one page's page table */
static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
{
	struct dma_pte *pte = NULL;

	/* get last level pte */
	pte = dma_addr_level_pte(domain, addr, 1);

	if (pte) {
		dma_clear_pte(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
	}
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);
	int npages;

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;
	/* in case it's partial page */
	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	npages = (end - start) / VTD_PAGE_SIZE;

	/* we don't need lock here, nobody else touches the iova range */
	while (npages--) {
		dma_pte_clear_one(domain, start);
		start += VTD_PAGE_SIZE;
	}
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
	u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	u64 tmp;

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;

	/* we don't need lock here, nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start, level);
		if (tmp >= end || (tmp + level_size(level) > end))
			return;

		while (tmp < end) {
			pte = dma_addr_level_pte(domain, tmp, level);
			if (pte) {
				free_pgtable_page(
					phys_to_virt(dma_pte_addr(pte)));
				dma_clear_pte(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
			tmp += level_size(level);
		}
		level++;
	}
	/* free pgd */
	if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 cmd, sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	cmd = iommu->gcmd | DMA_GCMD_SRTP;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;
	val = iommu->gcmd | DMA_GCMD_WBF;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(val, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
			readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines whether we need a write buffer flush */
static int __iommu_flush_context(struct intel_iommu *iommu,
	u16 did, u16 source_id, u8 function_mask, u64 type,
	int non_present_entry_flush)
{
	u64 val = 0;
	unsigned long flag;

	/*
	 * In the non-present entry flush case, if hardware doesn't cache
	 * non-present entries we do nothing; if hardware does cache
	 * non-present entries, we flush entries of domain 0 (the domain id
	 * used to cache any non-present entries)
	 */
	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* flushing context entries will implicitly flush the write buffer */
	return 0;
}

/* return value determines whether we need a write buffer flush */
static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
	u64 addr, unsigned int size_order, u64 type,
	int non_present_entry_flush)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	/*
	 * In the non-present entry flush case, if hardware doesn't cache
	 * non-present entries we do nothing; if hardware does cache
	 * non-present entries, we flush entries of domain 0 (the domain id
	 * used to cache any non-present entries)
	 */
	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
	/* flushing the iotlb entry will implicitly flush the write buffer */
	return 0;
}
static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
	u64 addr, unsigned int pages, int non_present_entry_flush)
{
	unsigned int mask;

	BUG_ON(addr & (~VTD_PAGE_MASK));
	BUG_ON(pages == 0);

	/* Fallback to domain selective flush if no PSI support */
	if (!cap_pgsel_inv(iommu->cap))
		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH,
						non_present_entry_flush);

	/*
	 * PSI requires page size to be 2 ^ x, and the base address is
	 * naturally aligned to the size
	 */
	mask = ilog2(__roundup_pow_of_two(pages));
	/* Fallback to domain selective flush if size is too big */
	if (mask > cap_max_amask_val(iommu->cap))
		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
			DMA_TLB_DSI_FLUSH, non_present_entry_flush);

	return iommu->flush.flush_iotlb(iommu, did, addr, mask,
					DMA_TLB_PSI_FLUSH,
					non_present_entry_flush);
}
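/*
 * Example of the mask computation above: for pages = 3,
 * __roundup_pow_of_two(3) = 4 and mask = ilog2(4) = 2, so the hardware
 * is asked to invalidate a naturally aligned 2^2 = 4 page region
 * covering the requested range.
 */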
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (sts & DMA_GSTS_TES), sts);

	iommu->gcmd |= DMA_GCMD_TE;
	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}
static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);

		spin_lock_irqsave(&domain->iommu_lock, flags);
		if (--domain->iommu_count == 0) {
			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_exit(domain);
			else
				domain_exit(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags);

		i = find_next_bit(iommu->domain_ids,
			cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}
static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
{
	unsigned long num;
	unsigned long ndomains;
	struct dmar_domain *domain;
	unsigned long flags;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);
	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_domain_mem(domain);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return NULL;
	}

	set_bit(num, iommu->domain_ids);
	domain->id = num;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	domain->flags = 0;
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return domain;
}

static void iommu_free_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct intel_iommu *iommu;

	iommu = domain_get_iommu(domain);

	spin_lock_irqsave(&iommu->lock, flags);
	clear_bit(domain->id, iommu->domain_ids);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;
	u64 addr, size;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
		&reserved_alloc_key);
	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			addr = r->start;
			addr &= PAGE_MASK;
			size = r->end - addr;
			size = PAGE_ALIGN(size);
			iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
				IOVA_PFN(size + addr) - 1);
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
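/*
 * Example: gaw = 48 gives r = (48 - 12) % 9 = 0, so the width is already
 * a whole number of 9-bit levels above the 4KB page offset and is used
 * as-is; gaw = 40 gives r = 1 and is rounded up to 48.
 */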
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}
static void domain_exit(struct dmar_domain *domain)
{
	u64 end;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);
	end = DOMAIN_MAX_ADDR(domain->gaw);
	end = end & (~PAGE_MASK);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, end);

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, end);

	iommu_free_domain(domain);
	free_domain_mem(domain);
}
static int domain_context_mapping_one(struct dmar_domain *domain,
		u8 bus, u8 devfn)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	BUG_ON(!domain->pgd);

	iommu = device_to_iommu(bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					    cap_ndoms(iommu->cap), num+1);
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);
	context_set_address_width(context, iommu->agaw);
	context_set_address_root(context, virt_to_phys(pgd));
	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/* it's a non-present to present mapping */
	if (iommu->flush.flush_context(iommu, domain->id,
		(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
		DMA_CCMD_DEVICE_INVL, 1))
		iommu_flush_write_buffer(iommu);
	else
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pdev->bus->number,
		pdev->devfn);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain, parent->bus->number,
			parent->devfn);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
		return domain_context_mapping_one(domain,
			tmp->subordinate->number, 0);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
			tmp->bus->number, tmp->devfn);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu,
		pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
			parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(iommu,
			tmp->subordinate->number, 0);
	else
		return device_context_mapped(iommu,
			tmp->bus->number, tmp->devfn);
}
static int
domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
			u64 hpa, size_t size, int prot)
{
	u64 start_pfn, end_pfn;
	struct dma_pte *pte;
	int index;
	int addr_width = agaw_to_width(domain->agaw);

	hpa &= (((u64)1) << addr_width) - 1;

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;
	iova &= PAGE_MASK;
	start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
	end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
	index = 0;
	while (start_pfn < end_pfn) {
		pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
		if (!pte)
			return -ENOMEM;
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		BUG_ON(dma_pte_addr(pte));
		dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
		dma_set_pte_prot(pte, prot);
		if (prot & DMA_PTE_SNP)
			dma_set_pte_snp(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
		start_pfn++;
		index++;
	}
	return 0;
}
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL, 0);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH, 0);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu = device_to_iommu(info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
/*
 * find_domain
 * Note: struct pci_dev->dev.archdata.iommu stores the domain info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}
1571 static struct dmar_domain
*get_domain_for_dev(struct pci_dev
*pdev
, int gaw
)
1573 struct dmar_domain
*domain
, *found
= NULL
;
1574 struct intel_iommu
*iommu
;
1575 struct dmar_drhd_unit
*drhd
;
1576 struct device_domain_info
*info
, *tmp
;
1577 struct pci_dev
*dev_tmp
;
1578 unsigned long flags
;
1579 int bus
= 0, devfn
= 0;
1581 domain
= find_domain(pdev
);
1585 dev_tmp
= pci_find_upstream_pcie_bridge(pdev
);
1587 if (dev_tmp
->is_pcie
) {
1588 bus
= dev_tmp
->subordinate
->number
;
1591 bus
= dev_tmp
->bus
->number
;
1592 devfn
= dev_tmp
->devfn
;
1594 spin_lock_irqsave(&device_domain_lock
, flags
);
1595 list_for_each_entry(info
, &device_domain_list
, global
) {
1596 if (info
->bus
== bus
&& info
->devfn
== devfn
) {
1597 found
= info
->domain
;
1601 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1602 /* pcie-pci bridge already has a domain, uses it */
1609 /* Allocate new domain for the device */
1610 drhd
= dmar_find_matched_drhd_unit(pdev
);
1612 printk(KERN_ERR
"IOMMU: can't find DMAR for device %s\n",
1616 iommu
= drhd
->iommu
;
1618 domain
= iommu_alloc_domain(iommu
);
1622 if (domain_init(domain
, gaw
)) {
1623 domain_exit(domain
);
1627 /* register pcie-to-pci device */
1629 info
= alloc_devinfo_mem();
1631 domain_exit(domain
);
1635 info
->devfn
= devfn
;
1637 info
->domain
= domain
;
1638 /* This domain is shared by devices under p2p bridge */
1639 domain
->flags
|= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES
;
1641 /* pcie-to-pci bridge already has a domain, uses it */
1643 spin_lock_irqsave(&device_domain_lock
, flags
);
1644 list_for_each_entry(tmp
, &device_domain_list
, global
) {
1645 if (tmp
->bus
== bus
&& tmp
->devfn
== devfn
) {
1646 found
= tmp
->domain
;
1651 free_devinfo_mem(info
);
1652 domain_exit(domain
);
1655 list_add(&info
->link
, &domain
->devices
);
1656 list_add(&info
->global
, &device_domain_list
);
1658 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1662 info
= alloc_devinfo_mem();
1665 info
->bus
= pdev
->bus
->number
;
1666 info
->devfn
= pdev
->devfn
;
1668 info
->domain
= domain
;
1669 spin_lock_irqsave(&device_domain_lock
, flags
);
1670 /* somebody is fast */
1671 found
= find_domain(pdev
);
1672 if (found
!= NULL
) {
1673 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1674 if (found
!= domain
) {
1675 domain_exit(domain
);
1678 free_devinfo_mem(info
);
1681 list_add(&info
->link
, &domain
->devices
);
1682 list_add(&info
->global
, &device_domain_list
);
1683 pdev
->dev
.archdata
.iommu
= info
;
1684 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1687 /* recheck it here, maybe others set it */
1688 return find_domain(pdev
);
static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	unsigned long size;
	unsigned long long base;
	int ret;

	printk(KERN_INFO
		"IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
		pci_name(pdev), start, end);
	/* page table init */
	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* The address might not be aligned */
	base = start & PAGE_MASK;
	size = end - base;
	size = PAGE_ALIGN(size);
	if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
			IOVA_PFN(base + size) - 1)) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		ret = -ENOMEM;
		goto error;
	}

	pr_debug("Mapping reserved region %lx@%llx for %s\n",
		size, base, pci_name(pdev));
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, base, base + size);

	ret = domain_page_mapping(domain, base, base, size,
		DMA_PTE_READ|DMA_PTE_WRITE);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev);
	if (!ret)
		return 0;
error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
	struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
		rmrr->end_address + 1);
}
#ifdef CONFIG_DMAR_GFX_WA
struct iommu_prepare_data {
	struct pci_dev *pdev;
	int ret;
};

static int __init iommu_prepare_work_fn(unsigned long start_pfn,
					unsigned long end_pfn, void *datax)
{
	struct iommu_prepare_data *data;

	data = (struct iommu_prepare_data *)datax;

	data->ret = iommu_prepare_identity_map(data->pdev,
				start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	return data->ret;
}

static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
{
	int nid;
	struct iommu_prepare_data data;

	data.pdev = pdev;
	data.ret = 0;

	for_each_online_node(nid) {
		work_with_active_regions(nid, iommu_prepare_work_fn, &data);
		if (data.ret)
			return data.ret;
	}
	return data.ret;
}

static void __init iommu_prepare_gfx_mapping(void)
{
	struct pci_dev *pdev = NULL;
	int ret;

	for_each_pci_dev(pdev) {
		if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
				!IS_GFX_DEVICE(pdev))
			continue;
		printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
			pci_name(pdev));
		ret = iommu_prepare_with_active_regions(pdev);
		if (ret)
			printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
	}
}
#else /* !CONFIG_DMAR_GFX_WA */
static inline void iommu_prepare_gfx_mapping(void)
{
	return;
}
#endif

#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16M identity map, "
			"floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */
static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		g_num_of_iommus++;
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path; all other accesses are
		 * read only
		 */
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		kfree(g_iommus);
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs. Need to Split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
	}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *    endfor
	 * endfor
	 */
	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			pdev = rmrr->devices[i];
			/* some BIOS lists non-existent devices in DMAR table */
			if (!pdev)
				continue;
			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
			if (ret)
				printk(KERN_ERR
				 "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_gfx_mapping();

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
					   0);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
					 0);
		iommu_disable_protect_mem_regions(iommu);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}
static inline u64 aligned_size(u64 host_addr, size_t size)
{
	u64 addr;
	addr = (host_addr & (~PAGE_MASK)) + size;
	return PAGE_ALIGN(addr);
}
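/*
 * Example: for host_addr = 0x1080 and size = 0x2000,
 * (0x1080 & ~PAGE_MASK) + 0x2000 = 0x2080, which PAGE_ALIGN() rounds up
 * to 0x3000 - three 4KB pages, since the buffer straddles page
 * boundaries at both ends.
 */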
static struct iova *
iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
{
	struct iova *piova;

	/* Make sure it's in range */
	end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
	if (!size || (IOVA_START_ADDR + size > end))
		return NULL;

	piova = alloc_iova(&domain->iovad,
			size >> PAGE_SHIFT, IOVA_PFN(end), 1);
	return piova;
}

static struct iova *
__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
		   size_t size, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
		iova = iommu_alloc_iova(domain, size, dma_mask);
	else {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_32BIT_MASK and if that fails then try allocating
		 * from higher range
		 */
		iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
		if (!iova)
			iova = iommu_alloc_iova(domain, size, dma_mask);
	}

	if (!iova) {
		printk(KERN_ERR "Allocating iova for %s failed", pci_name(pdev));
		return NULL;
	}

	return iova;
}

static struct dmar_domain *
get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
			DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
			"Allocating domain for %s failed", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev);
		if (ret) {
			printk(KERN_ERR
				"Domain context map for %s failed",
				pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_size((u64)paddr, size);

	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova)
		goto error;

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * [paddr, paddr + size) might span partial pages, so we map whole
	 * pages. Note: if two parts of one page are separately mapped, we
	 * might have two guest_addr mappings to the same host paddr, but this
	 * is not a big problem
	 */
	ret = domain_page_mapping(domain, start_paddr,
		((u64)paddr) & PAGE_MASK, size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping */
	ret = iommu_flush_iotlb_psi(iommu, domain->id,
			start_paddr, size >> VTD_PAGE_SHIFT, 1);
	if (ret)
		iommu_flush_write_buffer(iommu);

	return start_paddr + ((u64)paddr & (~PAGE_MASK));

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
		pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (deferred_flush[i].next) {
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
						 DMA_TLB_GLOBAL_FLUSH, 0);
			for (j = 0; j < deferred_flush[i].next; j++) {
				__free_iova(&deferred_flush[i].domain[j]->iovad,
						deferred_flush[i].iova[j]);
			}
			deferred_flush[i].next = 0;
		}
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
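/*
 * The deferred-unmap scheme above batches at most HIGH_WATER_MARK (250)
 * pending IOVAs per iommu; they are released either when the table
 * fills up (the flush_unmaps() call above) or when the 10ms unmap_timer
 * fires, trading a bounded window of stale IOTLB entries for far fewer
 * global flushes.
 */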
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_addr;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return;
	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (!iova)
		return;

	start_addr = iova->pfn_lo << PAGE_SHIFT;
	size = aligned_size((u64)dev_addr, size);

	pr_debug("Device %s unmapping: %zx@%llx\n",
		pci_name(pdev), size, (unsigned long long)start_addr);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_addr, start_addr + size);
	/* free page tables */
	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
	if (intel_iommu_strict) {
		if (iommu_flush_iotlb_psi(iommu,
			domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
			iommu_flush_write_buffer(iommu);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}

static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
			       int dir)
{
	intel_unmap_page(dev, dev_addr, size, dir, NULL);
}
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	flags &= ~(GFP_DMA | GFP_DMA32);

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, order);
}
static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_addr;
	struct iova *iova;
	size_t size = 0;
	phys_addr_t addr;
	struct scatterlist *sg;
	struct intel_iommu *iommu;

	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (!iova)
		return;
	for_each_sg(sglist, sg, nelems, i) {
		addr = page_to_phys(sg_page(sg)) + sg->offset;
		size += aligned_size((u64)addr, sg->length);
	}

	start_addr = iova->pfn_lo << PAGE_SHIFT;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_addr, start_addr + size);
	/* free page tables */
	dma_pte_free_pagetable(domain, start_addr, start_addr + size);

	if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
			size >> VTD_PAGE_SHIFT, 0))
		iommu_flush_write_buffer(iommu);

	/* free iova */
	__free_iova(&domain->iovad, iova);
}

static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}
*hwdev
, struct scatterlist
*sglist
, int nelems
,
2383 enum dma_data_direction dir
, struct dma_attrs
*attrs
)
2387 struct pci_dev
*pdev
= to_pci_dev(hwdev
);
2388 struct dmar_domain
*domain
;
2392 struct iova
*iova
= NULL
;
2394 struct scatterlist
*sg
;
2395 unsigned long start_addr
;
2396 struct intel_iommu
*iommu
;
2398 BUG_ON(dir
== DMA_NONE
);
2399 if (pdev
->dev
.archdata
.iommu
== DUMMY_DEVICE_DOMAIN_INFO
)
2400 return intel_nontranslate_map_sg(hwdev
, sglist
, nelems
, dir
);
2402 domain
= get_valid_domain_for_dev(pdev
);
2406 iommu
= domain_get_iommu(domain
);
2408 for_each_sg(sglist
, sg
, nelems
, i
) {
2409 addr
= page_to_phys(sg_page(sg
)) + sg
->offset
;
2410 size
+= aligned_size((u64
)addr
, sg
->length
);
2413 iova
= __intel_alloc_iova(hwdev
, domain
, size
, pdev
->dma_mask
);
2415 sglist
->dma_length
= 0;
2420 * Check if DMAR supports zero-length reads on write only
2423 if (dir
== DMA_TO_DEVICE
|| dir
== DMA_BIDIRECTIONAL
|| \
2424 !cap_zlr(iommu
->cap
))
2425 prot
|= DMA_PTE_READ
;
2426 if (dir
== DMA_FROM_DEVICE
|| dir
== DMA_BIDIRECTIONAL
)
2427 prot
|= DMA_PTE_WRITE
;
2429 start_addr
= iova
->pfn_lo
<< PAGE_SHIFT
;
2431 for_each_sg(sglist
, sg
, nelems
, i
) {
2432 addr
= page_to_phys(sg_page(sg
)) + sg
->offset
;
2433 size
= aligned_size((u64
)addr
, sg
->length
);
2434 ret
= domain_page_mapping(domain
, start_addr
+ offset
,
2435 ((u64
)addr
) & PAGE_MASK
,
2438 /* clear the page */
2439 dma_pte_clear_range(domain
, start_addr
,
2440 start_addr
+ offset
);
2441 /* free page tables */
2442 dma_pte_free_pagetable(domain
, start_addr
,
2443 start_addr
+ offset
);
2445 __free_iova(&domain
->iovad
, iova
);
2448 sg
->dma_address
= start_addr
+ offset
+
2449 ((u64
)addr
& (~PAGE_MASK
));
2450 sg
->dma_length
= sg
->length
;
2454 /* it's a non-present to present mapping */
2455 if (iommu_flush_iotlb_psi(iommu
, domain
->id
,
2456 start_addr
, offset
>> VTD_PAGE_SHIFT
, 1))
2457 iommu_flush_write_buffer(iommu
);
2461 static int intel_mapping_error(struct device
*dev
, dma_addr_t dma_addr
)
struct dma_map_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
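
/*
 * Slab caches for the small objects churned on the map/unmap paths:
 * domains, device_domain_info structures and iova nodes. Each helper
 * returns 0 on success or -ENOMEM if its cache cannot be created.
 */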
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain),
					       0, SLAB_HWCACHE_ALIGN,
					       NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}
static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0, SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}
static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					     sizeof(struct iova),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}
static int __init iommu_init_mempool(void)
{
	int ret;

	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}
static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}
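
/*
 * Two filtering passes over the DRHD units: first ignore any unit with
 * no PCI devices under its scope; then, unless dmar_map_gfx is set,
 * bypass units that cover only graphics devices by tagging each such
 * device with DUMMY_DEVICE_DOMAIN_INFO.
 */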
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	if (dmar_map_gfx)
		return;

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass IOMMU if it is just for gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}
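
/*
 * Main entry point: parse the DMAR table and device scopes, set up the
 * mempools and reserved IOVA ranges, initialize the DMAR units, then
 * take over the DMA API and register with the generic IOMMU layer.
 */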
int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now.
	 * Above initialization will also be used by Interrupt-remapping.
	 */
	if (no_iommu || swiotlb || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
	force_iommu = 1;
	dma_ops = &intel_dma_ops;

	register_iommu(&intel_iommu_ops);

	return 0;
}
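
/*
 * The functions below manage "virtual machine" domains: domains created
 * through the generic IOMMU API (typically for KVM device assignment)
 * rather than by the DMA-API path above. Such domains may span several
 * IOMMU units, tracked in the domain's iommu_bmp.
 */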
static int vm_domain_add_dev_info(struct dmar_domain *domain,
				  struct pci_dev *pdev)
{
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
			iommu_detach_dev(iommu,
					 tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu,
					 tmp->bus->number, tmp->devfn);
	}
}
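
/*
 * Detach a single device from a VM domain. If it was the last device
 * behind its IOMMU in this domain, that IOMMU is also cleared from the
 * domain's bitmap and the cached iommu count/capabilities recomputed.
 */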
static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
					  struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there are no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp,
		 * update iommu count and coherency
		 */
		if (device_to_iommu(info->bus, info->devfn) == iommu)
			found = 1;
	}

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu = device_to_iommu(info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}
/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;
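
/*
 * A VM domain is limited by the weakest IOMMU it spans, so take the
 * smallest adjusted guest address width (AGAW) over the domain's
 * iommu_bmp. For example (assuming the usual VT-d encoding), a unit
 * limited to 39-bit/3-level page tables caps a domain that also spans
 * a 48-bit/4-level capable unit.
 */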
static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}
static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}
static int vm_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}
static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}
static void vm_domain_exit(struct dmar_domain *domain)
{
	u64 end;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);
	end = DOMAIN_MAX_ADDR(domain->gaw);
	end = end & (~VTD_PAGE_MASK);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, end);

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, end);

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}
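
/*
 * struct iommu_ops callbacks: thin adapters between the generic
 * iommu_domain and the dmar_domain carried in domain->priv.
 */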
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
		       "intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
		       "intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}
static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}
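
/*
 * Attach first detaches the device from any domain it is already
 * context-mapped into, then checks that this IOMMU's address width can
 * cover everything the domain has mapped so far (max_addr) before
 * setting up the context entry and the device_domain_info bookkeeping.
 */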
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;
	int ret;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	ret = domain_context_mapping(dmar_domain, pdev);
	if (ret)
		return ret;

	ret = vm_domain_add_dev_info(dmar_domain, pdev);
	return ret;
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	vm_domain_remove_one_dev_info(dmar_domain, pdev);
}
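
/*
 * Translate the generic IOMMU_READ/IOMMU_WRITE/IOMMU_CACHE flags into
 * VT-d PTE bits; DMA_PTE_SNP is set only when the domain's hardware
 * supports snoop control. Growing the domain past max_addr first
 * re-checks the minimum AGAW across its IOMMUs.
 */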
static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}

	ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
	return ret;
}
static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	dma_addr_t base;

	/* The address might not be aligned */
	base = iova & VTD_PAGE_MASK;
	size = VTD_PAGE_ALIGN(size);
	dma_pte_clear_range(dmar_domain, base, base + size);

	if (dmar_domain->max_addr == base + size)
		dmar_domain->max_addr = base;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = addr_to_dma_pte(dmar_domain, iova);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}
static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}
static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map_range,
	.unmap		= intel_iommu_unmap_range,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
};
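
/*
 * A minimal sketch of how a consumer drives these callbacks through
 * the generic IOMMU API of this kernel generation ("dom", "iova",
 * "hpa" and "size" are hypothetical caller state):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *
 *	iommu_attach_device(dom, &pdev->dev);
 *	iommu_map_range(dom, iova, hpa, size, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap_range(dom, iova, size);
 *	iommu_detach_device(dom, &pdev->dev);
 *	iommu_domain_free(dom);
 */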
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);