/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw)	((((u64)1) << gaw) - 1)
#define DOMAIN_MAX_PFN(gaw)	((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

#ifndef PHYSICAL_PAGE_MASK
#define PHYSICAL_PAGE_MASK PAGE_MASK
#endif
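
/*
 * Worked example (illustrative comment only): with the default
 * DEFAULT_DOMAIN_ADDRESS_WIDTH of 48 and VTD_PAGE_SHIFT of 12,
 *
 *	DOMAIN_MAX_ADDR(48) = (1ULL << 48) - 1 = 0x0000ffffffffffff
 *	DOMAIN_MAX_PFN(48)  = (1ULL << 36) - 1
 *
 * i.e. a domain can address 2^36 VT-d (4KiB) pages.  IOVA_PFN() uses the
 * CPU's PAGE_SHIFT, so on a 4KiB-page kernel DMA_32BIT_PFN is 0xfffff.
 */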
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
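
/*
 * Illustrative example (comment only, not used by the driver): on a kernel
 * built with 64KiB pages (PAGE_SHIFT == 16) one MM page covers 16 VT-d
 * pages, so mm_to_dma_pfn(3) == 3 << 4 == 48 and dma_to_mm_pfn(48) == 3.
 * On the common 4KiB-page configuration both conversions are the identity.
 */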
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;
/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
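
/*
 * Illustrative sketch only -- a hypothetical helper, not part of the
 * driver -- showing how the context_entry accessors above compose to
 * program a multi-level-translation context entry.  The real code doing
 * this lives in domain_context_mapping_one() further down.
 */
static inline void example_program_context(struct context_entry *ce,
					   unsigned long domain_id,
					   unsigned long pgd_phys, int agaw)
{
	context_clear_entry(ce);
	context_set_domain_id(ce, domain_id);
	context_set_address_root(ce, pgd_phys);	/* must be 4KiB aligned */
	context_set_address_width(ce, agaw);
	context_set_translation_type(ce, CONTEXT_TT_MULTI_LEVEL);
	context_set_fault_enable(ce);
	context_set_present(ce);
}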
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
	return (pte->val & VTD_PAGE_MASK);
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}
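
/*
 * Illustrative sketch only (hypothetical helper, not used anywhere in the
 * driver): shows how the dma_pte accessors above combine to build a
 * present, read/write leaf PTE for one VT-d page frame.
 */
static inline void example_make_rw_pte(struct dma_pte *pte, unsigned long pfn)
{
	dma_clear_pte(pte);			/* start from a clean slate */
	dma_set_pte_pfn(pte, pfn);		/* pfn in VTD_PAGE_SIZE units */
	dma_set_pte_prot(pte, DMA_PTE_READ | DMA_PTE_WRITE);
}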
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
struct dmar_domain *si_domain;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine, more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)
struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};
/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};
static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);
#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
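
/*
 * Example (illustrative comment only): the parser above takes a
 * comma-separated list on the kernel command line, e.g.
 *
 *	intel_iommu=on,strict
 *
 * which enables the IOMMU and disables batched (deferred) IOTLB flushing,
 * or "intel_iommu=igfx_off" to leave the graphics device untranslated.
 */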
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}
static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}
static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, use a default agaw, and
 * get a supported less agaw for iommus that don't support the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
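
/*
 * Worked example (comment only): SAGAW is a 5-bit capability field in
 * which bit N set means the hardware supports an (N+2)-level page table,
 * i.e. agaw N covers 30 + N*9 bits of guest address.  For a part that
 * reports sagaw == 0x4 (only bit 2 set), the loop above starts at
 * width_to_agaw(48) == 2 and stops immediately, selecting a 4-level,
 * 48-bit page table.
 */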
/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}
/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}
static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}
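
/*
 * Worked example (comment only): each level of the VT-d page table decodes
 * LEVEL_STRIDE (9) bits of the DMA pfn.  For a 4-level table (agaw 2,
 * agaw_to_level(2) == 4):
 *
 *	pfn_level_offset(pfn, 4) = (pfn >> 27) & 0x1ff
 *	pfn_level_offset(pfn, 1) = pfn & 0x1ff
 *
 * and level_size(2) == 512 pfns, so align_to_level(pfn, 2) rounds the pfn
 * up to the next 2MiB boundary (expressed in 4KiB VT-d page units).
 */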
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;
	unsigned long flags;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	spin_lock_irqsave(&domain->mapping_lock, flags);
	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			tmp_page = alloc_pgtable_page();

			if (!tmp_page) {
				spin_unlock_irqrestore(&domain->mapping_lock,
					flags);
				return NULL;
			}
			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
			dma_set_pte_pfn(pte, virt_to_dma_pfn(tmp_page));
			/*
			 * high level table always sets r/w, last level page
			 * table control read/write
			 */
			dma_set_pte_readable(pte);
			dma_set_pte_writable(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	spin_unlock_irqrestore(&domain->mapping_lock, flags);
	return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}
/* clear one page's page table */
static void dma_pte_clear_one(struct dmar_domain *domain, unsigned long pfn)
{
	struct dma_pte *pte = NULL;

	/* get last level pte */
	pte = dma_pfn_level_pte(domain, pfn, 1);

	if (pte) {
		dma_clear_pte(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
	}
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here; nobody else touches the iova range */
	while (start_pfn <= last_pfn) {
		dma_pte_clear_one(domain, start_pfn);
		start_pfn++;
	}
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here, nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* Only clear this pte/pmd if we're asked to clear its
		   _whole_ range */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		while (tmp <= last_pfn) {
			pte = dma_pfn_level_pte(domain, tmp, level);
			if (pte) {
				free_pgtable_page(
					phys_to_virt(dma_pte_addr(pte)));
				dma_clear_pte(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
			tmp += level_size(level);
		}
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determine if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determine if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;

	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  u64 addr, unsigned int pages)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));

	BUG_ON(addr & (~VTD_PAGE_MASK));

	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, domain ID 0 is reserved for non-present to present
	 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || did)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
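
/*
 * Worked example (comment only): a page-selective (PSI) invalidation
 * encodes the size as an address mask, so the flushed region must be a
 * naturally aligned power-of-two number of pages.  For pages == 5 the code
 * above computes mask = ilog2(roundup_pow_of_two(5)) = ilog2(8) = 3 and
 * therefore invalidates 8 pages; if 3 exceeded cap_max_amask_val() it
 * would fall back to a domain-selective (DSI) flush instead.
 */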
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}
static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);

		spin_lock_irqsave(&domain->iommu_lock, flags);
		if (--domain->iommu_count == 0) {
			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_exit(domain);
			else
				domain_exit(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags);

		i = find_next_bit(iommu->domain_ids,
			cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}
static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	num = find_first_bit(iommu->domain_ids, ndomains);
	for (; num < ndomains; ) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
		num = find_next_bit(iommu->domain_ids,
				    cap_ndoms(iommu->cap), num+1);
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;
	u64 addr, size;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
		&reserved_alloc_key);
	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			addr = r->start;
			addr &= PHYSICAL_PAGE_MASK;
			size = r->end - addr;
			size = PAGE_ALIGN(size);
			iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
				IOVA_PFN(size + addr) - 1);
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
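
/*
 * Worked example (comment only): the page-table walk consumes 12 offset
 * bits plus a multiple of 9 bits per level, so the adjusted width rounds
 * the guest width up to the next 12 + 9*n boundary.  For gaw = 40,
 * r = (40 - 12) % 9 = 1, so the adjusted width is 40 + 9 - 1 = 48; for
 * gaw = 48 the remainder is 0 and the width is kept as-is.
 */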
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so dont process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}
static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					    cap_ndoms(iommu->cap), num+1);
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			set_bit(iommu->seq_id, &domain->iommu_bmp);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entry we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
						  pci_domain_nr(tmp->bus),
						  tmp->bus->number,
						  tmp->devfn,
						  translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}
static int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			      unsigned long phys_pfn, unsigned long nr_pages,
			      int prot)
{
	struct dma_pte *pte;
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	while (nr_pages--) {
		pte = pfn_to_dma_pte(domain, iov_pfn);
		if (!pte)
			return -ENOMEM;
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		BUG_ON(dma_pte_addr(pte));
		dma_set_pte_pfn(pte, phys_pfn);
		dma_set_pte_prot(pte, prot);
		if (prot & DMA_PTE_SNP)
			dma_set_pte_snp(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
		iov_pfn++;
		phys_pfn++;
	}
	return 0;
}

static int domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
			       u64 hpa, size_t size, int prot)
{
	unsigned long first_pfn = hpa >> VTD_PAGE_SHIFT;
	unsigned long last_pfn = (hpa + size - 1) >> VTD_PAGE_SHIFT;

	return domain_pfn_mapping(domain, iova >> VTD_PAGE_SHIFT, first_pfn,
				  last_pfn - first_pfn + 1, prot);
}
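
/*
 * Worked example (comment only): for hpa = 0x12345800 and size = 0x2000,
 * first_pfn = 0x12345 and last_pfn = (0x12345800 + 0x1fff) >> 12 = 0x12347,
 * so domain_page_mapping() maps last_pfn - first_pfn + 1 = 3 VT-d pages
 * even though only two pages' worth of bytes were requested, because the
 * range straddles a page boundary at both ends.
 */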
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
/*
 * find_domain
 * Note: we use struct pci_dev->dev.archdata.iommu stores the info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (dev_tmp->is_pcie) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, uses it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		domain_exit(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, uses it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody is fast */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}
static int iommu_identity_mapping;

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long size;
	unsigned long long base;

	/* The address might not be aligned */
	base = start & PAGE_MASK;
	size = end - base;
	size = PAGE_ALIGN(size);
	if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
			IOVA_PFN(base + size) - 1)) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %lx@%llx for domain %d\n",
		 size, base, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, base >> VTD_PAGE_SHIFT,
			    (base + size - 1) >> VTD_PAGE_SHIFT);

	return domain_pfn_mapping(domain, base >> VTD_PAGE_SHIFT,
				  base >> VTD_PAGE_SHIFT,
				  size >> VTD_PAGE_SHIFT,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}

static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
		rmrr->end_address + 1);
}
#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */

/* Initialize each context entry as pass through.*/
static int __init init_context_pass_through(void)
{
	struct pci_dev *pdev = NULL;
	struct dmar_domain *domain;
	int ret;

	for_each_pci_dev(pdev) {
		domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_PASS_THROUGH);
		if (ret)
			return ret;
	}
	return 0;
}

static int md_domain_init(struct dmar_domain *domain, int guest_width);
static int __init si_domain_work_fn(unsigned long start_pfn,
				    unsigned long end_pfn, void *datax)
{
	int *ret = datax;

	*ret = iommu_domain_identity_map(si_domain,
					 (uint64_t)start_pfn << PAGE_SHIFT,
					 (uint64_t)end_pfn << PAGE_SHIFT);
	return *ret;
}

static int si_domain_init(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	for_each_online_node(nid) {
		work_with_active_regions(nid, si_domain_work_fn, &ret);
		if (ret)
			return ret;
	}

	return 0;
}
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	list_for_each_entry(info, &si_domain->devices, link)
		if (info->dev == pdev)
			return 1;
	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}

static int iommu_prepare_static_identity_mapping(void)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init();
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
		       pci_name(pdev));

		ret = domain_context_mapping(si_domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret)
			return ret;
		ret = domain_add_dev_info(si_domain, pdev);
		if (ret)
			return ret;
	}

	return 0;
}
int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;
	int pass_through = 1;

	/*
	 * In case pass through can not be enabled, iommu tries to use identity
	 * mapping.
	 */
	if (iommu_pass_through)
		iommu_identity_mapping = 1;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		g_num_of_iommus++;
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path all other access are read
		 * only
		 */
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		kfree(g_iommus);
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMU's. Need to Split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
		if (!ecap_pass_through(iommu->ecap))
			pass_through = 0;
	}
	if (iommu_pass_through)
		if (!pass_through) {
			printk(KERN_INFO
			       "Pass Through is not supported by hardware.\n");
			iommu_pass_through = 0;
		}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	/*
	 * If pass through is set and enabled, context entries of all pci
	 * devices are initialized by pass through translation type.
	 */
	if (iommu_pass_through) {
		ret = init_context_pass_through();
		if (ret) {
			printk(KERN_ERR "IOMMU: Pass through init failed.\n");
			iommu_pass_through = 0;
		}
	}

	/*
	 * If pass through is not set or not enabled, setup context entries for
	 * identity mappings for rmrr, gfx, and isa and may fall back to static
	 * identity mapping if iommu_identity_mapping is set.
	 */
	if (!iommu_pass_through) {
		if (iommu_identity_mapping)
			iommu_prepare_static_identity_mapping();
		/*
		 * For each rmrr
		 *   for each dev attached to rmrr
		 *   do
		 *     locate drhd for dev, alloc domain for dev
		 *     allocate free domain
		 *     allocate page table entries for rmrr
		 *     if context not allocated for bus
		 *           allocate and init context
		 *           set present in root table for this bus
		 *     init context with domain, translation etc
		 *    endfor
		 * endfor
		 */
		printk(KERN_INFO "IOMMU: Setting RMRR:\n");
		for_each_rmrr_units(rmrr) {
			for (i = 0; i < rmrr->devices_cnt; i++) {
				pdev = rmrr->devices[i];
				/*
				 * some BIOS lists non-existent devices in DMAR
				 * table.
				 */
				if (!pdev)
					continue;
				ret = iommu_prepare_rmrr_dev(rmrr, pdev);
				if (ret)
					printk(KERN_ERR
					 "IOMMU: mapping reserved region failed\n");
			}
		}

		iommu_prepare_isa();
	}

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}
static inline u64 aligned_size(u64 host_addr, size_t size)
{
	u64 addr;
	addr = (host_addr & (~PAGE_MASK)) + size;
	return PAGE_ALIGN(addr);
}
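
/*
 * Worked example (comment only): with 4KiB pages, a 0x100-byte buffer at
 * host_addr 0x1234FF0 crosses a page boundary, so
 * aligned_size(0x1234FF0, 0x100) = PAGE_ALIGN(0xFF0 + 0x100) = 0x2000,
 * i.e. two pages must be mapped even though only 256 bytes were requested.
 */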
static struct iova *
iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
{
	struct iova *piova;

	/* Make sure it's in range */
	end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
	if (!size || (IOVA_START_ADDR + size > end))
		return NULL;

	piova = alloc_iova(&domain->iovad,
			size >> PAGE_SHIFT, IOVA_PFN(end), 1);
	return piova;
}

static struct iova *
__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
		   size_t size, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
		iova = iommu_alloc_iova(domain, size, dma_mask);
	else {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
		if (!iova)
			iova = iommu_alloc_iova(domain, size, dma_mask);
	}

	if (!iova) {
		printk(KERN_ERR "Allocating iova for %s failed", pci_name(pdev));
		return NULL;
	}

	return iova;
}
static struct dmar_domain *
get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
			DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
			"Allocating domain for %s failed", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
				"Domain context map for %s failed",
				pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}

static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through non-identity map and unmap process.*/
static int iommu_no_mapping(struct pci_dev *pdev)
{
	int found;

	if (!iommu_identity_mapping)
		return iommu_dummy(pdev);

	found = identity_mapping(pdev);
	if (found) {
		if (pdev->dma_mask > DMA_BIT_MASK(32))
			return 1;
		else {
			/*
			 * 32 bit DMA is removed from si_domain and fall back
			 * to non-identity mapping.
			 */
			domain_remove_one_dev_info(si_domain, pdev);
			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
			       pci_name(pdev));
			return 0;
		}
	} else {
		/*
		 * In case of a detached 64 bit DMA device from vm, the device
		 * is put into si_domain for identity mapping.
		 */
		if (pdev->dma_mask > DMA_BIT_MASK(32)) {
			int ret;
			ret = domain_add_dev_info(si_domain, pdev);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       pci_name(pdev));
				return 1;
			}
		}
	}

	return iommu_dummy(pdev);
}
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(pdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_size((u64)paddr, size);

	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova)
		goto error;

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might be a partial page, so we should map the
	 * whole page.  Note: if two parts of one page are mapped separately, we
	 * might have two guest_addr mappings to the same host paddr, but this
	 * is not a big problem.
	 */
	ret = domain_page_mapping(domain, start_paddr,
				  ((u64)paddr) & PHYSICAL_PAGE_MASK,
				  size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, start_paddr,
				      size >> VTD_PAGE_SHIFT);
	else
		iommu_flush_write_buffer(iommu);

	return start_paddr + ((u64)paddr & (~PAGE_MASK));

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
		pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}
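
/*
 * For illustration, a typical driver path into __intel_map_single() above
 * (the driver-side names here are hypothetical):
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		return -ENOMEM;
 *
 * dma_map_single() resolves to dma_ops->map_page(), which for VT-d is
 * intel_map_page() below; that in turn calls __intel_map_single() to
 * allocate an IOVA and install the page-table entries.
 */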
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];

			mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
			mask = ilog2(mask >> VTD_PAGE_SHIFT);
			iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
					iova->pfn_lo << PAGE_SHIFT, mask);
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
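
/*
 * Deferred unmapping in brief: rather than flushing the IOTLB on every
 * unmap, add_unmap() parks the IOVA in the per-IOMMU deferred_flush[]
 * table and arms a 10ms timer; flush_unmaps() then retires the whole
 * batch with a single global IOTLB flush per IOMMU.  This is the CPU
 * saving referred to in intel_unmap_page() below; booting with
 * intel_iommu=strict disables the batching.
 */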
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(pdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (!iova)
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id,
				      start_pfn << VTD_PAGE_SHIFT,
				      last_pfn - start_pfn + 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}
static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
			       int dir)
{
	intel_unmap_page(dev, dev_addr, size, dir, NULL);
}

static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	flags &= ~(GFP_DMA | GFP_DMA32);

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, order);
}
static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(pdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (!iova)
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	iommu_flush_iotlb_psi(iommu, domain->id,
			      start_pfn << VTD_PAGE_SHIFT,
			      (last_pfn - start_pfn + 1));

	/* free iova */
	__free_iova(&domain->iovad, iova);
}

static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	phys_addr_t addr;
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	size_t offset = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_addr;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(pdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i) {
		addr = page_to_phys(sg_page(sg)) + sg->offset;
		size += aligned_size((u64)addr, sg->length);
	}

	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_addr = iova->pfn_lo << PAGE_SHIFT;
	offset = 0;
	for_each_sg(sglist, sg, nelems, i) {
		addr = page_to_phys(sg_page(sg)) + sg->offset;
		size = aligned_size((u64)addr, sg->length);
		ret = domain_page_mapping(domain, start_addr + offset,
					  ((u64)addr) & PHYSICAL_PAGE_MASK,
					  size, prot);
		if (ret) {
			/* clear the page */
			dma_pte_clear_range(domain,
					    start_addr >> VTD_PAGE_SHIFT,
					    (start_addr + offset - 1) >> VTD_PAGE_SHIFT);
			/* free page tables */
			dma_pte_free_pagetable(domain, start_addr >> VTD_PAGE_SHIFT,
					       (start_addr + offset - 1) >> VTD_PAGE_SHIFT);
			/* free iova */
			__free_iova(&domain->iovad, iova);
			return 0;
		}
		sg->dma_address = start_addr + offset +
				  ((u64)addr & (~PAGE_MASK));
		sg->dma_length = sg->length;
		offset += size;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, start_addr,
				      offset >> VTD_PAGE_SHIFT);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}
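
/*
 * For illustration, how a driver consumes the result of intel_map_sg()
 * through the generic DMA API (program_hw_descriptor() is a hypothetical
 * driver helper):
 *
 *	int i, count;
 *	struct scatterlist *sg;
 *
 *	count = dma_map_sg(&pdev->dev, sglist, nelems, DMA_FROM_DEVICE);
 *	for_each_sg(sglist, sg, count, i)
 *		program_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *
 * Because one contiguous IOVA range is allocated above, the device sees a
 * single linear DMA window even when the underlying pages are scattered.
 */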
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
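
/*
 * intel_dma_ops is installed as the global dma_ops in intel_iommu_init()
 * below (unless pass-through is requested), so every dma_map_*() and
 * dma_unmap_*() call made by drivers is routed through the functions
 * above without any driver changes.
 */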
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain), 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0, SLAB_HWCACHE_ALIGN, NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					     sizeof(struct iova), 0,
					     SLAB_HWCACHE_ALIGN, NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;

	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	if (dmar_map_gfx)
		return;

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass IOMMU if it is just for gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}
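
/*
 * Net effect of init_no_remapping_devices(): DRHD units that cover no PCI
 * devices at all are ignored, and when graphics mapping is disabled on
 * the command line, units that serve only graphics devices are ignored as
 * well, with their devices marked DUMMY_DEVICE_DOMAIN_INFO so that
 * iommu_no_mapping() above treats them as untranslated.
 */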
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_active_iommu(iommu, drhd) {
		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);
		iommu_enable_translation(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

static int iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static int iommu_resume(struct sys_device *dev)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return -EIO;
	}

	for_each_active_iommu(iommu, drhd) {
		spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
		       iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
		       iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
		       iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
		       iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return 0;
}

static struct sysdev_class iommu_sysclass = {
	.name		= "iommu",
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};

static struct sys_device device_iommu = {
	.cls	= &iommu_sysclass,
};

static int __init init_iommu_sysfs(void)
{
	int error;

	error = sysdev_class_register(&iommu_sysclass);
	if (error)
		return error;

	error = sysdev_register(&device_iommu);
	if (error)
		sysdev_class_unregister(&iommu_sysclass);

	return error;
}

#else
static int __init init_iommu_sysfs(void)
{
	return 0;
}
#endif	/* CONFIG_SUSPEND */
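
/*
 * Suspend/resume in brief: iommu_suspend() flushes all caches, disables
 * translation and saves the four fault-event registers of each IOMMU;
 * iommu_resume() re-runs init_iommu_hw() to reinstall the root entry and
 * re-enable translation, then restores those registers.  Only the
 * fault-reporting registers need saving because everything else is
 * reprogrammed from the driver's in-memory state.
 */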
int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now.
	 * Above initialization will also be used by Interrupt-remapping.
	 */
	if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);

	if (!iommu_pass_through) {
		printk(KERN_INFO
		       "Multi-level page-table translation for DMAR.\n");
		dma_ops = &intel_dma_ops;
	} else
		printk(KERN_INFO
		       "DMAR: Pass through translation for DMAR.\n");

	init_iommu_sysfs();

	register_iommu(&intel_iommu_ops);

	return 0;
}
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
			iommu_detach_dev(iommu,
					 tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		/* No need to compare PCI domain; it has to be the same */
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there are no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp and
		 * update the iommu count and coherency
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}
/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;

static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}
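
/*
 * AGAW selection, with a worked example: guestwidth_to_adjustwidth()
 * rounds the requested guest width up to a width built from whole 9-bit
 * page-table levels above the 12-bit page offset (30, 39, 48, 57 bits,
 * capped at 64).  The default 48-bit width is already such a value, so it
 * yields a four-level table, and width_to_agaw() returns the matching
 * AGAW encoding used in context entries.
 */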
static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}
static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}

static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;
	int ret;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	ret = domain_add_dev_info(dmar_domain, pdev);
	if (ret)
		return ret;

	ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	return ret;
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}
static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}

	ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
	return ret;
}
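
/*
 * Note on the checks above: IOMMU_READ/IOMMU_WRITE/IOMMU_CACHE from the
 * generic IOMMU API map onto the DMA_PTE_READ/WRITE/SNP bits, and before
 * the mapping is allowed to grow past the domain's current high-water
 * mark (max_addr), the smallest AGAW among the IOMMUs backing the domain
 * is checked, since a mapping beyond what the weakest unit can walk would
 * simply fault on that unit.
 */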
static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;

	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
			    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}
static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map_range,
	.unmap		= intel_iommu_unmap_range,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
};
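
/*
 * These callbacks back the generic IOMMU API registered via
 * register_iommu() in intel_iommu_init().  For illustration, a consumer
 * such as KVM device assignment uses them roughly like this (error
 * handling omitted):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *
 *	iommu_attach_device(dom, &pdev->dev);
 *	iommu_map_range(dom, gpa, hpa, size, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap_range(dom, gpa, size);
 *	iommu_detach_device(dom, &pdev->dev);
 *	iommu_domain_free(dom);
 */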
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);