/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#define ROOT_SIZE	VTD_PAGE_SIZE
#define CONTEXT_SIZE	VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
#define DOMAIN_MAX_PFN(gaw)  ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

#ifndef PHYSICAL_PAGE_MASK
#define PHYSICAL_PAGE_MASK PAGE_MASK
#endif
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;
/*
 * Root entry layout (low 64 bits):
 * 12-63: Context Ptr (12 - (haw-1))
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}

static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}

static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
/*
 * Context entry, low 64 bits:
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * DMA pte, bits 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
	return (pte->val & VTD_PAGE_MASK);
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};
/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIE-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}
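
/*
 * AGAW ("adjusted guest address width") encodes how many page-table levels
 * a domain uses: per the struct dmar_domain comment above, agaw 0 is a
 * 2-level, 30-bit table and each further level adds LEVEL_STRIDE (9) bits
 * of address width.
 */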
static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}
470 /* This functionin only returns single iommu in a domain */
471 static struct intel_iommu
*domain_get_iommu(struct dmar_domain
*domain
)
475 /* si_domain and vm domain should not get here. */
476 BUG_ON(domain
->flags
& DOMAIN_FLAG_VIRTUAL_MACHINE
);
477 BUG_ON(domain
->flags
& DOMAIN_FLAG_STATIC_IDENTITY
);
479 iommu_id
= find_first_bit(&domain
->iommu_bmp
, g_num_of_iommus
);
480 if (iommu_id
< 0 || iommu_id
>= g_num_of_iommus
)
483 return g_iommus
[iommu_id
];
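
/*
 * The per-domain coherency and snooping flags below are conservative:
 * a domain only advertises a capability if every IOMMU it is attached to
 * (every bit set in iommu_bmp) supports it.
 */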
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}
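
/*
 * Map a (segment, bus, devfn) triple back to the IOMMU that covers it by
 * walking the DRHD units: an exact device match, a bridge whose secondary
 * bus range contains the bus, or an INCLUDE_ALL unit all qualify.
 */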
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}
/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context)
		ret = 0;
	else
		ret = context_present(&context[devfn]);
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}
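
/*
 * Each page-table level decodes LEVEL_STRIDE (9) bits of the DMA pfn,
 * so every 4KiB table page holds 512 (1 << LEVEL_STRIDE) entries.
 */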
/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}
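
/*
 * Walk the page table top-down for a guest address, allocating any missing
 * intermediate table pages along the way, and return the last-level PTE
 * that maps the address.
 */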
static struct dma_pte *addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;
	unsigned long flags;

	BUG_ON(!domain->pgd);
	BUG_ON(addr >> addr_width);
	parent = domain->pgd;

	spin_lock_irqsave(&domain->mapping_lock, flags);
	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(addr >> VTD_PAGE_SHIFT, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			tmp_page = alloc_pgtable_page();
			if (!tmp_page) {
				spin_unlock_irqrestore(&domain->mapping_lock,
						       flags);
				return NULL;
			}
			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
			dma_set_pte_pfn(pte, virt_to_dma_pfn(tmp_page));
			/*
			 * high level table always sets r/w, last level page
			 * table control read/write
			 */
			dma_set_pte_readable(pte);
			dma_set_pte_writable(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	spin_unlock_irqrestore(&domain->mapping_lock, flags);
	return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn, int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear one page's page table */
static void dma_pte_clear_one(struct dmar_domain *domain, unsigned long pfn)
{
	struct dma_pte *pte = NULL;

	/* get last level pte */
	pte = dma_pfn_level_pte(domain, pfn, 1);

	if (pte) {
		dma_clear_pte(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
	}
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here; nobody else touches the iova range */
	while (start_pfn <= last_pfn) {
		dma_pte_clear_one(domain, start_pfn);
		start_pfn++;
	}
}
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here, nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* Only clear this pte/pmd if we're asked to clear its
		   _whole_ range */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		while (tmp <= last_pfn) {
			pte = dma_pfn_level_pte(domain, tmp, level);
			if (pte) {
				free_pgtable_page(
					phys_to_virt(dma_pte_addr(pte)));
				dma_clear_pte(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
			tmp += level_size(level);
		}
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
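
/*
 * ATS support: if the endpoint advertises a Device-IOTLB (PCIe ATS
 * capability) and a matching ATSR unit exists, remember the owning IOMMU
 * so that queued device-IOTLB invalidations can be issued for it later.
 */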
static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  u64 addr, unsigned int pages)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));

	BUG_ON(addr & (~VTD_PAGE_MASK));

	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, domain ID 0 is reserved for non-present to present
	 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || did)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
				 GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}

static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);
void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);

		spin_lock_irqsave(&domain->iommu_lock, flags);
		if (--domain->iommu_count == 0) {
			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_exit(domain);
			else
				domain_exit(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags);

		i = find_next_bit(iommu->domain_ids,
				  cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}
	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}
static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	num = find_first_bit(iommu->domain_ids, ndomains);
	for (; num < ndomains; ) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
		num = find_next_bit(iommu->domain_ids,
				    cap_ndoms(iommu->cap), num+1);
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;
static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;
	u64 addr, size;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
		&reserved_alloc_key);
	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			addr = r->start;
			addr &= PHYSICAL_PAGE_MASK;
			size = r->end - addr;
			size = PAGE_ALIGN(size);
			iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
				IOVA_PFN(size + addr) - 1);
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}
static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}
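
/*
 * Program one context-table entry: pick (or reuse) a domain id on this
 * IOMMU, point the entry at the domain's page tables (or mark it pass
 * through), and flush the context cache/IOTLB as caching mode requires.
 */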
static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					    cap_ndoms(iommu->cap), num+1);
		}

		if (!found) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			set_bit(iommu->seq_id, &domain->iommu_bmp);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->bus),
					tmp->bus->number, tmp->devfn,
					translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}
static int
domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
		    u64 hpa, size_t size, int prot)
{
	u64 start_pfn, end_pfn;
	struct dma_pte *pte;
	int index;
	int addr_width = agaw_to_width(domain->agaw);

	BUG_ON(hpa >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
	end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
	index = 0;
	while (start_pfn < end_pfn) {
		pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
		if (!pte)
			return -ENOMEM;
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		BUG_ON(dma_pte_addr(pte));
		dma_set_pte_pfn(pte, start_pfn);
		dma_set_pte_prot(pte, prot);
		if (prot & DMA_PTE_SNP)
			dma_set_pte_snp(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
		start_pfn++;
		index++;
	}
	return 0;
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: we use struct pci_dev->dev.archdata.iommu to store the iommu info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (dev_tmp->is_pcie) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, uses it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		domain_exit(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, uses it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody is fast */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}
static int iommu_identity_mapping;

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long size;
	unsigned long long base;

	/* The address might not be aligned */
	base = start & PAGE_MASK;
	size = end - base;
	size = PAGE_ALIGN(size);
	if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
			IOVA_PFN(base + size) - 1)) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %lx@%llx for domain %d\n",
		 size, base, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, base >> VTD_PAGE_SHIFT,
			    (base + size - 1) >> VTD_PAGE_SHIFT);

	return domain_page_mapping(domain, base, base, size,
				   DMA_PTE_READ|DMA_PTE_WRITE);
}

static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

error:
	domain_exit(domain);
	return ret;
}
static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
		rmrr->end_address + 1);
}

#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */

/* Initialize each context entry as pass through.*/
static int __init init_context_pass_through(void)
{
	struct pci_dev *pdev = NULL;
	struct dmar_domain *domain;
	int ret;

	for_each_pci_dev(pdev) {
		domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_PASS_THROUGH);
		if (ret)
			return ret;
	}
	return 0;
}
static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_work_fn(unsigned long start_pfn,
				    unsigned long end_pfn, void *datax)
{
	int *ret = datax;

	*ret = iommu_domain_identity_map(si_domain,
					 (uint64_t)start_pfn << PAGE_SHIFT,
					 (uint64_t)end_pfn << PAGE_SHIFT);
	return *ret;
}

static int si_domain_init(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	for_each_online_node(nid) {
		work_with_active_regions(nid, si_domain_work_fn, &ret);
		if (ret)
			return ret;
	}

	return 0;
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	list_for_each_entry(info, &si_domain->devices, link)
		if (info->dev == pdev)
			return 1;
	return 0;
}
static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}

static int iommu_prepare_static_identity_mapping(void)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init();
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
		       pci_name(pdev));

		ret = domain_context_mapping(si_domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret)
			return ret;
		ret = domain_add_dev_info(si_domain, pdev);
		if (ret)
			return ret;
	}

	return 0;
}
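
/*
 * init_dmars() below is the main boot-time setup path: it allocates the
 * per-IOMMU domain arrays and root entries, picks register-based or queued
 * invalidation, establishes pass-through/identity/RMRR/ISA mappings as
 * configured, and finally enables DMA translation on each unit.
 */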
int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;
	int pass_through = 1;

	/*
	 * In case pass through can not be enabled, iommu tries to use identity
	 * mapping.
	 */
	if (iommu_pass_through)
		iommu_identity_mapping = 1;

	/*
	 * for each drhd
	 *   allocate root
	 *   initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		g_num_of_iommus++;
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path all other access are read
		 * only
		 */
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMU's. Need to Split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
		if (!ecap_pass_through(iommu->ecap))
			pass_through = 0;
	}
	if (iommu_pass_through)
		if (!pass_through) {
			printk(KERN_INFO
			       "Pass Through is not supported by hardware.\n");
			iommu_pass_through = 0;
		}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	/*
	 * If pass through is set and enabled, context entries of all pci
	 * devices are initialized by pass through translation type.
	 */
	if (iommu_pass_through) {
		ret = init_context_pass_through();
		if (ret) {
			printk(KERN_ERR "IOMMU: Pass through init failed.\n");
			iommu_pass_through = 0;
		}
	}

	/*
	 * If pass through is not set or not enabled, setup context entries for
	 * identity mappings for rmrr, gfx, and isa and may fall back to static
	 * identity mapping if iommu_identity_mapping is set.
	 */
	if (!iommu_pass_through) {
		if (iommu_identity_mapping)
			iommu_prepare_static_identity_mapping();
		/*
		 * For each rmrr
		 *   for each dev attached to rmrr
		 *   do
		 *     locate drhd for dev, alloc domain for dev
		 *     allocate free domain
		 *     allocate page table entries for rmrr
		 *     if context not allocated for bus
		 *           allocate and init context
		 *           set present in root table for this bus
		 *     init context with domain, translation etc
		 *   endfor
		 * endfor
		 */
		printk(KERN_INFO "IOMMU: Setting RMRR:\n");
		for_each_rmrr_units(rmrr) {
			for (i = 0; i < rmrr->devices_cnt; i++) {
				pdev = rmrr->devices[i];
				/*
				 * some BIOS lists non-exist devices in DMAR
				 * table.
				 */
				if (!pdev)
					continue;
				ret = iommu_prepare_rmrr_dev(rmrr, pdev);
				if (ret)
					printk(KERN_ERR
				 "IOMMU: mapping reserved region failed\n");
			}
		}

		iommu_prepare_isa();
	}

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}
static inline u64 aligned_size(u64 host_addr, size_t size)
{
	u64 addr;
	addr = (host_addr & (~PAGE_MASK)) + size;
	return PAGE_ALIGN(addr);
}

static struct iova *
iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
{
	struct iova *piova;

	/* Make sure it's in range */
	end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
	if (!size || (IOVA_START_ADDR + size > end))
		return NULL;

	piova = alloc_iova(&domain->iovad,
			size >> PAGE_SHIFT, IOVA_PFN(end), 1);
	return piova;
}

static struct iova *
__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
		   size_t size, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
		iova = iommu_alloc_iova(domain, size, dma_mask);
	else {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
		if (!iova)
			iova = iommu_alloc_iova(domain, size, dma_mask);
	}

	if (!iova) {
		printk(KERN_ERR "Allocating iova for %s failed", pci_name(pdev));
		return NULL;
	}

	return iova;
}
static struct dmar_domain *
get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
			DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
			"Allocating domain for %s failed", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
				"Domain context map for %s failed",
				pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}

static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through non-identity map and unmap process.*/
static int iommu_no_mapping(struct pci_dev *pdev)
{
	int found;

	if (!iommu_identity_mapping)
		return iommu_dummy(pdev);

	found = identity_mapping(pdev);
	if (found) {
		if (pdev->dma_mask > DMA_BIT_MASK(32))
			return 1;
		else {
			/*
			 * 32 bit DMA is removed from si_domain and fall back
			 * to non-identity mapping.
			 */
			domain_remove_one_dev_info(si_domain, pdev);
			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
			       pci_name(pdev));
			return 0;
		}
	} else {
		/*
		 * In case of a detached 64 bit DMA device from vm, the device
		 * is put into si_domain for identity mapping.
		 */
		if (pdev->dma_mask > DMA_BIT_MASK(32)) {
			int ret;
			ret = domain_add_dev_info(si_domain, pdev);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       pci_name(pdev));
				return 1;
			}
		}
	}

	return iommu_dummy(pdev);
}
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova = NULL;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(pdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_size((u64)paddr, size);

	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova)
		goto error;

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings.
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might be partial page, we should map the whole
	 * page.  Note: if two parts of one page are separately mapped, we
	 * might have two guest_addr mapping to the same host paddr, but this
	 * is not a big problem
	 */
	ret = domain_page_mapping(domain, start_paddr,
				  ((u64)paddr) & PHYSICAL_PAGE_MASK,
				  size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, start_paddr,
				      size >> VTD_PAGE_SHIFT);
	else
		iommu_flush_write_buffer(iommu);

	return start_paddr + ((u64)paddr & (~PAGE_MASK));

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
		pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}
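/*
 * Illustrative sketch only (not used by the driver), assuming 4 KiB pages:
 * the handle returned above is the first PFN of the allocated IOVA range
 * shifted up to a byte address, plus the offset of paddr within its page,
 * so the device sees the same sub-page offset as the CPU.  Names are made up.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT	12
#define EX_PAGE_SIZE	(1UL << EX_PAGE_SHIFT)

static uint64_t ex_compose_dma_addr(unsigned long iova_pfn_lo, uint64_t paddr)
{
	return ((uint64_t)iova_pfn_lo << EX_PAGE_SHIFT) +
	       (paddr & (EX_PAGE_SIZE - 1));
}

int main(void)
{
	/* IOVA page 0x80000 mapped for a buffer at physical 0x12345678 */
	printf("%#llx\n",
	       (unsigned long long)ex_compose_dma_addr(0x80000, 0x12345678));
	/* prints 0x80000678: page-aligned IOVA plus the 0x678 byte offset */
	return 0;
}
#endif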
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}
static void flush_unmaps(void)
{
	int i, j;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];

			mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
			mask = ilog2(mask >> VTD_PAGE_SHIFT);
			iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
					iova->pfn_lo << PAGE_SHIFT, mask);
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}
static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));

	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
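/*
 * Illustrative sketch only (not used by the driver): the add_unmap() /
 * flush_unmaps() pair above batches IOTLB work -- unmapped ranges are parked
 * in a table and released together around one global flush, either when the
 * table hits a high-water mark or when the 10 ms timer fires.  The standalone
 * model below shows the same batching idea; all names are made up.
 */
#if 0
#include <stdio.h>

#define EX_HIGH_WATER_MARK 4

struct ex_pending { int id; };

static struct ex_pending ex_table[EX_HIGH_WATER_MARK];
static int ex_next;

static void ex_flush_all(void)
{
	int i;

	/* one expensive "global flush", then release everything batched */
	printf("global flush, releasing %d entries\n", ex_next);
	for (i = 0; i < ex_next; i++)
		printf("  release id %d\n", ex_table[i].id);
	ex_next = 0;
}

static void ex_add_unmap(int id)
{
	/* flush first if the batch is full, then queue the new entry */
	if (ex_next == EX_HIGH_WATER_MARK)
		ex_flush_all();
	ex_table[ex_next++].id = id;
}

int main(void)
{
	int id;

	for (id = 0; id < 10; id++)
		ex_add_unmap(id);
	ex_flush_all();		/* what the timer would do for stragglers */
	return 0;
}
#endif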
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(pdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (!iova)
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id,
				      start_pfn << VTD_PAGE_SHIFT,
				      last_pfn - start_pfn + 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}
static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
			       int dir)
{
	intel_unmap_page(dev, dev_addr, size, dir, NULL);
}
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	flags &= ~(GFP_DMA | GFP_DMA32);

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}
static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, order);
}
static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(pdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (!iova)
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	iommu_flush_iotlb_psi(iommu, domain->id,
			      start_pfn << VTD_PAGE_SHIFT,
			      (last_pfn - start_pfn + 1));

	/* free iova */
	__free_iova(&domain->iovad, iova);
}
static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	phys_addr_t addr;
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	size_t offset = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_addr;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(pdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i) {
		addr = page_to_phys(sg_page(sg)) + sg->offset;
		size += aligned_size((u64)addr, sg->length);
	}

	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings.
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_addr = iova->pfn_lo << PAGE_SHIFT;
	offset = 0;
	for_each_sg(sglist, sg, nelems, i) {
		addr = page_to_phys(sg_page(sg)) + sg->offset;
		size = aligned_size((u64)addr, sg->length);
		ret = domain_page_mapping(domain, start_addr + offset,
					  ((u64)addr) & PHYSICAL_PAGE_MASK,
					  size, prot);
		if (ret) {
			/* clear the page */
			dma_pte_clear_range(domain,
					    start_addr >> VTD_PAGE_SHIFT,
					    (start_addr + offset - 1) >> VTD_PAGE_SHIFT);
			/* free page tables */
			dma_pte_free_pagetable(domain, start_addr >> VTD_PAGE_SHIFT,
					       (start_addr + offset - 1) >> VTD_PAGE_SHIFT);
			/* free iova */
			__free_iova(&domain->iovad, iova);
			return 0;
		}
		sg->dma_address = start_addr + offset +
				  ((u64)addr & (~PAGE_MASK));
		sg->dma_length = sg->length;
		offset += size;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, start_addr,
				      offset >> VTD_PAGE_SHIFT);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}
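/*
 * Illustrative sketch only (not used by the driver), assuming 4 KiB pages:
 * intel_map_sg() first sums the page-rounded length of every scatterlist
 * entry to size one contiguous IOVA allocation, then lays the entries out
 * back to back at page granularity (offset += size).  Names are made up.
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE 4096UL

struct ex_sg { uint64_t phys; size_t len; };

static size_t ex_aligned_size(uint64_t addr, size_t len)
{
	size_t span = (addr & (EX_PAGE_SIZE - 1)) + len;

	return (span + EX_PAGE_SIZE - 1) & ~(EX_PAGE_SIZE - 1);
}

int main(void)
{
	struct ex_sg sg[2] = {
		{ 0x10000ff0, 0x20 },	/* straddles a page boundary: 2 pages */
		{ 0x20000000, 0x100 },	/* fits in one page */
	};
	size_t total = 0, offset = 0;
	int i;

	for (i = 0; i < 2; i++)
		total += ex_aligned_size(sg[i].phys, sg[i].len);
	printf("iova bytes needed: %zu\n", total);	/* 3 pages = 12288 */

	for (i = 0; i < 2; i++) {
		size_t size = ex_aligned_size(sg[i].phys, sg[i].len);

		printf("entry %d at iova offset %zu (+%#llx in page)\n",
		       i, offset,
		       (unsigned long long)(sg[i].phys & (EX_PAGE_SIZE - 1)));
		offset += size;
	}
	return 0;
}
#endif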
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}
struct dma_map_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain),
					       0, SLAB_HWCACHE_ALIGN, NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0, SLAB_HWCACHE_ALIGN, NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					     sizeof(struct iova),
					     0, SLAB_HWCACHE_ALIGN, NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}
static int __init iommu_init_mempool(void)
{
	int ret;

	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	if (dmar_map_gfx)
		return;

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass IOMMU if it is just for gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_active_iommu(iommu, drhd) {
		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);
		iommu_enable_translation(iommu);
	}

	return 0;
}
static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}
static int iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}
static int iommu_resume(struct sys_device *dev)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return -EIO;
	}

	for_each_active_iommu(iommu, drhd) {

		spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
			iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
			iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
			iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
			iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return 0;
}
static struct sysdev_class iommu_sysclass = {
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};

static struct sys_device device_iommu = {
	.cls	= &iommu_sysclass,
};

static int __init init_iommu_sysfs(void)
{
	int error;

	error = sysdev_class_register(&iommu_sysclass);
	if (error)
		return error;

	error = sysdev_register(&device_iommu);
	if (error)
		sysdev_class_unregister(&iommu_sysclass);

	return error;
}

#else
static int __init init_iommu_sysfs(void)
{
	return 0;
}
#endif	/* CONFIG_SUSPEND */
int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now.
	 * Above initialization will also be used by Interrupt-remapping.
	 */
	if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);

	if (!iommu_pass_through) {
		printk(KERN_INFO
		       "Multi-level page-table translation for DMAR.\n");
		dma_ops = &intel_dma_ops;
	} else
		printk(KERN_INFO
		       "DMAR: Pass through translation for DMAR.\n");

	init_iommu_sysfs();

	register_iommu(&intel_iommu_ops);

	return 0;
}
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}
static void domain_remove_one_dev_info(struct dmar_domain *domain,
					struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		/* No need to compare PCI domain; it has to be the same */
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);
			continue;
		}

		/* if there is no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp
		 * update iommu count and coherency
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	if (!found) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}
/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;

static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}
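/*
 * Illustrative sketch only (not used by the driver): vm_domain_min_agaw()
 * walks the set bits of the domain's IOMMU bitmap and keeps the smallest
 * address width among the hardware units serving the domain, so the domain
 * never maps beyond what its weakest IOMMU can translate.  The standalone
 * model below does the same scan; all names are made up.
 */
#if 0
#include <stdio.h>

#define EX_NR_IOMMUS 4

int main(void)
{
	int agaw[EX_NR_IOMMUS] = { 48, 39, 48, 57 };
	unsigned long iommu_bmp = 0x5;	/* domain uses IOMMUs 0 and 2 */
	int i, min_agaw = 64;

	for (i = 0; i < EX_NR_IOMMUS; i++) {
		if (!(iommu_bmp & (1UL << i)))
			continue;
		if (min_agaw > agaw[i])
			min_agaw = agaw[i];
	}
	printf("min agaw: %d\n", min_agaw);	/* 48 */
	return 0;
}
#endif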
static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}
static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}
static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so dont process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;
	int ret;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	ret = domain_add_dev_info(dmar_domain, pdev);
	if (ret)
		return ret;

	ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	return ret;
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}
static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}

	ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
	return ret;
}
static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;

	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
			    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = addr_to_dma_pte(dmar_domain, iova);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}
static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}
static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy = intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map_range,
	.unmap		= intel_iommu_unmap_range,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap = intel_iommu_domain_has_cap,
};
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);