/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
#define DOMAIN_MAX_PFN(gaw)  ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
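
/*
 * Worked example (editorial, illustrative only): with 4KiB VT-d pages
 * (VTD_PAGE_SHIFT == 12) and a hypothetical 16KiB MM page size
 * (PAGE_SHIFT == 14), the shift is 2, so dma_to_mm_pfn(0x1000) == 0x400
 * and mm_to_dma_pfn(0x400) == 0x1000. On x86, where both shifts are 12,
 * the two conversions are identity operations.
 */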
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;
/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};
static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
	return (pte->val & VTD_PAGE_MASK);
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}
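
/*
 * Worked example (editorial, illustrative only): for host pfn 0x1234,
 * dma_set_pte_pfn() followed by dma_set_pte_readable() and
 * dma_set_pte_writable() yields pte->val == 0x1234003 -- the page frame
 * address in bits 12-63 and the read/write permission bits in bits 0-1,
 * which is also what dma_pte_present() tests.
 */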
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
struct dmar_domain *si_domain;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine, more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)
struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};
/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};
static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;
static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
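
/*
 * Usage example (editorial, illustrative only): booting with the kernel
 * parameter "intel_iommu=on,strict" both enables the IOMMU and disables
 * batched IOTLB flushing; the parser above walks the comma-separated
 * option list one token at a time via strcspn().
 */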
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;
static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}


static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}
static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}
static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
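
/*
 * Worked example (editorial, illustrative only): with the default 48-bit
 * guest address width, width_to_agaw(48) == 2. If the hardware SAGAW
 * field reports only the 39-bit/3-level capability (bit 1 set), the loop
 * in __iommu_calculate_agaw() steps down from agaw 2 until test_bit()
 * succeeds and returns 1.
 */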
/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}
static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}
/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}
static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	ret = 0;
	if (!context)
		goto out;
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}
static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}
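
/*
 * Worked example (editorial, illustrative only): for a 48-bit adjusted
 * width, width_to_agaw(48) == 2 and agaw_to_level(2) == 4, i.e. a
 * 4-level page table. Each level decodes LEVEL_STRIDE == 9 bits of the
 * DMA pfn: level 1 covers pfn bits 0-8, level 2 bits 9-17, and so on,
 * which is what pfn_level_offset() extracts and what level_size() and
 * level_mask() describe.
 */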
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;
	unsigned long flags;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	spin_lock_irqsave(&domain->mapping_lock, flags);
	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			tmp_page = alloc_pgtable_page();
			if (!tmp_page) {
				spin_unlock_irqrestore(&domain->mapping_lock,
						       flags);
				return NULL;
			}
			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
			dma_set_pte_pfn(pte, virt_to_dma_pfn(tmp_page));
			/*
			 * high level table always sets r/w, last level page
			 * table control read/write
			 */
			dma_set_pte_readable(pte);
			dma_set_pte_writable(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	spin_unlock_irqrestore(&domain->mapping_lock, flags);
	return pte;
}
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}
/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here; nobody else touches the iova range */
	while (start_pfn <= last_pfn) {
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, 2);
			continue;
		}
		while (start_pfn <= last_pfn &&
		       (unsigned long)pte >> VTD_PAGE_SHIFT ==
		       (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
			dma_clear_pte(pte);
			start_pfn++;
			pte++;
		}
		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);
	}
}
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here, nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* Only clear this pte/pmd if we're asked to clear its
		   _whole_ range */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		while (tmp + level_size(level) - 1 <= last_pfn) {
			pte = dma_pfn_level_pte(domain, tmp, level);
			if (pte) {
				free_pgtable_page(
					phys_to_virt(dma_pte_addr(pte)));
				dma_clear_pte(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
			tmp += level_size(level);
		}
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines whether we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines whether we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fallback to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, domain ID 0 is reserved for non-present to present
	 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || did)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
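
/*
 * Worked example (editorial, illustrative only): flushing pages == 5
 * gives mask == ilog2(__roundup_pow_of_two(5)) == 3, i.e. a
 * page-selective invalidation covering 2^3 == 8 VT-d pages starting at
 * addr. If the hardware's maximum address mask (cap_max_amask_val) is
 * smaller than the mask, the code above falls back to a domain-selective
 * flush instead.
 */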
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}
static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}
static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);

		spin_lock_irqsave(&domain->iommu_lock, flags);
		if (--domain->iommu_count == 0) {
			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_exit(domain);
			else
				domain_exit(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags);

		i = find_next_bit(iommu->domain_ids,
			cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}
static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}
static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	num = find_first_bit(iommu->domain_ids, ndomains);
	for (; num < ndomains; ) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
		num = find_next_bit(iommu->domain_ids,
				    cap_ndoms(iommu->cap), num+1);
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;
static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
		&reserved_alloc_key);
	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}
static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
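
/*
 * Worked example (editorial, illustrative only): a 36-bit guest width
 * gives r == (36 - 12) % 9 == 6, so the width is rounded up to
 * 36 + 9 - 6 == 39 bits -- the next size whose page-walk depth is a
 * whole number of 9-bit levels above the 12-bit page offset.
 */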
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}
static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so dont process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}
static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					    cap_ndoms(iommu->cap), num+1);
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			set_bit(iommu->seq_id, &domain->iommu_bmp);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entry we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
						  pci_domain_nr(tmp->bus),
						  tmp->bus->number,
						  tmp->devfn,
						  translation);
}
static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages--) {
		if (!sg_res) {
			sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
		}
		if (!pte) {
			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
			if (!pte)
				return -ENOMEM;
		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		if (unlikely(dma_pte_addr(pte))) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx)\n",
			       iov_pfn, (unsigned long long)pte->val);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}
		pte->val = pteval;
		pte++;
		if (!nr_pages ||
		    (unsigned long)pte >> VTD_PAGE_SHIFT !=
		    (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}
		iov_pfn++;
		pteval += VTD_PAGE_SIZE;
		sg_res--;
		if (!sg_res)
			sg = sg_next(sg);
	}
	return 0;
}
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
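
/*
 * Usage sketch (editorial, illustrative only, hypothetical values): to
 * identity-map one 4KiB page at DMA pfn 0x1000 with read/write
 * permission, a caller would do:
 *
 *	domain_pfn_mapping(domain, 0x1000, 0x1000, 1,
 *			   DMA_PTE_READ | DMA_PTE_WRITE);
 *
 * which is exactly how iommu_domain_identity_map() below uses it.
 */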
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
/*
 * find_domain
 * Note: struct pci_dev->dev.archdata.iommu stores the info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}
1772 static struct dmar_domain
*get_domain_for_dev(struct pci_dev
*pdev
, int gaw
)
1774 struct dmar_domain
*domain
, *found
= NULL
;
1775 struct intel_iommu
*iommu
;
1776 struct dmar_drhd_unit
*drhd
;
1777 struct device_domain_info
*info
, *tmp
;
1778 struct pci_dev
*dev_tmp
;
1779 unsigned long flags
;
1780 int bus
= 0, devfn
= 0;
1784 domain
= find_domain(pdev
);
1788 segment
= pci_domain_nr(pdev
->bus
);
1790 dev_tmp
= pci_find_upstream_pcie_bridge(pdev
);
1792 if (dev_tmp
->is_pcie
) {
1793 bus
= dev_tmp
->subordinate
->number
;
1796 bus
= dev_tmp
->bus
->number
;
1797 devfn
= dev_tmp
->devfn
;
1799 spin_lock_irqsave(&device_domain_lock
, flags
);
1800 list_for_each_entry(info
, &device_domain_list
, global
) {
1801 if (info
->segment
== segment
&&
1802 info
->bus
== bus
&& info
->devfn
== devfn
) {
1803 found
= info
->domain
;
1807 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1808 /* pcie-pci bridge already has a domain, uses it */
1815 domain
= alloc_domain();
1819 /* Allocate new domain for the device */
1820 drhd
= dmar_find_matched_drhd_unit(pdev
);
1822 printk(KERN_ERR
"IOMMU: can't find DMAR for device %s\n",
1826 iommu
= drhd
->iommu
;
1828 ret
= iommu_attach_domain(domain
, iommu
);
1830 domain_exit(domain
);
1834 if (domain_init(domain
, gaw
)) {
1835 domain_exit(domain
);
1839 /* register pcie-to-pci device */
1841 info
= alloc_devinfo_mem();
1843 domain_exit(domain
);
1846 info
->segment
= segment
;
1848 info
->devfn
= devfn
;
1850 info
->domain
= domain
;
1851 /* This domain is shared by devices under p2p bridge */
1852 domain
->flags
|= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES
;
1854 /* pcie-to-pci bridge already has a domain, uses it */
1856 spin_lock_irqsave(&device_domain_lock
, flags
);
1857 list_for_each_entry(tmp
, &device_domain_list
, global
) {
1858 if (tmp
->segment
== segment
&&
1859 tmp
->bus
== bus
&& tmp
->devfn
== devfn
) {
1860 found
= tmp
->domain
;
1865 free_devinfo_mem(info
);
1866 domain_exit(domain
);
1869 list_add(&info
->link
, &domain
->devices
);
1870 list_add(&info
->global
, &device_domain_list
);
1872 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1876 info
= alloc_devinfo_mem();
1879 info
->segment
= segment
;
1880 info
->bus
= pdev
->bus
->number
;
1881 info
->devfn
= pdev
->devfn
;
1883 info
->domain
= domain
;
1884 spin_lock_irqsave(&device_domain_lock
, flags
);
1885 /* somebody is fast */
1886 found
= find_domain(pdev
);
1887 if (found
!= NULL
) {
1888 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1889 if (found
!= domain
) {
1890 domain_exit(domain
);
1893 free_devinfo_mem(info
);
1896 list_add(&info
->link
, &domain
->devices
);
1897 list_add(&info
->global
, &device_domain_list
);
1898 pdev
->dev
.archdata
.iommu
= info
;
1899 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1902 /* recheck it here, maybe others set it */
1903 return find_domain(pdev
);
static int iommu_identity_mapping;
static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}
static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}
static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
					  rmrr->end_address + 1);
}
#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");

}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */
/* Initialize each context entry as pass through.*/
static int __init init_context_pass_through(void)
{
	struct pci_dev *pdev = NULL;
	struct dmar_domain *domain;
	int ret;

	for_each_pci_dev(pdev) {
		domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_PASS_THROUGH);
		if (ret)
			return ret;
	}
	return 0;
}
static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_work_fn(unsigned long start_pfn,
				    unsigned long end_pfn, void *datax)
{
	int *ret = datax;

	*ret = iommu_domain_identity_map(si_domain,
					 (uint64_t)start_pfn << PAGE_SHIFT,
					 (uint64_t)end_pfn << PAGE_SHIFT);
	return *ret;
}
static int si_domain_init(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	for_each_online_node(nid) {
		work_with_active_regions(nid, si_domain_work_fn, &ret);
		if (ret)
			return ret;
	}

	return 0;
}
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);

static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	list_for_each_entry(info, &si_domain->devices, link)
		if (info->dev == pdev)
			return 1;
	return 0;
}
static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}
static int iommu_prepare_static_identity_mapping(void)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init();
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
		       pci_name(pdev));

		ret = domain_context_mapping(si_domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret)
			return ret;
		ret = domain_add_dev_info(si_domain, pdev);
		if (ret)
			return ret;
	}

	return 0;
}
int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;
	int pass_through = 1;

	/*
	 * In case pass through can not be enabled, iommu tries to use identity
	 * mapping.
	 */
	if (iommu_pass_through)
		iommu_identity_mapping = 1;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		g_num_of_iommus++;
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path all other access are read
		 * only
		 */
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		kfree(g_iommus);
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMU's. Need to Split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
		if (!ecap_pass_through(iommu->ecap))
			pass_through = 0;
	}
	if (iommu_pass_through)
		if (!pass_through) {
			printk(KERN_INFO
			       "Pass Through is not supported by hardware.\n");
			iommu_pass_through = 0;
		}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	/*
	 * If pass through is set and enabled, context entries of all pci
	 * devices are initialized by pass through translation type.
	 */
	if (iommu_pass_through) {
		ret = init_context_pass_through();
		if (ret) {
			printk(KERN_ERR "IOMMU: Pass through init failed.\n");
			iommu_pass_through = 0;
		}
	}

	/*
	 * If pass through is not set or not enabled, setup context entries for
	 * identity mappings for rmrr, gfx, and isa and may fall back to static
	 * identity mapping if iommu_identity_mapping is set.
	 */
	if (!iommu_pass_through) {
		if (iommu_identity_mapping)
			iommu_prepare_static_identity_mapping();
		/*
		 * For each rmrr
		 *   for each dev attached to rmrr
		 *   do
		 *     locate drhd for dev, alloc domain for dev
		 *     allocate free domain
		 *     allocate page table entries for rmrr
		 *     if context not allocated for bus
		 *           allocate and init context
		 *           set present in root table for this bus
		 *     init context with domain, translation etc
		 *    endfor
		 * endfor
		 */
		printk(KERN_INFO "IOMMU: Setting RMRR:\n");
		for_each_rmrr_units(rmrr) {
			for (i = 0; i < rmrr->devices_cnt; i++) {
				pdev = rmrr->devices[i];
				/*
				 * some BIOS lists non-existent devices in
				 * DMAR table.
				 */
				if (!pdev)
					continue;
				ret = iommu_prepare_rmrr_dev(rmrr, pdev);
				if (ret)
					printk(KERN_ERR
				"IOMMU: mapping reserved region failed\n");
			}
		}

		iommu_prepare_isa();
	}

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	host_addr += size + PAGE_SIZE - 1;

	return host_addr >> VTD_PAGE_SHIFT;
}
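
/*
 * Worked example (editorial, illustrative only, 4KiB pages): for a
 * buffer starting at page offset 0x800 with size 0x1000, this returns
 * (0x800 + 0x1000 + 0xfff) >> 12 == 2, since the buffer straddles two
 * VT-d pages even though it is only one page long.
 */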
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
		       nrpages, pci_name(pdev));
		return NULL;
	}

	return iova;
}
static struct dmar_domain *
get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
			DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
			"Allocating domain for %s failed", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
				"Domain context map for %s failed",
				pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}
static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}
/* Check if the pdev needs to go through non-identity map and unmap process. */
static int iommu_no_mapping(struct pci_dev *pdev)
{
	int found;

	if (!iommu_identity_mapping)
		return iommu_dummy(pdev);

	found = identity_mapping(pdev);
	if (found) {
		if (pdev->dma_mask > DMA_BIT_MASK(32))
			return 1;

		/*
		 * 32 bit DMA is removed from si_domain and falls back
		 * to non-identity mapping.
		 */
		domain_remove_one_dev_info(si_domain, pdev);
		printk(KERN_INFO "32bit %s uses non-identity mapping\n",
		       pci_name(pdev));
		return 0;
	}

	/*
	 * In case of a 64 bit DMA device detached from a vm, the device
	 * is put into si_domain for identity mapping.
	 */
	if (pdev->dma_mask > DMA_BIT_MASK(32)) {
		int ret;

		ret = domain_add_dev_info(si_domain, pdev);
		if (!ret) {
			printk(KERN_INFO "64bit %s uses identity mapping\n",
			       pci_name(pdev));
			return 1;
		}
	}

	return iommu_dummy(pdev);
}
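
/*
 * Editor's summary (not in the original source) of the cases above,
 * for when iommu_identity_mapping is enabled:
 *
 *	already in si_domain, 64-bit capable  -> keep identity map (1)
 *	already in si_domain, 32-bit only     -> demote to remapping (0)
 *	not in si_domain, 64-bit capable      -> try to re-add identity (1)
 *	anything else                         -> fall back to iommu_dummy()
 */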
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(pdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings.
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might be a partial page; we should map the
	 * whole page.  Note: if two parts of one page are mapped separately,
	 * we might have two guest_addr mappings to the same host paddr, but
	 * this is not a big problem.
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 paddr >> VTD_PAGE_SHIFT, size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
	       pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}
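
/*
 * Usage sketch (editor's addition, illustrative only): drivers never
 * call intel_map_page() directly; they reach it through the generic
 * DMA API once dma_ops points at intel_dma_ops:
 *
 *	dma_addr_t handle = dma_map_page(&pdev->dev, page, 0, len,
 *					 DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		return -EIO;
 */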
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];

			mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
			mask = ilog2(mask >> VTD_PAGE_SHIFT);
			iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
					iova->pfn_lo << PAGE_SHIFT, mask);
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}
static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
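
/*
 * Editor's note (not in the original source): add_unmap() batches IOVA
 * releases per IOMMU and drains them either when list_size reaches
 * HIGH_WATER_MARK or when the 10ms unmap_timer fires, so one global
 * IOTLB flush covers many unmaps.  The cost is a short window during
 * which a device could still use a stale translation; booting with
 * intel_iommu=strict (see intel_unmap_page() below) closes that window
 * by flushing synchronously on every unmap.
 */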
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(pdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (!iova)
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save the 1/6th of
		 * the cpu used up by the iotlb flush operation...
		 */
	}
}
static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
			       int dir)
{
	intel_unmap_page(dev, dev_addr, size, dir, NULL);
}
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	flags &= ~(GFP_DMA | GFP_DMA32);

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}
static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, order);
}
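
/*
 * Usage sketch (editor's addition, illustrative only): the coherent
 * pair above is reached through the generic API, e.g. for a
 * hypothetical descriptor ring of `len` bytes:
 *
 *	void *ring = dma_alloc_coherent(&pdev->dev, len, &ring_dma,
 *					GFP_KERNEL);
 *	...
 *	dma_free_coherent(&pdev->dev, len, ring, ring_dma);
 */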
static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(pdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (!iova)
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
			      (last_pfn - start_pfn + 1));

	/* free iova */
	__free_iova(&domain->iovad, iova);
}
static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	size_t offset_pfn = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(pdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings.
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
	if (unlikely(ret)) {
		/* clear the page */
		dma_pte_clear_range(domain, start_vpfn,
				    start_vpfn + size - 1);
		/* free page tables */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}
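
/*
 * Usage sketch (editor's addition, illustrative only): a driver reaches
 * intel_map_sg() through the generic scatterlist API; `nents` here is a
 * hypothetical entry count for the example:
 *
 *	int mapped = dma_map_sg(&pdev->dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (!mapped)
 *		goto err;
 *	...
 *	dma_unmap_sg(&pdev->dev, sglist, nents, DMA_FROM_DEVICE);
 */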
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}
struct dma_map_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
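
/*
 * Editor's note (not in the original source): intel_iommu_init() below
 * points the global dma_ops at this table, so generic dma_map_page()
 * and dma_map_sg() calls on a VT-d-enabled x86 kernel land in the
 * functions above.  A sketch of the dispatch, as performed by the
 * generic layer rather than this file:
 *
 *	struct dma_map_ops *ops = get_dma_ops(dev);
 *	ops->map_page(dev, page, offset, size, dir, NULL);
 */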
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain),
					       0,
					       SLAB_HWCACHE_ALIGN,
					       NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}
static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}
static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					     sizeof(struct iova),
					     0,
					     SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}
static int __init iommu_init_mempool(void)
{
	int ret;

	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}
static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	if (dmar_map_gfx)
		return;

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass IOMMU if it is just for gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_active_iommu(iommu, drhd) {
		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);
		iommu_enable_translation(iommu);
	}

	return 0;
}
static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}
static int iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}
static int iommu_resume(struct sys_device *dev)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return -EIO;
	}

	for_each_active_iommu(iommu, drhd) {
		spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
			iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
			iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
			iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
			iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return 0;
}
static struct sysdev_class iommu_sysclass = {
	.name		= "iommu",
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};

static struct sys_device device_iommu = {
	.cls	= &iommu_sysclass,
};

static int __init init_iommu_sysfs(void)
{
	int error;

	error = sysdev_class_register(&iommu_sysclass);
	if (error)
		return error;

	error = sysdev_register(&device_iommu);
	if (error)
		sysdev_class_unregister(&iommu_sysclass);

	return error;
}

#else
static int __init init_iommu_sysfs(void)
{
	return 0;
}
#endif /* CONFIG_SUSPEND */
int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now.
	 * Above initialization will also be used by Interrupt-remapping.
	 */
	if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);

	if (!iommu_pass_through) {
		printk(KERN_INFO
		       "Multi-level page-table translation for DMAR.\n");
		dma_ops = &intel_dma_ops;
	} else
		printk(KERN_INFO
		       "DMAR: Pass through translation for DMAR.\n");

	init_iommu_sysfs();

	register_iommu(&intel_iommu_ops);

	return 0;
}
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		/* No need to compare PCI domain; it has to be the same */
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/*
		 * If there are no other devices under the same iommu owned
		 * by this domain, clear this iommu in iommu_bmp and update
		 * the iommu count and coherency.
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/*
		 * clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}
/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;

static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}
static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}
static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}
static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}
static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;
	int ret;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	ret = domain_add_dev_info(dmar_domain, pdev);
	if (ret)
		return ret;

	ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	return ret;
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}
static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/*
	 * Round up size to the next multiple of PAGE_SIZE, if it and
	 * the low bits of hpa would take us onto the next page.
	 */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}
static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;

	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
			    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}
static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}
static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map_range,
	.unmap		= intel_iommu_unmap_range,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
};
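
/*
 * Usage sketch (editor's addition, illustrative only): once
 * register_iommu(&intel_iommu_ops) has run in intel_iommu_init(), a
 * client such as KVM device assignment drives these hooks through the
 * generic iommu layer:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *	if (!dom)
 *		return -ENOMEM;
 *	iommu_attach_device(dom, &pdev->dev);
 *	iommu_map_range(dom, iova, hpa, size, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_detach_device(dom, &pdev->dev);
 *	iommu_domain_free(dom);
 */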
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);