/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_32BIT_MASK)
#define DMA_64BIT_PFN		IOVA_PFN(DMA_64BIT_MASK)
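/*
 * Illustrative example (editorial, not from the spec): with 4KiB pages,
 * DOMAIN_MAX_ADDR(48) == 0xffffffffffff, and DMA_32BIT_PFN is
 * IOVA_PFN(0xffffffff) == 0xfffff, i.e. the last page frame a
 * 32-bit-only DMA mask can address.
 */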
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;
/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}
static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

#define CONTEXT_TT_MULTI_LEVEL 0

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-11: available
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
	return (pte->val & VTD_PAGE_MASK);
}

static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
{
	pte->val |= (addr & VTD_PAGE_MASK);
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}
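/*
 * Illustrative sketch (editorial, not part of the driver) of how the
 * helpers above compose a leaf PTE for a read/write mapping:
 *
 *	struct dma_pte pte;
 *	dma_clear_pte(&pte);
 *	dma_set_pte_addr(&pte, phys & VTD_PAGE_MASK);	 // bits 12-63
 *	dma_set_pte_prot(&pte, DMA_PTE_READ | DMA_PTE_WRITE); // bits 0-1
 *
 * dma_pte_present() then reports true because bits 0-1 are non-zero.
 */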
/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine, more than one devices
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/

	struct list_head devices; 	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};
/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIE-to-PCI bridge */
	struct dmar_domain *domain; /* pointer to domain */
};
static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;
static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
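/*
 * Usage example (illustrative): booting with
 *
 *	intel_iommu=on,strict
 *
 * enables the IOMMU and disables the batched (deferred) IOTLB flush,
 * so every unmap is invalidated synchronously.
 */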
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;
static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}
static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}
static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}
static inline int width_to_agaw(int width);

/* calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}
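/*
 * Worked example (illustrative): DEFAULT_DOMAIN_ADDRESS_WIDTH is 48,
 * so the loop starts at agaw = (48 - 30) / 9 = 2 (a 4-level table).
 * If SAGAW only advertises bit 1, the loop falls back to agaw 1,
 * i.e. a 3-level table covering a 39-bit address space.
 */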
/* in native case, each domain is related to only one iommu */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}
424 /* "Coherency" capability may be different across iommus */
425 static void domain_update_iommu_coherency(struct dmar_domain
*domain
)
429 domain
->iommu_coherency
= 1;
431 i
= find_first_bit(&domain
->iommu_bmp
, g_num_of_iommus
);
432 for (; i
< g_num_of_iommus
; ) {
433 if (!ecap_coherent(g_iommus
[i
]->ecap
)) {
434 domain
->iommu_coherency
= 0;
437 i
= find_next_bit(&domain
->iommu_bmp
, g_num_of_iommus
, i
+1);
static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}
static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}
/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}
static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}
static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (12 + (level - 1) * LEVEL_STRIDE);
}

static inline int address_level_offset(u64 addr, int level)
{
	return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
}

static inline u64 level_mask(int level)
{
	return ((u64)-1 << level_to_offset_bits(level));
}

static inline u64 level_size(int level)
{
	return ((u64)1 << level_to_offset_bits(level));
}

static inline u64 align_to_level(u64 addr, int level)
{
	return ((addr + level_size(level) - 1) & level_mask(level));
}
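/*
 * Worked example (illustrative): with LEVEL_STRIDE = 9 and 4KiB pages,
 * level 1 indexes address bits 12-20 and level 2 bits 21-29, so:
 *
 *	address_level_offset(0x400000, 2) == (0x400000 >> 21) & 0x1ff == 2
 *	level_size(2)  == 1 << 21 == 2MiB
 *	align_to_level(0x401000, 2) == 0x600000 (next 2MiB boundary)
 */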
static struct dma_pte *addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;
	unsigned long flags;

	BUG_ON(!domain->pgd);

	addr &= (((u64)1) << addr_width) - 1;
	parent = domain->pgd;

	spin_lock_irqsave(&domain->mapping_lock, flags);
	while (level > 0) {
		void *tmp_page;

		offset = address_level_offset(addr, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			tmp_page = alloc_pgtable_page();

			if (!tmp_page) {
				spin_unlock_irqrestore(&domain->mapping_lock,
					flags);
				return NULL;
			}
			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
			dma_set_pte_addr(pte, virt_to_phys(tmp_page));
			/*
			 * high level table always sets r/w, last level page
			 * table control read/write
			 */
			dma_set_pte_readable(pte);
			dma_set_pte_writable(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	spin_unlock_irqrestore(&domain->mapping_lock, flags);
	return pte;
}
/* return address's pte at specific level */
static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
		int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = address_level_offset(addr, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}
/* clear one page's page table */
static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
{
	struct dma_pte *pte = NULL;

	/* get last level pte */
	pte = dma_addr_level_pte(domain, addr, 1);

	if (pte) {
		dma_clear_pte(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
	}
}
/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;
	/* in case it's partial page */
	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;

	/* we don't need lock here, nobody else touches the iova range */
	while (start < end) {
		dma_pte_clear_one(domain, start);
		start += VTD_PAGE_SIZE;
	}
}
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
	u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	u64 tmp;

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;

	/* we don't need lock here, nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start, level);
		if (tmp >= end || (tmp + level_size(level) > end))
			return;

		while (tmp < end) {
			pte = dma_addr_level_pte(domain, tmp, level);
			if (pte) {
				free_pgtable_page(
					phys_to_virt(dma_pte_addr(pte)));
				dma_clear_pte(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
			tmp += level_size(level);
		}
		level++;
	}
	/* free pgd */
	if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 cmd, sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	cmd = iommu->gcmd | DMA_GCMD_SRTP;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;
	val = iommu->gcmd | DMA_GCMD_WBF;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(val, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
			readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines if we need a write buffer flush */
static int __iommu_flush_context(struct intel_iommu *iommu,
	u16 did, u16 source_id, u8 function_mask, u64 type,
	int non_present_entry_flush)
{
	u64 val = 0;
	unsigned long flag;

	/*
	 * In the non-present entry flush case, if hardware doesn't cache
	 * non-present entries we do nothing; if hardware does cache
	 * non-present entries, we flush entries of domain 0 (the domain id
	 * used to cache any non-present entries).
	 */
	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* flush context entry will implicitly flush write buffer */
	return 0;
}
/* return value determines if we need a write buffer flush */
static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
	u64 addr, unsigned int size_order, u64 type,
	int non_present_entry_flush)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	/*
	 * In the non-present entry flush case, if hardware doesn't cache
	 * non-present entries we do nothing; if hardware does cache
	 * non-present entries, we flush entries of domain 0 (the domain id
	 * used to cache any non-present entries).
	 */
	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
	/* flush iotlb entry will implicitly flush write buffer */
	return 0;
}
static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
	u64 addr, unsigned int pages, int non_present_entry_flush)
{
	unsigned int mask;

	BUG_ON(addr & (~VTD_PAGE_MASK));
	BUG_ON(pages == 0);

	/* Fallback to domain selective flush if no PSI support */
	if (!cap_pgsel_inv(iommu->cap))
		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH,
						non_present_entry_flush);

	/*
	 * PSI requires page size to be 2 ^ x, and the base address is
	 * naturally aligned to the size
	 */
	mask = ilog2(__roundup_pow_of_two(pages));
	/* Fallback to domain selective flush if size is too big */
	if (mask > cap_max_amask_val(iommu->cap))
		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
			DMA_TLB_DSI_FLUSH, non_present_entry_flush);

	return iommu->flush.flush_iotlb(iommu, did, addr, mask,
					DMA_TLB_PSI_FLUSH,
					non_present_entry_flush);
}
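/*
 * Worked example (illustrative): a 5-page unmap rounds up to 8 pages,
 * so mask = ilog2(8) = 3 and the hardware invalidates a naturally
 * aligned 8-page (32KiB with 4KiB pages) region containing addr.
 */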
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (sts & DMA_GSTS_TES), sts);

	iommu->gcmd |= DMA_GCMD_TE;
	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}
static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}
static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);

		spin_lock_irqsave(&domain->iommu_lock, flags);
		if (--domain->iommu_count == 0) {
			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_exit(domain);
			else
				domain_exit(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags);

		i = find_next_bit(iommu->domain_ids,
			cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}
	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}
static struct dmar_domain *iommu_alloc_domain(struct intel_iommu *iommu)
{
	unsigned long num;
	unsigned long ndomains;
	struct dmar_domain *domain;
	unsigned long flags;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);
	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_domain_mem(domain);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return NULL;
	}

	set_bit(num, iommu->domain_ids);
	domain->id = num;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	domain->flags = 0;
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return domain;
}
static void iommu_free_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct intel_iommu *iommu;

	iommu = domain_get_iommu(domain);

	spin_lock_irqsave(&iommu->lock, flags);
	clear_bit(domain->id, iommu->domain_ids);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;
	u64 addr, size;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
		&reserved_alloc_key);
	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			addr = r->start;
			addr &= PAGE_MASK;
			size = r->end - addr;
			size = PAGE_ALIGN(size);
			iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
				IOVA_PFN(size + addr) - 1);
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}
static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
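/*
 * Worked example (illustrative): gaw = 39 gives (39 - 12) % 9 == 0,
 * so the width is already a whole number of 9-bit levels above the
 * 12-bit page offset and is returned unchanged; gaw = 40 rounds up
 * to 48.
 */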
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}
static void domain_exit(struct dmar_domain *domain)
{
	u64 end;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);
	end = DOMAIN_MAX_ADDR(domain->gaw);
	end = end & (~PAGE_MASK);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, end);

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, end);

	iommu_free_domain(domain);
	free_domain_mem(domain);
}
static int domain_context_mapping_one(struct dmar_domain *domain,
		u8 bus, u8 devfn)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	BUG_ON(!domain->pgd);

	iommu = device_to_iommu(bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					    cap_ndoms(iommu->cap), num+1);
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);
	context_set_address_width(context, iommu->agaw);
	context_set_address_root(context, virt_to_phys(pgd));
	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/* it's a non-present to present mapping */
	if (iommu->flush.flush_context(iommu, domain->id,
		(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
		DMA_CCMD_DEVICE_INVL, 1))
		iommu_flush_write_buffer(iommu);
	else
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_coherency(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pdev->bus->number,
		pdev->devfn);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain, parent->bus->number,
			parent->devfn);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
		return domain_context_mapping_one(domain,
			tmp->subordinate->number, 0);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
			tmp->bus->number, tmp->devfn);
}
static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu,
		pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
			parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(iommu,
			tmp->subordinate->number, 0);
	else
		return device_context_mapped(iommu,
			tmp->bus->number, tmp->devfn);
}
static int
domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
			u64 hpa, size_t size, int prot)
{
	u64 start_pfn, end_pfn;
	struct dma_pte *pte;
	int index;
	int addr_width = agaw_to_width(domain->agaw);

	hpa &= (((u64)1) << addr_width) - 1;

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;
	iova &= PAGE_MASK;
	start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
	end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
	index = 0;
	while (start_pfn < end_pfn) {
		pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
		if (!pte)
			return -ENOMEM;
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		BUG_ON(dma_pte_addr(pte));
		dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
		dma_set_pte_prot(pte, prot);
		domain_flush_cache(domain, pte, sizeof(*pte));
		start_pfn++;
		index++;
	}
	return 0;
}
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL, 0);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH, 0);
}
static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu = device_to_iommu(info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
/*
 * find_domain
 * Note: struct pci_dev->dev.archdata.iommu stores the domain info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (dev_tmp->is_pcie) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, uses it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		return NULL;
	}
	iommu = drhd->iommu;

	domain = iommu_alloc_domain(iommu);
	if (!domain)
		goto error;

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, uses it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody is fast */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}
static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	unsigned long size;
	unsigned long long base;
	int ret;

	printk(KERN_INFO
		"IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
		pci_name(pdev), start, end);
	/* page table init */
	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* The address might not be aligned */
	base = start & PAGE_MASK;
	size = end - base;
	size = PAGE_ALIGN(size);
	if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
			IOVA_PFN(base + size) - 1)) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		ret = -ENOMEM;
		goto error;
	}

	pr_debug("Mapping reserved region %lx@%llx for %s\n",
		size, base, pci_name(pdev));
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, base, base + size);

	ret = domain_page_mapping(domain, base, base, size,
		DMA_PTE_READ|DMA_PTE_WRITE);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev);
	if (!ret)
		return 0;
error:
	domain_exit(domain);
	return ret;
}
*rmrr
,
1705 struct pci_dev
*pdev
)
1707 if (pdev
->dev
.archdata
.iommu
== DUMMY_DEVICE_DOMAIN_INFO
)
1709 return iommu_prepare_identity_map(pdev
, rmrr
->base_address
,
1710 rmrr
->end_address
+ 1);
#ifdef CONFIG_DMAR_GFX_WA
struct iommu_prepare_data {
	struct pci_dev *pdev;
	int ret;
};

static int __init iommu_prepare_work_fn(unsigned long start_pfn,
					 unsigned long end_pfn, void *datax)
{
	struct iommu_prepare_data *data;

	data = (struct iommu_prepare_data *)datax;

	data->ret = iommu_prepare_identity_map(data->pdev,
				start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	return data->ret;
}

static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
{
	int nid;
	struct iommu_prepare_data data;

	data.pdev = pdev;
	data.ret = 0;

	for_each_online_node(nid) {
		work_with_active_regions(nid, iommu_prepare_work_fn, &data);
		if (data.ret)
			return data.ret;
	}
	return data.ret;
}

static void __init iommu_prepare_gfx_mapping(void)
{
	struct pci_dev *pdev = NULL;
	int ret;

	for_each_pci_dev(pdev) {
		if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
				!IS_GFX_DEVICE(pdev))
			continue;
		printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
			pci_name(pdev));
		ret = iommu_prepare_with_active_regions(pdev);
		if (ret)
			printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
	}
}
#else /* !CONFIG_DMAR_GFX_WA */
static inline void iommu_prepare_gfx_mapping(void)
{
	return;
}
#endif
#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

	if (ret)
		printk("IOMMU: Failed to create 0-16M identity map, "
			"floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */
static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		g_num_of_iommus++;
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path; all other accesses are
		 * read only
		 */
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs. Need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *    endfor
	 * endfor
	 */
	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			pdev = rmrr->devices[i];
			/* some BIOSes list non-existent devices in the DMAR
			 * table */
			if (!pdev)
				continue;
			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
			if (ret)
				printk(KERN_ERR
				 "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_gfx_mapping();

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
					   0);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
					 0);
		iommu_disable_protect_mem_regions(iommu);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}
static inline u64 aligned_size(u64 host_addr, size_t size)
{
	u64 addr;
	addr = (host_addr & (~PAGE_MASK)) + size;
	return PAGE_ALIGN(addr);
}
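/*
 * Worked example (illustrative): host_addr = 0x1ff0, size = 0x20.
 * The in-page offset is 0xff0, so 0xff0 + 0x20 = 0x1010 rounds up to
 * two 4KiB pages even though only 0x20 bytes were requested.
 */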
static struct iova *
iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
{
	struct iova *piova;

	/* Make sure it's in range */
	end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
	if (!size || (IOVA_START_ADDR + size > end))
		return NULL;

	piova = alloc_iova(&domain->iovad,
			size >> PAGE_SHIFT, IOVA_PFN(end), 1);
	return piova;
}
static struct iova *
__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
		   size_t size, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
		iova = iommu_alloc_iova(domain, size, dma_mask);
	else {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_32BIT_MASK and if that fails then try allocating
		 * from higher range
		 */
		iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
		if (!iova)
			iova = iommu_alloc_iova(domain, size, dma_mask);
	}

	if (!iova) {
		printk(KERN_ERR "Allocating iova for %s failed", pci_name(pdev));
		return NULL;
	}

	return iova;
}
static struct dmar_domain *
get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
			DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
			"Allocating domain for %s failed", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev);
		if (ret) {
			printk(KERN_ERR
				"Domain context map for %s failed",
				pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova = NULL;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_size((u64)paddr, size);

	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova)
		goto error;

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr - (paddr + size) might be a partial page, so we should map
	 * the whole page.  Note: if two parts of one page are separately
	 * mapped, we might have two guest_addr mappings to the same host
	 * paddr, but this is not a big problem
	 */
	ret = domain_page_mapping(domain, start_paddr,
		((u64)paddr) & PAGE_MASK, size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping */
	ret = iommu_flush_iotlb_psi(iommu, domain->id,
			start_paddr, size >> VTD_PAGE_SHIFT, 1);
	if (ret)
		iommu_flush_write_buffer(iommu);

	return start_paddr + ((u64)paddr & (~PAGE_MASK));

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %lx@%llx dir %d --- failed\n",
		pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}
dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
			    size_t size, int dir)
{
	return __intel_map_single(hwdev, paddr, size, dir,
				  to_pci_dev(hwdev)->dma_mask);
}
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (deferred_flush[i].next) {
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
						 DMA_TLB_GLOBAL_FLUSH, 0);
			for (j = 0; j < deferred_flush[i].next; j++) {
				__free_iova(&deferred_flush[i].domain[j]->iovad,
						deferred_flush[i].iova[j]);
			}
			deferred_flush[i].next = 0;
		}
	}

	list_size = 0;
}
static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
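/*
 * Design note (editorial): unmaps are batched per-IOMMU and released
 * either when the 10ms timer fires or when HIGH_WATER_MARK entries
 * pile up, trading a short window of stale IOTLB entries for far
 * fewer global flushes; "intel_iommu=strict" bypasses this path.
 */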
void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
			int dir)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_addr;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return;
	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (!iova)
		return;

	start_addr = iova->pfn_lo << PAGE_SHIFT;
	size = aligned_size((u64)dev_addr, size);

	pr_debug("Device %s unmapping: %lx@%llx\n",
		pci_name(pdev), size, (unsigned long long)start_addr);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_addr, start_addr + size);
	/* free page tables */
	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
	if (intel_iommu_strict) {
		if (iommu_flush_iotlb_psi(iommu,
			domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
			iommu_flush_write_buffer(iommu);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}
void *intel_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	flags &= ~(GFP_DMA | GFP_DMA32);

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}
void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			 dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, order);
}
#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))

void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
		    int nelems, int dir)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_addr;
	struct iova *iova;
	size_t size = 0;
	void *addr;
	struct scatterlist *sg;
	struct intel_iommu *iommu;

	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (!iova)
		return;
	for_each_sg(sglist, sg, nelems, i) {
		addr = SG_ENT_VIRT_ADDRESS(sg);
		size += aligned_size((u64)addr, sg->length);
	}

	start_addr = iova->pfn_lo << PAGE_SHIFT;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_addr, start_addr + size);
	/* free page tables */
	dma_pte_free_pagetable(domain, start_addr, start_addr + size);

	if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
			size >> VTD_PAGE_SHIFT, 0))
		iommu_flush_write_buffer(iommu);

	/* free iova */
	__free_iova(&domain->iovad, iova);
}
static int intel_nontranslate_map_sg(struct device *hwdev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
		sg->dma_length = sg->length;
	}
	return nelems;
}
int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
		 int dir)
{
	void *addr;
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	size_t offset = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_addr;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i) {
		addr = SG_ENT_VIRT_ADDRESS(sg);
		addr = (void *)virt_to_phys(addr);
		size += aligned_size((u64)addr, sg->length);
	}

	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_addr = iova->pfn_lo << PAGE_SHIFT;
	offset = 0;
	for_each_sg(sglist, sg, nelems, i) {
		addr = SG_ENT_VIRT_ADDRESS(sg);
		addr = (void *)virt_to_phys(addr);
		size = aligned_size((u64)addr, sg->length);
		ret = domain_page_mapping(domain, start_addr + offset,
			((u64)addr) & PAGE_MASK,
			size, prot);
		if (ret) {
			/* clear the page */
			dma_pte_clear_range(domain, start_addr,
				  start_addr + offset);
			/* free page tables */
			dma_pte_free_pagetable(domain, start_addr,
				  start_addr + offset);
			/* free iova */
			__free_iova(&domain->iovad, iova);
			return 0;
		}
		sg->dma_address = start_addr + offset +
				((u64)addr & (~PAGE_MASK));
		sg->dma_length = sg->length;
		offset += size;
	}

	/* it's a non-present to present mapping */
	if (iommu_flush_iotlb_psi(iommu, domain->id,
			start_addr, offset >> VTD_PAGE_SHIFT, 1))
		iommu_flush_write_buffer(iommu);
	return nelems;
}
static struct dma_mapping_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
	.map_single = intel_map_single,
	.unmap_single = intel_unmap_single,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
};
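/*
 * Editorial note: these ops become the global dma_ops in
 * intel_iommu_init() below, so dma_map_single()/dma_map_sg() calls
 * from drivers end up in the routines above once the IOMMU is
 * enabled.
 */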
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					 sizeof(struct dmar_domain),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
					 sizeof(struct device_domain_info),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					 sizeof(struct iova),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}
static int __init iommu_init_mempool(void)
{
	int ret;
	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	if (dmar_map_gfx)
		return;

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
				!IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass IOMMU if it is just for gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}
int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now.
	 * Above initialization will also be used by Interrupt-remapping.
	 */
	if (no_iommu || swiotlb || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
	force_iommu = 1;
	dma_ops = &intel_dma_ops;

	register_iommu(&intel_iommu_ops);

	return 0;
}

static int vm_domain_add_dev_info(struct dmar_domain *domain,
				  struct pci_dev *pdev)
{
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}

static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
					  struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_detach_dev(iommu, info->bus, info->devfn);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there are no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp,
		 * update iommu count and coherency
		 */
		if (device_to_iommu(info->bus, info->devfn) == iommu)
			found = 1;
	}

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_coherency(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu = device_to_iommu(info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and coherency
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_coherency(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}

/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;

static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}

static int vm_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

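/*
 * Worked example (illustrative): with the default guest width of 48
 * bits, guestwidth_to_adjustwidth(48) returns 48 unchanged, and
 * width_to_agaw(48) yields agaw 2, since a VT-d page-table width is
 * 30 + 9 * agaw bits; agaw 2 therefore selects a 4-level page table
 * covering the full 48-bit address space.
 */
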
static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}

static void vm_domain_exit(struct dmar_domain *domain)
{
	u64 end;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);
	end = DOMAIN_MAX_ADDR(domain->gaw);
	end = end & (~VTD_PAGE_MASK);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, end);

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, end);

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}

static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;
	int ret;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	ret = domain_context_mapping(dmar_domain, pdev);
	if (ret)
		return ret;

	ret = vm_domain_add_dev_info(dmar_domain, pdev);
	return ret;
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	vm_domain_remove_one_dev_info(dmar_domain, pdev);
}

static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;

	max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}

	ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
	return ret;
}

static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	dma_addr_t base;

	/* The address might not be aligned */
	base = iova & VTD_PAGE_MASK;
	size = VTD_PAGE_ALIGN(size);
	dma_pte_clear_range(dmar_domain, base, base + size);

	if (dmar_domain->max_addr == base + size)
		dmar_domain->max_addr = base;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = addr_to_dma_pte(dmar_domain, iova);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map_range,
	.unmap		= intel_iommu_unmap_range,
	.iova_to_phys	= intel_iommu_iova_to_phys,
};

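/*
 * Illustrative sketch only: a consumer such as KVM device assignment
 * drives the callbacks above through the generic IOMMU API from
 * <linux/iommu.h> once register_iommu() has run.  The device pointer,
 * guest physical address and IOVA below are hypothetical.
 */
#if 0
static int example_assign_device(struct device *dev)
{
	struct iommu_domain *dom;
	int ret;

	dom = iommu_domain_alloc();		/* -> intel_iommu_domain_init */
	if (!dom)
		return -ENOMEM;

	ret = iommu_attach_device(dom, dev);	/* -> intel_iommu_attach_device */
	if (ret)
		goto out_free;

	/* map one page at IOVA 0 (-> intel_iommu_map_range) */
	ret = iommu_map_range(dom, 0, 0x100000, VTD_PAGE_SIZE,
			      IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	return 0;

out_detach:
	iommu_detach_device(dom, dev);		/* -> intel_iommu_detach_device */
out_free:
	iommu_domain_free(dom);			/* -> intel_iommu_domain_destroy */
	return ret;
}
#endif
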
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);