/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
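/*
 * Example of the arithmetic above: with the default 48-bit domain width,
 * DOMAIN_MAX_ADDR(48) evaluates to 0xffffffffffff, and DMA_32BIT_PFN is
 * the page frame number of the highest 32-bit address, i.e.
 * 0xffffffff >> PAGE_SHIFT.
 */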
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;
/*
 * 0: present
 * 12-63: Context Ptr (12 - (haw-1))
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}
static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};
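/*
 * The translation structures above form a two-level lookup: the per-iommu
 * root table is indexed by bus number (256 root entries per page), and
 * each present root entry points to a context table indexed by devfn
 * (256 context entries), so every bus:devfn pair resolves to exactly one
 * context entry.
 */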
static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}
#define CONTEXT_TT_MULTI_LEVEL 0

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
/*
 * 0: readable
 * 1: writable
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
	return (pte->val & VTD_PAGE_MASK);
}

static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
{
	pte->val |= (addr & VTD_PAGE_MASK);
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}
/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/

	struct list_head devices; 	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};
/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
	struct dmar_domain *domain; /* pointer to domain */
};
static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;
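/*
 * Unmaps are batched to amortize the cost of IOTLB invalidation: freed
 * iovas are queued per-iommu in deferred_flush and released either from
 * the unmap_timer or, once HIGH_WATER_MARK entries accumulate, directly
 * from add_unmap() after a global IOTLB flush (see flush_unmaps() below).
 */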
/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);
#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;
static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}
static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}
static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}
static inline int width_to_agaw(int width);

/* calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw and fall
 * back to a smaller supported agaw for iommus that don't support the
 * default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}
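/*
 * Example: DEFAULT_DOMAIN_ADDRESS_WIDTH of 48 bits corresponds to agaw 2
 * (a 4-level page table).  If bit 2 of SAGAW is clear, the loop above
 * falls back to agaw 1 (39 bits, 3 levels), and a return value of -1
 * means the hardware supports none of the widths we can use.
 */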
/* in native case, each domain is related to only one iommu */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}
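/*
 * device_to_iommu() below matches both devices and bridges: a request
 * from a device behind a PCIe-to-PCI bridge is issued with the bridge's
 * source-id, so a bus number that falls inside a listed bridge's
 * [secondary, subordinate] bus range also selects that DRHD unit's iommu.
 */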
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}
static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}
/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}
static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}
static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (12 + (level - 1) * LEVEL_STRIDE);
}

static inline int address_level_offset(u64 addr, int level)
{
	return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
}

static inline u64 level_mask(int level)
{
	return ((u64)-1 << level_to_offset_bits(level));
}

static inline u64 level_size(int level)
{
	return ((u64)1 << level_to_offset_bits(level));
}

static inline u64 align_to_level(u64 addr, int level)
{
	return ((addr + level_size(level) - 1) & level_mask(level));
}
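/*
 * Worked example for the helpers above: a 48-bit adjusted width gives
 * agaw 2 and agaw_to_level() == 4, with each level stripping
 * LEVEL_STRIDE == 9 bits.  Level 4 then indexes address bits 39-47,
 * level 3 bits 30-38, level 2 bits 21-29 and level 1 bits 12-20;
 * level_size(1) is 4KB and level_size(2) is 2MB.
 */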
static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;
	unsigned long flags;

	BUG_ON(!domain->pgd);

	addr &= (((u64)1) << addr_width) - 1;
	parent = domain->pgd;

	spin_lock_irqsave(&domain->mapping_lock, flags);
	while (level > 0) {
		void *tmp_page;

		offset = address_level_offset(addr, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			tmp_page = alloc_pgtable_page();
			if (!tmp_page) {
				spin_unlock_irqrestore(&domain->mapping_lock,
					flags);
				return NULL;
			}
			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
			dma_set_pte_addr(pte, virt_to_phys(tmp_page));
			/*
			 * high level table always sets r/w, last level page
			 * table control read/write
			 */
			dma_set_pte_readable(pte);
			dma_set_pte_writable(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	spin_unlock_irqrestore(&domain->mapping_lock, flags);
	return pte;
}
/* return address's pte at specific level */
static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
		int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = address_level_offset(addr, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}
/* clear one page's page table */
static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
{
	struct dma_pte *pte = NULL;

	/* get last level pte */
	pte = dma_addr_level_pte(domain, addr, 1);

	if (pte) {
		dma_clear_pte(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
	}
}
/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);
	int npages;

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;
	/* in case it's partial page */
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	npages = (end - start) / VTD_PAGE_SIZE;

	/* we don't need lock here, nobody else touches the iova range */
	while (npages--) {
		dma_pte_clear_one(domain, start);
		start += VTD_PAGE_SIZE;
	}
}
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
	u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	u64 tmp;

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;

	/* we don't need lock here, nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start, level);
		if (tmp >= end || (tmp + level_size(level) > end))
			return;

		while (tmp < end) {
			pte = dma_addr_level_pte(domain, tmp, level);
			if (pte) {
				free_pgtable_page(
					phys_to_virt(dma_pte_addr(pte)));
				dma_clear_pte(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
			tmp += level_size(level);
		}
		level++;
	}
	/* free pgd */
	if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 cmd, sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	cmd = iommu->gcmd | DMA_GCMD_SRTP;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;
	val = iommu->gcmd | DMA_GCMD_WBF;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(val, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
			readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determines if we need a write buffer flush */
static int __iommu_flush_context(struct intel_iommu *iommu,
	u16 did, u16 source_id, u8 function_mask, u64 type,
	int non_present_entry_flush)
{
	u64 val = 0;
	unsigned long flag;

	/*
	 * In the non-present entry flush case, if hardware doesn't cache
	 * non-present entries we do nothing; if hardware does cache
	 * non-present entries, we flush entries of domain 0 (the domain id
	 * used to cache any non-present entries).
	 */
	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* flush context entry will implicitly flush write buffer */
	return 0;
}
/* return value determines if we need a write buffer flush */
static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
	u64 addr, unsigned int size_order, u64 type,
	int non_present_entry_flush)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	/*
	 * In the non-present entry flush case, if hardware doesn't cache
	 * non-present entries we do nothing; if hardware does cache
	 * non-present entries, we flush entries of domain 0 (the domain id
	 * used to cache any non-present entries).
	 */
	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
	/* flush iotlb entry will implicitly flush write buffer */
	return 0;
}
static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
	u64 addr, unsigned int pages, int non_present_entry_flush)
{
	unsigned int mask;

	BUG_ON(addr & (~VTD_PAGE_MASK));

	/* Fallback to domain selective flush if no PSI support */
	if (!cap_pgsel_inv(iommu->cap))
		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH,
						non_present_entry_flush);

	/*
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	mask = ilog2(__roundup_pow_of_two(pages));
	/* Fallback to domain selective flush if size is too big */
	if (mask > cap_max_amask_val(iommu->cap))
		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
			DMA_TLB_DSI_FLUSH, non_present_entry_flush);

	return iommu->flush.flush_iotlb(iommu, did, addr, mask,
					DMA_TLB_PSI_FLUSH,
					non_present_entry_flush);
}
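/*
 * Example for the PSI mask computation above: a 5-page request is rounded
 * up to 8 pages, so mask = ilog2(8) = 3 and the hardware invalidates the
 * naturally aligned 8-page region containing addr.
 */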
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (sts & DMA_GSTS_TES), sts);

	iommu->gcmd |= DMA_GCMD_TE;
	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}
static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}
static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);

		spin_lock_irqsave(&domain->iommu_lock, flags);
		if (--domain->iommu_count == 0) {
			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_exit(domain);
			else
				domain_exit(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags);

		i = find_next_bit(iommu->domain_ids,
			cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}
	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}
static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
{
	unsigned long num;
	unsigned long ndomains;
	struct dmar_domain *domain;
	unsigned long flags;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);
	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_domain_mem(domain);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return NULL;
	}

	set_bit(num, iommu->domain_ids);
	domain->id = num;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	domain->flags = 0;
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return domain;
}
static void iommu_free_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct intel_iommu *iommu;

	iommu = domain_get_iommu(domain);

	spin_lock_irqsave(&iommu->lock, flags);
	clear_bit(domain->id, iommu->domain_ids);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;
	u64 addr, size;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
		&reserved_alloc_key);
	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			addr = r->start;
			addr &= PAGE_MASK;
			size = r->end - addr;
			size = PAGE_ALIGN(size);
			iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
				IOVA_PFN(size + addr) - 1);
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}
static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
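/*
 * Example: guestwidth_to_adjustwidth() rounds a guest width up to the
 * nearest width expressible by whole 9-bit levels above the 4KB page:
 * 48 stays 48 ((48-12)%9 == 0), while 40 becomes 48 (r == 1, 40+9-1).
 */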
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}
static void domain_exit(struct dmar_domain *domain)
{
	u64 end;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);
	end = DOMAIN_MAX_ADDR(domain->gaw);
	end = end & (~PAGE_MASK);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, end);

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, end);

	iommu_free_domain(domain);
	free_domain_mem(domain);
}
static int domain_context_mapping_one(struct dmar_domain *domain,
				      int segment, u8 bus, u8 devfn)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	BUG_ON(!domain->pgd);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					    cap_ndoms(iommu->cap), num+1);
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);
	context_set_address_width(context, iommu->agaw);
	context_set_address_root(context, virt_to_phys(pgd));
	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/* it's a non-present to present mapping */
	if (iommu->flush.flush_context(iommu, domain->id,
		(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
		DMA_CCMD_DEVICE_INVL, 1))
		iommu_flush_write_buffer(iommu);
	else
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}
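/*
 * A device's DMA requests may be issued with the source-id of a bridge
 * between it and the root port, so domain_context_mapping() below installs
 * a context entry not just for the device itself but for every intervening
 * bridge, and for a PCIe-to-PCI bridge's secondary bus (devfn 0).
 */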
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
						  pci_domain_nr(tmp->bus),
						  tmp->bus->number,
						  tmp->devfn);
}
static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}
static int
domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
		    u64 hpa, size_t size, int prot)
{
	u64 start_pfn, end_pfn;
	struct dma_pte *pte;
	int index;
	int addr_width = agaw_to_width(domain->agaw);

	hpa &= (((u64)1) << addr_width) - 1;

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;
	iova &= PAGE_MASK;
	start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
	end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
	index = 0;
	while (start_pfn < end_pfn) {
		pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
		if (!pte)
			return -ENOMEM;
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		BUG_ON(dma_pte_addr(pte));
		dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
		dma_set_pte_prot(pte, prot);
		if (prot & DMA_PTE_SNP)
			dma_set_pte_snp(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
		start_pfn++;
		index++;
	}
	return 0;
}
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL, 0);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH, 0);
}
static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
/*
 * find_domain
 * Note: struct pci_dev->dev.archdata.iommu stores the info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (dev_tmp->is_pcie) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, uses it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		return NULL;
	}
	iommu = drhd->iommu;

	domain = iommu_alloc_domain(iommu);
	if (!domain)
		goto error;

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, uses it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody is fast */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}
static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	unsigned long size;
	unsigned long long base;
	int ret;

	printk(KERN_INFO
		"IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
		pci_name(pdev), start, end);
	/* page table init */
	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* The address might not be aligned */
	base = start & PAGE_MASK;
	size = end - base;
	size = PAGE_ALIGN(size);
	if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
			IOVA_PFN(base + size) - 1)) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		ret = -ENOMEM;
		goto error;
	}

	pr_debug("Mapping reserved region %lx@%llx for %s\n",
		size, base, pci_name(pdev));
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, base, base + size);

	ret = domain_page_mapping(domain, base, base, size,
				  DMA_PTE_READ|DMA_PTE_WRITE);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev);
	if (!ret)
		return 0;
error:
	domain_exit(domain);
	return ret;
}
static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
	struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
		rmrr->end_address + 1);
}
#ifdef CONFIG_DMAR_GFX_WA
struct iommu_prepare_data {
	struct pci_dev *pdev;
	int ret;
};

static int __init iommu_prepare_work_fn(unsigned long start_pfn,
					unsigned long end_pfn, void *datax)
{
	struct iommu_prepare_data *data;

	data = (struct iommu_prepare_data *)datax;
	data->ret = iommu_prepare_identity_map(data->pdev,
				start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	return data->ret;
}

static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
{
	int nid;
	struct iommu_prepare_data data;

	data.pdev = pdev;
	data.ret = 0;

	for_each_online_node(nid) {
		work_with_active_regions(nid, iommu_prepare_work_fn, &data);
		if (data.ret)
			return data.ret;
	}
	return data.ret;
}

static void __init iommu_prepare_gfx_mapping(void)
{
	struct pci_dev *pdev = NULL;
	int ret;

	for_each_pci_dev(pdev) {
		if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
				!IS_GFX_DEVICE(pdev))
			continue;
		printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
			pci_name(pdev));
		ret = iommu_prepare_with_active_regions(pdev);
		if (ret)
			printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
	}
}
#else /* !CONFIG_DMAR_GFX_WA */
static inline void iommu_prepare_gfx_mapping(void)
{
	return;
}
#endif
#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16M identity map, "
			"floppy might not work\n");

}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_DMAR_FLPY_WA */
static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		g_num_of_iommus++;
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path all other access are read
		 * only
		 */
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs; need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
	}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

#ifdef CONFIG_INTR_REMAP
	if (!intr_remapping_enabled) {
		ret = enable_intr_remapping(0);
		if (ret)
			printk(KERN_ERR
			       "IOMMU: enable interrupt remapping failed\n");
	}
#endif

	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *   endfor
	 * endfor
	 */
	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			pdev = rmrr->devices[i];
			/* some BIOS lists non-exist devices in DMAR table */
			if (!pdev)
				continue;
			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
			if (ret)
				printk(KERN_ERR
				       "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_gfx_mapping();

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
					   0);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
					 0);
		iommu_disable_protect_mem_regions(iommu);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}
static inline u64 aligned_size(u64 host_addr, size_t size)
{
	u64 addr;
	addr = (host_addr & (~PAGE_MASK)) + size;
	return PAGE_ALIGN(addr);
}
static struct iova *
iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
{
	struct iova *piova;

	/* Make sure it's in range */
	end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
	if (!size || (IOVA_START_ADDR + size > end))
		return NULL;

	piova = alloc_iova(&domain->iovad,
			size >> PAGE_SHIFT, IOVA_PFN(end), 1);
	return piova;
}
static struct iova *
__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
		   size_t size, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
		iova = iommu_alloc_iova(domain, size, dma_mask);
	else {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
		if (!iova)
			iova = iommu_alloc_iova(domain, size, dma_mask);
	}

	if (!iova) {
		printk(KERN_ERR "Allocating iova for %s failed", pci_name(pdev));
		return NULL;
	}

	return iova;
}
static struct dmar_domain *
get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
			DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
			"Allocating domain for %s failed", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev);
		if (ret) {
			printk(KERN_ERR
				"Domain context map for %s failed",
				pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}
__intel_map_single(struct device
*hwdev
, phys_addr_t paddr
,
2133 size_t size
, int dir
, u64 dma_mask
)
2135 struct pci_dev
*pdev
= to_pci_dev(hwdev
);
2136 struct dmar_domain
*domain
;
2137 phys_addr_t start_paddr
;
2141 struct intel_iommu
*iommu
;
2143 BUG_ON(dir
== DMA_NONE
);
2144 if (pdev
->dev
.archdata
.iommu
== DUMMY_DEVICE_DOMAIN_INFO
)
2147 domain
= get_valid_domain_for_dev(pdev
);
2151 iommu
= domain_get_iommu(domain
);
2152 size
= aligned_size((u64
)paddr
, size
);
2154 iova
= __intel_alloc_iova(hwdev
, domain
, size
, pdev
->dma_mask
);
2158 start_paddr
= (phys_addr_t
)iova
->pfn_lo
<< PAGE_SHIFT
;
2161 * Check if DMAR supports zero-length reads on write only
2164 if (dir
== DMA_TO_DEVICE
|| dir
== DMA_BIDIRECTIONAL
|| \
2165 !cap_zlr(iommu
->cap
))
2166 prot
|= DMA_PTE_READ
;
2167 if (dir
== DMA_FROM_DEVICE
|| dir
== DMA_BIDIRECTIONAL
)
2168 prot
|= DMA_PTE_WRITE
;
2170 * paddr - (paddr + size) might be partial page, we should map the whole
2171 * page. Note: if two part of one page are separately mapped, we
2172 * might have two guest_addr mapping to the same host paddr, but this
2173 * is not a big problem
2175 ret
= domain_page_mapping(domain
, start_paddr
,
2176 ((u64
)paddr
) & PAGE_MASK
, size
, prot
);
2180 /* it's a non-present to present mapping */
2181 ret
= iommu_flush_iotlb_psi(iommu
, domain
->id
,
2182 start_paddr
, size
>> VTD_PAGE_SHIFT
, 1);
2184 iommu_flush_write_buffer(iommu
);
2186 return start_paddr
+ ((u64
)paddr
& (~PAGE_MASK
));
2190 __free_iova(&domain
->iovad
, iova
);
2191 printk(KERN_ERR
"Device %s request: %zx@%llx dir %d --- failed\n",
2192 pci_name(pdev
), size
, (unsigned long long)paddr
, dir
);
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (deferred_flush[i].next) {
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
						 DMA_TLB_GLOBAL_FLUSH, 0);
			for (j = 0; j < deferred_flush[i].next; j++) {
				__free_iova(&deferred_flush[i].domain[j]->iovad,
						deferred_flush[i].iova[j]);
			}
			deferred_flush[i].next = 0;
		}
	}

	list_size = 0;
}
static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
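/*
 * Unmap path below: in intel_iommu_strict mode the IOTLB is flushed and
 * the iova freed synchronously; otherwise the iova is handed to
 * add_unmap() above and reclaimed in batches, trading a short window in
 * which the hardware may still hold stale translations for much cheaper
 * flushes.
 */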
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_addr;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return;
	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (!iova)
		return;

	start_addr = iova->pfn_lo << PAGE_SHIFT;
	size = aligned_size((u64)dev_addr, size);

	pr_debug("Device %s unmapping: %zx@%llx\n",
		pci_name(pdev), size, (unsigned long long)start_addr);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_addr, start_addr + size);
	/* free page tables */
	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
	if (intel_iommu_strict) {
		if (iommu_flush_iotlb_psi(iommu,
			domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
			iommu_flush_write_buffer(iommu);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save the 1/6th of the
		 * cpu used up by the iotlb flush operation...
		 */
	}
}
static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
			       int dir)
{
	intel_unmap_page(dev, dev_addr, size, dir, NULL);
}
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	flags &= ~(GFP_DMA | GFP_DMA32);

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}
static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, order);
}
static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_addr;
	struct iova *iova;
	size_t size = 0;
	phys_addr_t addr;
	struct scatterlist *sg;
	struct intel_iommu *iommu;

	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (!iova)
		return;
	for_each_sg(sglist, sg, nelems, i) {
		addr = page_to_phys(sg_page(sg)) + sg->offset;
		size += aligned_size((u64)addr, sg->length);
	}

	start_addr = iova->pfn_lo << PAGE_SHIFT;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_addr, start_addr + size);
	/* free page tables */
	dma_pte_free_pagetable(domain, start_addr, start_addr + size);

	if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
			size >> VTD_PAGE_SHIFT, 0))
		iommu_flush_write_buffer(iommu);

	/* free iova */
	__free_iova(&domain->iovad, iova);
}
static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}
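/*
 * intel_map_sg() below makes two passes: the first sums the page-aligned
 * length of every segment so that a single contiguous iova range can be
 * allocated, and the second maps each segment at its running offset
 * inside that range, recording the resulting bus addresses in the sglist.
 */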
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	phys_addr_t addr;
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	size_t offset = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_addr;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i) {
		addr = page_to_phys(sg_page(sg)) + sg->offset;
		size += aligned_size((u64)addr, sg->length);
	}

	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings.
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_addr = iova->pfn_lo << PAGE_SHIFT;
	offset = 0;
	for_each_sg(sglist, sg, nelems, i) {
		addr = page_to_phys(sg_page(sg)) + sg->offset;
		size = aligned_size((u64)addr, sg->length);
		ret = domain_page_mapping(domain, start_addr + offset,
					  ((u64)addr) & PAGE_MASK,
					  size, prot);
		if (ret) {
			/* clear the page */
			dma_pte_clear_range(domain, start_addr,
					    start_addr + offset);
			/* free page tables */
			dma_pte_free_pagetable(domain, start_addr,
					       start_addr + offset);
			/* free iova */
			__free_iova(&domain->iovad, iova);
			return 0;
		}
		sg->dma_address = start_addr + offset +
				  ((u64)addr & (~PAGE_MASK));
		sg->dma_length = sg->length;
		offset += size;
	}

	/* it's a non-present to present mapping */
	if (iommu_flush_iotlb_psi(iommu, domain->id,
			start_addr, offset >> VTD_PAGE_SHIFT, 1))
		iommu_flush_write_buffer(iommu);
	return nelems;
}
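/*
 * Illustrative sketch of the driver-side view: the function above is reached
 * through dma_map_sg().  Assuming a populated scatterlist in a PCI driver:
 *
 *	int n = dma_map_sg(&pdev->dev, sglist, nelems, DMA_TO_DEVICE);
 *	if (n) {
 *		... program the device with sg_dma_address()/sg_dma_len() ...
 *		dma_unmap_sg(&pdev->dev, sglist, nelems, DMA_TO_DEVICE);
 *	}
 */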
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}
struct dma_map_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
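/*
 * This table becomes the global 'dma_ops' in intel_iommu_init() below, so all
 * streaming and coherent DMA mappings are routed through the IOMMU; devices
 * whose archdata.iommu was set to DUMMY_DEVICE_DOMAIN_INFO (see
 * init_no_remapping_devices()) short-circuit to the non-translating paths.
 */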
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					 sizeof(struct dmar_domain),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}
static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
					 sizeof(struct device_domain_info),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}
static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					 sizeof(struct iova),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}
static int __init iommu_init_mempool(void)
{
	int ret;
	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}
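/*
 * The caches are created in dependency order (iova, domain, devinfo) and torn
 * down in reverse on failure, so iommu_init_mempool() leaves the system with
 * either all three caches or none.
 */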
static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no PCI devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	if (dmar_map_gfx)
		return;

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass IOMMU if it is just for gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}
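/*
 * Tagging every device behind a graphics-only DMAR unit with
 * DUMMY_DEVICE_DOMAIN_INFO makes the map/unmap paths above take the
 * non-translating branch, so those devices bypass the IOMMU entirely without
 * any per-device hardware programming.
 */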
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_active_iommu(iommu, drhd) {
		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL, 0);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH, 0);
		iommu_disable_protect_mem_regions(iommu);
		iommu_enable_translation(iommu);
	}

	return 0;
}
static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL, 0);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH, 0);
	}
}
static int iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}
static int iommu_resume(struct sys_device *dev)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return -EIO;
	}

	for_each_active_iommu(iommu, drhd) {

		spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
			iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
			iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
			iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
			iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return 0;
}
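/*
 * Only the four fault-event registers are saved and restored across suspend:
 * the root table pointer and translation enable are re-established by
 * init_iommu_hw() at the top of iommu_resume(), and any stale context or
 * IOTLB caches are flushed globally there, so nothing else needs saving.
 */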
static struct sysdev_class iommu_sysclass = {
	.name		= "iommu",
	.resume		= iommu_resume,
	.suspend	= iommu_suspend,
};

static struct sys_device device_iommu = {
	.id	= 0,
	.cls	= &iommu_sysclass,
};
static int __init init_iommu_sysfs(void)
{
	int error;

	error = sysdev_class_register(&iommu_sysclass);
	if (error)
		return error;

	error = sysdev_register(&device_iommu);
	if (error)
		sysdev_class_unregister(&iommu_sysclass);

	return error;
}
#else
static int __init init_iommu_sysfs(void)
{
	return 0;	/* suspend/resume support is optional */
}
#endif	/* CONFIG_SUSPEND */
int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now.
	 * Above initialization will also be used by Interrupt-remapping.
	 */
	if (no_iommu || swiotlb || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
	force_iommu = 1;
	dma_ops = &intel_dma_ops;

	init_iommu_sysfs();

	register_iommu(&intel_iommu_ops);

	return 0;
}
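/*
 * Boot-time ordering note: init_dmars() brings up the hardware before
 * 'dma_ops' is switched to intel_dma_ops, and register_iommu() comes last,
 * so both the DMA-API path above and the iommu_ops path below only ever see
 * fully initialised state.
 */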
static int vm_domain_add_dev_info(struct dmar_domain *domain,
				  struct pci_dev *pdev)
{
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}
static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
					  struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		/* No need to compare PCI domain; it has to be the same */
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there are no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp,
		 * update iommu count and coherency
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}
/* domain id for virtual machine; it won't be set in a context entry */
static unsigned long vm_domid;
static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}
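/*
 * A virtual-machine domain may span several DMAR units, so a mapping is only
 * valid if it fits the smallest address width (AGAW) among the IOMMUs in the
 * domain's iommu_bmp; intel_iommu_map_range() below uses this minimum to
 * reject mappings the weakest unit could not translate.
 */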
static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}
static int vm_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}
static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}
static void vm_domain_exit(struct dmar_domain *domain)
{
	u64 end;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);
	end = DOMAIN_MAX_ADDR(domain->gaw);
	end = end & (~VTD_PAGE_MASK);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, end);

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, end);

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}
static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;
	int ret;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	ret = domain_context_mapping(dmar_domain, pdev);
	if (ret)
		return ret;

	ret = vm_domain_add_dev_info(dmar_domain, pdev);
	return ret;
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	vm_domain_remove_one_dev_info(dmar_domain, pdev);
}
static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}

	ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
	return ret;
}
static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	dma_addr_t base;

	/* The address might not be aligned */
	base = iova & VTD_PAGE_MASK;
	size = VTD_PAGE_ALIGN(size);
	dma_pte_clear_range(dmar_domain, base, base + size);

	if (dmar_domain->max_addr == base + size)
		dmar_domain->max_addr = base;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = addr_to_dma_pte(dmar_domain, iova);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}
static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}
static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map_range,
	.unmap		= intel_iommu_unmap_range,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
};
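/*
 * Illustrative sketch of how these ops are consumed through the generic
 * IOMMU API (registered with register_iommu() in intel_iommu_init()); names
 * as in this kernel's include/linux/iommu.h, target struct device *dev
 * assumed:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *	if (dom && !iommu_attach_device(dom, dev)) {
 *		iommu_map_range(dom, iova, hpa, size,
 *				IOMMU_READ | IOMMU_WRITE);
 *		...
 *		iommu_unmap_range(dom, iova, size);
 *		iommu_detach_device(dom, dev);
 *	}
 *	if (dom)
 *		iommu_domain_free(dom);
 */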
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);