#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>

static int forbid_dac __read_mostly;

struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/* Dummy device used for NULL arguments (normally ISA). Better would
   be probably a smaller DMA mask, but this is bug-to-bug compatible
   to older i386. */
struct device x86_dma_fallback_dev = {
        .bus_id = "fallback device",
        .coherent_dma_mask = DMA_32BIT_MASK,
        .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);

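/*
 * Example (illustrative only, not part of the original file; "pdev" is a
 * hypothetical pci_dev): a driver would typically negotiate its mask through
 * this helper, falling back to 32-bit addressing when 64-bit (DAC)
 * addressing is not supported:
 *
 *      if (dma_set_mask(&pdev->dev, DMA_64BIT_MASK) &&
 *          dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *              return -EIO;
 */
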
#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
        if (!p)
                return -EINVAL;

        dma32_bootmem_size = memparse(p, &p);
        return 0;
}
early_param("dma32_size", parse_dma32_size_opt);

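/*
 * Example (illustrative): the size of the bootmem reservation above can be
 * changed on the kernel command line; memparse() accepts the usual K/M/G
 * suffixes, e.g.:
 *
 *      dma32_size=256M
 */
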
void __init dma32_reserve_bootmem(void)
{
        unsigned long size, align;

        if (max_pfn <= MAX_DMA32_PFN)
                return;

        /*
         * check aperture_64.c allocate_aperture() for reason about
         * using 512M as goal
         */
        align = 64ULL<<20;
        size = roundup(dma32_bootmem_size, align);
        dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
                                 512ULL<<20);
        if (dma32_bootmem_ptr)
                dma32_bootmem_size = size;
        else
                dma32_bootmem_size = 0;
}

static void __init dma32_free_bootmem(void)
{
        if (max_pfn <= MAX_DMA32_PFN)
                return;

        if (!dma32_bootmem_ptr)
                return;

        free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

        dma32_bootmem_ptr = NULL;
        dma32_bootmem_size = 0;
}

void __init pci_iommu_alloc(void)
{
        /* free the range so iommu could get some range less than 4G */
        dma32_free_bootmem();

        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons
         */
        gart_iommu_hole_init();

        detect_calgary();

        detect_intel_iommu();

        amd_iommu_detect();

        pci_swiotlb_init();
}
#endif /* CONFIG_X86_64 */

void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t flag)
{
        unsigned long dma_mask;
        struct page *page;
        dma_addr_t addr;

        dma_mask = dma_alloc_coherent_mask(dev, flag);

        flag |= __GFP_ZERO;
again:
        page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
        if (!page)
                return NULL;

        addr = page_to_phys(page);
        if (!is_buffer_dma_capable(dma_mask, addr, size)) {
                __free_pages(page, get_order(size));

                /* Retry once from ZONE_DMA if the page was above the mask */
                if (dma_mask < DMA_32BIT_MASK && !(flag & GFP_DMA)) {
                        flag = (flag & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }

                return NULL;
        }

        *dma_addr = addr;
        return page_address(page);
}

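/*
 * Example (illustrative, hypothetical "pdev"): a driver asking for a
 * coherent buffer lands in dma_generic_alloc_coherent() above when no
 * IOMMU-specific allocator overrides it:
 *
 *      dma_addr_t bus_addr;
 *      void *buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &bus_addr,
 *                                     GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, bus_addr);
 */
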
/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
 * documentation.
 */
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_bio_merge = 4096;
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = 1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif

                gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);

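/*
 * Example (illustrative) command lines handled by the parser above;
 * multiple options are comma separated:
 *
 *      iommu=off
 *      iommu=force,merge
 *      iommu=soft
 *      iommu=noforce,nomerge,nopanic
 */
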
int dma_supported(struct device *dev, u64 mask)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                dev_info(dev, "PCI: Disallowing DAC for device\n");
                return 0;
        }
#endif

        if (ops->dma_supported)
                return ops->dma_supported(dev, mask);

        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
           The caller just has to use GFP_DMA in this case. */
        if (mask < DMA_24BIT_MASK)
                return 0;

        /* Tell the device to use SAC when IOMMU force is on.  This
           allows the driver to use cheaper accesses in some cases.

           Problem with this is that if we overflow the IOMMU area and
           return DAC as fallback address the device may not handle it
           correctly.

           As a special case some controllers have a 39bit address
           mode that is as efficient as 32bit (aic79xx). Don't force
           SAC for these.  Assume all masks <= 40 bits are of this
           type. Normally this doesn't make any difference, but gives
           more gentle handling of IOMMU overflow. */
        if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
                dev_info(dev, "Force SAC with mask %Lx\n", mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(dma_supported);

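/*
 * Example (illustrative, hypothetical "pdev"): with "iommu=nodac" on the
 * command line forbid_dac is positive, so a 64-bit mask is rejected above
 * and the driver falls back to 32-bit (SAC) addressing:
 *
 *      if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
 *              pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 */
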
static int __init pci_iommu_init(void)
{
        calgary_iommu_init();
        intel_iommu_init();
        amd_iommu_init();
        gart_iommu_init();
        no_iommu_init();
        return 0;
}

void pci_iommu_shutdown(void)
{
        gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
                printk(KERN_INFO
                       "PCI: VIA PCI bridge detected. Disabling DAC.\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif
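
/*
 * Example (illustrative, hypothetical device): another bridge known to
 * corrupt DAC cycles could be blacklisted with the same fixup mechanism:
 *
 *      static __devinit void example_no_dac(struct pci_dev *dev)
 *      {
 *              if (forbid_dac == 0)
 *                      forbid_dac = 1;
 *      }
 *      DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, example_no_dac);
 */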