/* arch/x86/kernel/pci-dma.c */
#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>

static int forbid_dac __read_mostly;

struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);
/*
 * Dummy device used for NULL arguments (normally ISA). A smaller DMA
 * mask would probably be better, but this is bug-for-bug compatible
 * with older i386.
 */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);
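
/*
 * Illustration only (not part of this file): a minimal sketch of how a
 * caller is expected to substitute the fallback device when a driver
 * passes a NULL struct device, as the generic x86 dma-mapping helpers
 * do. The helper name below is hypothetical.
 */
#if 0
static struct device *example_resolve_dev(struct device *dev)
{
	/* NULL means a legacy/ISA-style caller: use the 32-bit fallback */
	if (!dev)
		return &x86_dma_fallback_dev;
	return dev;
}
#endif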
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
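
/*
 * Illustration only: how a driver would typically use dma_set_mask()
 * at probe time, falling back to a narrower mask when the wider one
 * is rejected. The function and device pointer are hypothetical.
 */
#if 0
static int example_probe_set_mask(struct device *dev)
{
	if (dma_set_mask(dev, DMA_64BIT_MASK) &&
	    dma_set_mask(dev, DMA_32BIT_MASK))
		return -EIO;	/* neither mask is usable */
	return 0;
}
#endif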
#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	dma32_bootmem_size = memparse(p, &p);
	return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;

	if (max_pfn <= MAX_DMA32_PFN)
		return;

	/*
	 * See allocate_aperture() in aperture_64.c for why 512M is
	 * used as the allocation goal.
	 */
	align = 64ULL<<20;
	size = roundup(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
				 512ULL<<20);
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;
}

static void __init dma32_free_bootmem(void)
{
	if (max_pfn <= MAX_DMA32_PFN)
		return;

	if (!dma32_bootmem_ptr)
		return;

	free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

	dma32_bootmem_ptr = NULL;
	dma32_bootmem_size = 0;
}
#endif
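
/*
 * Lifecycle note (an assumption based on the callers in this tree, not
 * stated in this file): dma32_reserve_bootmem() is invoked early from
 * setup_arch() to hold a chunk of memory below 4G out of the bootmem
 * allocator, and dma32_free_bootmem() releases it in pci_iommu_alloc()
 * below, so the IOMMU/swiotlb setup that follows can allocate from
 * that range.
 */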
void __init pci_iommu_alloc(void)
{
#ifdef CONFIG_X86_64
	/* free the range so the IOMMU can get an allocation below 4G */
	dma32_free_bootmem();
#endif

	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
	gart_iommu_hole_init();

	detect_calgary();

	detect_intel_iommu();

	amd_iommu_detect();

	pci_swiotlb_init();
}
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long dma_mask;
	struct page *page;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (!is_buffer_dma_capable(dma_mask, addr, size)) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_32BIT_MASK && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	*dma_addr = addr;
	return page_address(page);
}
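
/*
 * Note on the retry above: if the page that came back is not reachable
 * under the device's mask and that mask is narrower than 32 bits, the
 * allocation is retried from ZONE_DMA (GFP_DMA), i.e. the low 16M on
 * x86. A caller sketch, with a hypothetical device pointer:
 */
#if 0
static void *example_alloc(struct device *dev, dma_addr_t *handle)
{
	/* one zeroed 4K coherent buffer; retries with GFP_DMA if needed */
	return dma_generic_alloc_coherent(dev, 4096, handle, GFP_KERNEL);
}
#endif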
/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = -1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_24BIT_MASK)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on. This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39-bit address
	   mode that is as efficient as 32-bit (aic79xx). Don't force
	   SAC for these. Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);
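
/*
 * Illustration only: the common PCI probe-time pattern this function
 * backs. A driver asks for a 64-bit (DAC) mask first and falls back to
 * 32-bit (SAC) when dma_supported() rejects it; pdev and the function
 * are hypothetical.
 */
#if 0
static int example_pci_probe_mask(struct pci_dev *pdev)
{
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
	    pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		return -EIO;
	return 0;
}
#endif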
static int __init pci_iommu_init(void)
{
	calgary_iommu_init();

	intel_iommu_init();

	amd_iommu_init();

	gart_iommu_init();

	no_iommu_init();
	return 0;
}

void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here. */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		printk(KERN_INFO
		       "PCI: VIA PCI bridge detected. Disabling DAC.\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif