x86, apic: Don't use logical-flat mode when CPU hotplug may exceed 8 CPUs
[linux-2.6/mini2440.git] / arch/x86/kernel/pci-dma.c
blob: 6ac3931160d7ce299385451d4cd4d2f18d7c7a4a
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>

static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;

/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment
 * to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;
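/*
 * Example (illustrative, not part of the original file): the flag above
 * is driven purely by the boot command line, e.g.
 *
 *     vmlinuz ... iommu=pt
 *
 * which iommu_setup() below translates into iommu_pass_through = 1.
 */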
dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
        .init_name = "fallback device",
        .coherent_dma_mask = ISA_DMA_BIT_MASK,
        .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       32768

int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
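/*
 * Illustrative sketch (not part of the original file): a driver's probe
 * path typically negotiates its mask through dma_set_mask(), retrying
 * with a narrower mask on failure. The helper name example_setup_dma()
 * and the 64-bit first choice are assumptions, not kernel API.
 */
static int example_setup_dma(struct device *dev)
{
        /* Prefer 64-bit addressing; fall back to 32-bit if unsupported. */
        if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0)
                return 0;
        return dma_set_mask(dev, DMA_BIT_MASK(32));
}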
#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
        if (!p)
                return -EINVAL;
        dma32_bootmem_size = memparse(p, &p);
        return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
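/*
 * Example (assumed usage): the option is parsed with memparse(), so the
 * usual size suffixes are accepted on the command line, e.g.
 *
 *     vmlinuz ... dma32_size=256M
 *
 * to grow the bootmem block reserved below from its 128M default.
 */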
void __init dma32_reserve_bootmem(void)
{
        unsigned long size, align;

        if (max_pfn <= MAX_DMA32_PFN)
                return;

        /*
         * check aperture_64.c allocate_aperture() for reason about
         * using 512M as goal
         */
        align = 64ULL<<20;
        size = roundup(dma32_bootmem_size, align);
        dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
                                 512ULL<<20);
        /*
         * Kmemleak should not scan this block as it may not be mapped via the
         * kernel direct mapping.
         */
        kmemleak_ignore(dma32_bootmem_ptr);
        if (dma32_bootmem_ptr)
                dma32_bootmem_size = size;
        else
                dma32_bootmem_size = 0;
}

static void __init dma32_free_bootmem(void)
{
        if (max_pfn <= MAX_DMA32_PFN)
                return;

        if (!dma32_bootmem_ptr)
                return;

        free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

        dma32_bootmem_ptr = NULL;
        dma32_bootmem_size = 0;
}
#endif

void __init pci_iommu_alloc(void)
{
#ifdef CONFIG_X86_64
        /* free the range so iommu could get some range less than 4G */
        dma32_free_bootmem();
#endif

        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons
         */
        gart_iommu_hole_init();

        detect_calgary();

        detect_intel_iommu();

        amd_iommu_detect();

        pci_swiotlb_init();
}

void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t flag)
{
        unsigned long dma_mask;
        struct page *page;
        dma_addr_t addr;

        dma_mask = dma_alloc_coherent_mask(dev, flag);

        flag |= __GFP_ZERO;
again:
        page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
        if (!page)
                return NULL;

        addr = page_to_phys(page);
        if (addr + size > dma_mask) {
                __free_pages(page, get_order(size));

                if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
                        flag = (flag & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }

                return NULL;
        }

        *dma_addr = addr;
        return page_address(page);
}
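/*
 * Illustrative sketch (not part of the original file): drivers do not
 * call dma_generic_alloc_coherent() directly; they go through
 * dma_alloc_coherent(), which dispatches via dma_map_ops and may land
 * here. The helper name and the one-page size are assumptions.
 */
static void *example_alloc_desc_ring(struct device *dev, dma_addr_t *handle)
{
        /* Zeroed, device-reachable memory; release with dma_free_coherent(). */
        return dma_alloc_coherent(dev, PAGE_SIZE, handle, GFP_KERNEL);
}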
/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = 1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif
                if (!strncmp(p, "pt", 2))
                        iommu_pass_through = 1;

                gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);
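/*
 * Example (assumed usage): options are comma-separated and scanned in
 * one pass by the loop above, so something like
 *
 *     vmlinuz ... iommu=force,nomerge,nopanic
 *
 * sets force_iommu = 1, iommu_merge = 0 and panic_on_overflow = 0.
 */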
int dma_supported(struct device *dev, u64 mask)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                dev_info(dev, "PCI: Disallowing DAC for device\n");
                return 0;
        }
#endif

        if (ops->dma_supported)
                return ops->dma_supported(dev, mask);

        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
           The caller just has to use GFP_DMA in this case. */
        if (mask < DMA_BIT_MASK(24))
                return 0;

        /* Tell the device to use SAC when IOMMU force is on.  This
           allows the driver to use cheaper accesses in some cases.

           Problem with this is that if we overflow the IOMMU area and
           return DAC as fallback address the device may not handle it
           correctly.

           As a special case some controllers have a 39bit address
           mode that is as efficient as 32bit (aic79xx). Don't force
           SAC for these.  Assume all masks <= 40 bits are of this
           type. Normally this doesn't make any difference, but gives
           more gentle handling of IOMMU overflow. */
        if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
                dev_info(dev, "Force SAC with mask %Lx\n", mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(dma_supported);
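/*
 * Illustrative sketch (not part of the original file): an IOMMU backend
 * can veto masks through the ->dma_supported hook that dma_supported()
 * defers to above. The backend and its 32-bit rule are assumptions.
 */
static int example_iommu_dma_supported(struct device *dev, u64 mask)
{
        /* A remapping IOMMU can serve any device covering 32 bits. */
        return mask >= DMA_BIT_MASK(32);
}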
static int __init pci_iommu_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
        dma_debug_add_bus(&pci_bus_type);
#endif

        calgary_iommu_init();

        intel_iommu_init();

        amd_iommu_init();

        gart_iommu_init();

        no_iommu_init();
        return 0;
}

void pci_iommu_shutdown(void)
{
        gart_iommu_shutdown();

        amd_iommu_shutdown();
}

/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here. */
static __devinit void via_no_dac(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
                dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
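
/*
 * Illustrative sketch (not part of the original file): any quirk of the
 * via_no_dac() shape is registered through the DECLARE_PCI_FIXUP_*
 * family and runs as matching devices are enumerated. The vendor ID
 * 0x1234 and the quirk body are made-up placeholders.
 */
static __devinit void example_quirk(struct pci_dev *dev)
{
        dev_info(&dev->dev, "example final-stage fixup ran\n");
}
DECLARE_PCI_FIXUP_FINAL(0x1234, PCI_ANY_ID, example_quirk);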
#endif