tomato.git: release/src-rt-6.x.4708/linux/linux-2.6.36/arch/x86/kernel/pci-dma.c
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>
#include <asm/x86_init.h>
#include <asm/xen/swiotlb-xen.h>
static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;
/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole of physical memory.
 * This is useful if a user wants to use an IOMMU only for KVM device
 * assignment to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;
/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
        .init_name = "fallback device",
        .coherent_dma_mask = ISA_DMA_BIT_MASK,
        .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);
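/*
 * Illustrative note (not part of the original file): in this kernel era,
 * legacy callers that pass a NULL device to the DMA API are redirected to
 * x86_dma_fallback_dev, so an allocation such as
 *
 *      buf = dma_alloc_coherent(NULL, size, &handle, GFP_KERNEL);
 *
 * is constrained by the 24-bit ISA coherent mask set above.
 */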
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       32768
int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
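/*
 * Hedged usage sketch (not part of the original file): the usual way a
 * driver negotiates its addressing width with dma_set_mask() above, trying
 * 64-bit DAC first and falling back to 32-bit SAC. "example_probe" is a
 * hypothetical name; the block is excluded from the build.
 */
#if 0
static int example_probe(struct pci_dev *pdev)
{
        /* Prefer 64-bit addressing, fall back to 32-bit. */
        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
            dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
                return -EIO;    /* no usable DMA addressing mode */
        return 0;
}
#endif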
#if defined(CONFIG_X86_64) && !defined(CONFIG_NUMA)
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
        if (!p)
                return -EINVAL;
        dma32_bootmem_size = memparse(p, &p);
        return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
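/*
 * Usage note (added for illustration): the default 128MB low reservation
 * can be resized from the kernel command line, e.g.
 *
 *      dma32_size=256M
 *
 * memparse() accepts the usual K/M/G size suffixes.
 */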
void __init dma32_reserve_bootmem(void)
{
        unsigned long size, align;
        if (max_pfn <= MAX_DMA32_PFN)
                return;

        /*
         * See allocate_aperture() in aperture_64.c for why 512M is used
         * as the allocation goal.
         */
        align = 64ULL<<20;
        size = roundup(dma32_bootmem_size, align);
        dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
                                 512ULL<<20);
        /*
         * Kmemleak should not scan this block as it may not be mapped via the
         * kernel direct mapping.
         */
        kmemleak_ignore(dma32_bootmem_ptr);
        if (dma32_bootmem_ptr)
                dma32_bootmem_size = size;
        else
                dma32_bootmem_size = 0;
}
static void __init dma32_free_bootmem(void)
{
        if (max_pfn <= MAX_DMA32_PFN)
                return;

        if (!dma32_bootmem_ptr)
                return;

        free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

        dma32_bootmem_ptr = NULL;
        dma32_bootmem_size = 0;
}
#else
void __init dma32_reserve_bootmem(void)
{
}
static void __init dma32_free_bootmem(void)
{
}
#endif
void __init pci_iommu_alloc(void)
{
        /* free the range so iommu could get some range less than 4G */
        dma32_free_bootmem();

        if (pci_xen_swiotlb_detect() || pci_swiotlb_detect())
                goto out;

        gart_iommu_hole_init();

        detect_calgary();

        detect_intel_iommu();

        /* needs to be called after gart_iommu_hole_init */
        amd_iommu_detect();
out:
        pci_xen_swiotlb_init();

        pci_swiotlb_init();
}
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t flag)
{
        unsigned long dma_mask;
        struct page *page;
        dma_addr_t addr;

        dma_mask = dma_alloc_coherent_mask(dev, flag);

        flag |= __GFP_ZERO;
again:
        page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
        if (!page)
                return NULL;

        addr = page_to_phys(page);
        if (addr + size > dma_mask) {
                __free_pages(page, get_order(size));

                if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
                        flag = (flag & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }

                return NULL;
        }

        *dma_addr = addr;
        return page_address(page);
}
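/*
 * Hedged usage sketch (not part of the original file): drivers reach the
 * allocator above indirectly through dma_alloc_coherent(). The function name
 * "example_setup_ring" and the one-page size are hypothetical; the block is
 * excluded from the build.
 */
#if 0
static int example_setup_ring(struct device *dev)
{
        dma_addr_t ring_dma;
        void *ring;

        /* Zeroed, physically contiguous buffer visible to the device. */
        ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        /* ... program ring_dma into the device, use the ring ... */

        dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
        return 0;
}
#endif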
/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = 1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif
                if (!strncmp(p, "pt", 2))
                        iommu_pass_through = 1;

                gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);
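/*
 * Usage note (added for illustration): the options parsed above are given
 * as a comma-separated list on the kernel command line, e.g.
 *
 *      iommu=force,merge,nopanic
 *      iommu=pt
 *      iommu=soft
 *
 * Unrecognized tokens fall through to gart_parse_options().
 */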
int dma_supported(struct device *dev, u64 mask)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                dev_info(dev, "PCI: Disallowing DAC for device\n");
                return 0;
        }
#endif

        if (ops->dma_supported)
                return ops->dma_supported(dev, mask);

        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
           The caller just has to use GFP_DMA in this case. */
        if (mask < DMA_BIT_MASK(24))
                return 0;

        /* Tell the device to use SAC when IOMMU force is on. This
           allows the driver to use cheaper accesses in some cases.

           Problem with this is that if we overflow the IOMMU area and
           return DAC as fallback address the device may not handle it
           correctly.

           As a special case some controllers have a 39bit address
           mode that is as efficient as 32bit (aic79xx). Don't force
           SAC for these. Assume all masks <= 40 bits are of this
           type. Normally this doesn't make any difference, but gives
           more gentle handling of IOMMU overflow. */
        if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
                dev_info(dev, "Force SAC with mask %Lx\n", mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(dma_supported);
static int __init pci_iommu_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
        dma_debug_add_bus(&pci_bus_type);
#endif
        x86_init.iommu.iommu_init();

        if (swiotlb || xen_swiotlb) {
                printk(KERN_INFO "PCI-DMA: "
                       "Using software bounce buffering for IO (SWIOTLB)\n");
                swiotlb_print_info();
        } else
                swiotlb_free();

        return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);
#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
                dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif
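/*
 * Hedged sketch (not part of the original file): the same fixup pattern can
 * register a quirk for any device; it runs for every matching device at
 * FINAL fixup time. The vendor ID below is hypothetical and the block is
 * excluded from the build.
 */
#if 0
static __devinit void example_quirk(struct pci_dev *dev)
{
        dev_info(&dev->dev, "example quirk: forcing 32-bit (SAC) DMA\n");
        forbid_dac = 1;
}
DECLARE_PCI_FIXUP_FINAL(0x1234 /* hypothetical vendor */, PCI_ANY_ID,
                        example_quirk);
#endif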