/* [linux-2.6.git] arch/x86/kernel/pci-dma.c */
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);
static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;
/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment
 * to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;
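
/*
 * Illustrative sketch (not from the original file; identity_map_device()
 * is a hypothetical helper): an IOMMU driver honouring this flag would do
 * something like
 *
 *	if (iommu_pass_through)
 *		return identity_map_device(dev);
 *
 * i.e. give the device a 1:1 mapping instead of a translated one.
 */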
/*
 * Group multi-function PCI devices into a single device-group for the
 * iommu_device_group interface. This tells the iommu driver to pretend
 * it cannot distinguish between functions of a device, exposing only one
 * group for the device. Useful for disallowing use of individual PCI
 * functions from userspace drivers.
 */
int iommu_group_mf __read_mostly;
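
/*
 * Example: booting with "iommu=group_mf" on the kernel command line sets
 * this flag (see the parser in iommu_setup() below).
 */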
extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);
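
/*
 * Note (assumption based on the x86 <asm/dma-mapping.h> helpers of this
 * kernel generation): DMA-API calls made with a NULL struct device are
 * redirected to this stub, so they inherit its ISA-style coherent mask
 * instead of dereferencing a NULL pointer.
 */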
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES	32768
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
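
/*
 * Usage sketch (hypothetical driver code, not part of this file): a PCI
 * driver negotiating its DMA mask at probe time, falling back from 64-bit
 * to 32-bit addressing:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;	(no usable DMA addressing)
 */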
/* Walk the IOMMU detection table and early-initialize whatever is found. */
void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}
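
/*
 * Note: __iommu_table is populated at link time through the IOMMU_INIT_*
 * macros in <asm/iommu_table.h> (included above), which is what gives the
 * detect()/early_init() walk in pci_iommu_alloc() its entries.
 */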
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	unsigned long dma_mask;
	struct page *page;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	*dma_addr = addr;
	return page_address(page);
}
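
/*
 * Worked example of the retry path above (hedged): for a device whose
 * coherent mask is below 4GB, a first allocation landing above that mask
 * is freed, GFP_DMA32 is cleared, GFP_DMA is set, and the code jumps back
 * to "again" so the next attempt comes from the 16MB ISA DMA zone.
 */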
/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;
	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;
		if (!strncmp(p, "group_mf", 8))
			iommu_group_mf = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
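
/*
 * Example command lines accepted by the parser above (options are
 * comma-separated and may be combined):
 *
 *	iommu=off          - disable IOMMU detection entirely
 *	iommu=force        - force IOMMU usage
 *	iommu=pt           - pass-through, no DMA translation
 *	iommu=soft         - use the swiotlb software bounce buffers
 *	iommu=nodac,panic  - forbid DAC addressing and panic on overflow
 */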
int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);
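
/*
 * Background note: "DAC" is the PCI dual-address cycle (64-bit
 * addressing) and "SAC" the single-address cycle (32-bit). Returning 0
 * for a large mask pushes the caller back towards a 32-bit mask, the
 * cheaper and safer mode when forbid_dac or iommu_sac_force is set.
 */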
static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);
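
/*
 * Ordering note: rootfs_initcall() runs after subsys_initcall(), where
 * the PCI core comes up, but before device_initcall(), so the IOMMU is
 * ready before ordinary drivers start probing.
 */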
#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here. */
static __devinit void via_no_dac(struct pci_dev *dev)
{
	if (forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
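/*
 * The class/class_shift pair above matches the PCI class code with the
 * low 8 bits (the programming-interface byte) masked off, so the fixup
 * runs for every VIA PCI-to-PCI bridge regardless of its prog-if value.
 */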
#endif /* CONFIG_PCI */