[PATCH] x86_64: Don't invoke OOM killer during dma_alloc_coherent()
[linux-2.6/libata-dev.git] arch/x86_64/kernel/pci-dma.c
/*
 * Dynamic DMA mapping support.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/proto.h>

int iommu_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_merge);

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

int iommu_sac_force __read_mostly = 0;
EXPORT_SYMBOL(iommu_sac_force);

int no_iommu __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

/* Dummy device used for NULL arguments (normally ISA). Better would
   probably be a smaller DMA mask, but this is bug-to-bug compatible
   with i386. */
struct device fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = 0xffffffff,
	.dma_mask = &fallback_dev.coherent_dma_mask,
};

/* Allocate DMA memory on node near device */
noinline static void *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	struct page *page;
	int node;

	if (dev->bus == &pci_bus_type)
		node = pcibus_to_node(to_pci_dev(dev)->bus);
	else
		node = numa_node_id();
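	/* Note: alloc_pages_node() only expresses a preference; if the
	   chosen node has no free memory the allocation falls back to
	   other nodes through that node's zonelist. */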
	page = alloc_pages_node(node, gfp, order);
	return page ? page_address(page) : NULL;
}

/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	void *memory;
	unsigned long dma_mask = 0;
	u64 bus;

	if (!dev)
		dev = &fallback_dev;
	dma_mask = dev->coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = 0xffffffff;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;
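	/* Note: with __GFP_NORETRY the page allocator fails fast and
	   returns NULL instead of retrying until the OOM killer frees
	   memory; the NULL is simply propagated to the caller below. */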

	/* Kludge to make it bug-to-bug compatible with i386. i386
	   uses the normal dma_mask for alloc_coherent. */
	dma_mask &= *dev->dma_mask;

	/* Why <=? Even when the mask is smaller than 4GB it is often
	   larger than 16MB and in this case we have a chance of
	   finding fitting memory in the next higher zone first. If
	   not retry with true GFP_DMA. -AK */
	if (dma_mask <= 0xffffffff)
		gfp |= GFP_DMA32;
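
	/* Worked example: a device with dma_mask = 0x0fffffff (28 bits)
	   first tries ZONE_DMA32 here; only if the page comes back above
	   the mask does the retry below switch to true GFP_DMA. */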

 again:
	memory = dma_alloc_pages(dev, gfp, get_order(size));
	if (memory == NULL)
		return NULL;

	{
		int high, mmu;

		bus = virt_to_bus(memory);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		else if (high) {
			free_pages((unsigned long)memory, get_order(size));

			/* Don't use the 16MB ZONE_DMA unless absolutely
			   needed. It's better to use remapping first. */
			if (dma_mask < 0xffffffff && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}

			/* Let low level make its own zone decisions */
			gfp &= ~(GFP_DMA32|GFP_DMA);

			if (dma_ops->alloc_coherent)
				return dma_ops->alloc_coherent(dev, size,
							       dma_handle, gfp);
			return NULL;
		}

		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = virt_to_bus(memory);
			return memory;
		}
	}

	if (dma_ops->alloc_coherent) {
		free_pages((unsigned long)memory, get_order(size));
		gfp &= ~(GFP_DMA|GFP_DMA32);
		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
	}

	if (dma_ops->map_simple) {
		*dma_handle = dma_ops->map_simple(dev, memory, size,
						  PCI_DMA_BIDIRECTIONAL);
		if (*dma_handle != bad_dma_address)
			return memory;
	}

	if (panic_on_overflow)
		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n", size);
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);
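
/*
 * Usage sketch (illustration only, not part of this file): a
 * hypothetical driver allocating and releasing a coherent descriptor
 * ring; "my_pdev" and MY_RING_BYTES are made-up names.
 *
 *	dma_addr_t ring_bus;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(&my_pdev->dev, MY_RING_BYTES,
 *				  &ring_bus, GFP_KERNEL);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&my_pdev->dev, MY_RING_BYTES, ring, ring_bus);
 */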

/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t bus)
{
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, bus, size, 0);
	free_pages((unsigned long)vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);

int dma_supported(struct device *dev, u64 mask)
{
	if (dma_ops->dma_supported)
		return dma_ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < 0x00ffffff)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on. This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these. Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= 0xffffffffffULL)) {
		printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
		       dev->bus_id, mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
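
/*
 * Usage sketch (illustration only): a driver probe would typically try
 * the widest mask its hardware supports and fall back to 32 bits;
 * "pdev" stands in for the driver's struct pci_dev.
 *
 *	if (dma_set_mask(&pdev->dev, 0xffffffffffULL) &&
 *	    dma_set_mask(&pdev->dev, 0xffffffffULL))
 *		return -EIO;
 */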

/* iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]][,merge]
         [,forcesac][,fullflush][,nomerge][,biomerge]
   size		set size of iommu (in bytes)
   noagp	don't initialize the AGP driver and use full aperture.
   off		don't use the IOMMU
   leak		turn on simple iommu leak tracing (only when
		CONFIG_IOMMU_LEAK is on)
   memaper[=order] allocate an own aperture over RAM with size 32MB^order.
   noforce	don't force IOMMU usage. Default.
   force	Force IOMMU.
   merge	Do lazy merging. This may improve performance on some block
		devices. Implies force (experimental).
   biomerge	Do merging at the BIO layer. This is more efficient than
		merge, but should only be done with very big IOMMUs.
		Implies merge,force.
   nomerge	Don't do SG merging.
   forcesac	Force SAC mode for masks <40bits (experimental)
   fullflush	Flush IOMMU on each allocation (default)
   nofullflush	Don't use IOMMU fullflush
   allowed	overwrite iommu off workarounds for specific chipsets.
   soft		Use software bounce buffering (default for Intel machines)
   noaperture	Don't touch the aperture for AGP.
*/
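/* For example, booting with "iommu=force,merge" forces IOMMU usage and
   enables lazy SG merging, while "iommu=off" disables the IOMMU
   entirely (both strings follow the option table above). */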

__init int iommu_setup(char *p)
{
	iommu_merge = 1;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}
		if (!strncmp(p, "biomerge", 8)) {
			iommu_bio_merge = 4096;
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;

#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

#ifdef CONFIG_GART_IOMMU
		gart_parse_options(p);
#endif

		/* advance to the next comma-separated option */
		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 1;
}