added 2.6.29.6 aldebaran kernel
[nao-ulib.git] / kernel / 2.6.29.6-aldebaran-rt / arch / x86 / include / asm / dma-mapping.h
#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>
extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern struct dma_map_ops *dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
        return dma_ops;
#else
        if (unlikely(!dev) || !dev->archdata.dma_ops)
                return dma_ops;
        else
                return dev->archdata.dma_ops;
#endif
}
/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);

        return (dma_addr == bad_dma_address);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag);
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);
        dma_addr_t addr;

        kmemcheck_mark_initialized(ptr, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(hwdev, virt_to_page(ptr),
                             (unsigned long)ptr & ~PAGE_MASK, size,
                             dir, NULL);
        debug_dma_map_page(hwdev, virt_to_page(ptr),
                           (unsigned long)ptr & ~PAGE_MASK, size,
                           dir, addr, true);
        return addr;
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, NULL);
        debug_dma_unmap_page(dev, addr, size, dir, true);
}
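
/*
 * Usage sketch (added for illustration, not part of the original header):
 * the streaming-DMA pattern a driver would typically build on the two
 * helpers above. The device pointer, buffer and length are hypothetical.
 */
#if 0   /* illustration only */
static int example_dma_tx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        /* Map the buffer for device reads and bail out on mapping failure. */
        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        /* ... hand "handle" to the hardware and wait for the transfer ... */

        /* Release the mapping once the device is done with the buffer. */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}
#endif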
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);
        int ents;
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i)
                kmemcheck_mark_initialized(sg_virt(s), s->length);
        BUG_ON(!valid_dma_direction(dir));
        ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
        debug_dma_map_sg(hwdev, sg, nents, ents, dir);

        return ents;
}
static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        debug_dma_unmap_sg(hwdev, sg, nents, dir);
        if (ops->unmap_sg)
                ops->unmap_sg(hwdev, sg, nents, dir, NULL);
}
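
/*
 * Usage sketch (added for illustration): mapping a scatterlist for a
 * hypothetical device. dma_map_sg() may merge entries, so the returned
 * count, not the original nents, drives the hardware programming.
 */
#if 0   /* illustration only */
static int example_dma_sg(struct device *dev, struct scatterlist *sgl,
                          int nents)
{
        int mapped;

        mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
        if (!mapped)
                return -ENOMEM;

        /* ... program "mapped" descriptors via sg_dma_address()/sg_dma_len() ... */

        /* Unmap with the original nents, as the DMA-API documentation requires. */
        dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
        return 0;
}
#endif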
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
        debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(hwdev, dma_handle, size, dir);
        debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir);
        flush_write_buffers();
}
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_range_for_cpu)
                ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                               size, dir);
        debug_dma_sync_single_range_for_cpu(hwdev, dma_handle,
                                            offset, size, dir);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_range_for_device)
                ops->sync_single_range_for_device(hwdev, dma_handle,
                                                  offset, size, dir);
        debug_dma_sync_single_range_for_device(hwdev, dma_handle,
                                               offset, size, dir);
        flush_write_buffers();
}
static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(hwdev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir);

        flush_write_buffers();
}
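
/*
 * Usage sketch (added for illustration): reusing one long-lived streaming
 * mapping across several transfers by handing ownership back and forth with
 * the sync helpers above. Device, handle, buffer and length are hypothetical.
 */
#if 0   /* illustration only */
static void example_dma_rx_reuse(struct device *dev, dma_addr_t handle,
                                 void *buf, size_t len)
{
        /* Give the buffer back to the CPU before reading the received data. */
        dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

        /* ... the CPU may now safely look at "buf" ... */

        /* Hand the buffer back to the device before the next transfer. */
        dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}
#endif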
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        kmemcheck_mark_initialized(page_address(page) + offset, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, page, offset, size, dir, NULL);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);

        return addr;
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, NULL);
        debug_dma_unmap_page(dev, addr, size, dir, false);
}
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        flush_write_buffers();
}
static inline int dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return boot_cpu_data.x86_clflush_size;
}
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
                                                    gfp_t gfp)
{
        unsigned long dma_mask = 0;

        dma_mask = dev->coherent_dma_mask;
        if (!dma_mask)
                dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

        return dma_mask;
}
static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
        unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

        if (dma_mask <= DMA_24BIT_MASK)
                gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
        if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
                gfp |= GFP_DMA32;
#endif
        return gfp;
}
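
/*
 * Worked example (added for illustration): how the two helpers above pick a
 * GFP zone from a device's coherent_dma_mask. The mask values below are the
 * only assumption; the behaviour follows directly from the code above.
 *
 *      coherent_dma_mask = DMA_24BIT_MASK   -> gfp |= GFP_DMA   (below 16MB)
 *      coherent_dma_mask = DMA_32BIT_MASK   -> gfp |= GFP_DMA32 (x86_64 only, below 4GB)
 *      coherent_dma_mask = DMA_64BIT_MASK   -> gfp left unchanged
 *      coherent_dma_mask = 0, gfp & GFP_DMA -> treated as a 24-bit mask
 */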
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        void *memory;

        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
                return memory;

        if (!dev) {
                dev = &x86_dma_fallback_dev;
                gfp |= GFP_DMA;
        }

        if (!is_device_dma_capable(dev))
                return NULL;

        if (!ops->alloc_coherent)
                return NULL;

        memory = ops->alloc_coherent(dev, size, dma_handle,
                                     dma_alloc_coherent_gfp_flags(dev, gfp));
        debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

        return memory;
}
static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t bus)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        WARN_ON(irqs_disabled());       /* for portability */

        if (dma_release_from_coherent(dev, get_order(size), vaddr))
                return;

        debug_dma_free_coherent(dev, size, vaddr, bus);
        if (ops->free_coherent)
                ops->free_coherent(dev, size, vaddr, bus);
}

#endif
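
/*
 * Usage sketch (added for illustration, placed after the include guard so the
 * header itself is unchanged): allocating and freeing a coherent DMA buffer
 * with the helpers above. The device pointer and ring size are hypothetical.
 */
#if 0   /* illustration only */
static void *example_alloc_ring(struct device *dev, size_t ring_size,
                                dma_addr_t *ring_dma)
{
        /* Returns a CPU pointer and fills *ring_dma with the bus address. */
        return dma_alloc_coherent(dev, ring_size, ring_dma, GFP_KERNEL);
}

static void example_free_ring(struct device *dev, size_t ring_size,
                              void *ring, dma_addr_t ring_dma)
{
        dma_free_coherent(dev, ring_size, ring, ring_dma);
}
#endif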