arch/mips/mm/dma-default.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>
/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
        return !plat_device_is_coherent(dev) &&
               (current_cpu_data.cputype == CPU_R10000 ||
                current_cpu_data.cputype == CPU_R12000);
}
void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);
void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);

                if (!plat_device_is_coherent(dev)) {
                        dma_cache_wback_inv((unsigned long) ret, size);
                        ret = UNCAC_ADDR(ret);
                }
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        unsigned long addr = (unsigned long) vaddr;

        if (!plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);
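/*
 * Illustrative sketch (hypothetical driver code, not compiled here):
 * how a driver would typically pair the coherent allocator and free
 * routines above.  The device pointer 'pdev' and the buffer size are
 * assumptions for the example only.
 *
 *      dma_addr_t handle;
 *      void *buf;
 *
 *      buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *      if (buf == NULL)
 *              return -ENOMEM;
 *      // ... CPU and device may both access 'buf'; no explicit syncs needed
 *      dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, handle);
 */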
/*
 * Apply the cache maintenance needed for a transfer in the given
 * direction: write back dirty lines before the device reads memory,
 * invalidate stale lines before the CPU reads what the device wrote.
 */
static inline void __dma_sync(unsigned long addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }
}
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
        enum dma_data_direction direction)
{
        unsigned long addr = (unsigned long) ptr;

        if (!plat_device_is_coherent(dev))
                __dma_sync(addr, size, direction);

        return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        enum dma_data_direction direction)
{
        if (cpu_is_noncoherent_r10000(dev))
                __dma_sync(plat_dma_addr_to_phys(dma_addr) + PAGE_OFFSET, size,
                           direction);

        plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);
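/*
 * Illustrative sketch (hypothetical, not compiled here): a streaming
 * mapping of a CPU-filled buffer that the device will read.  'pdev',
 * 'buf' and 'len' are assumptions for the example only.
 *
 *      dma_addr_t bus;
 *
 *      bus = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(bus))
 *              return -EIO;
 *      // ... point the device at 'bus' and run the transfer ...
 *      dma_unmap_single(&pdev->dev, bus, len, DMA_TO_DEVICE);
 */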
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                unsigned long addr;

                addr = (unsigned long) page_address(sg->page);
                if (!plat_device_is_coherent(dev) && addr)
                        __dma_sync(addr + sg->offset, sg->length, direction);
                sg->dma_address = plat_map_dma_mem_page(dev, sg->page) +
                                  sg->offset;
        }

        return nents;
}

EXPORT_SYMBOL(dma_map_sg);
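/*
 * Illustrative sketch (hypothetical, not compiled here): mapping a
 * scatterlist and programming a descriptor ring from the result.
 * 'pdev', 'sglist', 'nents' and program_descriptor() are assumptions
 * for the example only.
 *
 *      int i, count;
 *
 *      count = dma_map_sg(&pdev->dev, sglist, nents, DMA_FROM_DEVICE);
 *      for (i = 0; i < count; i++)
 *              program_descriptor(i, sg_dma_address(&sglist[i]),
 *                                 sg_dma_len(&sglist[i]));
 *      // ... after the transfer completes ...
 *      dma_unmap_sg(&pdev->dev, sglist, nents, DMA_FROM_DEVICE);
 */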
dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = (unsigned long) page_address(page) + offset;
                dma_cache_wback_inv(addr, size);
        }

        return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
                unsigned long addr;

                addr = plat_dma_addr_to_phys(dma_address);
                dma_cache_wback_inv(addr, size);
        }

        plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        enum dma_data_direction direction)
{
        unsigned long addr;
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nhwentries; i++, sg++) {
                if (!plat_device_is_coherent(dev) &&
                    direction != DMA_TO_DEVICE) {
                        addr = (unsigned long) page_address(sg->page);
                        if (addr)
                                __dma_sync(addr + sg->offset, sg->length,
                                           direction);
                }
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_unmap_sg);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_device);
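/*
 * Illustrative sketch (hypothetical, not compiled here): handing a
 * long-lived streaming buffer back and forth between CPU and device
 * with the sync routines above.  'pdev', 'bus' and 'len' are
 * assumptions for the example only.
 *
 *      dma_sync_single_for_cpu(&pdev->dev, bus, len, DMA_FROM_DEVICE);
 *      // ... CPU may now read what the device wrote into the buffer ...
 *      dma_sync_single_for_device(&pdev->dev, bus, len, DMA_FROM_DEVICE);
 *      // ... the device owns the buffer again ...
 */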
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (cpu_is_noncoherent_r10000(dev))
                        __dma_sync((unsigned long)page_address(sg->page),
                                   sg->length, direction);
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (!plat_device_is_coherent(dev))
                        __dma_sync((unsigned long)page_address(sg->page),
                                   sg->length, direction);
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_device);
int dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
        /*
         * we fall back to GFP_DMA when the mask isn't all 1s,
         * so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA..
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

EXPORT_SYMBOL(dma_supported);
int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
        return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev))
                dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);