[linux-2.6/linux-loongson.git] arch/mips/mm/dma-default.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>
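
/*
 * Overview: this file implements the generic DMA mapping API for MIPS.
 * Platform-specific decisions - whether a given device is cache-coherent
 * and how kernel virtual addresses translate to device (bus) addresses -
 * are delegated to the plat_*() hooks pulled in from <dma-coherence.h>.
 */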

static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
        unsigned long addr = plat_dma_addr_to_phys(dma_addr);

        return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
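
/*
 * The R10000 and R12000 are special cases: they execute memory accesses
 * speculatively, so cache lines covering an in-flight DMA buffer can be
 * refilled (or dirtied) behind the device's back.  On non-coherent
 * platforms these CPUs therefore need an extra cache sync when the buffer
 * is handed back to the CPU (unmap / sync_for_cpu), not just at map time.
 */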
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
        return !plat_device_is_coherent(dev) &&
               (current_cpu_type() == CPU_R10000 ||
                current_cpu_type() == CPU_R12000);
}

static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
        if (dev == NULL)
                gfp |= __GFP_DMA;
        else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
                gfp |= __GFP_DMA;
        else
#endif
#ifdef CONFIG_ZONE_DMA32
             if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
                gfp |= __GFP_DMA32;
        else
#endif
                ;

        /* Don't invoke OOM killer */
        gfp |= __GFP_NORETRY;

        return gfp;
}
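
/*
 * dma_alloc_noncoherent() hands back ordinary cached memory; callers are
 * expected to bracket device accesses with dma_cache_sync().
 * dma_alloc_coherent() additionally writes back and invalidates the new
 * buffer and, on platforms without hardware coherence, returns an uncached
 * (UNCAC) alias so that CPU and device always see the same data.
 */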

void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        gfp = massage_gfp_flags(dev, gfp);

        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        gfp = massage_gfp_flags(dev, gfp);

        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);

                if (!plat_device_is_coherent(dev)) {
                        dma_cache_wback_inv((unsigned long) ret, size);
                        ret = UNCAC_ADDR(ret);
                }
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        plat_unmap_dma_mem(dma_handle);
        free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        unsigned long addr = (unsigned long) vaddr;

        plat_unmap_dma_mem(dma_handle);

        if (!plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);
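
/*
 * Illustrative (not from this file) coherent-allocation usage in a driver,
 * assuming a hypothetical device "dev" and buffer of "size" bytes:
 *
 *      dma_addr_t handle;
 *      void *cpu_addr;
 *
 *      cpu_addr = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *      if (!cpu_addr)
 *              return -ENOMEM;
 *      ... program the device with "handle", access the buffer via "cpu_addr" ...
 *      dma_free_coherent(dev, size, cpu_addr, handle);
 */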

static inline void __dma_sync(unsigned long addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }
}
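
/*
 * Streaming mappings on non-coherent platforms: at map time the buffer is
 * written back and/or invalidated according to the transfer direction
 * (writeback for DMA_TO_DEVICE so the device sees the CPU's data, invalidate
 * for DMA_FROM_DEVICE so the CPU won't read stale cache lines, both for
 * DMA_BIDIRECTIONAL).  At unmap time only the speculating R10000/R12000
 * CPUs get another sync, the assumption being that other CPUs will not have
 * refilled those lines while the device owned the buffer.
 */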

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
        enum dma_data_direction direction)
{
        unsigned long addr = (unsigned long) ptr;

        if (!plat_device_is_coherent(dev))
                __dma_sync(addr, size, direction);

        return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        enum dma_data_direction direction)
{
        if (cpu_is_noncoherent_r10000(dev))
                __dma_sync(dma_addr_to_virt(dma_addr), size,
                           direction);

        plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);
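
/*
 * Illustrative (not from this file) streaming-DMA usage for a transmit
 * buffer, assuming a hypothetical driver buffer "buf" of "len" bytes:
 *
 *      dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      ... hand "bus" to the device and wait for the transfer to finish ...
 *      dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 *
 * The CPU must not touch "buf" between map and unmap unless it first calls
 * dma_sync_single_for_cpu() and, before the device resumes,
 * dma_sync_single_for_device().
 */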

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                unsigned long addr;

                addr = (unsigned long) sg_virt(sg);
                if (!plat_device_is_coherent(dev) && addr)
                        __dma_sync(addr, sg->length, direction);
                sg->dma_address = plat_map_dma_mem(dev,
                                                   (void *)addr, sg->length);
        }

        return nents;
}

EXPORT_SYMBOL(dma_map_sg);
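
/*
 * Each scatterlist entry is synced and translated individually; there is
 * no IOMMU here, so entries are never merged and the call always returns
 * the original nents.
 */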

dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = (unsigned long) page_address(page) + offset;
                dma_cache_wback_inv(addr, size);
        }

        return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
                unsigned long addr;

                addr = plat_dma_addr_to_phys(dma_address);
                dma_cache_wback_inv(addr, size);
        }

        plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        enum dma_data_direction direction)
{
        unsigned long addr;
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nhwentries; i++, sg++) {
                if (!plat_device_is_coherent(dev) &&
                    direction != DMA_TO_DEVICE) {
                        addr = (unsigned long) sg_virt(sg);
                        if (addr)
                                __dma_sync(addr, sg->length, direction);
                }
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_unmap_sg);
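
/*
 * The dma_sync_*_for_cpu() and dma_sync_*_for_device() calls below transfer
 * "ownership" of a streaming buffer: for_cpu makes device-written data
 * visible to the CPU before the CPU reads it, for_device flushes CPU writes
 * out to memory before the device is allowed to access the buffer again.
 */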

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (cpu_is_noncoherent_r10000(dev))
                        __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (!plat_device_is_coherent(dev))
                        __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_device);
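
/*
 * Note that the scatterlist paths use page_address()/sg_virt(), so they
 * assume the pages live in lowmem; page_address() returns NULL for
 * unmapped highmem pages, which is why dma_map_sg() skips the sync when
 * the virtual address is zero.
 */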

int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
        /*
         * we fall back to GFP_DMA when the mask isn't all 1s,
         * so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA.
         */
        if (mask < DMA_BIT_MASK(24))
                return 0;

        return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
        return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev))
                __dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);