[tomato.git] release/src-rt-6.x/linux/linux-2.6/arch/mips/mm/dma-default.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

#ifdef CONFIG_HIGHMEM
#include <linux/highmem.h>
#endif
static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dma_addr);

	return (unsigned long)phys_to_virt(addr);
}
/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
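/*
 * Note (added for clarity): on 32-bit MIPS the CAC_ADDR()/UNCAC_ADDR()
 * macros used below typically translate between the cached KSEG0 and
 * uncached KSEG1 aliases of the same physical memory; the exact mapping
 * is platform dependent and comes from the architecture headers, not
 * from this file.
 */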
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	/* A single cputype can only match one value, so this must be "or". */
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_data.cputype == CPU_R10000 ||
		current_cpu_data.cputype == CPU_R12000);
}
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < DMA_64BIT_MASK))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);
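/*
 * dma_alloc_coherent() below performs the same allocation but, on platforms
 * that are not hardware coherent, additionally writes back and invalidates
 * the new buffer from the caches and returns an uncached alias of it
 * (UNCAC_ADDR), so CPU and device always observe the same data.
 */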
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < DMA_64BIT_MASK))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
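/*
 * Illustrative driver-side usage sketch (not part of the original file),
 * using the hypothetical names "pdev", "NR_DESC" and "struct my_desc":
 *
 *	dma_addr_t ring_dma;
 *	struct my_desc *ring;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, NR_DESC * sizeof(*ring),
 *				  &ring_dma, GFP_KERNEL);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	... program ring_dma into the device, access "ring" from the CPU ...
 *	dma_free_coherent(&pdev->dev, NR_DESC * sizeof(*ring), ring, ring_dma);
 */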
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dma_handle);
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);
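/*
 * dma_free_coherent() must undo the UNCAC_ADDR() step from the allocator:
 * on non-coherent platforms the pointer handed out by dma_alloc_coherent()
 * is an uncached alias, so it is converted back with CAC_ADDR() before the
 * pages are returned to the page allocator.
 */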
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	plat_unmap_dma_mem(dma_handle);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);
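/*
 * __dma_sync() maps the DMA API transfer direction onto the matching MIPS
 * cache operation: writeback for DMA_TO_DEVICE, invalidate for
 * DMA_FROM_DEVICE, and writeback-invalidate for DMA_BIDIRECTIONAL.
 * DMA_NONE is a caller bug and hits BUG().
 */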
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}
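/*
 * Streaming mappings: dma_map_single()/dma_map_sg() perform the cache
 * maintenance required before the device touches memory, then translate
 * the kernel virtual address to a bus address via plat_map_dma_mem().
 * The corresponding unmap routines only need CPU-side maintenance on
 * non-coherent R10000/R12000 class CPUs (see cpu_is_noncoherent_r10000()).
 */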
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dma_addr), size,
		           direction);

	plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);
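/*
 * With CONFIG_HIGHMEM a scatterlist entry may reference pages that have no
 * permanent kernel mapping.  dma_sync_high() walks such an entry page by
 * page, temporarily maps each page with kmap(), runs __dma_sync() on the
 * covered range and unmaps it again.
 */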
#ifdef CONFIG_HIGHMEM
static inline void
dma_sync_high(struct scatterlist *sg, enum dma_data_direction direction)
{
	int i;
	unsigned int nr_pages;
	struct page *tmp_page;
	unsigned int offset;
	unsigned int length, length_remain;
	unsigned long addr;

	nr_pages = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	length_remain = sg->length;

	for (i = 0, offset = sg->offset; i < nr_pages; i++, offset = 0) {
		tmp_page = nth_page(sg->page, i);

		length = (length_remain > (PAGE_SIZE - offset)) ?
			 (PAGE_SIZE - offset) : length_remain;

		addr = (unsigned long)kmap(tmp_page);
		__dma_sync(addr + offset, length, direction);
		kunmap(tmp_page);
		length_remain -= length;
	}
}
#endif /* CONFIG_HIGHMEM */
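/*
 * dma_map_sg() treats each scatterlist entry like dma_map_single(): sync
 * the CPU caches when the platform is not coherent, then record the bus
 * address in sg->dma_address.  Highmem entries are synced through
 * dma_sync_high() and mapped by their physical address.
 */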
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

#ifdef CONFIG_HIGHMEM
		if (PageHighMem(sg->page)) {
			dma_sync_high(sg, direction);
			sg->dma_address = (page_to_pfn(sg->page) << PAGE_SHIFT) + sg->offset;
		}
		else
#endif
		{
			addr = (unsigned long) page_address(sg->page);
			if (!plat_device_is_coherent(dev) && addr)
				__dma_sync(addr + sg->offset, sg->length, direction);
			sg->dma_address = plat_map_dma_mem(dev,
			                                   (void *)(addr + sg->offset),
			                                   sg->length);
		}
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		dma_cache_wback_inv(addr, size);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_address);
		dma_cache_wback_inv(addr, size);
	}

	plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
#ifdef CONFIG_HIGHMEM
			if (PageHighMem(sg->page)) {
				dma_sync_high(sg, direction);
			}
			else
#endif
			{
				addr = (unsigned long) page_address(sg->page);
				if (addr)
					__dma_sync(addr + sg->offset, sg->length,
					           direction);
			}
		}
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);
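/*
 * The *_for_cpu() sync helpers only need to act on non-coherent
 * R10000/R12000 class CPUs, which may have speculatively refilled cache
 * lines while the device owned the buffer.  The *_for_device() helpers
 * act on every non-coherent platform before handing the buffer (back) to
 * the device.
 */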
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);
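/*
 * The scatterlist variants below apply the same for_cpu/for_device policy
 * to each entry, using page_address() of the entry's page as the virtual
 * address for the cache operation.
 */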
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg->page),
			           sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg->page),
			           sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);
int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);
int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA..
	 */
	if (mask < DMA_24BIT_MASK)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);
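/*
 * dma_is_consistent() and dma_cache_sync() simply defer to the platform:
 * on a hardware-coherent platform no explicit cache maintenance is ever
 * needed, otherwise dma_cache_sync() writes back and invalidates the
 * given range.
 */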
int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);