/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>
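
/*
 * Translate a device DMA address back to the kernel virtual address of
 * the buffer it maps, via the platform's dma-to-physical hook.  Used
 * below whenever a cache operation must be performed on a buffer for
 * which only the bus address is at hand.
 */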
static inline unsigned long dma_addr_to_virt(struct device *dev,
	dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);

	return (unsigned long)phys_to_virt(addr);
}
/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
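
/*
 * The R10000 and R12000 execute speculatively: they can fetch a cache
 * line from memory at any time, even while a device is writing that
 * memory by DMA.  On such CPUs a noncoherent buffer therefore needs an
 * extra cache invalidation after the DMA completes, not just the
 * maintenance done at map time.
 */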
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
		current_cpu_type() == CPU_R12000);
}
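
/*
 * Fold the device's coherent DMA mask into the allocation flags: strip
 * whatever zone modifiers the caller passed and pick __GFP_DMA or
 * __GFP_DMA32 so that __get_free_pages() returns memory the device can
 * actually address.
 */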
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp;
}
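
/*
 * Allocate memory the device can reach but that remains mapped cached
 * for the CPU; the caller is responsible for keeping the two views
 * consistent, e.g. with dma_cache_sync() below.
 */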
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);
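
/*
 * As above, but hand back an uncached mapping when the platform does
 * not maintain coherency in hardware: the freshly allocated pages are
 * written back and invalidated once, then the UNCAC_ADDR() alias is
 * returned so all further CPU accesses bypass the caches.
 */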
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);
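
/*
 * Perform the cache maintenance a noncoherent device transfer needs:
 * write dirty lines back before the device reads (DMA_TO_DEVICE),
 * invalidate stale lines before the CPU reads what the device wrote
 * (DMA_FROM_DEVICE), and do both for DMA_BIDIRECTIONAL.
 */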
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);
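
/*
 * Unmapping only needs cache work on the speculative R10000/R12000;
 * other noncoherent CPUs cannot have refilled the lines while the
 * device owned the buffer, so the sync done at map time suffices.
 */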
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dev, dma_addr), size,
			   direction);

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

EXPORT_SYMBOL(dma_unmap_single);
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
						   (void *)addr, sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);
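
/*
 * Note that the page_address() use below is only meaningful for lowmem
 * pages; this legacy implementation assumes the page being mapped has a
 * permanent kernel mapping.
 */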
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		__dma_sync(addr, size, direction);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);
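
/*
 * On unmap, cache lines only need invalidating if the device may have
 * written to memory, hence the DMA_TO_DEVICE direction is skipped.
 */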
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);
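
/*
 * The sync_for_cpu operations only matter on CPUs that speculate;
 * everywhere else the CPU cannot have pulled stale data into its
 * caches between map time and now, so they degenerate to no-ops.
 */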
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}

EXPORT_SYMBOL(dma_mapping_error);
int dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

EXPORT_SYMBOL(dma_supported);
int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);
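
/*
 * Explicit cache maintenance for buffers obtained from
 * dma_alloc_noncoherent(); drivers call this around CPU accesses to
 * such memory.
 */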
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);