/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>
static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dma_addr);

	return (unsigned long)phys_to_virt(addr);
}
/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
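/*
 * For illustration (assuming a typical 32-bit non-coherent platform,
 * which this file does not require): the same physical memory is
 * reachable through a cached window (KSEG0) and an uncached one
 * (KSEG1), and UNCAC_ADDR()/CAC_ADDR() convert between the two views.
 * dma_alloc_coherent() below returns the uncached view on such systems.
 */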
/*
 * The R10000 and R12000 execute speculatively, so cache lines can be
 * refilled behind the back of an ongoing DMA transfer; on non-coherent
 * platforms these CPUs therefore need an extra cache sync when a
 * buffer is handed back from a device.
 */
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_data.cputype == CPU_R10000 ||
	        current_cpu_data.cputype == CPU_R12000);
}
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < DMA_64BIT_MASK))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < DMA_64BIT_MASK))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
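/*
 * Typical use, for illustration only ("mydev" and the buffer size are
 * made up):
 *
 *	dma_addr_t handle;
 *	void *buf;
 *
 *	buf = dma_alloc_coherent(&mydev->dev, 4096, &handle, GFP_KERNEL);
 *	if (buf) {
 *		(hand "handle" to the device, access the buffer via "buf")
 *		dma_free_coherent(&mydev->dev, 4096, buf, handle);
 *	}
 */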
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dma_handle);
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	plat_unmap_dma_mem(dma_handle);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);
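/*
 * Maintain the CPU cache around a DMA transfer: write dirty lines back
 * before the device reads the buffer (DMA_TO_DEVICE), invalidate stale
 * lines before the CPU reads what the device wrote (DMA_FROM_DEVICE),
 * and do both for DMA_BIDIRECTIONAL.
 */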
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dma_addr), size,
		           direction);

	plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);
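/*
 * Streaming use, for illustration only ("mydev", "buf" and "len" are
 * made up):
 *
 *	dma_addr_t h;
 *
 *	h = dma_map_single(&mydev->dev, buf, len, DMA_FROM_DEVICE);
 *	(let the device fill the buffer)
 *	dma_unmap_single(&mydev->dev, h, len, DMA_FROM_DEVICE);
 *
 * The CPU must not touch the buffer between map and unmap.
 */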
#ifdef CONFIG_HIGHMEM
/*
 * Sync a scatterlist entry that lives in highmem: such pages have no
 * permanent kernel mapping, so walk the entry page by page, creating a
 * temporary kmap() mapping just long enough to run __dma_sync() on it.
 */
static void
dma_sync_high(struct scatterlist *sg, enum dma_data_direction direction)
{
	unsigned long addr;
	unsigned int nr_pages;
	unsigned int offset;
	unsigned int length, length_remain;
	struct page *tmp_page;
	int i;

	nr_pages = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	length_remain = sg->length;

	for (i = 0, offset = sg->offset; i < nr_pages; i++, offset = 0) {
		tmp_page = nth_page(sg->page, i);

		/* Sync at most up to the end of the current page.  */
		length = (length_remain > (PAGE_SIZE - offset)) ?
			 (PAGE_SIZE - offset) : length_remain;

		addr = (unsigned long) kmap(tmp_page);
		__dma_sync(addr + offset, length, direction);
		kunmap(tmp_page);
		length_remain -= length;
	}
}
#endif /* CONFIG_HIGHMEM */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

#ifdef CONFIG_HIGHMEM
		if (PageHighMem(sg->page)) {
			dma_sync_high(sg, direction);
			sg->dma_address = (page_to_pfn(sg->page) << PAGE_SHIFT) +
				sg->offset;
			continue;
		}
#endif
		addr = (unsigned long) page_address(sg->page);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr + sg->offset, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
						   (void *)(addr + sg->offset),
						   sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);
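/*
 * Scatter-gather use, for illustration only ("sgtab" and "n" are made
 * up):
 *
 *	int mapped = dma_map_sg(&mydev->dev, sgtab, n, DMA_TO_DEVICE);
 *
 *	(program the device with sg_dma_address()/sg_dma_len() of each
 *	 of the "mapped" entries, then, once the transfer is done:)
 *
 *	dma_unmap_sg(&mydev->dev, sgtab, n, DMA_TO_DEVICE);
 */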
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		dma_cache_wback_inv(addr, size);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_address);
		dma_cache_wback_inv(addr, size);
	}

	plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
#ifdef CONFIG_HIGHMEM
			if (PageHighMem(sg->page)) {
				dma_sync_high(sg, direction);
			} else
#endif
			{
				addr = (unsigned long) page_address(sg->page);
				if (addr)
					__dma_sync(addr + sg->offset, sg->length,
					           direction);
			}
		}
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg->page),
			           sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg->page),
			           sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);
int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);
int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA..
	 */
	if (mask < DMA_24BIT_MASK)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);
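/*
 * Example: a device that can only address 24 bits passes DMA_24BIT_MASK
 * (0x00ffffff) and is accepted, since GFP_DMA covers that range; any
 * smaller mask is refused because no tighter allocation pool exists.
 */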
int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);