/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/cacheflush.h>

#define UNCAC_ADDR(addr)	((void *)((unsigned long)(addr) | 0x80000000))
#define CAC_ADDR(addr)		((void *)((unsigned long)(addr) & ~0x80000000))
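
/*
 * On this platform the uncached view of a kernel address appears to be
 * selected by the top address bit: UNCAC_ADDR() yields the alias that
 * bypasses the caches, and CAC_ADDR() recovers the cached alias that
 * free_pages() expects.
 */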

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
	if (ret) {
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
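
/*
 * Illustrative usage sketch (not built, hence #if 0): a driver wanting
 * a buffer that both CPU and device can touch without explicit cache
 * maintenance allocates it coherent and programs the device with the
 * returned bus address.  The function name, device and size below are
 * hypothetical, not part of this file.
 */
#if 0
static int example_setup_ring(struct device *dev)
{
	dma_addr_t ring_bus;
	void *ring;

	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_bus, GFP_KERNEL);
	if (ring == NULL)
		return -ENOMEM;

	/* ... hand ring_bus to the hardware, use ring from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, ring, ring_bus);
	return 0;
}
#endif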

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	addr = (unsigned long) CAC_ADDR(addr);
	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	__dma_sync(addr, size, direction);

	return virt_to_phys(ptr);
}

EXPORT_SYMBOL(dma_map_single);
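
/*
 * Illustrative usage sketch (not built, hence #if 0): the streaming
 * API maps an existing cached buffer for a single transfer;
 * __dma_sync() above does the writeback/invalidate, and the buffer
 * must be left alone by the CPU until it is unmapped.  The names
 * below are hypothetical.
 */
#if 0
static void example_transmit(struct device *dev, void *buf, size_t len)
{
	dma_addr_t bus;

	bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	/* ... point the device at bus and wait for the transfer ... */
	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
}
#endif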

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr;

	addr = dma_addr + PAGE_OFFSET;

	//__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_unmap_single);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) page_address(sg->page);
		if (addr)
			__dma_sync(addr + sg->offset, sg->length, direction);
		sg->dma_address = (dma_addr_t)page_to_phys(sg->page)
		                  + sg->offset;
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);
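
/*
 * Note that dma_map_sg() above derives each entry's bus address
 * straight from page_to_phys(), i.e. this implementation assumes the
 * device sees physical addresses 1:1, with no IOMMU in between.
 */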

dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = (unsigned long) page_address(page) + offset;
	dma_cache_wback_inv(addr, size);

	return page_to_phys(page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = dma_address + PAGE_OFFSET;
		dma_cache_wback_inv(addr, size);
	}
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
		return;

	for (i = 0; i < nhwentries; i++, sg++) {
		addr = (unsigned long) page_address(sg->page);
		if (addr)
			__dma_sync(addr + sg->offset, sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_device);
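
/*
 * Illustrative usage sketch (not built, hence #if 0): when the CPU
 * needs to look at a still-mapped streaming buffer, it brackets the
 * access with the two sync calls above rather than unmapping and
 * remapping.  The function and parameter names are hypothetical.
 */
#if 0
static u32 example_read_status(struct device *dev, dma_addr_t bus,
	void *cpu_addr, size_t len)
{
	u32 status;

	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
	status = *(u32 *) cpu_addr;	/* CPU may read safely here */
	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);

	return status;
}
#endif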

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + offset + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + offset + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
		           sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
		           sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA..
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(dma_addr_t dma_addr)
{
	return 1;
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		return;

	dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);

/* The DAC routines are a PCIism.. */

#ifdef CONFIG_PCI

#include <linux/pci.h>
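
/*
 * DAC here is PCI's Dual Address Cycle, which lets a 32-bit PCI bus
 * carry 64-bit bus addresses.  The helpers below convert between
 * struct page / offset pairs and dma64_addr_t values, again assuming
 * bus addresses equal physical addresses.
 */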

dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
	struct page *page, unsigned long offset, int direction)
{
	return (dma64_addr_t)page_to_phys(page) + offset;
}

EXPORT_SYMBOL(pci_dac_page_to_dma);

struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return mem_map + (dma_addr >> PAGE_SHIFT);
}

EXPORT_SYMBOL(pci_dac_dma_to_page);

unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return dma_addr & ~PAGE_MASK;
}

EXPORT_SYMBOL(pci_dac_dma_to_offset);

void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);

void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);

#endif /* CONFIG_PCI */