MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] arch/mips/mm/dma-noncoherent.c
blob 9895e32b0fceca7d37899a096f62cd18d78f6c42
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>

#include <asm/cache.h>
#include <asm/io.h>
/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, int gfp)
{
        void *ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = virt_to_phys(ret);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);
void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, int gfp)
{
        void *ret;

        ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
        if (ret) {
                dma_cache_wback_inv((unsigned long) ret, size);
                ret = UNCAC_ADDR(ret);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        unsigned long addr = (unsigned long) vaddr;

        addr = CAC_ADDR(addr);
        free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);
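/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * that needs a descriptor ring visible to both CPU and device without
 * explicit cache maintenance would pair the two calls above roughly
 * like this; "dev" and the PAGE_SIZE allocation are placeholders.
 *
 *      dma_addr_t ring_dma;
 *      void *ring;
 *
 *      ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *      if (ring == NULL)
 *              return -ENOMEM;
 *      ... hand ring_dma to the device, access ring from the CPU ...
 *      dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */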
static inline void __dma_sync(unsigned long addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }
}
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
        enum dma_data_direction direction)
{
        unsigned long addr = (unsigned long) ptr;

        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }

        return virt_to_phys(ptr);
}

EXPORT_SYMBOL(dma_map_single);
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        enum dma_data_direction direction)
{
        unsigned long addr;
        addr = dma_addr + PAGE_OFFSET;

        /*
         * The cache maintenance is commented out in this MOXA tree, so
         * the unmap is effectively a no-op apart from the direction check.
         */
        switch (direction) {
        case DMA_TO_DEVICE:
                //dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                //dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                //dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }
}

EXPORT_SYMBOL(dma_unmap_single);
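/*
 * Usage sketch (illustrative, not part of the original file): streaming
 * mappings bracket a single DMA transfer; "buf" and "len" are
 * placeholders. On this platform the map call does the cache
 * maintenance and, as seen above, the unmap is a no-op.
 *
 *      dma_addr_t handle;
 *
 *      handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      ... start the transfer, wait for completion ...
 *      dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */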
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                unsigned long addr;

                addr = (unsigned long) page_address(sg->page);
                if (addr)
                        __dma_sync(addr + sg->offset, sg->length, direction);
                sg->dma_address = (dma_addr_t)
                        (page_to_phys(sg->page) + sg->offset);
        }

        return nents;
}

EXPORT_SYMBOL(dma_map_sg);
dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        unsigned long addr;

        BUG_ON(direction == DMA_NONE);

        addr = (unsigned long) page_address(page) + offset;
        dma_cache_wback_inv(addr, size);

        return page_to_phys(page) + offset;
}

EXPORT_SYMBOL(dma_map_page);
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (direction != DMA_TO_DEVICE) {
                unsigned long addr;

                addr = dma_address + PAGE_OFFSET;
                dma_cache_wback_inv(addr, size);
        }
}

EXPORT_SYMBOL(dma_unmap_page);
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        enum dma_data_direction direction)
{
        unsigned long addr;
        int i;

        BUG_ON(direction == DMA_NONE);

        if (direction == DMA_TO_DEVICE)
                return;

        for (i = 0; i < nhwentries; i++, sg++) {
                addr = (unsigned long) page_address(sg->page);
                if (!addr)
                        continue;
                dma_cache_wback_inv(addr + sg->offset, sg->length);
        }
}

EXPORT_SYMBOL(dma_unmap_sg);
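/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * with a scatterlist of "nents" entries maps the whole list in one call
 * and programs one hardware descriptor per returned entry.
 * setup_descriptor() is a hypothetical stand-in for device-specific
 * programming.
 *
 *      int i, count;
 *
 *      count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *      for (i = 0; i < count; i++)
 *              setup_descriptor(sglist[i].dma_address, sglist[i].length);
 *      ... run the transfer ...
 *      dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */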
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        unsigned long addr;

        BUG_ON(direction == DMA_NONE);

        addr = dma_handle + PAGE_OFFSET;
        __dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        unsigned long addr;

        BUG_ON(direction == DMA_NONE);

        addr = dma_handle + PAGE_OFFSET;
        __dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        unsigned long addr;

        BUG_ON(direction == DMA_NONE);

        addr = dma_handle + offset + PAGE_OFFSET;
        __dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        unsigned long addr;

        BUG_ON(direction == DMA_NONE);

        addr = dma_handle + offset + PAGE_OFFSET;
        __dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);
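/*
 * Usage sketch (illustrative, not part of the original file): if the CPU
 * must look at a still-mapped streaming buffer, it brackets the access
 * with the sync pair instead of unmapping and remapping; "handle" and
 * "len" are placeholders from an earlier dma_map_single().
 *
 *      dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *      ... CPU inspects the received data ...
 *      dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */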
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++)
                __dma_sync((unsigned long)page_address(sg->page),
                           sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++)
                __dma_sync((unsigned long)page_address(sg->page),
                           sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_device);
int dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}

EXPORT_SYMBOL(dma_mapping_error);
int dma_supported(struct device *dev, u64 mask)
{
        /*
         * we fall back to GFP_DMA when the mask isn't all 1s,
         * so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA.
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

EXPORT_SYMBOL(dma_supported);
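/*
 * Usage sketch (illustrative, not part of the original file): drivers
 * normally don't call dma_supported() directly; they go through
 * dma_set_mask(), which consults it. 0xffffffff is the usual 32-bit
 * mask.
 *
 *      if (dma_set_mask(dev, 0xffffffff))
 *              return -EIO;    /. device cannot be supported ./
 */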
int dma_is_consistent(dma_addr_t dma_addr)
{
        return 1;
}

EXPORT_SYMBOL(dma_is_consistent);
void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
{
        if (direction == DMA_NONE)
                return;

        dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);
/* The DAC routines are a PCIism.. */

#ifdef CONFIG_PCI

#include <linux/pci.h>
dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
        struct page *page, unsigned long offset, int direction)
{
        return (dma64_addr_t)page_to_phys(page) + offset;
}

EXPORT_SYMBOL(pci_dac_page_to_dma);
struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
        dma64_addr_t dma_addr)
{
        return mem_map + (dma_addr >> PAGE_SHIFT);
}

EXPORT_SYMBOL(pci_dac_dma_to_page);
unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
        dma64_addr_t dma_addr)
{
        return dma_addr & ~PAGE_MASK;
}

EXPORT_SYMBOL(pci_dac_dma_to_offset);
void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
        dma64_addr_t dma_addr, size_t len, int direction)
{
        BUG_ON(direction == PCI_DMA_NONE);

        dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);
void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
        dma64_addr_t dma_addr, size_t len, int direction)
{
        BUG_ON(direction == PCI_DMA_NONE);

        dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);
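/*
 * Usage sketch (illustrative, not part of the original file): DAC
 * (dual-address-cycle, 64-bit) addressing hands a page straight to the
 * device and syncs on completion; "page", "offset" and "len" are
 * placeholders.
 *
 *      dma64_addr_t daddr;
 *
 *      daddr = pci_dac_page_to_dma(pdev, page, offset, PCI_DMA_FROMDEVICE);
 *      ... run the transfer ...
 *      pci_dac_dma_sync_single_for_cpu(pdev, daddr, len, PCI_DMA_FROMDEVICE);
 */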
#endif /* CONFIG_PCI */