/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/export.h>	/* EXPORT_SYMBOL */

#include <asm/addrspace.h>
#include <asm/cacheflush.h>
void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
	/*
	 * No need to sync an uncached area
	 */
	if (PXSEG(vaddr) == P2SEG)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		invalidate_dcache_region(vaddr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_region(vaddr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		flush_dcache_region(vaddr, size);
		break;
	}
}
EXPORT_SYMBOL(dma_cache_sync);
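
/*
 * Illustrative usage sketch (not part of the original file): a driver
 * that lets a device DMA into a cached kernel buffer must invalidate
 * that region before the CPU reads it. The function and buffer names
 * below are hypothetical.
 *
 *	static void example_rx_complete(struct device *dev, void *buf,
 *					size_t len)
 *	{
 *		dma_cache_sync(dev, buf, len, DMA_FROM_DEVICE);
 *		... the CPU may now read fresh data from buf ...
 *	}
 */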
static struct page *__dma_alloc(struct device *dev, size_t size,
				dma_addr_t *handle, gfp_t gfp)
{
	struct page *page, *free, *end;
	int order;
	/* Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them. The real problem is that this flag probably
	 * should be 0 on AVR32 as it is not supported on this
	 * platform--see CONFIG_HUGETLB_PAGE. */
	gfp &= ~(__GFP_COMP);
	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;
	split_page(page, order);
	/*
	 * When accessing physical memory with valid cache data, we
	 * get a cache hit even if the virtual memory region is marked
	 * as uncached.
	 *
	 * Since the memory is newly allocated, there is no point in
	 * doing a writeback. If the previous owner cares, he should
	 * have flushed the cache before releasing the memory.
	 */
	invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);
	*handle = page_to_bus(page);
	free = page + (size >> PAGE_SHIFT);
	end = page + (1 << order);
	/*
	 * Free any unused pages
	 */
	while (free < end) {
		__free_page(free);
		free++;
	}

	return page;
}
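
/*
 * Worked example of the tail-freeing above (illustrative numbers): a
 * request for 20 KiB is page-aligned to 20480 bytes, i.e. five 4 KiB
 * pages, but get_order() rounds the allocation up to order 3, i.e.
 * eight pages. free then points at page + 5 and end at page + 8, so
 * the loop returns the three surplus pages to the allocator while the
 * caller keeps a contiguous five-page block.
 */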
static void __dma_free(struct device *dev, size_t size,
		       struct page *page, dma_addr_t handle)
{
	struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

	while (page < end)
		__free_page(page++);
}
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	void *ret = NULL;

	page = __dma_alloc(dev, size, handle, gfp);
	if (page)
		ret = phys_to_uncached(page_to_phys(page));

	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_coherent(struct device *dev, size_t size,
		       void *cpu_addr, dma_addr_t handle)
{
	void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
	struct page *page;

	pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
		 cpu_addr, (unsigned long)handle, (unsigned)size);
	BUG_ON(!virt_addr_valid(addr));
	page = virt_to_page(addr);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_coherent);
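
/*
 * Illustrative pairing sketch (not part of the original file): a driver
 * would typically allocate a coherent buffer, such as a descriptor
 * ring, at probe time and release it on remove. RING_BYTES and the
 * variable names below are hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... program ring_dma into the device, access ring from the CPU ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */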
void *dma_alloc_writecombine(struct device *dev, size_t size,
			     dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	dma_addr_t phys;

	page = __dma_alloc(dev, size, handle, gfp);
	if (!page)
		return NULL;

	phys = page_to_phys(page);
	*handle = phys;

	/* Now, map the page into P3 with write-combining turned on */
	return __ioremap(phys, size, _PAGE_BUFFER);
}
EXPORT_SYMBOL(dma_alloc_writecombine);
void dma_free_writecombine(struct device *dev, size_t size,
			   void *cpu_addr, dma_addr_t handle)
{
	struct page *page;

	iounmap(cpu_addr);

	page = phys_to_page(handle);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_writecombine);
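
/*
 * Illustrative sketch (not part of the original file): write-combined
 * mappings suit buffers the CPU mostly writes sequentially, a
 * framebuffer being the classic case. FB_BYTES and the variable names
 * below are hypothetical.
 *
 *	dma_addr_t fb_dma;
 *	void *fb;
 *
 *	fb = dma_alloc_writecombine(dev, FB_BYTES, &fb_dma, GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	... hand fb_dma to the LCD controller, render through fb ...
 *	dma_free_writecombine(dev, FB_BYTES, fb, fb_dma);
 */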