/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007  Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>

#define PREALLOC_DMA_DEBUG_ENTRIES	4096
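
/*
 * Preallocate the dma-debug entry pool early (fs_initcall runs before
 * device_initcall), so DMA API debugging can track the first mappings.
 */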
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_init);
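
/*
 * Coherent allocations come straight from the page allocator: the pages
 * are flushed out of the cache, then remapped uncached with
 * ioremap_nocache() so CPU and device see consistent data.  *dma_handle
 * is set to the physical address for use by the device.
 */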
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret, *ret_nocache;
	int order = get_order(size);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	memset(ret, 0, size);
	/*
	 * Pages from the page allocator may have data present in
	 * cache. So flush the cache before using uncached memory.
	 */
	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);

	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);

	*dma_handle = virt_to_phys(ret);

	debug_dma_alloc_coherent(dev, size, *dma_handle, ret_nocache);

	return ret_nocache;
}
EXPORT_SYMBOL(dma_alloc_coherent);
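
/*
 * Undo dma_alloc_coherent(): the underlying pages were split to order 0
 * at allocation time, so they are handed back to the page allocator one
 * at a time before the uncached mapping is dropped.
 */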
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	int order = get_order(size);
	unsigned long pfn = dma_handle >> PAGE_SHIFT;
	int k;

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
	for (k = 0; k < (1 << order); k++)
		__free_pages(pfn_to_page(pfn + k), 0);

	iounmap(vaddr);
}
EXPORT_SYMBOL(dma_free_coherent);
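
/*
 * Writeback/invalidate the cache lines covering a streaming DMA buffer.
 * On SH-3/SH-4 the address is first converted to its P1 (cached,
 * identity-mapped) alias so the range cache ops always operate on a
 * directly mapped address; SH-5 uses the virtual address as-is.
 */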
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
#ifdef CONFIG_CPU_SH5
	void *p1addr = vaddr;
#else
	void *p1addr = (void *)P1SEGADDR((unsigned long)vaddr);
#endif

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(p1addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(p1addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(p1addr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);
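
/*
 * "memchunk.<name>=<size>" command line options override the size of a
 * named memory chunk set up below.  The __setup() handler only claims the
 * "memchunk." prefix so the option isn't reported as unknown; the actual
 * parsing is done by memchunk_cmdline_override().
 */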
static int __init memchunk_setup(char *str)
{
	return 1; /* accept anything that begins with "memchunk." */
}
__setup("memchunk.", memchunk_setup);
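
/*
 * Scan boot_command_line for "memchunk.<name>=<size>" and, when found,
 * overwrite *sizep with the memparse()d value (K/M/G suffixes accepted).
 */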
static void __init memchunk_cmdline_override(char *name, unsigned long *sizep)
{
	char *p = boot_command_line;
	int k = strlen(name);

	while ((p = strstr(p, "memchunk."))) {
		p += 9; /* strlen("memchunk.") */
		if (!strncmp(name, p, k) && p[k] == '=') {
			p += k + 1;
			*sizep = memparse(p, NULL);
			pr_info("%s: forcing memory chunk size to 0x%08lx\n",
				name, *sizep);
			break;
		}
	}
}
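
/*
 * Allocate a coherent memory chunk for a platform device and publish it
 * through the device's last (still empty) resource slot as an
 * IORESOURCE_MEM range.  A "memchunk.<name>=" command line override may
 * resize the chunk, and a size of zero skips the allocation entirely.
 */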
int __init platform_resource_setup_memory(struct platform_device *pdev,
					   char *name, unsigned long memsize)
{
	struct resource *r;
	dma_addr_t dma_handle;
	void *buf;

	r = pdev->resource + pdev->num_resources - 1;
	if (r->flags) {
		pr_warning("%s: unable to find empty space for resource\n",
			name);
		return -EINVAL;
	}

	memchunk_cmdline_override(name, &memsize);
	if (!memsize)
		return 0;

	buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
	if (!buf) {
		pr_warning("%s: unable to allocate memory\n", name);
		return -ENOMEM;
	}

	memset(buf, 0, memsize);

	r->flags = IORESOURCE_MEM;
	r->start = dma_handle;
	r->end = r->start + memsize - 1;
	r->name = name;
	return 0;
}