drivers/base/dma-contiguous.c
/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version.
 */

#define pr_fmt(fmt) "cma: " fmt
#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>

#ifndef SZ_1M
#define SZ_1M (1 << 20)
#endif
struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
};

struct cma *dma_contiguous_default_area;

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif
/*
 * Default global CMA area size can be defined in the kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their
 * system should use the cma= kernel parameter.
 */
static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static long size_cmdline = -1;
static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	return 0;
}
early_param("cma", early_cma);
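
/*
 * Example (illustrative, not part of this file): booting with "cma=64M"
 * on the kernel command line makes memparse() above set size_cmdline to
 * 64 MiB, overriding the Kconfig-selected default that
 * dma_contiguous_reserve() would otherwise use.
 */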
#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static unsigned long __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused unsigned long cma_early_percent_memory(void)
{
	return 0;
}

#endif
/**
 * dma_contiguous_reserve() - reserve area for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	unsigned long selected_size = 0;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 selected_size / SZ_1M);

		dma_declare_contiguous(NULL, selected_size, 0, limit);
	}
}
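
/*
 * Usage sketch (hypothetical, not part of this file): an architecture's
 * early memory setup calls this once memblock is ready. On ARM of this
 * era that would look roughly like:
 *
 *	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
 *
 * arm_dma_limit and arm_lowmem_limit are ARM-specific symbols, named
 * here only for illustration.
 */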
static DEFINE_MUTEX(cma_mutex);

/*
 * Walk the reserved range pageblock by pageblock: verify that every page
 * is valid and belongs to a single zone, then hand each pageblock back to
 * the buddy allocator marked MIGRATE_CMA.
 */
static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
{
	unsigned long pfn = base_pfn;
	unsigned i = count >> pageblock_order;
	struct zone *zone;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;
		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			if (page_zone(pfn_to_page(pfn)) != zone)
				return -EINVAL;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);
	return 0;
}
static __init struct cma *cma_create_area(unsigned long base_pfn,
					  unsigned long count)
{
	int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
	struct cma *cma;
	int ret = -ENOMEM;

	pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);

	cma = kmalloc(sizeof *cma, GFP_KERNEL);
	if (!cma)
		return ERR_PTR(-ENOMEM);

	cma->base_pfn = base_pfn;
	cma->count = count;
	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		goto no_mem;

	ret = cma_activate_area(base_pfn, count);
	if (ret)
		goto error;

	pr_debug("%s: returned %p\n", __func__, (void *)cma);
	return cma;

error:
	kfree(cma->bitmap);
no_mem:
	kfree(cma);
	return ERR_PTR(ret);
}
static struct cma_reserved {
	phys_addr_t start;
	unsigned long size;
	struct device *dev;
} cma_reserved[MAX_CMA_AREAS] __initdata;
static unsigned cma_reserved_count __initdata;

static int __init cma_init_reserved_areas(void)
{
	struct cma_reserved *r = cma_reserved;
	unsigned i = cma_reserved_count;

	pr_debug("%s()\n", __func__);

	for (; i; --i, ++r) {
		struct cma *cma;
		cma = cma_create_area(PFN_DOWN(r->start),
				      r->size >> PAGE_SHIFT);
		if (!IS_ERR(cma))
			dev_set_cma_area(r->dev, cma);
	}
	return 0;
}
core_initcall(cma_init_reserved_areas);
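
/*
 * Note (editorial): activation is deferred to a core_initcall so that the
 * slab allocator is available for the struct cma and bitmap allocations
 * in cma_create_area(), while still running before device drivers start
 * allocating from the reserved areas.
 */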
/**
 * dma_declare_contiguous() - reserve area for contiguous memory handling
 *			      for particular device
 * @dev:   Pointer to device structure.
 * @size:  Size of the reserved memory.
 * @base:  Start address of the reserved memory (optional, 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory for the specified device. It should be
 * called by board specific code while the early allocator (memblock or
 * bootmem) is still active.
 */
int __init dma_declare_contiguous(struct device *dev, unsigned long size,
				  phys_addr_t base, phys_addr_t limit)
{
	struct cma_reserved *r = &cma_reserved[cma_reserved_count];
	unsigned long alignment;

	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);

	/* Sanity checks */
	if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	/* Sanitise input arguments */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* Reserve memory */
	if (base) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			base = -EBUSY;
			goto err;
		}
	} else {
		/*
		 * Use __memblock_alloc_base() since
		 * memblock_alloc_base() panic()s.
		 */
		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
		if (!addr) {
			base = -ENOMEM;
			goto err;
		} else if (addr + size > ~(unsigned long)0) {
			memblock_free(addr, size);
			base = -EINVAL;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	r->start = base;
	r->size = size;
	r->dev = dev;
	cma_reserved_count++;
	pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;
err:
	pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
	return base;
}
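
/*
 * Usage sketch (hypothetical board code, not part of this file): reserve
 * a dedicated 16 MiB area for a capture device during early board init;
 * "camera_device" is an invented platform device name:
 *
 *	dma_declare_contiguous(&camera_device.dev, 16 * SZ_1M, 0, 0);
 *
 * Passing base = 0 and limit = 0 lets memblock pick any suitably aligned
 * address.
 */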
/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device specific contiguous memory area if available, or the default
 * global one. Requires an architecture specific get_dev_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	mutex_lock(&cma_mutex);

	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count)
			break;

		pfn = cma->base_pfn + pageno;
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			break;
		}
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	mutex_unlock(&cma_mutex);
	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to a contiguous
 * area, and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	mutex_lock(&cma_mutex);
	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
	free_contig_range(pfn, count);
	mutex_unlock(&cma_mutex);

	return true;
}
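
/*
 * Usage sketch (illustrative, not part of this file): these two helpers
 * are normally called by the DMA mapping implementation (e.g. behind
 * dma_alloc_coherent()), not by drivers directly. Allocating and freeing
 * a 1 MiB, 1 MiB-aligned buffer would look roughly like:
 *
 *	int count = SZ_1M >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = dma_alloc_from_contiguous(dev, count, get_order(SZ_1M));
 *	if (page) {
 *		...
 *		dma_release_from_contiguous(dev, page, count);
 *	}
 */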