/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
					u64 goal, u64 limit)
{
	void *ptr;
	u64 addr;
	ulong flags = choose_memblock_flags();

	if (limit > memblock.current_limit)
		limit = memblock.current_limit;

again:
	addr = memblock_find_in_range_node(size, align, goal, limit, nid,
					   flags);
	if (!addr && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	if (!addr)
		return NULL;

	if (memblock_reserve(addr, size))
		return NULL;

	ptr = phys_to_virt(addr);
	memset(ptr, 0, size);
	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks.
	 */
	kmemleak_alloc(ptr, size, 0, 0);
	return ptr;
}
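
/*
 * Illustrative usage (hypothetical caller, not taken from this file): memory
 * handed out by the wrappers below ultimately comes from this helper, so it
 * is zeroed, recorded in memblock.reserved and reachable through the direct
 * mapping.  A caller that changes its mind can therefore undo the
 * reservation with free_bootmem() using the physical address:
 *
 *	void *buf = __alloc_bootmem_nopanic(SZ_64K, PAGE_SIZE, 0);
 *
 *	if (buf)
 *		free_bootmem(__pa(buf), SZ_64K);
 */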

/*
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(addr), size);

	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}
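
/*
 * Illustrative example (addresses are hypothetical): an architecture that
 * finds a no-longer-needed firmware region after the early allocator has
 * been torn down could hand it straight to the page allocator:
 *
 *	free_bootmem_late(0x1000000UL, SZ_1M);
 */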

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		order = min(MAX_ORDER - 1UL, __ffs(start));

		while (start + (1UL << order) > end)
			order--;

		__free_pages_bootmem(pfn_to_page(start), start, order);

		start += (1UL << order);
	}
}
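
/*
 * Worked example for the order selection above (illustrative, assuming the
 * default MAX_ORDER of 11): with start == 0x2340 and end == 0x3000,
 * __ffs(start) is 6, so the loop frees a 2^6-page block at pfn 0x2340,
 * then 2^7 pages at 0x2380, then 2^10-page blocks (the MAX_ORDER - 1 cap)
 * at 0x2400, 0x2800 and 0x2c00, stopping exactly at end.  Each step frees
 * the largest buddy-aligned block that starts at 'start' and still fits
 * below 'end'.
 */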

static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn > end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	for_each_reserved_mem_region(i, &start, &end)
		reserve_bootmem_region(start, end);

	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
	{
		phys_addr_t size;

		/* Free memblock.reserved array if it was allocated */
		size = get_allocated_memblock_reserved_regions_info(&start);
		if (size)
			count += __free_memory_core(start, start + size);

		/* Free memblock.memory array if it was allocated */
		size = get_allocated_memblock_memory_regions_info(&start);
		if (size)
			count += __free_memory_core(start, start + size);
	}
#endif

	return count;
}

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->managed_pages = 0;
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

/*
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	unsigned long pages;

	reset_all_zones_managed_pages();

	/*
	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because in some cases node 0 has no RAM installed and the low
	 * memory ends up on node 1 instead.
	 */
	pages = free_low_memory_core_early();
	totalram_pages += pages;

	return pages;
}
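
/*
 * Example (illustrative): this is normally called exactly once from the
 * architecture's mem_init(), after memblock has reached its final layout;
 * the pages are accounted in totalram_pages here, so the caller only needs:
 *
 *	void __init mem_init(void)
 *	{
 *		...
 *		free_all_bootmem();
 *		mem_init_print_info(NULL);
 *	}
 */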

/*
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	memblock_free(physaddr, size);
}

/*
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	memblock_free(addr, size);
}
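
/*
 * Example (hypothetical names): a platform that reserved a scratch buffer
 * early and no longer needs it can return the range before the buddy
 * allocator takes over:
 *
 *	free_bootmem(scratch_phys, scratch_size);
 *
 * On this memblock-backed implementation that simply removes the range from
 * memblock.reserved.
 */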

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

restart:

	ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align, goal, limit);

	if (ptr)
		return ptr;

	if (goal != 0) {
		goal = 0;
		goto restart;
	}

	return NULL;
}

/*
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}
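
/*
 * Example (hypothetical caller): since this variant returns NULL rather
 * than panicking, callers are expected to cope with failure:
 *
 *	table = __alloc_bootmem_nopanic(table_size, SMP_CACHE_BYTES, 0);
 *	if (!table)
 *		pr_warn("early table disabled, %lu bytes unavailable\n",
 *			table_size);
 *
 * 'table' and 'table_size' are illustrative names only.
 */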

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/*
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem(size, align, goal, limit);
}
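
/*
 * Example (hypothetical): @goal is only a hint, here asking for memory at
 * or above 16 MiB while accepting anything lower if that fails; no NULL
 * check is needed because this variant panics instead of returning NULL:
 *
 *	ptr = __alloc_bootmem(size, PAGE_SIZE, 16 * 1024 * 1024);
 */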

void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
						unsigned long size,
						unsigned long align,
						unsigned long goal,
						unsigned long limit)
{
	void *ptr;

again:
	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, limit);
	if (ptr)
		return ptr;

	ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align,
					goal, limit);
	if (ptr)
		return ptr;

	if (goal) {
		goal = 0;
		goto again;
	}

	return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}

static void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				    unsigned long align, unsigned long goal,
				    unsigned long limit)
{
	void *ptr;

	ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
	if (ptr)
		return ptr;

	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/*
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}
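
/*
 * Example (hypothetical): per-node data is usually placed on the node it
 * describes, with the fallbacks above kicking in when that node has no
 * suitable memory:
 *
 *	map = __alloc_bootmem_node(pgdat, map_size, SMP_CACHE_BYTES,
 *				   __pa(MAX_DMA_ADDRESS));
 *
 * The goal mirrors the alloc_bootmem_node() convenience macros, which
 * prefer memory above MAX_DMA_ADDRESS; 'map' and 'map_size' are
 * illustrative names.
 */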

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	return __alloc_bootmem_node(pgdat, size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

/*
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
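
/*
 * Example (hypothetical): "low" means below ARCH_LOW_ADDRESS_LIMIT (4 GiB
 * unless the architecture overrides it), which suits buffers that must stay
 * addressable by 32-bit devices or early firmware interfaces:
 *
 *	bounce = __alloc_bootmem_low(SZ_1M, PAGE_SIZE, 0);
 *
 * 'bounce' is an illustrative name; swiotlb, for instance, draws its bounce
 * buffers from low boot memory in a similar fashion.
 */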

void * __init __alloc_bootmem_low_nopanic(unsigned long size,
					  unsigned long align,
					  unsigned long goal)
{
	return ___alloc_bootmem_nopanic(size, align, goal,
					ARCH_LOW_ADDRESS_LIMIT);
}

/*
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align, goal,
				     ARCH_LOW_ADDRESS_LIMIT);
}