mm/nobootmem.c
/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"
#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
					u64 goal, u64 limit)
{
	void *ptr;
	u64 addr;

	if (limit > memblock.current_limit)
		limit = memblock.current_limit;

	addr = find_memory_core_early(nid, size, align, goal, limit);

	if (addr == MEMBLOCK_ERROR)
		return NULL;

	ptr = phys_to_virt(addr);
	memset(ptr, 0, size);
	memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks.
	 */
	kmemleak_alloc(ptr, size, 0, 0);
	return ptr;
}
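
/*
 * Illustrative sketch, not part of the original file: every allocation in
 * this file funnels into the helper above, which finds a free memblock
 * range, reserves it, zeroes it and returns its virtual address.  A caller
 * wanting node-agnostic, cache-aligned memory anywhere in RAM would, under
 * those assumptions, look roughly like:
 *
 *	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, SMP_CACHE_BYTES,
 *					0, -1ULL);
 *	if (!ptr)
 *		... fall back or panic ...
 */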
/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(addr), size);

	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}
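
/*
 * Illustrative sketch, not part of the original file: a late init path that
 * no longer needs a boot-time reservation could hand it back to the buddy
 * allocator directly.  The symbol names below are assumptions made for the
 * example only:
 *
 *	static int __init example_release_fw_buffer(void)
 *	{
 *		free_bootmem_late(fw_buf_phys, fw_buf_size);
 *		return 0;
 *	}
 *	late_initcall(example_release_fw_buffer);
 */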
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int i;
	unsigned long start_aligned, end_aligned;
	int order = ilog2(BITS_PER_LONG);

	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
	end_aligned = end & ~(BITS_PER_LONG - 1);

	if (end_aligned <= start_aligned) {
		for (i = start; i < end; i++)
			__free_pages_bootmem(pfn_to_page(i), 0);

		return;
	}

	for (i = start; i < start_aligned; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);

	for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
		__free_pages_bootmem(pfn_to_page(i), order);

	for (i = end_aligned; i < end; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);
}
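
/*
 * Worked example (added for clarity, not in the original file): with
 * BITS_PER_LONG == 64, order is ilog2(64) == 6, so the middle loop above
 * hands naturally aligned 64-page blocks to the buddy allocator in single
 * __free_pages_bootmem() calls, while the unaligned head and tail of the
 * range are released page by page.
 */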
unsigned long __init free_all_memory_core_early(int nodeid)
{
	int i;
	u64 start, end;
	unsigned long count = 0;
	struct range *range = NULL;
	int nr_range;

	nr_range = get_free_all_memory_range(&range, nodeid);

	for (i = 0; i < nr_range; i++) {
		start = range[i].start;
		end = range[i].end;
		count += end - start;
		__free_pages_memory(start, end);
	}

	return count;
}
/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);

	/* free_all_memory_core_early(MAX_NUMNODES) will be called later */
	return 0;
}
/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	/*
	 * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
	 * because in some configurations node 0 has no RAM installed and
	 * low memory ends up on node 1 instead.  Using MAX_NUMNODES makes
	 * sure every range in early_node_map[] is covered, not only the
	 * ranges belonging to node 0.
	 */
	return free_all_memory_core_early(MAX_NUMNODES);
}
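
/*
 * Illustrative sketch, not part of the original file: architecture
 * mem_init() code typically releases boot memory and accounts the pages
 * roughly like the following (the surrounding function body is an
 * assumption made for the example):
 *
 *	void __init mem_init(void)
 *	{
 *		...
 *		totalram_pages += free_all_bootmem();
 *		...
 *	}
 */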
/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	kmemleak_free_part(__va(physaddr), size);
	memblock_x86_free_range(physaddr, physaddr + size);
}
/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	kmemleak_free_part(__va(addr), size);
	memblock_x86_free_range(addr, addr + size);
}
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

restart:

	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);

	if (ptr)
		return ptr;

	if (goal != 0) {
		goal = 0;
		goto restart;
	}

	return NULL;
}
/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}
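
/*
 * Illustrative sketch, not part of the original file: unlike
 * __alloc_bootmem(), this variant lets the caller handle failure, e.g.:
 *
 *	table = __alloc_bootmem_nopanic(table_size, SMP_CACHE_BYTES, 0);
 *	if (!table)
 *		pr_warn("early table allocation failed, using fallback\n");
 *
 * The table/table_size names are assumptions made for the example only.
 */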
static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}
/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
				unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem(size, align, goal, limit);
}
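
/*
 * Illustrative sketch, not part of the original file: callers normally
 * reach this through the alloc_bootmem() family of wrappers in
 * include/linux/bootmem.h, which supply the conventional alignment and
 * goal; alloc_bootmem(size) expands to roughly
 *
 *	__alloc_bootmem(size, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
 */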
/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, -1ULL);
	if (ptr)
		return ptr;

	return __alloc_memory_core_early(MAX_NUMNODES, size, align,
					goal, -1ULL);
}
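
/*
 * Illustrative sketch, not part of the original file: NUMA-aware boot-time
 * users pass the node's pg_data_t so the memory lands on (or near) that
 * node, e.g.:
 *
 *	map = __alloc_bootmem_node(NODE_DATA(nid), map_size, PAGE_SIZE, 0);
 *
 * The map/map_size/nid names are assumptions made for the example only.
 */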
void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
					unsigned long align, unsigned long goal)
{
	return __alloc_bootmem_node(pgdat, size, align, goal);
}
#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Return NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
	unsigned long pfn, goal, limit;

	pfn = section_nr_to_pfn(section_nr);
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;

	return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
					 SMP_CACHE_BYTES, goal, limit);
}
#endif
void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
					   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, -1ULL);
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}
#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif
/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
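
/*
 * Illustrative sketch, not part of the original file: the _low variants cap
 * the search at ARCH_LOW_ADDRESS_LIMIT, which suits early users whose
 * hardware can only address low physical memory, e.g.:
 *
 *	buf = __alloc_bootmem_low(buf_size, PAGE_SIZE, 0);
 *
 * The buf/buf_size names are assumptions made for the example only.
 */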
/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, ARCH_LOW_ADDRESS_LIMIT);
	if (ptr)
		return ptr;

	return __alloc_memory_core_early(MAX_NUMNODES, size, align,
					goal, ARCH_LOW_ADDRESS_LIMIT);
}