/*
 *  mm/nobootmem.c
 *
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
					u64 goal, u64 limit)
{
	void *ptr;
	u64 addr;

	if (limit > memblock.current_limit)
		limit = memblock.current_limit;

	addr = memblock_find_in_range_node(goal, limit, size, align, nid);
	if (!addr)
		return NULL;

	ptr = phys_to_virt(addr);
	memset(ptr, 0, size);
	memblock_reserve(addr, size);
	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks.
	 */
	kmemleak_alloc(ptr, size, 0, 0);
	return ptr;
}
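/*
 * Illustrative note (not in the original file): every allocator entry
 * point below funnels into __alloc_memory_core_early().  Conceptually, a
 * node-local, 64-byte-aligned 4 KiB request capped at 4 GiB would be:
 *
 *	ptr = __alloc_memory_core_early(nid, 0x1000, 64, 0, 0xffffffffULL);
 *
 * where nid and the limit are hypothetical caller-chosen values.
 */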
/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(addr), size);

	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}
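/*
 * Usage sketch (illustrative, made-up symbols): an arch that discovers a
 * firmware buffer it no longer needs after bootmem teardown could return
 * the fully contained pages to the buddy allocator:
 *
 *	free_bootmem_late(fw_buf_phys, fw_buf_size);
 *
 * The PFN_UP/PFN_DOWN rounding above means partial head and tail pages
 * stay reserved.
 */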
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	unsigned long i, start_aligned, end_aligned;
	int order = ilog2(BITS_PER_LONG);

	/* Round the PFN range inward to BITS_PER_LONG-page boundaries. */
	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
	end_aligned = end & ~(BITS_PER_LONG - 1);

	/* Range too small to hold an aligned chunk: free page by page. */
	if (end_aligned <= start_aligned) {
		for (i = start; i < end; i++)
			__free_pages_bootmem(pfn_to_page(i), 0);

		return;
	}

	/* Unaligned head: single pages. */
	for (i = start; i < start_aligned; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);

	/* Aligned middle: BITS_PER_LONG pages at a time. */
	for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
		__free_pages_bootmem(pfn_to_page(i), order);

	/* Unaligned tail: single pages. */
	for (i = end_aligned; i < end; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);
}
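/*
 * Worked example (illustrative): with BITS_PER_LONG == 64 and the PFN
 * range [5, 197), start_aligned is 64 and end_aligned is 192, so PFNs
 * 5..63 are freed one page at a time, 64..191 as two order-6 (64-page)
 * blocks, and 192..196 one page at a time again.
 */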
static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn > end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}
unsigned long __init free_low_memory_core_early(int nodeid)
{
	unsigned long count = 0;
	phys_addr_t start, end, size;
	u64 i;

	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
		count += __free_memory_core(start, end);

	/* Free the range backing memblock's reserved array, if we allocated one. */
	size = get_allocated_memblock_reserved_regions_info(&start);
	if (size)
		count += __free_memory_core(start, start + size);

	return count;
}
static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	/*
	 * In free_area_init_core(), highmem zone's managed_pages is set to
	 * present_pages, and the bootmem allocator doesn't allocate from
	 * highmem zones.  So there's no need to recalculate managed_pages
	 * because all highmem pages will be managed by the buddy system.
	 * Here highmem zone also includes the highmem movable zone.
	 */
	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		if (!is_highmem(z))
			z->managed_pages = 0;
}
/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);
	reset_node_lowmem_managed_pages(pgdat);

	/* free_low_memory_core_early(MAX_NUMNODES) will be called later */
	return 0;
}
/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	struct pglist_data *pgdat;

	for_each_online_pgdat(pgdat)
		reset_node_lowmem_managed_pages(pgdat);

	/*
	 * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
	 * because low memory may live on a node other than node 0: if
	 * node 0 has no RAM installed, low RAM ends up on node 1.
	 */
	return free_low_memory_core_early(MAX_NUMNODES);
}
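/*
 * Usage sketch (illustrative): arch mem_init() implementations typically
 * hand boot memory over with something along the lines of:
 *
 *	totalram_pages += free_all_bootmem();
 *
 * after which early allocations should come from the regular page and
 * slab allocators.
 */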
/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	kmemleak_free_part(__va(physaddr), size);
	memblock_free(physaddr, size);
}
/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	kmemleak_free_part(__va(addr), size);
	memblock_free(addr, size);
}
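/*
 * Usage sketch (illustrative, made-up symbols): a platform that reserved
 * a scratch region early in boot and later finds it unnecessary could
 * return it before the buddy allocator takes over:
 *
 *	free_bootmem(scratch_phys, scratch_size);
 *
 * Unlike free_bootmem_late(), this only drops the memblock reservation;
 * the pages reach the buddy allocator later via free_all_bootmem().
 */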
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

restart:

	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);

	if (ptr)
		return ptr;

	/* Retry once with the goal dropped before giving up. */
	if (goal != 0) {
		goal = 0;
		goto restart;
	}

	return NULL;
}
/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}
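/*
 * Usage sketch (illustrative, made-up symbols): a boot-time table whose
 * absence is survivable can be requested without risking a panic:
 *
 *	table = __alloc_bootmem_nopanic(table_size, PAGE_SIZE, goal);
 *	if (!table)
 *		pr_warn("early table allocation failed, feature disabled\n");
 */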
static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}
/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem(size, align, goal, limit);
}
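/*
 * Note: most callers reach this through the alloc_bootmem() wrapper
 * macros in <linux/bootmem.h>, which fill in a default alignment and
 * goal, conceptually ptr = alloc_bootmem(size); see that header for the
 * exact definitions.
 */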
void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
						unsigned long size,
						unsigned long align,
						unsigned long goal,
						unsigned long limit)
{
	void *ptr;

again:
	/* Try the requested node first... */
	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, limit);
	if (ptr)
		return ptr;

	/* ...then any node... */
	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
					goal, limit);
	if (ptr)
		return ptr;

	/* ...then once more with the goal dropped. */
	if (goal) {
		goal = 0;
		goto again;
	}

	return NULL;
}
void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}
void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				    unsigned long align, unsigned long goal,
				    unsigned long limit)
{
	void *ptr;

	ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
	if (ptr)
		return ptr;

	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}
/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}
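/*
 * Usage sketch (illustrative, made-up symbols): per-node boot-time
 * structures are typically carved out close to the node they describe:
 *
 *	map = __alloc_bootmem_node(NODE_DATA(nid), map_size, PAGE_SIZE, 0);
 *
 * If node nid has no suitable memory, the request transparently falls
 * back to any other node, as documented above.
 */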
void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
					unsigned long align, unsigned long goal)
{
	return __alloc_bootmem_node(pgdat, size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif
/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
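/*
 * Usage sketch (illustrative, made-up symbols): "low" means below
 * ARCH_LOW_ADDRESS_LIMIT (4 GiB unless the architecture overrides it),
 * which suits early users of 32-bit DMA-able memory, e.g. a bounce
 * buffer:
 *
 *	buf = __alloc_bootmem_low(buf_size, PAGE_SIZE, 0);
 */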
/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align, goal,
				     ARCH_LOW_ADDRESS_LIMIT);
}