/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/kmemleak.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"
unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif
bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);
static int bootmem_debug;
static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);
#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		printk(KERN_INFO			\
			"bootmem::%s " fmt,		\
			__func__, ## args);		\
})
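/*
 * Example: booting with "bootmem_debug" on the kernel command line sets
 * bootmem_debug above, after which each bdebug() below logs what the
 * allocator is doing, prefixed with "bootmem::" and the function name.
 */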
static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = (pages + 7) / 8;

	return ALIGN(bytes, sizeof(long));
}
/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long bytes = bootmap_bytes(pages);

	return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}
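/*
 * Worked example (illustrative): with 64-bit longs and 4 KiB pages,
 * representing 130 pages takes bootmap_bytes(130) = ALIGN(17, 8) = 24
 * bytes, so bootmem_bootmap_pages(130) returns 1 - a single bitmap page.
 * One such page can represent PAGE_SIZE * 8 = 32768 pages, i.e. 128 MiB.
 */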
/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	struct list_head *iter;

	list_for_each(iter, &bdata_list) {
		bootmem_data_t *ent;

		ent = list_entry(iter, bootmem_data_t, list);
		if (bdata->node_min_pfn < ent->node_min_pfn)
			break;
	}
	list_add_tail(&bdata->list, iter);
}
/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_min_pfn = start;
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = bootmap_bytes(end - start);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}
/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}
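/*
 * Usage sketch (hypothetical pfn values): a NUMA setup_arch() typically
 * places the bitmap in free memory on the node and registers it:
 *
 *	bitmap_pfn = node_free_area(nid);	// arch-specific, assumed name
 *	init_bootmem_node(NODE_DATA(nid), bitmap_pfn, node_start, node_end);
 *
 * Afterwards every pfn in [node_start, node_end) is marked reserved;
 * the architecture must free_bootmem_node() the real RAM ranges itself.
 */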
/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	int aligned;
	struct page *page;
	unsigned long start, end, pages, count = 0;

	if (!bdata->node_bootmem_map)
		return 0;

	start = bdata->node_min_pfn;
	end = bdata->node_low_pfn;

	/*
	 * If the start is aligned to the machine's wordsize, we might
	 * be able to free pages in bulks of that order.
	 */
	aligned = !(start & (BITS_PER_LONG - 1));

	bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
		bdata - bootmem_node_data, start, end, aligned);

	while (start < end) {
		unsigned long *map, idx, vec;

		map = bdata->node_bootmem_map;
		idx = start - bdata->node_min_pfn;
		vec = ~map[idx / BITS_PER_LONG];

		if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
			int order = ilog2(BITS_PER_LONG);

			__free_pages_bootmem(pfn_to_page(start), order);
			count += BITS_PER_LONG;
		} else {
			unsigned long off = 0;

			while (vec && off < BITS_PER_LONG) {
				if (vec & 1) {
					page = pfn_to_page(start + off);
					__free_pages_bootmem(page, 0);
					count++;
				}
				vec >>= 1;
				off++;
			}
		}
		start += BITS_PER_LONG;
	}

	page = virt_to_page(bdata->node_bootmem_map);
	pages = bdata->node_low_pfn - bdata->node_min_pfn;
	pages = bootmem_bootmap_pages(pages);
	count += pages;
	while (pages--)
		__free_pages_bootmem(page++, 0);

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}
/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);
	return free_all_bootmem_core(pgdat->bdata);
}
/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	return free_all_bootmem_core(NODE_DATA(0)->bdata);
}
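/*
 * Hand-over sketch (illustrative): at the end of boot, mem_init()
 * typically does
 *
 *	totalram_pages += free_all_bootmem();
 *
 * after which the buddy allocator owns all remaining free pages and
 * bootmem must not be used any more.
 */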
static void __init __free(bootmem_data_t *bdata,
			unsigned long sidx, unsigned long eidx)
{
	unsigned long idx;

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn);

	if (bdata->hint_idx > sidx)
		bdata->hint_idx = sidx;

	for (idx = sidx; idx < eidx; idx++)
		if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
			BUG();
}
static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
			unsigned long eidx, int flags)
{
	unsigned long idx;
	int exclusive = flags & BOOTMEM_EXCLUSIVE;

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn,
		flags);

	for (idx = sidx; idx < eidx; idx++)
		if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
			if (exclusive) {
				__free(bdata, sidx, idx);
				return -EBUSY;
			}
			bdebug("silent double reserve of PFN %lx\n",
				idx + bdata->node_min_pfn);
		}
	return 0;
}
static int __init mark_bootmem_node(bootmem_data_t *bdata,
				unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long sidx, eidx;

	bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
		bdata - bootmem_node_data, start, end, reserve, flags);

	BUG_ON(start < bdata->node_min_pfn);
	BUG_ON(end > bdata->node_low_pfn);

	sidx = start - bdata->node_min_pfn;
	eidx = end - bdata->node_min_pfn;

	if (reserve)
		return __reserve(bdata, sidx, eidx, flags);
	else
		__free(bdata, sidx, eidx);
	return 0;
}
static int __init mark_bootmem(unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long pos;
	bootmem_data_t *bdata;

	pos = start;
	list_for_each_entry(bdata, &bdata_list, list) {
		int err;
		unsigned long max;

		if (pos < bdata->node_min_pfn ||
		    pos >= bdata->node_low_pfn) {
			BUG_ON(pos != start);
			continue;
		}

		max = min(bdata->node_low_pfn, end);

		err = mark_bootmem_node(bdata, pos, max, reserve, flags);
		if (reserve && err) {
			mark_bootmem(start, pos, 0, 0);
			return err;
		}

		if (max == end)
			return 0;
		pos = bdata->node_low_pfn;
	}
	BUG();
}
/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	unsigned long start, end;

	kmemleak_free_part(__va(physaddr), size);

	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
}
/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	unsigned long start, end;

	kmemleak_free_part(__va(addr), size);

	start = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	mark_bootmem(start, end, 0, 0);
}
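/*
 * Example of the partial-page rule (illustrative numbers, 4 KiB pages):
 * free_bootmem(0x1800, 0x2000) computes start = PFN_UP(0x1800) = 2 and
 * end = PFN_DOWN(0x3800) = 3, so only the one fully covered page (pfn 2)
 * is freed; the partially covered pages at either end stay reserved.
 */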
/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				unsigned long size, int flags)
{
	unsigned long start, end;

	start = PFN_DOWN(physaddr);
	end = PFN_UP(physaddr + size);

	return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}
/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			   int flags)
{
	unsigned long start, end;

	start = PFN_DOWN(addr);
	end = PFN_UP(addr + size);

	return mark_bootmem(start, end, 1, flags);
}
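/*
 * Example of the opposite rounding from free_bootmem() (illustrative
 * numbers, 4 KiB pages): reserve_bootmem(0x1800, 0x2000, BOOTMEM_DEFAULT)
 * reserves [PFN_DOWN(0x1800), PFN_UP(0x3800)) = pfns 1 through 3, so
 * partially covered pages are reserved in full. With BOOTMEM_EXCLUSIVE,
 * the call fails with -EBUSY instead if any page in the range is
 * already reserved.
 */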
static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx,
			       unsigned long step)
{
	unsigned long base = bdata->node_min_pfn;

	/*
	 * Align the index with respect to the node start so that the
	 * combination of both satisfies the requested alignment.
	 */

	return ALIGN(base + idx, step) - base;
}
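/*
 * Worked example (illustrative): with node_min_pfn = 5 and step = 4,
 * align_idx(bdata, 2, 4) = ALIGN(5 + 2, 4) - 5 = 8 - 5 = 3. Index 3 is
 * pfn 8, i.e. the alignment is satisfied by the absolute pfn, not by
 * the node-relative index.
 */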
static unsigned long align_off(struct bootmem_data *bdata, unsigned long off,
			       unsigned long align)
{
	unsigned long base = PFN_PHYS(bdata->node_min_pfn);

	/* Same as align_idx for byte offsets */

	return ALIGN(base + off, align) - base;
}
static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	unsigned long fallback = 0;
	unsigned long min, max, start, sidx, midx, step;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	BUG_ON(!size);
	BUG_ON(align & (align - 1));
	BUG_ON(limit && goal + size > limit);

	if (!bdata->node_bootmem_map)
		return NULL;

	min = bdata->node_min_pfn;
	max = bdata->node_low_pfn;

	goal >>= PAGE_SHIFT;
	limit >>= PAGE_SHIFT;

	if (limit && max > limit)
		max = limit;
	if (max <= min)
		return NULL;

	step = max(align >> PAGE_SHIFT, 1UL);

	if (goal && min < goal && goal < max)
		start = ALIGN(goal, step);
	else
		start = ALIGN(min, step);

	sidx = start - bdata->node_min_pfn;
	midx = max - bdata->node_min_pfn;

	if (bdata->hint_idx > sidx) {
		/*
		 * Handle the valid case of sidx being zero and still
		 * catch the fallback below.
		 */
		fallback = sidx + 1;
		sidx = align_idx(bdata, bdata->hint_idx, step);
	}

	while (1) {
		int merge;
		void *region;
		unsigned long eidx, i, start_off, end_off;
find_block:
		sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
		sidx = align_idx(bdata, sidx, step);
		eidx = sidx + PFN_UP(size);

		if (sidx >= midx || eidx > midx)
			break;

		for (i = sidx; i < eidx; i++)
			if (test_bit(i, bdata->node_bootmem_map)) {
				sidx = align_idx(bdata, i, step);
				if (sidx == i)
					sidx += step;
				goto find_block;
			}

		if (bdata->last_end_off & (PAGE_SIZE - 1) &&
				PFN_DOWN(bdata->last_end_off) + 1 == sidx)
			start_off = align_off(bdata, bdata->last_end_off, align);
		else
			start_off = PFN_PHYS(sidx);

		merge = PFN_DOWN(start_off) < sidx;
		end_off = start_off + size;

		bdata->last_end_off = end_off;
		bdata->hint_idx = PFN_UP(end_off);

		/*
		 * Reserve the area now:
		 */
		if (__reserve(bdata, PFN_DOWN(start_off) + merge,
				PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
			BUG();

		region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
				start_off);
		memset(region, 0, size);
		/*
		 * The min_count is set to 0 so that bootmem allocated blocks
		 * are never reported as leaks.
		 */
		kmemleak_alloc(region, size, 0, 0);
		return region;
	}

	if (fallback) {
		sidx = align_idx(bdata, fallback - 1, step);
		fallback = 0;
		goto find_block;
	}

	return NULL;
}
static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

#ifdef CONFIG_HAVE_ARCH_BOOTMEM
	{
		bootmem_data_t *p_bdata;

		p_bdata = bootmem_arch_preferred_node(bdata, size, align,
							goal, limit);
		if (p_bdata)
			return alloc_bootmem_core(p_bdata, size, align,
							goal, limit);
	}
#endif
	return NULL;
}
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	bootmem_data_t *bdata;
	void *region;

restart:
	region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
	if (region)
		return region;

	list_for_each_entry(bdata, &bdata_list, list) {
		if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
			continue;
		if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
			break;

		region = alloc_bootmem_core(bdata, size, align, goal, limit);
		if (region)
			return region;
	}

	if (goal) {
		goal = 0;
		goto restart;
	}

	return NULL;
}
/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	return ___alloc_bootmem_nopanic(size, align, goal, 0);
}
static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}
/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
				unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, 0);
}
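/*
 * Usage sketch (hypothetical sizes): early code that needs memory before
 * the slab allocator is up might do
 *
 *	table = __alloc_bootmem(nr_entries * sizeof(*table),
 *				SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
 *
 * requesting cacheline alignment and preferring memory above the DMA
 * zone; the returned region is already zeroed by alloc_bootmem_core().
 */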
static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
				unsigned long size, unsigned long align,
				unsigned long goal, unsigned long limit)
{
	void *ptr;

	ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	return ___alloc_bootmem(size, align, goal, limit);
}
/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
}
#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Return NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
	bootmem_data_t *bdata;
	unsigned long pfn, goal, limit;

	pfn = section_nr_to_pfn(section_nr);
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
	bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];

	return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
}
#endif
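/*
 * Caller sketch (assumed, based on the sparse memory code): a section's
 * usemap can be kept within the section itself, roughly
 *
 *	usemap = alloc_bootmem_section(usemap_size(), pnum);
 *
 * Since goal and limit pin the search to [section start, next section
 * start), a NULL return means this section has no room, not that the
 * system is out of memory.
 */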
void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}
#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif
/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
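/*
 * Example (illustrative): a driver bounce buffer that must live below
 * 4 GiB could be obtained with
 *
 *	buf = __alloc_bootmem_low(buffer_bytes, PAGE_SIZE, 0);
 *
 * where buffer_bytes is a hypothetical size; ARCH_LOW_ADDRESS_LIMIT
 * caps the search at 0xffffffff unless the architecture overrides it.
 */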
/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat->bdata, size, align,
				     goal, ARCH_LOW_ADDRESS_LIMIT);
}