/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/bootmem.h>
#include <linux/module.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		printk(KERN_INFO			\
			"bootmem::%s " fmt,		\
			__FUNCTION__, ## args);		\
})

static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = (pages + 7) / 8;

	return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long bytes = bootmap_bytes(pages);

	return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}

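/*
 * Worked example (assuming 4K pages, i.e. PAGE_SIZE == 4096): representing
 * 1 GiB of memory takes 262144 bits, i.e. 32768 bytes of bitmap, which
 * bootmem_bootmap_pages() rounds up to 8 whole pages.
 */
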
static void __init link_bootmem(bootmem_data_t *bdata)
{
	struct list_head *iter;

	list_for_each(iter, &bdata_list) {
		bootmem_data_t *ent;

		ent = list_entry(iter, bootmem_data_t, list);
		if (bdata->node_boot_start < ent->node_boot_start)
			break;
	}
	list_add_tail(&bdata->list, iter);
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_boot_start = PFN_PHYS(start);
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

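	/*
	 * The map carries one bit per page frame, indexed relative to
	 * PFN_DOWN(node_boot_start): a set bit means the frame is reserved,
	 * a clear bit means it is free.
	 */
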
	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = bootmap_bytes(end - start);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}

/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;

	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}

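/*
 * A minimal sketch of how an architecture's setup_arch() typically drives
 * this allocator; bitmap_pfn, ram_start and ram_size are hypothetical
 * placeholders, not symbols defined in this file:
 *
 *	init_bootmem(bitmap_pfn, max_low_pfn);
 *	free_bootmem(ram_start, ram_size);		(mark usable RAM)
 *	reserve_bootmem(__pa(_text), _end - _text, BOOTMEM_DEFAULT);
 *	...
 *	free_all_bootmem();		(hand remaining pages to the buddy allocator)
 *
 * Boot-time allocations are served via alloc_bootmem() and friends until
 * free_all_bootmem() is called.
 */
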
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	int aligned;
	struct page *page;
	unsigned long start, end, pages, count = 0;

	if (!bdata->node_bootmem_map)
		return 0;

	start = PFN_DOWN(bdata->node_boot_start);
	end = bdata->node_low_pfn;

	/*
	 * If the start is aligned to the machine's word size, we might
	 * be able to free pages in bulks of that order.
	 */
	aligned = !(start & (BITS_PER_LONG - 1));

	bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
		bdata - bootmem_node_data, start, end, aligned);

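	/*
	 * Walk the bitmap one word at a time. vec is the inverted map word,
	 * so set bits in vec correspond to free pages: a fully free, aligned
	 * word is released as one BITS_PER_LONG-page block, anything else is
	 * released page by page.
	 */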
	while (start < end) {
		unsigned long *map, idx, vec;

		map = bdata->node_bootmem_map;
		idx = start - PFN_DOWN(bdata->node_boot_start);
		vec = ~map[idx / BITS_PER_LONG];

		if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
			int order = ilog2(BITS_PER_LONG);

			__free_pages_bootmem(pfn_to_page(start), order);
			count += BITS_PER_LONG;
		} else {
			unsigned long off = 0;

			while (vec && off < BITS_PER_LONG) {
				if (vec & 1) {
					page = pfn_to_page(start + off);
					__free_pages_bootmem(page, 0);
					count++;
				}
				vec >>= 1;
				off++;
			}
		}
		start += BITS_PER_LONG;
	}

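	/*
	 * The bitmap itself is not needed anymore - release the pages it
	 * occupies as well.
	 */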
	page = virt_to_page(bdata->node_bootmem_map);
	pages = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
	pages = bootmem_bootmap_pages(pages);
	count += pages;
	while (pages--)
		__free_pages_bootmem(page++, 0);

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);
	return free_all_bootmem_core(pgdat->bdata);
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	return free_all_bootmem_core(NODE_DATA(0)->bdata);
}

static void __init __free(bootmem_data_t *bdata,
			unsigned long sidx, unsigned long eidx)
{
	unsigned long idx;

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + PFN_DOWN(bdata->node_boot_start),
		eidx + PFN_DOWN(bdata->node_boot_start));

	for (idx = sidx; idx < eidx; idx++)
		if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
			BUG();
}

static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
			unsigned long eidx, int flags)
{
	unsigned long idx;
	int exclusive = flags & BOOTMEM_EXCLUSIVE;

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + PFN_DOWN(bdata->node_boot_start),
		eidx + PFN_DOWN(bdata->node_boot_start),
		flags);

	for (idx = sidx; idx < eidx; idx++)
		if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
			if (exclusive) {
				__free(bdata, sidx, idx);
				return -EBUSY;
			}

			bdebug("silent double reserve of PFN %lx\n",
				idx + PFN_DOWN(bdata->node_boot_start));
		}

	return 0;
}

static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
				unsigned long size)
{
	unsigned long sidx, eidx;

	if (addr + size < bdata->node_boot_start ||
	    PFN_DOWN(addr) > bdata->node_low_pfn)
		return;
	/*
	 * round down end of usable mem, partially free pages are
	 * considered reserved.
	 */

	if (addr >= bdata->node_boot_start &&
	    PFN_DOWN(addr - bdata->node_boot_start) < bdata->hint_idx)
		bdata->hint_idx = PFN_DOWN(addr - bdata->node_boot_start);

	/*
	 * Round the start address up to an index into the range.
	 */
	if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
		sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	__free(bdata, sidx, eidx);
}

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * Only physical pages that actually reside on @pgdat are marked.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			unsigned long size)
{
	free_bootmem_core(pgdat->bdata, physaddr, size);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * All physical pages within the range are marked, no matter what
 * node they reside on.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	bootmem_data_t *bdata;

	list_for_each_entry(bdata, &bdata_list, list)
		free_bootmem_core(bdata, addr, size);
}

/*
 * Marks a particular physical memory range as unallocatable. Usable RAM
 * might be used for boot-time allocations - or it might get added
 * to the free page pool later on.
 */
static int __init can_reserve_bootmem_core(bootmem_data_t *bdata,
			unsigned long addr, unsigned long size, int flags)
{
	unsigned long sidx, eidx;
	unsigned long i;

	/* out of range, don't hold other */
	if (addr + size < bdata->node_boot_start ||
	    PFN_DOWN(addr) > bdata->node_low_pfn)
		return 0;

	/*
	 * Round the start address up to an index into the range.
	 */
	if (addr > bdata->node_boot_start)
		sidx = PFN_DOWN(addr - bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_UP(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	for (i = sidx; i < eidx; i++) {
		if (test_bit(i, bdata->node_bootmem_map)) {
			if (flags & BOOTMEM_EXCLUSIVE)
				return -EBUSY;
		}
	}

	return 0;
}

static int __init reserve_bootmem_core(bootmem_data_t *bdata,
			unsigned long addr, unsigned long size, int flags)
{
	unsigned long sidx, eidx;

	if (addr + size < bdata->node_boot_start ||
	    PFN_DOWN(addr) > bdata->node_low_pfn)
		return 0;

	/*
	 * Round the start address up to an index into the range.
	 */
	if (addr > bdata->node_boot_start)
		sidx = PFN_DOWN(addr - bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_UP(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	return __reserve(bdata, sidx, eidx, flags);
}

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * Only physical pages that actually reside on @pgdat are marked.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			unsigned long size, int flags)
{
	int ret;

	ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
	if (ret < 0)
		return ret;
	reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);

	return 0;
}

#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * All physical pages within the range are marked, no matter what
 * node they reside on.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			int flags)
{
	int ret;
	bootmem_data_t *bdata;

	list_for_each_entry(bdata, &bdata_list, list) {
		ret = can_reserve_bootmem_core(bdata, addr, size, flags);
		if (ret < 0)
			return ret;
	}

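	/*
	 * Every node accepted the reservation above, so the second pass
	 * below cannot fail half-way through and leave the nodes in a
	 * mixed state.
	 */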
	list_for_each_entry(bdata, &bdata_list, list)
		reserve_bootmem_core(bdata, addr, size, flags);

	return 0;
}
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */

static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
				unsigned long size, unsigned long align,
				unsigned long goal, unsigned long limit)
{
	unsigned long min, max, start, sidx, midx, step;

	BUG_ON(align & (align - 1));
	BUG_ON(limit && goal + size > limit);

	if (!bdata->node_bootmem_map)
		return NULL;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	min = PFN_DOWN(bdata->node_boot_start);
	max = bdata->node_low_pfn;

	goal >>= PAGE_SHIFT;
	limit >>= PAGE_SHIFT;

	if (limit && max > limit)
		max = limit;

	step = max(align >> PAGE_SHIFT, 1UL);

	if (goal && min < goal && goal < max)
		start = ALIGN(goal, step);
	else
		start = ALIGN(min, step);

	sidx = start - PFN_DOWN(bdata->node_boot_start);
	midx = max - PFN_DOWN(bdata->node_boot_start);

	if (bdata->hint_idx > sidx) {
		/* Make sure we retry on failure */
		goal = 1;
		sidx = ALIGN(bdata->hint_idx, step);
	}

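	/*
	 * Scan the bitmap for a run of PFN_UP(size) clear bits starting at
	 * sidx, advancing in units of the alignment step. hint_idx and
	 * last_end_off remember where the previous allocation ended so that
	 * successive small allocations stay adjacent.
	 */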
	while (1) {
		int merge;
		void *region;
		unsigned long eidx, i, start_off, end_off;
find_block:
		sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
		sidx = ALIGN(sidx, step);
		eidx = sidx + PFN_UP(size);

		if (sidx >= midx || eidx > midx)
			break;

		for (i = sidx; i < eidx; i++)
			if (test_bit(i, bdata->node_bootmem_map)) {
				sidx = ALIGN(i, step);
				if (sidx == i)
					sidx += step;
				goto find_block;
			}

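		/*
		 * If the previous allocation ended part-way into the page
		 * just before this block, start right behind it so the two
		 * allocations share that page; merge flags the shared page
		 * so it is not reserved a second time below.
		 */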
		if (bdata->last_end_off &&
				PFN_DOWN(bdata->last_end_off) + 1 == sidx)
			start_off = ALIGN(bdata->last_end_off, align);
		else
			start_off = PFN_PHYS(sidx);

		merge = PFN_DOWN(start_off) < sidx;
		end_off = start_off + size;

		bdata->last_end_off = end_off;
		bdata->hint_idx = PFN_UP(end_off);

		/*
		 * Reserve the area now:
		 */
		if (__reserve(bdata, PFN_DOWN(start_off) + merge,
				PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
			BUG();

		region = phys_to_virt(bdata->node_boot_start + start_off);
		memset(region, 0, size);
		return region;
	}

	if (goal) {
		goal = 0;
		sidx = 0;
		goto find_block;
	}

	return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	bootmem_data_t *bdata;
	void *ptr;

	list_for_each_entry(bdata, &bdata_list, list) {
		ptr = alloc_bootmem_core(bdata, size, align, goal, 0);
		if (ptr)
			return ptr;
	}

	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
				unsigned long goal)
{
	void *mem = __alloc_bootmem_nopanic(size, align, goal);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

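/*
 * Callers normally go through the alloc_bootmem() convenience wrappers from
 * <linux/bootmem.h> rather than calling this directly; a typical boot-time
 * request looks like (sketch, the table name and size are illustrative only):
 *
 *	table = alloc_bootmem(max_low_pfn * sizeof(*table));
 */
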
/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	return __alloc_bootmem(size, align, goal);
}

#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Return NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				unsigned long section_nr)
{
	void *ptr;
	unsigned long limit, goal, start_nr, end_nr, pfn;
	struct pglist_data *pgdat;

	pfn = section_nr_to_pfn(section_nr);
	goal = PFN_PHYS(pfn);
	limit = PFN_PHYS(section_nr_to_pfn(section_nr + 1)) - 1;
	pgdat = NODE_DATA(early_pfn_to_nid(pfn));
	ptr = alloc_bootmem_core(pgdat->bdata, size, SMP_CACHE_BYTES, goal,
				limit);
	if (!ptr)
		return NULL;

	start_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr)));
	end_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr) + size));

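	/*
	 * The caller expects the block to lie entirely within @section_nr;
	 * if it straddles a section boundary, give the memory back and fail.
	 */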
	if (start_nr != section_nr || end_nr != section_nr) {
		printk(KERN_WARNING "alloc_bootmem failed on section %ld.\n",
			section_nr);
		free_bootmem_core(pgdat->bdata, __pa(ptr), size);
		ptr = NULL;
	}

	return ptr;
}
#endif

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				unsigned long goal)
{
	bootmem_data_t *bdata;
	void *ptr;

	list_for_each_entry(bdata, &bdata_list, list) {
		ptr = alloc_bootmem_core(bdata, size, align, goal,
					ARCH_LOW_ADDRESS_LIMIT);
		if (ptr)
			return ptr;
	}

	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of low memory");
	return NULL;
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				unsigned long align, unsigned long goal)
{
	return alloc_bootmem_core(pgdat->bdata, size, align, goal,
				ARCH_LOW_ADDRESS_LIMIT);
}