/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>		/* for the kzalloc()/kzalloc_node() fallbacks */
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/kmemleak.h>
#include <linux/range.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"
unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif
#ifndef CONFIG_NO_BOOTMEM
bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);
#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		printk(KERN_INFO			\
			"bootmem::%s " fmt,		\
			__func__, ## args);		\
})
static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = (pages + 7) / 8;

	return ALIGN(bytes, sizeof(long));
}
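/*
 * Worked example (illustrative numbers, not from the code above): a map
 * for 4097 pages needs (4097 + 7) / 8 = 513 bytes, which ALIGN() rounds
 * up to 520 bytes on a 64-bit machine where sizeof(long) == 8.
 */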
/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long bytes = bootmap_bytes(pages);

	return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}
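/*
 * Usage sketch (hypothetical pfn variables): an architecture sizing the
 * boot bitmap for a range of pfns would do
 *
 *	map_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
 *
 * and then set aside that many pages to hold the bitmap itself.
 */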
/*
 * link bdata into bdata_list, in order of ascending node_min_pfn
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	struct list_head *iter;

	list_for_each(iter, &bdata_list) {
		bootmem_data_t *ent;

		ent = list_entry(iter, bootmem_data_t, list);
		if (bdata->node_min_pfn < ent->node_min_pfn)
			break;
	}
	list_add_tail(&bdata->list, iter);
}
/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_min_pfn = start;
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = bootmap_bytes(end - start);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}
/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}
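/*
 * Usage sketch (hypothetical nid/pfn values): a NUMA architecture typically
 * registers each node from its setup code and then reserves the bitmap:
 *
 *	bytes = init_bootmem_node(NODE_DATA(nid), map_pfn, start_pfn, end_pfn);
 *	reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(map_pfn), bytes,
 *			     BOOTMEM_DEFAULT);
 */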
/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
#endif
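/*
 * Usage sketch for the UMA variant (hypothetical values): register memory
 * once, free the usable range, then protect the bitmap:
 *
 *	bytes = init_bootmem(map_pfn, max_pfn);
 *	free_bootmem(PFN_PHYS(first_free_pfn), free_bytes);
 *	reserve_bootmem(PFN_PHYS(map_pfn), bytes, BOOTMEM_DEFAULT);
 */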
/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(addr), size);

	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}
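/*
 * Note the inward rounding above: PFN_UP() advances past a partial first
 * page and PFN_DOWN() drops a partial last page, so e.g. (with 4K pages)
 * free_bootmem_late(0x1800, 0x2000) frees only the page at 0x2000.
 */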
#ifdef CONFIG_NO_BOOTMEM
/*
 * Free [start, end) to the page allocator, releasing word-aligned runs of
 * BITS_PER_LONG pages at ilog2(BITS_PER_LONG) order and the unaligned
 * head/tail one page at a time.
 */
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	unsigned long i;
	unsigned long start_aligned, end_aligned;
	int order = ilog2(BITS_PER_LONG);

	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
	end_aligned = end & ~(BITS_PER_LONG - 1);

	if (end_aligned <= start_aligned) {
#if 1
		printk(KERN_DEBUG " %lx - %lx\n", start, end);
#endif
		for (i = start; i < end; i++)
			__free_pages_bootmem(pfn_to_page(i), 0);

		return;
	}

#if 1
	printk(KERN_DEBUG " %lx %lx - %lx %lx\n",
		start, start_aligned, end_aligned, end);
#endif
	for (i = start; i < start_aligned; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);

	for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
		__free_pages_bootmem(pfn_to_page(i), order);

	for (i = end_aligned; i < end; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);
}
unsigned long __init free_all_memory_core_early(int nodeid)
{
	int i;
	u64 start, end;
	unsigned long count = 0;
	struct range *range = NULL;
	int nr_range;

	nr_range = get_free_all_memory_range(&range, nodeid);

	for (i = 0; i < nr_range; i++) {
		start = range[i].start;
		end = range[i].end;
		count += end - start;
		__free_pages_memory(start, end);
	}

	return count;
}
#else
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	int aligned;
	struct page *page;
	unsigned long start, end, pages, count = 0;

	if (!bdata->node_bootmem_map)
		return 0;

	start = bdata->node_min_pfn;
	end = bdata->node_low_pfn;

	/*
	 * If the start is aligned to the machine's word size, we might
	 * be able to free pages in bulk at that order.
	 */
	aligned = !(start & (BITS_PER_LONG - 1));

	bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
		bdata - bootmem_node_data, start, end, aligned);

	while (start < end) {
		unsigned long *map, idx, vec;

		map = bdata->node_bootmem_map;
		idx = start - bdata->node_min_pfn;
		vec = ~map[idx / BITS_PER_LONG];

		if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
			int order = ilog2(BITS_PER_LONG);

			__free_pages_bootmem(pfn_to_page(start), order);
			count += BITS_PER_LONG;
		} else {
			unsigned long off = 0;

			while (vec && off < BITS_PER_LONG) {
				if (vec & 1) {
					page = pfn_to_page(start + off);
					__free_pages_bootmem(page, 0);
					count++;
				}
				vec >>= 1;
				off++;
			}
		}
		start += BITS_PER_LONG;
	}

	/*
	 * Finally, release the pages holding the bitmap itself.
	 */
	page = virt_to_page(bdata->node_bootmem_map);
	pages = bdata->node_low_pfn - bdata->node_min_pfn;
	pages = bootmem_bootmap_pages(pages);
	count += pages;
	while (pages--)
		__free_pages_bootmem(page++, 0);

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}
#endif
/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);
#ifdef CONFIG_NO_BOOTMEM
	/* free_all_memory_core_early(MAX_NUMNODES) will be called later */
	return 0;
#else
	return free_all_bootmem_core(pgdat->bdata);
#endif
}
/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
#ifdef CONFIG_NO_BOOTMEM
	return free_all_memory_core_early(NODE_DATA(0)->node_id);
#else
	return free_all_bootmem_core(NODE_DATA(0)->bdata);
#endif
}
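/*
 * Usage sketch (illustrative): architectures call this once from their
 * mem_init(), e.g.
 *
 *	totalram_pages += free_all_bootmem();
 *
 * after which the buddy allocator owns all remaining free memory.
 */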
#ifndef CONFIG_NO_BOOTMEM
static void __init __free(bootmem_data_t *bdata,
			unsigned long sidx, unsigned long eidx)
{
	unsigned long idx;

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn);

	if (bdata->hint_idx > sidx)
		bdata->hint_idx = sidx;

	for (idx = sidx; idx < eidx; idx++)
		if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
			BUG();
}
static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
			unsigned long eidx, int flags)
{
	unsigned long idx;
	int exclusive = flags & BOOTMEM_EXCLUSIVE;

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn,
		flags);

	for (idx = sidx; idx < eidx; idx++)
		if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
			if (exclusive) {
				__free(bdata, sidx, idx);
				return -EBUSY;
			}
			bdebug("silent double reserve of PFN %lx\n",
				idx + bdata->node_min_pfn);
		}
	return 0;
}
static int __init mark_bootmem_node(bootmem_data_t *bdata,
				unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long sidx, eidx;

	bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
		bdata - bootmem_node_data, start, end, reserve, flags);

	BUG_ON(start < bdata->node_min_pfn);
	BUG_ON(end > bdata->node_low_pfn);

	sidx = start - bdata->node_min_pfn;
	eidx = end - bdata->node_min_pfn;

	if (reserve)
		return __reserve(bdata, sidx, eidx, flags);
	else
		__free(bdata, sidx, eidx);
	return 0;
}
static int __init mark_bootmem(unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long pos;
	bootmem_data_t *bdata;

	pos = start;
	list_for_each_entry(bdata, &bdata_list, list) {
		int err;
		unsigned long max;

		if (pos < bdata->node_min_pfn ||
		    pos >= bdata->node_low_pfn) {
			BUG_ON(pos != start);
			continue;
		}

		max = min(bdata->node_low_pfn, end);

		err = mark_bootmem_node(bdata, pos, max, reserve, flags);
		if (reserve && err) {
			mark_bootmem(start, pos, 0, 0);
			return err;
		}

		if (max == end)
			return 0;
		pos = bdata->node_low_pfn;
	}
	BUG();
}
#endif
/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
#ifdef CONFIG_NO_BOOTMEM
	free_early(physaddr, physaddr + size);
#if 0
	printk(KERN_DEBUG "free %lx %lx\n", physaddr, size);
#endif
#else
	unsigned long start, end;

	kmemleak_free_part(__va(physaddr), size);

	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
#endif
}
/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
#ifdef CONFIG_NO_BOOTMEM
	free_early(addr, addr + size);
#if 0
	printk(KERN_DEBUG "free %lx %lx\n", addr, size);
#endif
#else
	unsigned long start, end;

	kmemleak_free_part(__va(addr), size);

	start = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	mark_bootmem(start, end, 0, 0);
#endif
}
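/*
 * Rounding sketch (illustrative, 4K pages): free_bootmem(0x101000, 0x3000)
 * clears the map bits for pfns 0x101-0x103 only; a partially covered page
 * at either end stays reserved because the range is rounded inward.
 */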
/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				unsigned long size, int flags)
{
#ifdef CONFIG_NO_BOOTMEM
	panic("no bootmem");
	return 0;
#else
	unsigned long start, end;

	start = PFN_DOWN(physaddr);
	end = PFN_UP(physaddr + size);

	return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
#endif
}
/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			    int flags)
{
#ifdef CONFIG_NO_BOOTMEM
	panic("no bootmem");
	return 0;
#else
	unsigned long start, end;

	start = PFN_DOWN(addr);
	end = PFN_UP(addr + size);

	return mark_bootmem(start, end, 1, flags);
#endif
}
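/*
 * Usage sketch (hypothetical addresses): protecting a firmware table early
 * in boot, detecting overlaps with BOOTMEM_EXCLUSIVE:
 *
 *	if (reserve_bootmem(table_phys, table_len, BOOTMEM_EXCLUSIVE))
 *		printk(KERN_WARNING "table range already reserved\n");
 *
 * With BOOTMEM_DEFAULT a double reservation is merged silently instead.
 */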
#ifndef CONFIG_NO_BOOTMEM
static unsigned long __init align_idx(struct bootmem_data *bdata,
				      unsigned long idx, unsigned long step)
{
	unsigned long base = bdata->node_min_pfn;

	/*
	 * Align the index with respect to the node start so that the
	 * combination of both satisfies the requested alignment.
	 */

	return ALIGN(base + idx, step) - base;
}

static unsigned long __init align_off(struct bootmem_data *bdata,
				      unsigned long off, unsigned long align)
{
	unsigned long base = PFN_PHYS(bdata->node_min_pfn);

	/* Same as align_idx for byte offsets */

	return ALIGN(base + off, align) - base;
}
static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	unsigned long fallback = 0;
	unsigned long min, max, start, sidx, midx, step;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	BUG_ON(!size);
	BUG_ON(align & (align - 1));
	BUG_ON(limit && goal + size > limit);

	if (!bdata->node_bootmem_map)
		return NULL;

	min = bdata->node_min_pfn;
	max = bdata->node_low_pfn;

	goal >>= PAGE_SHIFT;
	limit >>= PAGE_SHIFT;

	if (limit && max > limit)
		max = limit;
	if (max <= min)
		return NULL;

	step = max(align >> PAGE_SHIFT, 1UL);

	if (goal && min < goal && goal < max)
		start = ALIGN(goal, step);
	else
		start = ALIGN(min, step);

	sidx = start - bdata->node_min_pfn;
	midx = max - bdata->node_min_pfn;

	if (bdata->hint_idx > sidx) {
		/*
		 * Handle the valid case of sidx being zero and still
		 * catch the fallback below.
		 */
		fallback = sidx + 1;
		sidx = align_idx(bdata, bdata->hint_idx, step);
	}

	while (1) {
		int merge;
		void *region;
		unsigned long eidx, i, start_off, end_off;
find_block:
		sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
		sidx = align_idx(bdata, sidx, step);
		eidx = sidx + PFN_UP(size);

		if (sidx >= midx || eidx > midx)
			break;

		for (i = sidx; i < eidx; i++)
			if (test_bit(i, bdata->node_bootmem_map)) {
				sidx = align_idx(bdata, i, step);
				if (sidx == i)
					sidx += step;
				goto find_block;
			}

		if (bdata->last_end_off & (PAGE_SIZE - 1) &&
				PFN_DOWN(bdata->last_end_off) + 1 == sidx)
			start_off = align_off(bdata, bdata->last_end_off, align);
		else
			start_off = PFN_PHYS(sidx);

		merge = PFN_DOWN(start_off) < sidx;
		end_off = start_off + size;

		bdata->last_end_off = end_off;
		bdata->hint_idx = PFN_UP(end_off);

		/*
		 * Reserve the area now:
		 */
		if (__reserve(bdata, PFN_DOWN(start_off) + merge,
				PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
			BUG();

		region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
				start_off);
		memset(region, 0, size);
		/*
		 * The min_count is set to 0 so that bootmem allocated blocks
		 * are never reported as leaks.
		 */
		kmemleak_alloc(region, size, 0, 0);
		return region;
	}

	if (fallback) {
		sidx = align_idx(bdata, fallback - 1, step);
		fallback = 0;
		goto find_block;
	}

	return NULL;
}
static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

#ifdef CONFIG_HAVE_ARCH_BOOTMEM
	{
		bootmem_data_t *p_bdata;

		p_bdata = bootmem_arch_preferred_node(bdata, size, align,
							goal, limit);
		if (p_bdata)
			return alloc_bootmem_core(p_bdata, size, align,
							goal, limit);
	}
#endif
	return NULL;
}
#endif
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
#ifdef CONFIG_NO_BOOTMEM
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

restart:

	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);

	if (ptr)
		return ptr;

	if (goal != 0) {
		goal = 0;
		goto restart;
	}

	return NULL;
#else
	bootmem_data_t *bdata;
	void *region;

restart:
	region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
	if (region)
		return region;

	list_for_each_entry(bdata, &bdata_list, list) {
		if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
			continue;
		if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
			break;

		region = alloc_bootmem_core(bdata, size, align, goal, limit);
		if (region)
			return region;
	}

	if (goal) {
		goal = 0;
		goto restart;
	}

	return NULL;
#endif
}
/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = 0;

#ifdef CONFIG_NO_BOOTMEM
	limit = -1UL;
#endif

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}
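/*
 * Usage sketch (illustrative size): callers that can survive failure check
 * the result instead of using the panicking variant:
 *
 *	buf = __alloc_bootmem_nopanic(65536, PAGE_SIZE, 0);
 *	if (!buf)
 *		printk(KERN_WARNING "early buffer unavailable, degrading\n");
 */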
static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}
/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	unsigned long limit = 0;

#ifdef CONFIG_NO_BOOTMEM
	limit = -1UL;
#endif

	return ___alloc_bootmem(size, align, goal, limit);
}
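/*
 * Usage sketch (illustrative): most callers go through the alloc_bootmem()
 * wrapper macros in <linux/bootmem.h>, which expand to this function, e.g.
 *
 *	table = alloc_bootmem_pages(PAGE_SIZE << order);
 *
 * The returned memory is always zeroed and, short of a panic, never NULL.
 */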
#ifndef CONFIG_NO_BOOTMEM
static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
				unsigned long size, unsigned long align,
				unsigned long goal, unsigned long limit)
{
	void *ptr;

	ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	/* fall back to any node */
	return ___alloc_bootmem(size, align, goal, limit);
}
#endif
/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

#ifdef CONFIG_NO_BOOTMEM
	return __alloc_memory_core_early(pgdat->node_id, size, align,
					 goal, -1ULL);
#else
	return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
#endif
}
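/*
 * Usage sketch (hypothetical nid and size): per-node data is best allocated
 * through this so it lands on the node that will use it:
 *
 *	map = __alloc_bootmem_node(NODE_DATA(nid), size, SMP_CACHE_BYTES, 0);
 *
 * A goal of 0 means "anywhere on the node"; per the kernel-doc above, other
 * nodes are tried only if the request can not be satisfied locally.
 */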
/*
 * __alloc_bootmem_node_high - like __alloc_bootmem_node(), but prefers
 * memory above MAX_DMA32_PFN when the node has plenty of it.
 */
void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
					unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
	unsigned long end_pfn;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* update goal according to MAX_DMA32_PFN */
	end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;

	/*
	 * Retarget the goal above the DMA32 boundary if the node spans at
	 * least 128MB (expressed in pages) beyond it.
	 */
	if (end_pfn > MAX_DMA32_PFN + (128 << (20 - PAGE_SHIFT)) &&
	    (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
		void *ptr;
		unsigned long new_goal;

		new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
#ifdef CONFIG_NO_BOOTMEM
		ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
						new_goal, -1ULL);
#else
		ptr = alloc_bootmem_core(pgdat->bdata, size, align,
						new_goal, 0);
#endif
		if (ptr)
			return ptr;
	}
#endif

	return __alloc_bootmem_node(pgdat, size, align, goal);
}
#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Return NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
#ifdef CONFIG_NO_BOOTMEM
	unsigned long pfn, goal, limit;

	pfn = section_nr_to_pfn(section_nr);
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;

	return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
					 SMP_CACHE_BYTES, goal, limit);
#else
	bootmem_data_t *bdata;
	unsigned long pfn, goal, limit;

	pfn = section_nr_to_pfn(section_nr);
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
	bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];

	return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
#endif
}
#endif
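/*
 * The goal/limit pair above confines the search to the physical span of a
 * single sparse section. Sketch (illustrative, SECTION_SIZE_BITS == 27):
 * section_nr 8 covers 0x40000000-0x47ffffff, so goal is 0x40000000 and
 * limit is 0x48000000.
 */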
void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

#ifdef CONFIG_NO_BOOTMEM
	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, -1ULL);
#else
	ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
#endif
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}
#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif
/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
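/*
 * Usage sketch (hypothetical length): early users that need 32-bit
 * DMA-addressable memory take the low variant:
 *
 *	dma_buf = __alloc_bootmem_low(buf_len, PAGE_SIZE, 0);
 *
 * The search is capped at ARCH_LOW_ADDRESS_LIMIT (4GB by default).
 */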
/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

#ifdef CONFIG_NO_BOOTMEM
	return __alloc_memory_core_early(pgdat->node_id, size, align,
					 goal, ARCH_LOW_ADDRESS_LIMIT);
#else
	return ___alloc_bootmem_node(pgdat->bdata, size, align,
				     goal, ARCH_LOW_ADDRESS_LIMIT);
#endif
}