/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,
	.reserved.name		= "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
	.physmem.name		= "physmem",
#endif

	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

ulong __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, the @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation failed, will try to allocate memory top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * it happens.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}
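
/*
 * Illustrative sketch, not part of the upstream file: a caller probes for a
 * window and must still memblock_reserve() the result itself.  The wrapper
 * name below is hypothetical.
 */
static phys_addr_t __init __maybe_unused find_window_example(void)
{
	/* 1MiB, 1MiB-aligned, anywhere below memblock.current_limit */
	return memblock_find_in_range_node(0x100000, 0x100000, 0,
					   MEMBLOCK_ALLOC_ACCESSIBLE,
					   NUMA_NO_NODE, MEMBLOCK_NONE);
}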

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	ulong flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
/**
 * Discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new one aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to
	 * use when bootmem is currently active (unless bootmem itself is
	 * implemented on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
			type->name, type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise,
	 * we needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
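
/*
 * Illustrative sketch, not part of the upstream file: overlapping adds of
 * compatible regions collapse into one minimal region, as documented above.
 * Addresses and the function name are made up.
 */
static void __init __maybe_unused add_range_merge_example(void)
{
	memblock_add_range(&memblock.memory, 0x1000, 0x2000, MAX_NUMNODES, 0);
	/* overlaps the tail of the first add ... */
	memblock_add_range(&memblock.memory, 0x2000, 0x2000, MAX_NUMNODES, 0);
	/* ... leaving a single region [0x1000, 0x4000) in memblock.memory */
}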

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_add: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}
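
/*
 * Illustrative sketch, not part of the upstream file: isolating the middle
 * of one region splits it in three, with *start_rgn/*end_rgn bracketing the
 * piece inside the range.  Addresses and the function name are made up.
 */
static void __init __maybe_unused isolate_range_example(void)
{
	int start_rgn, end_rgn;

	/* with memory = [0x1000, 0x4000), isolate [0x2000, 0x3000) */
	if (memblock_isolate_range(&memblock.memory, 0x2000, 0x1000,
				   &start_rgn, &end_rgn))
		return;
	/*
	 * memory is now [0x1000, 0x2000), [0x2000, 0x3000),
	 * [0x3000, 0x4000), and [start_rgn, end_rgn) == [1, 2).
	 */
}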

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_remove: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("   memblock_free: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_reserve: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}
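
/*
 * Illustrative sketch, not part of the upstream file: the usual early-boot
 * sequence adds RAM reported by firmware and then carves out ranges the
 * page allocator must never see.  Addresses and the function name are
 * made up.
 */
static void __init __maybe_unused early_mem_setup_example(void)
{
	/* a RAM bank discovered from the firmware memory map */
	memblock_add(0x80000000, 0x40000000);
	/* keep the kernel image itself out of the allocator */
	memblock_reserve(__pa_symbol(_text), _end - _text);
	/* ... and a firmware-provided initrd */
	memblock_reserve(0xa0000000, 0x800000);
}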

/*
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}
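
/*
 * Illustrative sketch, not part of the upstream file: attribute flags are
 * typically applied right after the ranges are known, e.g. from firmware
 * table parsing.  Addresses and the function name are made up.
 */
static void __init __maybe_unused mark_attributes_example(void)
{
	/* mirrored RAM: allocations with MEMBLOCK_MIRROR will prefer it */
	memblock_mark_mirror(0x80000000, 0x10000000);
	/* hot-unpluggable bank: kernel data should stay out if possible */
	memblock_mark_hotplug(0x90000000, 0x10000000);
}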

/**
 * __next_reserved_mem_region - next function for for_each_reserved_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
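
/*
 * Illustrative sketch, not part of the upstream file: the iterator above is
 * normally consumed via the for_each_free_mem_range() wrapper, which starts
 * *idx at 0 and stops once it becomes ULLONG_MAX.  The function name is
 * made up.
 */
static phys_addr_t __init __maybe_unused total_free_example(void)
{
	phys_addr_t start, end, total = 0;
	u64 i;

	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&start, &end, NULL)
		total += end - start;	/* memory minus reserved */
	return total;
}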

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);
		return found;
	}
	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					flags);
}

phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid, ulong flags)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	ulong flags = choose_memblock_flags();
	phys_addr_t ret;

again:
	ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
				      nid, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}
	return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
				       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate %pa bytes below %pa.\n",
		      &size, &max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
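
/*
 * Illustrative sketch, not part of the upstream file: the physical
 * allocators above return an address that is already in memblock.reserved.
 * Sizes and the function name are made up.
 */
static phys_addr_t __init __maybe_unused phys_alloc_example(void)
{
	/* try node 0 first; this variant returns 0 on failure */
	phys_addr_t pa = memblock_alloc_nid(4 * PAGE_SIZE, PAGE_SIZE, 0);

	if (!pa)	/* the plain variant panics if it cannot allocate */
		pa = memblock_alloc(4 * PAGE_SIZE, PAGE_SIZE);
	return pa;
}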

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it cannot be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node cannot
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of allocated boot memory block is converted to virtual and
 * allocated memory is reset to 0.
 *
 * In addition, function sets the min_count to 0 using kmemleak_alloc for
 * allocated boot memory block, so that it is never reported as leaks.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;
	ulong flags = choose_memblock_flags();

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;
again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid, flags);
	if (alloc && !memblock_reserve(alloc, size))
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE,
						    flags);
		if (alloc && !memblock_reserve(alloc, size))
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return NULL;
done:
	ptr = phys_to_virt(alloc);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}

/**
 * memblock_virt_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);

	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
#ifdef CONFIG_DEBUG_VM
	if (ptr && size > 0)
		memset(ptr, PAGE_POISON_PATTERN, size);
#endif
	return ptr;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);

	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		memset(ptr, 0, size);
	return ptr;
}

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of memblock_virt_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr) {
		memset(ptr, 0, size);
		return ptr;
	}

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}
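
/*
 * Illustrative sketch, not part of the upstream file: a typical zeroed,
 * node-preferring early allocation through the _virt_ API.  The function
 * name is made up, and MEMBLOCK_ALLOC_ACCESSIBLE (0) is passed for
 * @max_addr to cap the search at memblock.current_limit.
 */
static void * __init __maybe_unused virt_alloc_example(int nid)
{
	/* panics on failure, so the return value needs no NULL check */
	return memblock_virt_alloc_try_nid(PAGE_SIZE, SMP_CACHE_BYTES, 0,
					   MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}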

/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	memblock_remove_range(&memblock.reserved, base, size);
}

/*
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @addr: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions, if the @limit exceeds the total size
	 * of those regions, max_addr will keep original value PHYS_ADDR_MAX
	 */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}

void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			base + size, PHYS_ADDR_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	unsigned long flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#lx\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&memblock.physmem);
#endif
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);
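
/*
 * With debugfs mounted, each file created below prints one line per region
 * in the format used by memblock_debug_show(), e.g. (made-up addresses):
 *
 *    0: 0x0000000000001000..0x000000003fffffff
 *    1: 0x0000000100000000..0x000000013fffffff
 */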

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root,
			    &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */