/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/memblock.h>
struct memblock memblock;

static int memblock_debug, memblock_can_resize;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1];
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1];

#define MEMBLOCK_ERROR	(~(phys_addr_t)0)
/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}
/*
 * Address comparison utilities
 */
static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}
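/*
 * Example (illustrative): both helpers assume a power-of-two 'size' so
 * the mask arithmetic works:
 *
 *	memblock_align_down(0x12345, 0x1000) == 0x12000
 *	memblock_align_up(0x12345, 0x1000)   == 0x13000
 */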
static unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
					phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
static long memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
				phys_addr_t base2, phys_addr_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}
static long memblock_regions_adjacent(struct memblock_type *type,
				unsigned long r1, unsigned long r2)
{
	phys_addr_t base1 = type->regions[r1].base;
	phys_addr_t size1 = type->regions[r1].size;
	phys_addr_t base2 = type->regions[r2].base;
	phys_addr_t size2 = type->regions[r2].size;

	return memblock_addrs_adjacent(base1, size1, base2, size2);
}
long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}
/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */
static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end,
					phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	base = memblock_align_down((end - size), align);
	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return MEMBLOCK_ERROR;
}
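/*
 * Example (illustrative): looking for 0x1000 bytes in [0, 0x10000) with
 * align 0x1000 and a reserved block at [0xe800, 0xf800):
 *  - the first candidate is 0xf000, which overlaps the reserved block;
 *  - the search drops below it: align_down(0xe800 - 0x1000) = 0xd000;
 *  - [0xd000, 0xe000) is free, so 0xd000 is returned.
 */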
static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end)
{
	long i;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search, this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found != MEMBLOCK_ERROR)
			return found;
	}
	return MEMBLOCK_ERROR;
}
static void memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;
}
/* Assumption: base addr of region 1 < base addr of region 2 */
static void memblock_coalesce_regions(struct memblock_type *type,
				unsigned long r1, unsigned long r2)
{
	type->regions[r1].size += type->regions[r2].size;
	memblock_remove_region(type, r2);
}
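/*
 * Example (illustrative): if region r1 is [0x0, 0x1000) and region r2 is
 * [0x1000, 0x2000), coalescing grows r1 to [0x0, 0x2000) and removes r2,
 * shifting any later regions down by one slot.
 */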
/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);
static int memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	pr_debug("memblock: %s array full, doubling...", memblock_type_name(type));

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() is true and we
	 * use kmalloc(), or we use MEMBLOCK for allocations. That means that
	 * this is unsafe to use when bootmem is currently active (unless
	 * bootmem itself is implemented on top of MEMBLOCK, which isn't the
	 * case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
	} else
		addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr == MEMBLOCK_ERROR) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries!\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0);

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyways.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}
extern int __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					phys_addr_t addr2, phys_addr_t size2)
{
	return 1;
}
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

	/* First try and coalesce this MEMBLOCK with another. */
	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
		/* Check if arch allows coalescing */
		if (adjacent != 0 && type == &memblock.memory &&
		    !memblock_memory_can_coalesce(base, size, rgnbase, rgnsize))
			break;
		if (adjacent > 0) {
			type->regions[i].base -= size;
			type->regions[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			type->regions[i].size += size;
			coalesced++;
			break;
		}
	}

	/* If we plugged a hole, we may want to also coalesce with the
	 * next region
	 */
	if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1) &&
	    ((type != &memblock.memory ||
	      memblock_memory_can_coalesce(type->regions[i].base,
					   type->regions[i].size,
					   type->regions[i+1].base,
					   type->regions[i+1].size)))) {
		memblock_coalesce_regions(type, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;

	/* If we are out of space, we fail. It's too late to resize the array
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			break;
		}
	}

	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
	}
	type->cnt++;

	/* Is the array full? Try to resize it. If that fails, we undo
	 * our allocation and return an error
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		type->cnt--;
		return -1;
	}

	return 0;
}
long memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}
static long __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < type->cnt; i++) {
		rgnbegin = type->regions[i].base;
		rgnend = rgnbegin + type->regions[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == type->cnt)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		memblock_remove_region(type, i);
		return 0;
	}

	/* Check to see if the region is matching at the front */
	if (rgnbegin == base) {
		type->regions[i].base = end;
		type->regions[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		type->regions[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	type->regions[i].size = base - type->regions[i].base;
	return memblock_add_region(type, end, rgnend - end);
}
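/*
 * Example (illustrative): removing [0x2000, 0x3000) from a single region
 * [0x0, 0x10000) hits the split case: the region is trimmed to
 * [0x0, 0x2000) and a new region [0x3000, 0x10000) is added after it.
 */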
long memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}
long __init memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}
long __init memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}
phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	found = memblock_find_base(size, align, 0, max_addr);
	if (found != MEMBLOCK_ERROR &&
	    memblock_add_region(&memblock.reserved, found, size) >= 0)
		return found;

	return 0;
}
phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}
phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
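/*
 * Example (illustrative): carving out 64KB aligned to 4KB anywhere below
 * the current limit:
 *
 *	phys_addr_t pa = memblock_alloc(0x10000, 0x1000);
 *
 * memblock_alloc_base() panics on failure, while __memblock_alloc_base()
 * returns 0 so the caller can fall back gracefully.
 */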
/*
 * Additional node-local allocators. Search for node memory is bottom-up
 * and walks memblock regions within that node bottom-up as well, but allocation
 * within a memblock region is top-down. XXX I plan to fix that at some stage
 *
 * WARNING: Only available after early_node_map[] has been populated,
 * on some architectures, that is after all the calls to add_active_range()
 * have been done to populate it.
 */
phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	/*
	 * This code originates from sparc which really wants us to walk by
	 * addresses and returns the nid. This is not very convenient for
	 * early_pfn_map[] users as the map isn't sorted yet, and it really
	 * wants to be walked by nid.
	 *
	 * For now, I implement the inefficient method below which walks the
	 * early map multiple times. Eventually we may want to use an ARCH
	 * config option to implement a completely different method for both
	 * cases.
	 */
	unsigned long start_pfn, end_pfn;
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
		if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
			continue;
		*nid = i;
		return min(end, PFN_PHYS(end_pfn));
	}
#endif
	*nid = 0;

	return end;
}
static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
					phys_addr_t size,
					phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
			if (ret != MEMBLOCK_ERROR &&
			    memblock_add_region(&memblock.reserved, ret, size) >= 0)
				return ret;
		}
		start = this_end;
	}

	return MEMBLOCK_ERROR;
}
phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
						size, align, nid);
		if (ret != MEMBLOCK_ERROR)
			return ret;
	}

	return memblock_alloc(size, align);
}
/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}
phys_addr_t memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}
/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}
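/*
 * Example (illustrative): with memory regions [0, 1GB) and [2GB, 3GB)
 * and a 1.5GB limit, the first region consumes 1GB of the budget and the
 * second is truncated to [2GB, 2.5GB); reserved regions wholly above the
 * new end of DRAM are then dropped, and straddling ones are clipped.
 */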
static int memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
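/*
 * Example (illustrative): searching for 0x2800 in the sorted regions
 * {[0x0, 0x1000), [0x2000, 0x3000), [0x5000, 0x6000)} probes mid = 1
 * first; 0x2800 falls inside [0x2000, 0x3000), so index 1 is returned.
 * An address in none of the regions (e.g. 0x1800) returns -1.
 */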
int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}
int memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	/* Search the memory array, not the reserved one: this function
	 * answers whether the whole range is covered by known memory.
	 */
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}
int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}
void __init memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}
static void memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
			name, i, base, base + size - 1, size);
	}
}
void memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}
void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from here on */
	memblock_can_resize = 1;
}
void __init memblock_init(void)
{
	/* Hookup the initial arrays */
	memblock.memory.regions   = memblock_memory_init_regions;
	memblock.memory.max       = INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions = memblock_reserved_init_regions;
	memblock.reserved.max     = INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add() code below...
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}
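/*
 * Typical early boot sequence (illustrative; the exact calls and their
 * arguments belong to arch code, and base/size/kbase/ksize below are
 * hypothetical):
 *
 *	memblock_init();                // hook up the static arrays
 *	memblock_add(base, size);       // register RAM found by firmware
 *	memblock_reserve(kbase, ksize); // protect the kernel image, etc.
 *	memblock_analyze();             // compute memory_size, allow resizing
 *	pa = memblock_alloc(sz, align); // early allocations
 */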
static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
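/*
 * Passing "memblock=debug" on the kernel command line sets memblock_debug,
 * which lets memblock_dump_all() print the memory and reserved arrays.
 */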