/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/memblock.h>
struct memblock memblock;

static int memblock_debug, memblock_can_resize;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1];
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1];
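/* Each static array carries one extra slot: memblock_init() poisons it
 * with RED_INACTIVE and memblock_analyze() later checks that the marker
 * is intact, catching overruns of the initial region tables.
 */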
#define MEMBLOCK_ERROR	(~(phys_addr_t)0)

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}
/*
 * Address comparison utilities
 */

static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}
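/* Both helpers assume @size is a power of two; for example,
 * memblock_align_up(0x1234, 0x1000) yields 0x2000 while
 * memblock_align_down(0x1234, 0x1000) yields 0x1000.
 */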
static unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
					phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
static long memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
				phys_addr_t base2, phys_addr_t size2)
{
	if (base2 == base1 + size1)
		return 1;	/* region 2 starts right after region 1 */
	else if (base1 == base2 + size2)
		return -1;	/* region 1 starts right after region 2 */

	return 0;
}
static long memblock_regions_adjacent(struct memblock_type *type,
				unsigned long r1, unsigned long r2)
{
	phys_addr_t base1 = type->regions[r1].base;
	phys_addr_t size1 = type->regions[r1].size;
	phys_addr_t base2 = type->regions[r2].base;
	phys_addr_t size2 = type->regions[r2].size;

	return memblock_addrs_adjacent(base1, size1, base2, size2);
}
long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}
/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */
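/* Scan [start, end) from the top down for a free block of @size bytes
 * aligned to @align; whenever the candidate overlaps a reserved region,
 * the search resumes just below that region's base.
 */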
static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end,
					phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	base = memblock_align_down((end - size), align);
	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;	/* no reserved region in the way */
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return MEMBLOCK_ERROR;
}
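/* Try each usable memory region from the highest downward, clamp it to
 * [start, end) and hand the resulting window to memblock_find_region();
 * the first successful fit wins.
 */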
static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end)
{
	long i;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search, this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found != MEMBLOCK_ERROR)
			return found;
	}
	return MEMBLOCK_ERROR;
}
static void memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;
}
/* Assumption: base addr of region 1 < base addr of region 2 */
static void memblock_coalesce_regions(struct memblock_type *type,
		unsigned long r1, unsigned long r2)
{
	type->regions[r1].size += type->regions[r2].size;
	memblock_remove_region(type, r2);
}
/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);
static int memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	pr_debug("memblock: %s array full, doubling...", memblock_type_name(type));

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to use
	 * when bootmem is currently active (unless bootmem itself is implemented
	 * on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab is
	 * active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
	} else
		addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr == MEMBLOCK_ERROR) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail ! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0);

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyways.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}
extern int __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					phys_addr_t addr2, phys_addr_t size2)
{
	return 1;
}
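/* The weak stub above permits all merging; an architecture can provide
 * its own version to keep adjacent regions separate, e.g. when they
 * belong to distinct hardware address domains.
 */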
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

	/* First try and coalesce this MEMBLOCK with another. */
	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
		/* Check if arch allows coalescing */
		if (adjacent != 0 && type == &memblock.memory &&
		    !memblock_memory_can_coalesce(base, size, rgnbase, rgnsize))
			break;
		if (adjacent > 0) {
			type->regions[i].base -= size;
			type->regions[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			type->regions[i].size += size;
			coalesced++;
			break;
		}
	}

	/* If we plugged a hole, we may want to also coalesce with the
	 * next region
	 */
	if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1) &&
	    ((type != &memblock.memory || memblock_memory_can_coalesce(type->regions[i].base,
							       type->regions[i].size,
							       type->regions[i+1].base,
							       type->regions[i+1].size)))) {
		memblock_coalesce_regions(type, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;

	/* If we are out of space, we fail. It's too late to resize the array
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			break;
		}
	}

	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
	}
	type->cnt++;

	/* The array is full ? Try to resize it. If that fails, we undo
	 * our allocation and return an error
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		type->cnt--;
		return -1;
	}

	return 0;
}
long memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}
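/* Illustrative boot-time sequence (not from this file; the address
 * variables are made-up names): an architecture typically does
 *
 *	memblock_init();
 *	memblock_add(ram_base, ram_size);
 *	memblock_reserve(kernel_start, kernel_end - kernel_start);
 *	memblock_analyze();
 *	paddr = memblock_alloc(0x10000, PAGE_SIZE);
 *
 * i.e. register RAM, reserve what is already in use, analyze, then
 * allocate.
 */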
static long __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < type->cnt; i++) {
		rgnbegin = type->regions[i].base;
		rgnend = rgnbegin + type->regions[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == type->cnt)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		memblock_remove_region(type, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		type->regions[i].base = end;
		type->regions[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		type->regions[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	type->regions[i].size = base - type->regions[i].base;
	return memblock_add_region(type, end, rgnend - end);
}
long memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}
long __init memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}
long __init memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}
phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	found = memblock_find_base(size, align, 0, max_addr);
	if (found != MEMBLOCK_ERROR &&
	    memblock_add_region(&memblock.reserved, found, size) >= 0)
		return found;

	return 0;
}
phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}
phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
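/* The three allocators differ only in limit and failure policy
 * (illustrative calls, with made-up size/limit values):
 *
 *	pa = __memblock_alloc_base(sz, PAGE_SIZE, limit); // returns 0 on failure
 *	pa = memblock_alloc_base(sz, PAGE_SIZE, limit);   // panics on failure
 *	pa = memblock_alloc(sz, PAGE_SIZE);               // limit = current_limit,
 *							  // panics on failure
 */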
/*
 * Additional node-local allocators. Search for node memory is bottom up
 * and walks memblock regions within that node bottom-up as well, but allocation
 * within a memblock region is top-down.
 */
phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
	/* Default: treat all memory as node 0 and the whole range as
	 * belonging to it; NUMA architectures override this.
	 */
	*nid = 0;

	return end;
}
static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
					phys_addr_t size,
					phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
			if (ret != MEMBLOCK_ERROR &&
			    memblock_add_region(&memblock.reserved, ret, size) >= 0)
				return ret;
		}
		start = this_end;
	}

	return MEMBLOCK_ERROR;
}
phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
					size, align, nid);
		if (ret != MEMBLOCK_ERROR)
			return ret;
	}

	/* No node-local memory found: fall back to an ordinary allocation */
	return memblock_alloc(size, align);
}
/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}
phys_addr_t memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}
/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}
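/* Binary search for the region containing @addr; relies on the region
 * array being sorted by base address and non-overlapping, which
 * memblock_add_region() maintains. Returns the region index or -1.
 */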
static int memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}
int memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}
int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}
void __init memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}
static void memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
			name, i, base, base + size - 1, size);
	}
}
void memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}
void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from there */
	memblock_can_resize = 1;
}
void __init memblock_init(void)
{
	/* Hookup the initial arrays */
	memblock.memory.regions   = memblock_memory_init_regions;
	memblock.memory.max       = INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions = memblock_reserved_init_regions;
	memblock.reserved.max     = INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add() code below...
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto for the reserved array. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}
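/* Booting with "memblock=debug" on the kernel command line sets
 * memblock_debug, which enables the dumps in memblock_dump_all().
 */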
static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);