memblock: Add arch function to control coalescing of memblock memory regions
[linux-2.6.git] mm/memblock.c
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/memblock.h>

struct memblock memblock;

static int memblock_debug, memblock_can_resize;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1];
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1];

#define MEMBLOCK_ERROR	(~(phys_addr_t)0)
/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}
/*
 * Address comparison utilities
 */

static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

static unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
					    phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
				    phys_addr_t base2, phys_addr_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long memblock_regions_adjacent(struct memblock_type *type,
				      unsigned long r1, unsigned long r2)
{
	phys_addr_t base1 = type->regions[r1].base;
	phys_addr_t size1 = type->regions[r1].size;
	phys_addr_t base2 = type->regions[r2].base;
	phys_addr_t size2 = type->regions[r2].size;

	return memblock_addrs_adjacent(base1, size1, base2, size2);
}
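/*
 * Example: memblock_addrs_adjacent(0x1000, 0x1000, 0x2000, 0x1000)
 * returns 1 because the second region starts exactly where the first
 * one ends; swapping the two pairs returns -1, and any gap or overlap
 * between the regions yields 0.
 */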
long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}
/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end,
					       phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	base = memblock_align_down((end - size), align);
	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return MEMBLOCK_ERROR;
}
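/*
 * The scan above starts from the highest aligned candidate below 'end'
 * and, each time the candidate collides with a reserved region, retries
 * just below that region's base. MEMBLOCK_ERROR therefore means the
 * window [start, end) contains no unreserved hole of 'size' bytes.
 */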
static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	long i;
	phys_addr_t base = 0;
	phys_addr_t res_base;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

	/* Pump up max_addr */
	if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE)
		max_addr = memblock.current_limit;

	/* We do a top-down search, this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;

		if (memblocksize < size)
			continue;
		base = min(memblockbase + memblocksize, max_addr);
		res_base = memblock_find_region(memblockbase, base, size, align);
		if (res_base != MEMBLOCK_ERROR)
			return res_base;
	}
	return MEMBLOCK_ERROR;
}
static void memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void memblock_coalesce_regions(struct memblock_type *type,
				      unsigned long r1, unsigned long r2)
{
	type->regions[r1].size += type->regions[r2].size;
	memblock_remove_region(type, r2);
}
/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);

static int memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	pr_debug("memblock: %s array full, doubling...", memblock_type_name(type));

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to use
	 * when bootmem is currently active (unless bootmem itself is implemented
	 * on top of MEMBLOCK, which isn't the case yet).
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab is
	 * active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
	} else
		addr = memblock_find_base(new_size, sizeof(phys_addr_t), MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr == MEMBLOCK_ERROR) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries!\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0);

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyway.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}
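/*
 * Illustration: with the default INIT_MEMBLOCK_REGIONS of 128, the first
 * doubling grows a type to 256 entries. Before SLAB is available the new
 * array itself comes out of memblock and must be recorded in
 * memblock.reserved, which is why memblock_double_array() copies the old
 * contents over before calling memblock_add_region(): the full array
 * being replaced may be the reserved array itself.
 */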
extern int __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					       phys_addr_t addr2, phys_addr_t size2)
{
	return 1;
}
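/*
 * Illustrative override (an assumption, not part of this file): an
 * architecture that must keep memory with different properties in
 * separate regions can supply a strong version of this hook, e.g. to
 * prevent merging across a hypothetical 4GB boundary:
 *
 *	int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
 *					 phys_addr_t addr2, phys_addr_t size2)
 *	{
 *		const phys_addr_t boundary = 0x100000000ULL;
 *
 *		return (addr1 < boundary) == (addr2 < boundary);
 *	}
 */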
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

	/* First try and coalesce this MEMBLOCK with another. */
	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
		/* Check if arch allows coalescing */
		if (adjacent != 0 && type == &memblock.memory &&
		    !memblock_memory_can_coalesce(base, size, rgnbase, rgnsize))
			break;
		if (adjacent > 0) {
			type->regions[i].base -= size;
			type->regions[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			type->regions[i].size += size;
			coalesced++;
			break;
		}
	}

	/* If we plugged a hole, we may want to also coalesce with the
	 * next region
	 */
	if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1) &&
	    ((type != &memblock.memory ||
	      memblock_memory_can_coalesce(type->regions[i].base,
					   type->regions[i].size,
					   type->regions[i+1].base,
					   type->regions[i+1].size)))) {
		memblock_coalesce_regions(type, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;

	/* If we are out of space, we fail. It's too late to resize the array
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			break;
		}
	}
	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
	}
	type->cnt++;

	/* The array is full? Try to resize it. If that fails, we undo
	 * our allocation and return an error.
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		type->cnt--;
		return -1;
	}

	return 0;
}
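/*
 * Example of the hole-plugging path: with regions [0, 1G) and [2G, 3G),
 * adding [1G, 2G) first extends [0, 1G) to [0, 2G) (the adjacent < 0
 * case), then the follow-up check merges that with [2G, 3G), leaving a
 * single [0, 3G) region and returning a coalesce count of 2.
 */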
long memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}
static long __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < type->cnt; i++) {
		rgnbegin = type->regions[i].base;
		rgnend = rgnbegin + type->regions[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == type->cnt)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		memblock_remove_region(type, i);
		return 0;
	}

	/* Check to see if the region is matching at the front */
	if (rgnbegin == base) {
		type->regions[i].base = end;
		type->regions[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		type->regions[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	type->regions[i].size = base - type->regions[i].base;
	return memblock_add_region(type, end, rgnend - end);
}
long memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}
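/*
 * Illustrative sequence (addresses are assumptions): a platform's early
 * setup typically registers its RAM and then protects firmware areas:
 *
 *	memblock_add(0x00000000, 0x40000000);	  // 1GB of RAM
 *	memblock_reserve(0x01000000, 0x00100000); // firmware image
 *	...
 *	memblock_free(0x01000000, 0x00100000);	  // once it is consumed
 */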
phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	found = memblock_find_base(size, align, max_addr);
	if (found != MEMBLOCK_ERROR &&
	    memblock_add_region(&memblock.reserved, found, size) >= 0)
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
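/*
 * Typical boot-time use (a sketch, not from this file): carve out a
 * page-aligned scratch buffer before the page allocator is up and map
 * its physical address with __va():
 *
 *	phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *	void *buf = __va(pa);
 *
 * memblock_alloc() cannot return 0: memblock_alloc_base() panics when
 * nothing below memblock.current_limit satisfies the request.
 */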
/*
 * Additional node-local allocators. The search for node memory is bottom-up
 * and walks memblock regions within that node bottom-up as well, but
 * allocation within a memblock region is top-down.
 */

phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
	*nid = 0;

	return end;
}

static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
						    phys_addr_t size,
						    phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
			if (ret != MEMBLOCK_ERROR &&
			    memblock_add_region(&memblock.reserved, ret, size) >= 0)
				return ret;
		}
		start = this_end;
	}

	return MEMBLOCK_ERROR;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
							    size, align, nid);
		if (ret != MEMBLOCK_ERROR)
			return ret;
	}

	return memblock_alloc(size, align);
}
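/*
 * Note: with the default (weak) memblock_nid_range() above, every region
 * reports node 0, so memblock_alloc_nid() serves nid 0 requests from the
 * first (lowest) region that fits, and any other nid falls through to a
 * plain memblock_alloc().
 */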
/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}

phys_addr_t memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}
/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}
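/*
 * Illustration: with memory regions [0, 2G) and [4G, 6G) and a
 * memory_limit of 3G, the first loop keeps [0, 2G) intact, truncates the
 * second region to [4G, 5G), and memblock_end_of_DRAM() then reports 5G
 * as the new upper bound used to clip the reserved regions.
 */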
/*
 * memblock_search() relies on the regions array being sorted by base and
 * non-overlapping, which memblock_add_region() maintains. It returns the
 * index of the region containing addr, or -1 if there is none.
 */
static int memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	/* The whole [base, base + size) range must sit in one memory region */
	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}

int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}
void __init memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
			name, i, base, base + size - 1, size);
	}
}

void memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}
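/*
 * Sample output with "memblock=debug" on the command line (values are
 * illustrative):
 *
 *	MEMBLOCK configuration:
 *	 memory size = 0x40000000
 *	 memory.cnt = 0x1
 *	 memory[0x0]	0x0000000000000000 - 0x000000003fffffff, 0x40000000 bytes
 */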
void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from there */
	memblock_can_resize = 1;
}

void __init memblock_init(void)
{
	/* Hookup the initial arrays */
	memblock.memory.regions = memblock_memory_init_regions;
	memblock.memory.max = INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions = memblock_reserved_init_regions;
	memblock.reserved.max = INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add() code below...
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
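/*
 * Illustrative boot flow (arch code, not part of this file): the arch
 * calls memblock_init(), registers RAM with memblock_add(), protects the
 * kernel image and firmware ranges with memblock_reserve(), calls
 * memblock_analyze() once the memory map is final (which enables array
 * resizing), and only then allocates with memblock_alloc() and friends.
 */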