/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/lmb.h>

#define LMB_ALLOC_ANYWHERE	0

struct lmb lmb;

static int lmb_debug;
static int __init early_lmb(char *p)
{
	if (p && strstr(p, "debug"))
		lmb_debug = 1;
	return 0;
}
early_param("lmb", early_lmb);
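
/*
 * Usage sketch: booting with "lmb=debug" on the kernel command line sets
 * lmb_debug, which makes lmb_dump_all() below print the region tables
 * instead of returning early.
 */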
void lmb_dump_all(void)
{
	unsigned long i;

	if (!lmb_debug)
		return;

	pr_info("lmb_dump_all:\n");
	pr_info("    memory.cnt = 0x%lx\n", lmb.memory.cnt);
	pr_info("    memory.size = 0x%llx\n",
		(unsigned long long)lmb.memory.size);
	for (i = 0; i < lmb.memory.cnt; i++) {
		pr_info("    memory.region[0x%lx].base = 0x%llx\n",
			i, (unsigned long long)lmb.memory.region[i].base);
		pr_info("                      .size = 0x%llx\n",
			(unsigned long long)lmb.memory.region[i].size);
	}

	pr_info("    reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
	pr_info("    reserved.size = 0x%llx\n",
		(unsigned long long)lmb.reserved.size);
	for (i = 0; i < lmb.reserved.cnt; i++) {
		pr_info("    reserved.region[0x%lx].base = 0x%llx\n",
			i, (unsigned long long)lmb.reserved.region[i].base);
		pr_info("                      .size = 0x%llx\n",
			(unsigned long long)lmb.reserved.region[i].size);
	}
}
static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
					u64 size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}
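
/*
 * Worked example: for base1 = 0x1000, size1 = 0x1000, a second region at
 * base2 = 0x2000 returns 1 (region 2 immediately follows region 1), while
 * base2 = 0x0000 with size2 = 0x1000 returns -1 (region 2 immediately
 * precedes region 1). Non-adjacent regions return 0.
 */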
static long lmb_regions_adjacent(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	u64 base1 = rgn->region[r1].base;
	u64 size1 = rgn->region[r1].size;
	u64 base2 = rgn->region[r2].base;
	u64 size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}
static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}
/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}
void __init lmb_init(void)
{
	/* Create a dummy zero size LMB which will get coalesced away later.
	 * This simplifies the lmb_add() code below...
	 */
	lmb.memory.region[0].base = 0;
	lmb.memory.region[0].size = 0;
	lmb.memory.cnt = 1;

	/* Ditto for the reserved table. */
	lmb.reserved.region[0].base = 0;
	lmb.reserved.region[0].size = 0;
	lmb.reserved.cnt = 1;
}
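
/*
 * Typical boot-time sequence (a sketch; the real callers live in
 * architecture setup code, not in this file):
 *
 *	lmb_init();                           set up the dummy entries
 *	lmb_add(base, size);                  register each RAM range
 *	lmb_analyze();                        total up lmb.memory.size
 *	lmb_reserve(kernel_base, kernel_sz);  protect the kernel image
 *	addr = lmb_alloc(sz, align);          carve out early allocations
 */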
void __init lmb_analyze(void)
{
	int i;

	lmb.memory.size = 0;

	for (i = 0; i < lmb.memory.cnt; i++)
		lmb.memory.size += lmb.memory.region[i].size;
}
static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	/* Take over the dummy zero-size entry created by lmb_init(). */
	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
		lmb_coalesce_regions(rgn, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i+1].base = rgn->region[i].base;
			rgn->region[i+1].size = rgn->region[i].size;
		} else {
			rgn->region[i+1].base = base;
			rgn->region[i+1].size = size;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}
	rgn->cnt++;

	return 0;
}
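
/*
 * Worked example: with an existing region [0x1000, 0x2000), adding
 * [0x2000, 0x3000) takes the adjacent < 0 branch (the new block sits
 * right after the existing one), so the table keeps one region
 * [0x1000, 0x3000) instead of growing toward MAX_LMB_REGIONS.
 */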
long lmb_add(u64 base, u64 size)
{
	struct lmb_region *_rgn = &lmb.memory;

	/* On pSeries LPAR systems, the first LMB is our RMO region. */
	if (base == 0)
		lmb.rmo_size = size;

	return lmb_add_region(_rgn, base, size);
}
long lmb_remove(u64 base, u64 size)
{
	struct lmb_region *rgn = &(lmb.memory);
	u64 rgnbegin, rgnend;
	u64 end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return lmb_add_region(rgn, end, rgnend - end);
}
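
/*
 * Worked example of the final split case: removing [0x2000, 0x3000) from
 * a single region [0x1000, 0x4000) shrinks that entry to [0x1000, 0x2000)
 * and re-adds [0x3000, 0x4000), leaving two regions around the hole.
 */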
long __init lmb_reserve(u64 base, u64 size)
{
	struct lmb_region *_rgn = &lmb.reserved;

	BUG_ON(0 == size);

	return lmb_add_region(_rgn, base, size);
}
long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}
static u64 lmb_align_down(u64 addr, u64 size)
{
	return addr & ~(size - 1);
}

static u64 lmb_align_up(u64 addr, u64 size)
{
	return (addr + (size - 1)) & ~(size - 1);
}
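
/*
 * Both helpers assume "size" (the alignment) is a power of two, e.g.
 * lmb_align_up(0x1234, 0x1000) == 0x2000 and
 * lmb_align_down(0x1234, 0x1000) == 0x1000.
 */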
static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
					   u64 size, u64 align)
{
	u64 base, res_base;
	long j;

	base = lmb_align_down((end - size), align);
	while (start <= base) {
		j = lmb_overlaps_region(&lmb.reserved, base, size);
		if (j < 0) {
			/* this area isn't reserved, take it */
			if (lmb_add_region(&lmb.reserved, base, size) < 0)
				base = ~(u64)0;
			return base;
		}
		res_base = lmb.reserved.region[j].base;
		if (res_base < size)
			break;
		base = lmb_align_down(res_base - size, align);
	}

	return ~(u64)0;
}
static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
				       u64 (*nid_range)(u64, u64, int *),
				       u64 size, u64 align, int nid)
{
	u64 start, end;

	start = mp->base;
	end = start + mp->size;

	start = lmb_align_up(start, align);
	while (start < end) {
		u64 this_end;
		int this_nid;

		this_end = nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			u64 ret = lmb_alloc_nid_unreserved(start, this_end,
							   size, align);
			if (ret != ~(u64)0)
				return ret;
		}
		start = this_end;
	}

	return ~(u64)0;
}
u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
			 u64 (*nid_range)(u64 start, u64 end, int *nid))
{
	struct lmb_region *mem = &lmb.memory;
	int i;

	BUG_ON(0 == size);

	size = lmb_align_up(size, align);

	for (i = 0; i < mem->cnt; i++) {
		u64 ret = lmb_alloc_nid_region(&mem->region[i],
					       nid_range,
					       size, align, nid);
		if (ret != ~(u64)0)
			return ret;
	}

	/* No memory on the requested node; fall back to any node. */
	return lmb_alloc(size, align);
}
u64 __init lmb_alloc(u64 size, u64 align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}
u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	u64 alloc;

	alloc = __lmb_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}
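
/*
 * lmb_alloc_base() is the "cannot fail" variant: it panics instead of
 * returning on failure, so early boot callers may use the result
 * unchecked. Callers that can recover should use __lmb_alloc_base()
 * below, which returns 0 on failure.
 */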
u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	long i, j;
	u64 base = 0;
	u64 res_base;

	BUG_ON(0 == size);

	size = lmb_align_up(size, align);

	/* On some platforms, make sure we allocate lowmem */
	/* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
	if (max_addr == LMB_ALLOC_ANYWHERE)
		max_addr = LMB_REAL_LIMIT;

	for (i = lmb.memory.cnt - 1; i >= 0; i--) {
		u64 lmbbase = lmb.memory.region[i].base;
		u64 lmbsize = lmb.memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = min(lmbbase + lmbsize, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			j = lmb_overlaps_region(&lmb.reserved, base, size);
			if (j < 0) {
				/* this area isn't reserved, take it */
				if (lmb_add_region(&lmb.reserved, base, size) < 0)
					return 0;
				return base;
			}
			res_base = lmb.reserved.region[j].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}
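
/*
 * The search is top-down: regions are walked from the highest, and within
 * each region the highest aligned base is tried first, stepping down past
 * any reserved region it collides with. E.g. with memory [0, 0x10000000),
 * a reservation at [0xfff0000, 0x10000000) and align = 0x1000, a
 * 0x1000-byte request lands just below the reservation at 0xffef000
 * (assuming LMB_REAL_LIMIT does not cap the search lower).
 */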
/* You must call lmb_analyze() before this. */
u64 __init lmb_phys_mem_size(void)
{
	return lmb.memory.size;
}

u64 __init lmb_end_of_DRAM(void)
{
	int idx = lmb.memory.cnt - 1;

	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}
/* You must call lmb_analyze() after this. */
void __init lmb_enforce_memory_limit(u64 memory_limit)
{
	unsigned long i;
	u64 limit;
	struct lmb_property *p;

	if (!memory_limit)
		return;

	/* Truncate the lmb regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < lmb.memory.cnt; i++) {
		if (limit > lmb.memory.region[i].size) {
			limit -= lmb.memory.region[i].size;
			continue;
		}

		lmb.memory.region[i].size = limit;
		lmb.memory.cnt = i + 1;
		break;
	}

	if (lmb.memory.region[0].size < lmb.rmo_size)
		lmb.rmo_size = lmb.memory.region[0].size;

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		p = &lmb.reserved.region[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			lmb_remove_region(&lmb.reserved, i);
			i--;
		}
	}
}
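
/*
 * Worked example: with two 0x40000000-byte memory regions and
 * memory_limit = 0x60000000, the first region passes through untouched
 * (limit drops to 0x20000000) and the second is truncated to 0x20000000
 * bytes; reserves past the new limit are then clipped or dropped.
 */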
int __init lmb_is_reserved(u64 addr)
{
	int i;

	for (i = 0; i < lmb.reserved.cnt; i++) {
		u64 upper = lmb.reserved.region[i].base +
			lmb.reserved.region[i].size - 1;
		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}
/*
 * Given a <base, len>, find which memory regions belong to this range.
 * Adjust the request and return a contiguous chunk.
 */
int lmb_find(struct lmb_property *res)
{
	int i;
	u64 rstart, rend;

	rstart = res->base;
	rend = rstart + res->size - 1;

	for (i = 0; i < lmb.memory.cnt; i++) {
		u64 start = lmb.memory.region[i].base;
		u64 end = start + lmb.memory.region[i].size - 1;

		if (start > rend)
			return -1;

		if ((end >= rstart) && (start < rend)) {
			/* adjust the request */
			if (rstart < start)
				rstart = start;
			if (rend > end)
				rend = end;
			res->base = rstart;
			res->size = rend - rstart + 1;
			return 0;
		}
	}
	return -1;
}
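
/*
 * Usage sketch: the caller fills res->base and res->size with the range
 * it wants; on success (return 0) res is clamped to the portion of that
 * range covered by the first overlapping memory region.
 */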