/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.  June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/lmb.h>
#define DBG(fmt...) LMB_DBG(fmt)

#define LMB_ALLOC_ANYWHERE	0

/* The single global LMB descriptor: known memory plus reservations. */
struct lmb lmb;
void lmb_dump_all(void)
{
	unsigned long i;

	DBG("lmb_dump_all:\n");
	DBG("    memory.cnt              = 0x%lx\n", lmb.memory.cnt);
	DBG("    memory.size             = 0x%llx\n",
	    (unsigned long long)lmb.memory.size);
	for (i = 0; i < lmb.memory.cnt; i++) {
		DBG("    memory.region[0x%lx].base   = 0x%llx\n",
		    i, (unsigned long long)lmb.memory.region[i].base);
		DBG("                      .size   = 0x%llx\n",
		    (unsigned long long)lmb.memory.region[i].size);
	}

	DBG("\n    reserved.cnt            = 0x%lx\n", lmb.reserved.cnt);
	DBG("    reserved.size           = 0x%llx\n",
	    (unsigned long long)lmb.reserved.size);
	for (i = 0; i < lmb.reserved.cnt; i++) {
		DBG("    reserved.region[0x%lx].base = 0x%llx\n",
		    i, (unsigned long long)lmb.reserved.region[i].base);
		DBG("                      .size = 0x%llx\n",
		    (unsigned long long)lmb.reserved.region[i].size);
	}
}
static unsigned long __init lmb_addrs_overlap(u64 base1, u64 size1,
		u64 base2, u64 size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
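
/*
 * Returns 1 if [base2, base2+size2) immediately follows [base1, base1+size1),
 * -1 if the first block immediately follows the second, and 0 if the two
 * blocks are not adjacent.
 */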
static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
		u64 base2, u64 size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}
static long __init lmb_regions_adjacent(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	u64 base1 = rgn->region[r1].base;
	u64 size1 = rgn->region[r1].size;
	u64 base2 = rgn->region[r2].base;
	u64 size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}
static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}
/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init lmb_coalesce_regions(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}
/* This routine is called with relocation disabled. */
void __init lmb_init(void)
{
	/* Create a dummy zero size LMB which will get coalesced away later.
	 * This simplifies the lmb_add() code below...
	 */
	lmb.memory.region[0].base = 0;
	lmb.memory.region[0].size = 0;
	lmb.memory.cnt = 1;

	/* Ditto for the reserved region. */
	lmb.reserved.region[0].base = 0;
	lmb.reserved.region[0].size = 0;
	lmb.reserved.cnt = 1;
}
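
/*
 * A typical boot-time sequence, sketched for illustration only (the exact
 * call sites live in platform setup code and may differ):
 *
 *	lmb_init();			// set up the empty descriptor
 *	lmb_add(base, size);		// once per range of usable memory
 *	lmb_analyze();			// compute lmb.memory.size
 *	lmb_reserve(base, size);	// carve out firmware/kernel areas
 *	ptr = lmb_alloc(size, align);	// boot-time allocations
 */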
/* This routine may be called with relocation disabled. */
void __init lmb_analyze(void)
{
	int i;

	lmb.memory.size = 0;

	for (i = 0; i < lmb.memory.cnt; i++)
		lmb.memory.size += lmb.memory.region[i].size;
}
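
/*
 * Insert a new block into a region's table, keeping the table sorted by
 * base address.  A block that is adjacent to (or duplicates) an existing
 * entry is merged in place rather than consuming a new table slot.
 */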
/* This routine is called with relocation disabled. */
static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		lmb_coalesce_regions(rgn, i, i + 1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}
	rgn->cnt++;

	return 0;
}
/* This routine may be called with relocation disabled. */
long __init lmb_add(u64 base, u64 size)
{
	struct lmb_region *_rgn = &lmb.memory;

	/* On pSeries LPAR systems, the first LMB is our RMO region. */
	if (base == 0)
		lmb.rmo_size = size;

	return lmb_add_region(_rgn, base, size);
}
long __init lmb_reserve(u64 base, u64 size)
{
	struct lmb_region *_rgn = &lmb.reserved;

	return lmb_add_region(_rgn, base, size);
}
long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}
u64 __init lmb_alloc(u64 size, u64 align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}
u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	u64 alloc;

	alloc = __lmb_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}
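
/*
 * Both helpers below assume the alignment "size" is a power of two, so
 * that "size - 1" forms a mask of the low-order bits.  For example, with
 * size = 0x1000: lmb_align_down(0x1234, 0x1000) == 0x1000 and
 * lmb_align_up(0x1234, 0x1000) == 0x2000.
 */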
static u64 lmb_align_down(u64 addr, u64 size)
{
	return addr & ~(size - 1);
}
static u64 lmb_align_up(u64 addr, u64 size)
{
	return (addr + (size - 1)) & ~(size - 1);
}
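
/*
 * Search the memory regions from the top down for a free range of "size"
 * bytes, stepping below any reserved blocks that get in the way.  Returns
 * the allocated base address, or 0 on failure.
 */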
u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	long i, j;
	u64 base = 0;

	/* On some platforms, make sure we allocate lowmem */
	if (max_addr == LMB_ALLOC_ANYWHERE)
		max_addr = LMB_REAL_LIMIT;

	for (i = lmb.memory.cnt - 1; i >= 0; i--) {
		u64 lmbbase = lmb.memory.region[i].base;
		u64 lmbsize = lmb.memory.region[i].size;

		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = min(lmbbase + lmbsize, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while ((lmbbase <= base) &&
		       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0))
			base = lmb_align_down(lmb.reserved.region[j].base - size,
					      align);

		if ((base != 0) && (lmbbase <= base))
			break;
	}

	if (i < 0)
		return 0;

	if (lmb_add_region(&lmb.reserved, base, lmb_align_up(size, align)) < 0)
		return 0;

	return base;
}
/* You must call lmb_analyze() before this. */
u64 __init lmb_phys_mem_size(void)
{
	return lmb.memory.size;
}
u64 __init lmb_end_of_DRAM(void)
{
	int idx = lmb.memory.cnt - 1;

	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}
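
/*
 * Clip the memory map so that no more than "memory_limit" bytes remain,
 * then shrink or drop any reserved blocks that now fall past the limit.
 */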
/* You must call lmb_analyze() after this. */
void __init lmb_enforce_memory_limit(u64 memory_limit)
{
	unsigned long i;
	u64 limit;
	struct lmb_property *p;

	if (!memory_limit)
		return;

	/* Truncate the lmb regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < lmb.memory.cnt; i++) {
		if (limit > lmb.memory.region[i].size) {
			limit -= lmb.memory.region[i].size;
			continue;
		}

		lmb.memory.region[i].size = limit;
		lmb.memory.cnt = i + 1;
		break;
	}

	if (lmb.memory.region[0].size < lmb.rmo_size)
		lmb.rmo_size = lmb.memory.region[0].size;

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		p = &lmb.reserved.region[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			lmb_remove_region(&lmb.reserved, i);
			i--;
		}
	}
}
int __init lmb_is_reserved(u64 addr)
{
	int i;

	for (i = 0; i < lmb.reserved.cnt; i++) {
		u64 upper = lmb.reserved.region[i].base +
			    lmb.reserved.region[i].size - 1;

		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}