/*
 * Procedures for interfacing to Open Firmware.
 *
 * Peter Bergner, IBM Corp.  June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/types.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>

struct lmb lmb;
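
/*
 * Debug helper: print the current contents of the memory and reserved
 * region tables through udbg_printf().
 */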
void lmb_dump_all(void)
{
        unsigned long i;

        udbg_printf("lmb_dump_all:\n");
        udbg_printf("    memory.cnt                  = 0x%lx\n",
                    lmb.memory.cnt);
        udbg_printf("    memory.size                 = 0x%lx\n",
                    lmb.memory.size);
        for (i=0; i < lmb.memory.cnt; i++) {
                udbg_printf("    memory.region[0x%x].base     = 0x%lx\n",
                            i, lmb.memory.region[i].base);
                udbg_printf("                      .physbase  = 0x%lx\n",
                            lmb.memory.region[i].physbase);
                udbg_printf("                      .size      = 0x%lx\n",
                            lmb.memory.region[i].size);
        }

        udbg_printf("\n    reserved.cnt              = 0x%lx\n",
                    lmb.reserved.cnt);
        udbg_printf("    reserved.size               = 0x%lx\n",
                    lmb.reserved.size);
        for (i=0; i < lmb.reserved.cnt; i++) {
                udbg_printf("    reserved.region[0x%x].base   = 0x%lx\n",
                            i, lmb.reserved.region[i].base);
                udbg_printf("                      .physbase  = 0x%lx\n",
                            lmb.reserved.region[i].physbase);
                udbg_printf("                      .size      = 0x%lx\n",
                            lmb.reserved.region[i].size);
        }
}
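
/* Nonzero when the half-open ranges [base1, base1+size1) and
 * [base2, base2+size2) intersect.
 */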
static unsigned long __init
lmb_addrs_overlap(unsigned long base1, unsigned long size1,
                  unsigned long base2, unsigned long size2)
{
        return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
}
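
/* Returns 1 if the second range starts exactly where the first ends,
 * -1 if the first starts exactly where the second ends, 0 otherwise.
 */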
static long __init
lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
                   unsigned long base2, unsigned long size2)
{
        if (base2 == base1 + size1)
                return 1;
        else if (base1 == base2 + size2)
                return -1;

        return 0;
}
static long __init
lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
{
        unsigned long base1 = rgn->region[r1].base;
        unsigned long size1 = rgn->region[r1].size;
        unsigned long base2 = rgn->region[r2].base;
        unsigned long size2 = rgn->region[r2].size;

        return lmb_addrs_adjacent(base1, size1, base2, size2);
}
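
/* Merge region r2 into r1 and slide the remaining entries down one slot. */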
/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init
lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
{
        unsigned long i;

        rgn->region[r1].size += rgn->region[r2].size;
        for (i=r2; i < rgn->cnt-1; i++) {
                rgn->region[i].base = rgn->region[i+1].base;
                rgn->region[i].physbase = rgn->region[i+1].physbase;
                rgn->region[i].size = rgn->region[i+1].size;
        }
        rgn->cnt--;
}
/* This routine called with relocation disabled. */
void __init
lmb_init(void)
{
        /* Create a dummy zero size LMB which will get coalesced away later.
         * This simplifies the lmb_add() code below...
         */
        lmb.memory.region[0].base = 0;
        lmb.memory.region[0].size = 0;
        lmb.memory.cnt = 1;

        /* Ditto. */
        lmb.reserved.region[0].base = 0;
        lmb.reserved.region[0].size = 0;
        lmb.reserved.cnt = 1;
}
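
/* Compute lmb.memory.size and assign each memory region its physbase. */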
/* This routine called with relocation disabled. */
void __init
lmb_analyze(void)
{
        unsigned long i;
        unsigned long mem_size = 0;
        unsigned long size_mask = 0;
#ifdef CONFIG_MSCHUNKS
        unsigned long physbase = 0;
#endif

        for (i=0; i < lmb.memory.cnt; i++) {
                unsigned long lmb_size;

                lmb_size = lmb.memory.region[i].size;

#ifdef CONFIG_MSCHUNKS
                lmb.memory.region[i].physbase = physbase;
                physbase += lmb_size;
#else
                lmb.memory.region[i].physbase = lmb.memory.region[i].base;
#endif

                mem_size += lmb_size;
                size_mask |= lmb_size;
        }

        lmb.memory.size = mem_size;
}
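
/*
 * Add [base, base+size) to a region table.  The new block is first
 * coalesced with an adjacent entry if possible; otherwise it is
 * inserted so that the table stays sorted by base address.  Fails if
 * the fixed-size table is already full.  Shared by lmb_add() and
 * lmb_reserve().
 */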
/* This routine called with relocation disabled. */
static long __init
lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
{
        unsigned long i, coalesced = 0;
        long adjacent;

        /* First try and coalesce this LMB with another. */
        for (i=0; i < rgn->cnt; i++) {
                unsigned long rgnbase = rgn->region[i].base;
                unsigned long rgnsize = rgn->region[i].size;

                adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
                if ( adjacent > 0 ) {
                        rgn->region[i].base -= size;
                        rgn->region[i].physbase -= size;
                        rgn->region[i].size += size;
                        coalesced++;
                        break;
                }
                else if ( adjacent < 0 ) {
                        rgn->region[i].size += size;
                        coalesced++;
                        break;
                }
        }

        if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1) ) {
                lmb_coalesce_regions(rgn, i, i+1);
                coalesced++;
        }

        if ( coalesced ) {
                return coalesced;
        } else if ( rgn->cnt >= MAX_LMB_REGIONS ) {
                return -1;
        }

        /* Couldn't coalesce the LMB, so add it to the sorted table. */
        for (i=rgn->cnt-1; i >= 0; i--) {
                if (base < rgn->region[i].base) {
                        rgn->region[i+1].base = rgn->region[i].base;
                        rgn->region[i+1].physbase = rgn->region[i].physbase;
                        rgn->region[i+1].size = rgn->region[i].size;
                } else {
                        rgn->region[i+1].base = base;
                        rgn->region[i+1].physbase = lmb_abs_to_phys(base);
                        rgn->region[i+1].size = size;
                        break;
                }
        }
        rgn->cnt++;

        return 0;
}
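
/* Record a block of usable system memory in the memory table. */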
/* This routine called with relocation disabled. */
long __init
lmb_add(unsigned long base, unsigned long size)
{
        struct lmb_region *_rgn = &(lmb.memory);

        /* On pSeries LPAR systems, the first LMB is our RMO region. */
        if ( base == 0 )
                lmb.rmo_size = size;

        return lmb_add_region(_rgn, base, size);
}
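
/* Mark [base, base+size) as reserved so the allocator will not hand it out. */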
long __init
lmb_reserve(unsigned long base, unsigned long size)
{
        struct lmb_region *_rgn = &(lmb.reserved);

        return lmb_add_region(_rgn, base, size);
}
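
/* Return the index of the first region in rgn that overlaps
 * [base, base+size), or -1 if there is no overlap.
 */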
long __init
lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
{
        unsigned long i;

        for (i=0; i < rgn->cnt; i++) {
                unsigned long rgnbase = rgn->region[i].base;
                unsigned long rgnsize = rgn->region[i].size;
                if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
                        break;
                }
        }

        return (i < rgn->cnt) ? i : -1;
}
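
/*
 * Boot-time allocator: carve an aligned block of size bytes out of the
 * highest suitable memory region, stepping below anything already
 * reserved, and record the result in the reserved table.
 */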
unsigned long __init
lmb_alloc(unsigned long size, unsigned long align)
{
        return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}
unsigned long __init
lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
{
        long i, j;
        unsigned long base = 0;

        for (i=lmb.memory.cnt-1; i >= 0; i--) {
                unsigned long lmbbase = lmb.memory.region[i].base;
                unsigned long lmbsize = lmb.memory.region[i].size;

                if ( max_addr == LMB_ALLOC_ANYWHERE )
                        base = _ALIGN_DOWN(lmbbase+lmbsize-size, align);
                else if ( lmbbase < max_addr )
                        base = _ALIGN_DOWN(min(lmbbase+lmbsize,max_addr)-size, align);
                else
                        continue;

                while ( (lmbbase <= base) &&
                        ((j = lmb_overlaps_region(&lmb.reserved,base,size)) >= 0) ) {
                        base = _ALIGN_DOWN(lmb.reserved.region[j].base-size, align);
                }

                if ( (base != 0) && (lmbbase <= base) )
                        break;
        }

        if ( i < 0 )
                return 0;

        lmb_add_region(&lmb.reserved, base, size);

        return base;
}
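
/* Total bytes of memory described by the lmb memory table. */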
unsigned long __init
lmb_phys_mem_size(void)
{
#ifdef CONFIG_MSCHUNKS
        return lmb.memory.size;
#else
        unsigned long total = 0;
        int i;

        /* add all physical memory to the bootmem map */
        for (i=0; i < lmb.memory.cnt; i++)
                total += lmb.memory.region[i].size;
        return total;
#endif /* CONFIG_MSCHUNKS */
}
unsigned long __init
lmb_end_of_DRAM(void)
{
        int idx = lmb.memory.cnt - 1;

#ifdef CONFIG_MSCHUNKS
        return (lmb.memory.region[idx].physbase +
                lmb.memory.region[idx].size);
#else
        return (lmb.memory.region[idx].base +
                lmb.memory.region[idx].size);
#endif /* CONFIG_MSCHUNKS */
}
/*
 * Truncate the lmb list to memory_limit if it's set
 * You must call lmb_analyze() after this.
 */
void __init lmb_enforce_memory_limit(void)
{
        extern unsigned long memory_limit;
        unsigned long i, limit;

        if (! memory_limit)
                return;

        limit = memory_limit;
        for (i = 0; i < lmb.memory.cnt; i++) {
                if (limit > lmb.memory.region[i].size) {
                        limit -= lmb.memory.region[i].size;
                        continue;
                }

                lmb.memory.region[i].size = limit;
                lmb.memory.cnt = i + 1;
                break;
        }
}