/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.  June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/lmb.h>

#define LMB_ALLOC_ANYWHERE	0

struct lmb lmb;

void lmb_dump_all(void)
{
#ifdef DEBUG
	unsigned long i;

	pr_debug("lmb_dump_all:\n");
	pr_debug("    memory.cnt          = 0x%lx\n", lmb.memory.cnt);
	pr_debug("    memory.size         = 0x%llx\n",
		 (unsigned long long)lmb.memory.size);
	for (i = 0; i < lmb.memory.cnt; i++) {
		pr_debug("    memory.region[0x%lx].base = 0x%llx\n",
			 i, (unsigned long long)lmb.memory.region[i].base);
		pr_debug("                  .size       = 0x%llx\n",
			 (unsigned long long)lmb.memory.region[i].size);
	}

	pr_debug("    reserved.cnt        = 0x%lx\n", lmb.reserved.cnt);
	pr_debug("    reserved.size       = 0x%llx\n",
		 (unsigned long long)lmb.reserved.size);
	for (i = 0; i < lmb.reserved.cnt; i++) {
		pr_debug("    reserved.region[0x%lx].base = 0x%llx\n",
			 i, (unsigned long long)lmb.reserved.region[i].base);
		pr_debug("                  .size       = 0x%llx\n",
			 (unsigned long long)lmb.reserved.region[i].size);
	}
#endif /* DEBUG */
}

static unsigned long __init lmb_addrs_overlap(u64 base1, u64 size1,
					      u64 base2, u64 size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

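/*
 * Returns 1 if [base2, base2+size2) starts exactly where [base1, base1+size1)
 * ends, -1 if the first range starts exactly where the second one ends, and
 * 0 if the two ranges are not adjacent.
 */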
static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
				      u64 base2, u64 size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long __init lmb_regions_adjacent(struct lmb_region *rgn,
					unsigned long r1, unsigned long r2)
{
	u64 base1 = rgn->region[r1].base;
	u64 size1 = rgn->region[r1].size;
	u64 base2 = rgn->region[r2].base;
	u64 size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init lmb_coalesce_regions(struct lmb_region *rgn,
					unsigned long r1, unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

void __init lmb_init(void)
{
	/* Create a dummy zero size LMB which will get coalesced away later.
	 * This simplifies the lmb_add() code below...
	 */
	lmb.memory.region[0].base = 0;
	lmb.memory.region[0].size = 0;
	lmb.memory.cnt = 1;

	/* Ditto. */
	lmb.reserved.region[0].base = 0;
	lmb.reserved.region[0].size = 0;
	lmb.reserved.cnt = 1;
}

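/*
 * Illustrative boot-time call sequence (a sketch only; the real callers
 * live in architecture setup code, and the names below are made up):
 *
 *	lmb_init();
 *	lmb_add(0, ram_size);                    register available RAM
 *	lmb_reserve(kernel_start, kernel_size);  protect the kernel image
 *	lmb_analyze();                           compute lmb.memory.size
 *	lmb_dump_all();                          optional debug output
 */
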
void __init lmb_analyze(void)
{
	int i;

	lmb.memory.size = 0;

	for (i = 0; i < lmb.memory.cnt; i++)
		lmb.memory.size += lmb.memory.region[i].size;
}

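/*
 * Add [base, base+size) to a region table.  The new range is first merged
 * with an existing entry when it is identical or directly adjacent to one;
 * otherwise it is inserted so that the table stays sorted by base address.
 * Returns -1 when the region table is full.
 */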
static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
		lmb_coalesce_regions(rgn, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i+1].base = rgn->region[i].base;
			rgn->region[i+1].size = rgn->region[i].size;
		} else {
			rgn->region[i+1].base = base;
			rgn->region[i+1].size = size;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}
	rgn->cnt++;

	return 0;
}

long __init lmb_add(u64 base, u64 size)
{
	struct lmb_region *_rgn = &lmb.memory;

	/* On pSeries LPAR systems, the first LMB is our RMO region. */
	if (base == 0)
		lmb.rmo_size = size;

	return lmb_add_region(_rgn, base, size);
}

long __init lmb_reserve(u64 base, u64 size)
{
	struct lmb_region *_rgn = &lmb.reserved;

	BUG_ON(0 == size);

	return lmb_add_region(_rgn, base, size);
}

long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

static u64 lmb_align_down(u64 addr, u64 size)
{
	return addr & ~(size - 1);
}

static u64 lmb_align_up(u64 addr, u64 size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

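/*
 * Both helpers assume the alignment is a power of two.  For example, with
 * a 0x1000 (4 KiB) alignment:
 *
 *	lmb_align_down(0x1234, 0x1000) == 0x1000
 *	lmb_align_up(0x1234, 0x1000)   == 0x2000
 */
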
static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
					   u64 size, u64 align)
{
	u64 base, res_base;
	long j;

	base = lmb_align_down((end - size), align);
	while (start <= base) {
		j = lmb_overlaps_region(&lmb.reserved, base, size);
		if (j < 0) {
			/* this area isn't reserved, take it */
			if (lmb_add_region(&lmb.reserved, base,
					   lmb_align_up(size, align)) < 0)
				base = ~(u64)0;
			return base;
		}
		res_base = lmb.reserved.region[j].base;
		if (res_base < size)
			break;
		base = lmb_align_down(res_base - size, align);
	}

	return ~(u64)0;
}

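/*
 * The nid_range callback is expected to return the end of the largest
 * prefix of [start, end) that lies on a single NUMA node and to store
 * that node's id in *nid; the loop below walks the memory region one
 * such chunk at a time.
 */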
static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
				       u64 (*nid_range)(u64, u64, int *),
				       u64 size, u64 align, int nid)
{
	u64 start, end;

	start = mp->base;
	end = start + mp->size;

	start = lmb_align_up(start, align);
	while (start < end) {
		u64 this_end;
		int this_nid;

		this_end = nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			u64 ret = lmb_alloc_nid_unreserved(start, this_end,
							   size, align);
			if (ret != ~(u64)0)
				return ret;
		}
		start = this_end;
	}

	return ~(u64)0;
}

u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
			 u64 (*nid_range)(u64 start, u64 end, int *nid))
{
	struct lmb_region *mem = &lmb.memory;
	int i;

	for (i = 0; i < mem->cnt; i++) {
		u64 ret = lmb_alloc_nid_region(&mem->region[i],
					       nid_range,
					       size, align, nid);
		if (ret != ~(u64)0)
			return ret;
	}

	/* No memory on the requested node; fall back to any node. */
	return lmb_alloc(size, align);
}

u64 __init lmb_alloc(u64 size, u64 align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	u64 alloc;

	alloc = __lmb_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

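/*
 * Scan the memory regions from highest to lowest and, within each region,
 * work downward from the top while stepping over reserved ranges, so the
 * allocation ends up as high as possible below max_addr.  Returns the
 * allocated base address, or 0 on failure.
 */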
u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	long i, j;
	u64 base = 0;
	u64 res_base;

	BUG_ON(0 == size);

	/* On some platforms, make sure we allocate lowmem */
	/* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
	if (max_addr == LMB_ALLOC_ANYWHERE)
		max_addr = LMB_REAL_LIMIT;

	for (i = lmb.memory.cnt - 1; i >= 0; i--) {
		u64 lmbbase = lmb.memory.region[i].base;
		u64 lmbsize = lmb.memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = min(lmbbase + lmbsize, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			j = lmb_overlaps_region(&lmb.reserved, base, size);
			if (j < 0) {
				/* this area isn't reserved, take it */
				if (lmb_add_region(&lmb.reserved, base,
						   lmb_align_up(size, align)) < 0)
					return 0;
				return base;
			}
			res_base = lmb.reserved.region[j].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

/* You must call lmb_analyze() before this. */
u64 __init lmb_phys_mem_size(void)
{
	return lmb.memory.size;
}

u64 __init lmb_end_of_DRAM(void)
{
	int idx = lmb.memory.cnt - 1;

	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}

/* You must call lmb_analyze() after this. */
void __init lmb_enforce_memory_limit(u64 memory_limit)
{
	unsigned long i;
	u64 limit;
	struct lmb_property *p;

	if (!memory_limit)
		return;

	/* Truncate the lmb regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < lmb.memory.cnt; i++) {
		if (limit > lmb.memory.region[i].size) {
			limit -= lmb.memory.region[i].size;
			continue;
		}

		lmb.memory.region[i].size = limit;
		lmb.memory.cnt = i + 1;
		break;
	}

	if (lmb.memory.region[0].size < lmb.rmo_size)
		lmb.rmo_size = lmb.memory.region[0].size;

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		p = &lmb.reserved.region[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			lmb_remove_region(&lmb.reserved, i);
			i--;
		}
	}
}

int __init lmb_is_reserved(u64 addr)
{
	int i;

	for (i = 0; i < lmb.reserved.cnt; i++) {
		u64 upper = lmb.reserved.region[i].base +
			lmb.reserved.region[i].size - 1;
		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}