/*
 *  linux/mm/bootmem.c
 *
 *  Copyright (C) 1999 Ingo Molnar
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *
 *  simple boot-time physical memory area allocator and
 *  free memory collector. It's used to deal with reserved
 *  system memory and memory holes as well.
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/bootmem.h>
#include <linux/module.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"
/*
 * Access to this subsystem has to be serialized externally. (this is
 * true for the boot process anyway)
 */
unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

static LIST_HEAD(bdata_list);
#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif
/* return the number of _pages_ that will be allocated for the boot bitmap */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long mapsize;

	mapsize = (pages+7)/8;
	mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
	mapsize >>= PAGE_SHIFT;

	return mapsize;
}
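/*
 * Worked example (added commentary, not part of the original file): with
 * 4 KiB pages, covering 1 GiB of RAM means pages == 262144, so the bitmap
 * needs 262144/8 == 32768 bytes, which rounds up to 8 bitmap pages.
 */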
/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	bootmem_data_t *ent;

	if (list_empty(&bdata_list)) {
		list_add(&bdata->list, &bdata_list);
		return;
	}
	/* insert in order */
	list_for_each_entry(ent, &bdata_list, list) {
		if (bdata->node_boot_start < ent->node_boot_start) {
			list_add_tail(&bdata->list, &ent->list);
			return;
		}
	}
	list_add_tail(&bdata->list, &bdata_list);
}
/*
 * Given an initialised bdata, it returns the size of the boot bitmap
 */
static unsigned long __init get_mapsize(bootmem_data_t *bdata)
{
	unsigned long mapsize;
	unsigned long start = PFN_DOWN(bdata->node_boot_start);
	unsigned long end = bdata->node_low_pfn;

	mapsize = ((end - start) + 7) / 8;
	return ALIGN(mapsize, sizeof(long));
}
/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(pg_data_t *pgdat,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	bootmem_data_t *bdata = pgdat->bdata;
	unsigned long mapsize;

	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_boot_start = PFN_PHYS(start);
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = get_mapsize(bdata);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	return mapsize;
}
/*
 * Marks a particular physical memory range as unallocatable. Usable RAM
 * might be used for boot-time allocations - or it might get added
 * to the free page pool later on.
 */
static int __init reserve_bootmem_core(bootmem_data_t *bdata,
			unsigned long addr, unsigned long size, int flags)
{
	unsigned long sidx, eidx;
	unsigned long i;
	int ret;

	/*
	 * round up, partially reserved pages are considered
	 * fully reserved.
	 */
	BUG_ON(!size);
	BUG_ON(PFN_DOWN(addr) >= bdata->node_low_pfn);
	BUG_ON(PFN_UP(addr + size) > bdata->node_low_pfn);

	sidx = PFN_DOWN(addr - bdata->node_boot_start);
	eidx = PFN_UP(addr + size - bdata->node_boot_start);

	for (i = sidx; i < eidx; i++)
		if (test_and_set_bit(i, bdata->node_bootmem_map)) {
#ifdef CONFIG_DEBUG_BOOTMEM
			printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
#endif
			if (flags & BOOTMEM_EXCLUSIVE) {
				ret = -EBUSY;
				goto err;
			}
		}

	return 0;

err:
	/* unreserve memory we accidentally reserved */
	for (i--; i >= sidx; i--)
		clear_bit(i, bdata->node_bootmem_map);

	return ret;
}
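/*
 * Illustration (added commentary, not part of the original file): with
 * 4 KiB pages and node_boot_start == 0, reserve_bootmem_core(bdata,
 * 0x1800, 0x1000, 0) rounds outwards and marks PFNs 1 and 2 reserved -
 * both partially covered pages are treated as fully reserved.
 */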
static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
				     unsigned long size)
{
	unsigned long sidx, eidx;
	unsigned long i;

	/*
	 * round down end of usable mem, partially free pages are
	 * considered reserved.
	 */
	BUG_ON(!size);
	BUG_ON(PFN_DOWN(addr + size) > bdata->node_low_pfn);

	if (addr < bdata->last_success)
		bdata->last_success = addr;

	/*
	 * Round up the beginning of the address.
	 */
	sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
	eidx = PFN_DOWN(addr + size - bdata->node_boot_start);

	for (i = sidx; i < eidx; i++) {
		if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
			BUG();
	}
}
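/*
 * Illustration (added commentary, not part of the original file): freeing
 * rounds the other way. With 4 KiB pages and node_boot_start == 0,
 * free_bootmem_core(bdata, 0x1800, 0x2000) clears only PFN 2; the
 * partially covered PFNs 1 and 3 stay reserved.
 */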
/*
 * We 'merge' subsequent allocations to save space. We might 'lose'
 * some fraction of a page if allocations cannot be satisfied due to
 * size constraints on boxes where there is physical RAM space
 * fragmentation - in these cases (mostly large memory boxes) this
 * is not a problem.
 *
 * On low memory boxes we get it right in 100% of the cases.
 *
 * alignment has to be a power of 2 value.
 *
 * NOTE:  This function is _not_ reentrant.
 */
void * __init
__alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
	      unsigned long align, unsigned long goal, unsigned long limit)
{
	unsigned long offset, remaining_size, areasize, preferred;
	unsigned long i, start = 0, incr, eidx, end_pfn;
	void *ret;

	if (!size) {
		printk("__alloc_bootmem_core(): zero-sized request\n");
		BUG();
	}
	BUG_ON(align & (align-1));

	if (limit && bdata->node_boot_start >= limit)
		return NULL;

	/* on nodes without memory - bootmem_map is NULL */
	if (!bdata->node_bootmem_map)
		return NULL;

	end_pfn = bdata->node_low_pfn;
	limit = PFN_DOWN(limit);
	if (limit && end_pfn > limit)
		end_pfn = limit;

	eidx = end_pfn - PFN_DOWN(bdata->node_boot_start);
	offset = 0;
	if (align && (bdata->node_boot_start & (align - 1UL)) != 0)
		offset = align - (bdata->node_boot_start & (align - 1UL));
	offset = PFN_DOWN(offset);

	/*
	 * We try to allocate bootmem pages above 'goal'
	 * first, then we try to allocate lower pages.
	 */
	if (goal && goal >= bdata->node_boot_start && PFN_DOWN(goal) < end_pfn) {
		preferred = goal - bdata->node_boot_start;

		if (bdata->last_success >= preferred)
			if (!limit || (limit && limit > bdata->last_success))
				preferred = bdata->last_success;
	} else
		preferred = 0;

	preferred = PFN_DOWN(ALIGN(preferred, align)) + offset;
	areasize = (size + PAGE_SIZE-1) / PAGE_SIZE;
	incr = align >> PAGE_SHIFT ? : 1;

restart_scan:
	for (i = preferred; i < eidx; i += incr) {
		unsigned long j;

		i = find_next_zero_bit(bdata->node_bootmem_map, eidx, i);
		i = ALIGN(i, incr);
		if (i >= eidx)
			break;
		if (test_bit(i, bdata->node_bootmem_map))
			continue;
		for (j = i + 1; j < i + areasize; ++j) {
			if (j >= eidx)
				goto fail_block;
			if (test_bit(j, bdata->node_bootmem_map))
				goto fail_block;
		}
		start = i;
		goto found;
	fail_block:
		i = ALIGN(j, incr);
	}

	if (preferred > offset) {
		preferred = offset;
		goto restart_scan;
	}
	return NULL;

found:
	bdata->last_success = PFN_PHYS(start);
	BUG_ON(start >= eidx);

	/*
	 * Is the next page of the previous allocation-end the start
	 * of this allocation's buffer? If yes then we can 'merge'
	 * the previous partial page with this allocation.
	 */
	if (align < PAGE_SIZE &&
	    bdata->last_offset && bdata->last_pos+1 == start) {
		offset = ALIGN(bdata->last_offset, align);
		BUG_ON(offset > PAGE_SIZE);
		remaining_size = PAGE_SIZE - offset;
		if (size < remaining_size) {
			areasize = 0;
			/* last_pos unchanged */
			bdata->last_offset = offset + size;
			ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
					   offset +
					   bdata->node_boot_start);
		} else {
			remaining_size = size - remaining_size;
			areasize = (remaining_size + PAGE_SIZE-1) / PAGE_SIZE;
			ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
					   offset +
					   bdata->node_boot_start);
			bdata->last_pos = start + areasize - 1;
			bdata->last_offset = remaining_size;
		}
		bdata->last_offset &= ~PAGE_MASK;
	} else {
		bdata->last_pos = start + areasize - 1;
		bdata->last_offset = size & ~PAGE_MASK;
		ret = phys_to_virt(start * PAGE_SIZE + bdata->node_boot_start);
	}

	/*
	 * Reserve the area now:
	 */
	for (i = start; i < start + areasize; i++)
		if (unlikely(test_and_set_bit(i, bdata->node_bootmem_map)))
			BUG();
	memset(ret, 0, size);
	return ret;
}
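/*
 * Worked example of the merge bookkeeping above (added commentary, not
 * part of the original file): with 4 KiB pages, two back-to-back calls
 * of 100 bytes each with align == 4 share one physical page.  The first
 * call takes a fresh page and leaves last_offset == 100; the second call
 * sees last_pos + 1 == start, keeps areasize at 0, hands out the same
 * page at offset 100 and bumps last_offset to 200, so no new page gets
 * reserved for it.
 */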
static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
{
	struct page *page;
	unsigned long pfn;
	bootmem_data_t *bdata = pgdat->bdata;
	unsigned long i, count, total = 0;
	unsigned long idx;
	unsigned long *map;
	int gofast = 0;

	BUG_ON(!bdata->node_bootmem_map);

	count = 0;
	/* first extant page of the node */
	pfn = PFN_DOWN(bdata->node_boot_start);
	idx = bdata->node_low_pfn - pfn;
	map = bdata->node_bootmem_map;
	/* Check physaddr is O(LOG2(BITS_PER_LONG)) page aligned */
	if (bdata->node_boot_start == 0 ||
	    ffs(bdata->node_boot_start) - PAGE_SHIFT > ffs(BITS_PER_LONG))
		gofast = 1;
	for (i = 0; i < idx; ) {
		unsigned long v = ~map[i / BITS_PER_LONG];

		if (gofast && v == ~0UL) {
			int order;

			page = pfn_to_page(pfn);
			count += BITS_PER_LONG;
			order = ffs(BITS_PER_LONG) - 1;
			__free_pages_bootmem(page, order);
			i += BITS_PER_LONG;
			page += BITS_PER_LONG;
		} else if (v) {
			unsigned long m;

			page = pfn_to_page(pfn);
			for (m = 1; m && i < idx; m<<=1, page++, i++) {
				if (v & m) {
					count++;
					__free_pages_bootmem(page, 0);
				}
			}
		} else {
			i += BITS_PER_LONG;
		}
		pfn += BITS_PER_LONG;
	}
	total += count;

	/*
	 * Now free the allocator bitmap itself, it's not
	 * needed anymore:
	 */
	page = virt_to_page(bdata->node_bootmem_map);
	count = 0;
	idx = (get_mapsize(bdata) + PAGE_SIZE-1) >> PAGE_SHIFT;
	for (i = 0; i < idx; i++, page++) {
		__free_pages_bootmem(page, 0);
		count++;
	}
	total += count;
	bdata->node_bootmem_map = NULL;

	return total;
}
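/*
 * Note on the fast path above (added commentary, not part of the original
 * file): when a whole bitmap word is free and the node start is suitably
 * aligned, the loop hands BITS_PER_LONG pages to the page allocator as a
 * single higher-order block of order ffs(BITS_PER_LONG) - 1, i.e. order 6
 * (64 pages) on a 64-bit box, instead of freeing them one page at a time.
 */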
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat, freepfn, startpfn, endpfn);
}

void __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				 unsigned long size, int flags)
{
	reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
}

void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	free_bootmem_core(pgdat->bdata, physaddr, size);
}

unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	return free_all_bootmem_core(pgdat);
}

unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0), start, 0, pages);
}
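/*
 * Rough usage sketch (added commentary, not part of the original file;
 * names and values are illustrative only).  An architecture's setup_arch()
 * typically brings the allocator up along these lines:
 *
 *	bitmap_pfn = some_free_pfn;			// hypothetical
 *	mapsize = init_bootmem(bitmap_pfn, max_low_pfn);
 *	free_bootmem(ram_start, ram_size);		// register usable RAM
 *	reserve_bootmem(kernel_start, kernel_size,	// protect the kernel,
 *			BOOTMEM_DEFAULT);		// initrd, the bitmap, ...
 *
 * Only after that do the __alloc_bootmem*() calls below start handing
 * out memory.
 */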
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			    int flags)
{
	return reserve_bootmem_core(NODE_DATA(0)->bdata, addr, size, flags);
}
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */

void __init free_bootmem(unsigned long addr, unsigned long size)
{
	free_bootmem_core(NODE_DATA(0)->bdata, addr, size);
}

unsigned long __init free_all_bootmem(void)
{
	return free_all_bootmem_core(NODE_DATA(0));
}
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
				      unsigned long goal)
{
	bootmem_data_t *bdata;
	void *ptr;

	list_for_each_entry(bdata, &bdata_list, list) {
		ptr = __alloc_bootmem_core(bdata, size, align, goal, 0);
		if (ptr)
			return ptr;
	}
	return NULL;
}

void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	void *mem = __alloc_bootmem_nopanic(size, align, goal);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}
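/*
 * Usage note (added commentary, not part of the original file): most
 * callers do not use these functions directly but go through the
 * alloc_bootmem*() convenience macros in <linux/bootmem.h>, which pick a
 * cache-line alignment and a sensible goal, roughly:
 *
 *	ptr = alloc_bootmem(size);	// panics on failure, like above
 *
 * The _nopanic variant is for callers that can fall back to another
 * allocation strategy instead of panicking.
 */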
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	return __alloc_bootmem(size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	bootmem_data_t *bdata;
	void *ptr;

	list_for_each_entry(bdata, &bdata_list, list) {
		ptr = __alloc_bootmem_core(bdata, size, align, goal,
					   ARCH_LOW_ADDRESS_LIMIT);
		if (ptr)
			return ptr;
	}

	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of low memory");
	return NULL;
}

void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	return __alloc_bootmem_core(pgdat->bdata, size, align, goal,
				    ARCH_LOW_ADDRESS_LIMIT);
}