/*
 * Handle the memory map.
 * The functions here do the job until bootmem takes over.
 *
 * Getting sanitize_e820_map() in sync with i386 version by applying change:
 * -  Provisions for empty E820 memory regions (reported by certain BIOSes).
 *    Alex Achenbach <xela@slit.de>, December 2002.
 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/kexec.h>
#include <linux/module.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/bootsetup.h>
#include <asm/sections.h>
/*
 * PFN of last memory page.
 */
unsigned long end_pfn;
EXPORT_SYMBOL(end_pfn);

/*
 * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
 * The direct mapping extends to end_pfn_map, so that we can directly access
 * apertures, ACPI and other tables without having to play with fixmaps.
 */
unsigned long end_pfn_map;

/*
 * Last pfn which the user wants to use.
 */
unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;

extern struct resource code_resource, data_resource;
/* Check for some hardcoded bad areas that early boot is not allowed to touch */
static inline int bad_addr(unsigned long *addrp, unsigned long size)
{
	unsigned long addr = *addrp, last = addr + size;

	/* various gunk below that is needed for SMP startup */
	if (addr < 0x8000) {
		*addrp = 0x8000;
		return 1;
	}

	/* direct mapping tables of the kernel */
	if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) {
		*addrp = table_end << PAGE_SHIFT;
		return 1;
	}

	/* initrd */
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START && last >= INITRD_START &&
	    addr < INITRD_START+INITRD_SIZE) {
		*addrp = INITRD_START + INITRD_SIZE;
		return 1;
	}
#endif
	/* kernel code + 640k memory hole (later should not be needed, but
	   be paranoid for now) */
	if (last >= 640*1024 && addr < __pa_symbol(&_end)) {
		*addrp = __pa_symbol(&_end);
		return 1;
	}

	/* EBDA */
	if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
		*addrp = ebda_addr + ebda_size;
		return 1;
	}

	/* XXX ramdisk image here? */
	return 0;
}
/*
 * This function checks if any part of the range <start,end> is mapped
 * with type.
 */
int __meminit
e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
{
	int i;
	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		if (type && ei->type != type)
			continue;
		if (ei->addr >= end || ei->addr + ei->size <= start)
			continue;
		return 1;
	}
	return 0;
}
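
/*
 * A minimal illustration (hypothetical caller, made-up addresses): passing
 * type == 0 matches entries of any type, so this asks whether the e820 map
 * says anything at all about a candidate 4KB window.
 */
static int __init example_any_mapped(void)
{
	return e820_any_mapped(0xfed00000UL, 0xfed01000UL, 0);
}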
/*
 * This function checks if the entire range <start,end> is mapped with type.
 *
 * Note: this function only works correctly if the e820 table is sorted and
 * non-overlapping, which is the case.
 */
int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
{
	int i;
	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		if (type && ei->type != type)
			continue;
		/* does this entry overlap the region at all? */
		if (ei->addr >= end || ei->addr + ei->size <= start)
			continue;

		/* if the region is at the beginning of <start,end> we move
		 * start to the end of the region since it's ok until there
		 */
		if (ei->addr <= start)
			start = ei->addr + ei->size;
		/* if start is now at or beyond end, we're done, full coverage */
		if (start >= end)
			return 1; /* we're done */
	}
	return 0;
}
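
/*
 * A minimal illustration (hypothetical caller, made-up range): in contrast
 * to e820_any_mapped(), this only returns 1 when every byte of the range is
 * covered by entries of the given type, e.g. to verify that the first 16MB
 * are entirely usable RAM.
 */
static int __init example_all_mapped(void)
{
	return e820_all_mapped(0, 16*1024*1024, E820_RAM);
}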
/*
 * Find a free area in a specific range.
 */
unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size)
{
	int i;
	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		unsigned long addr = ei->addr, last;
		if (ei->type != E820_RAM)
			continue;
		if (addr < start)
			addr = start;
		if (addr > ei->addr + ei->size)
			continue;
		while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
			;
		last = addr + size;
		if (last > ei->addr + ei->size)
			continue;
		if (last > end)
			continue;
		return addr;
	}
	return -1UL;
}
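
/*
 * A minimal illustration (hypothetical caller, made-up sizes): look for
 * 64KB of RAM between 1MB and 4GB that bad_addr() does not veto.  The
 * return value must be checked against -1UL before the area is used.
 */
static unsigned long __init example_find_area(void)
{
	unsigned long addr = find_e820_area(0x100000UL, 0xffffffffUL, 0x10000);

	if (addr == -1UL)
		printk(KERN_ERR "e820: no free area found for example buffer\n");
	return addr;
}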
/*
 * Free bootmem based on the e820 table for a node.
 */
void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start, unsigned long end)
{
	int i;
	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		unsigned long last, addr;

		if (ei->type != E820_RAM ||
		    ei->addr+ei->size <= start ||
		    ei->addr >= end)
			continue;

		addr = round_up(ei->addr, PAGE_SIZE);
		if (addr < start)
			addr = start;

		last = round_down(ei->addr + ei->size, PAGE_SIZE);
		if (last >= end)
			last = end;

		if (last > addr && last-addr >= PAGE_SIZE)
			free_bootmem_node(pgdat, addr, last-addr);
	}
}
/*
 * Find the highest page frame number we have available.
 */
unsigned long __init e820_end_of_ram(void)
{
	int i;
	unsigned long end_pfn = 0;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		unsigned long start, end;

		start = round_up(ei->addr, PAGE_SIZE);
		end = round_down(ei->addr + ei->size, PAGE_SIZE);
		if (start >= end)
			continue;
		if (ei->type == E820_RAM) {
			if (end > end_pfn<<PAGE_SHIFT)
				end_pfn = end>>PAGE_SHIFT;
		} else {
			if (end > end_pfn_map<<PAGE_SHIFT)
				end_pfn_map = end>>PAGE_SHIFT;
		}
	}

	if (end_pfn > end_pfn_map)
		end_pfn_map = end_pfn;
	if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
		end_pfn_map = MAXMEM>>PAGE_SHIFT;
	if (end_pfn > end_user_pfn)
		end_pfn = end_user_pfn;
	if (end_pfn > end_pfn_map)
		end_pfn = end_pfn_map;

	return end_pfn;
}
/*
 * Compute how much memory is missing in a range.
 * Unlike the other functions in this file the arguments are in page numbers.
 */
unsigned long __init
e820_hole_size(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long ram = 0;
	unsigned long start = start_pfn << PAGE_SHIFT;
	unsigned long end = end_pfn << PAGE_SHIFT;
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		unsigned long last, addr;

		if (ei->type != E820_RAM ||
		    ei->addr+ei->size <= start ||
		    ei->addr >= end)
			continue;

		addr = round_up(ei->addr, PAGE_SIZE);
		if (addr < start)
			addr = start;

		last = round_down(ei->addr + ei->size, PAGE_SIZE);
		if (last >= end)
			last = end;

		if (last > addr)
			ram += last - addr;
	}
	return ((end - start) - ram) >> PAGE_SHIFT;
}
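
/*
 * A minimal illustration (hypothetical caller): e820_hole_size() takes page
 * frame numbers, so a byte limit such as 4GB has to be shifted by PAGE_SHIFT
 * before being passed in.
 */
static unsigned long __init example_hole_below_4g(void)
{
	return e820_hole_size(0, 0x100000000UL >> PAGE_SHIFT);
}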
/*
 * Mark e820 reserved areas as busy for the resource manager.
 */
void __init e820_reserve_resources(void)
{
	int i;
	for (i = 0; i < e820.nr_map; i++) {
		struct resource *res;
		res = alloc_bootmem_low(sizeof(struct resource));
		switch (e820.map[i].type) {
		case E820_RAM:	res->name = "System RAM"; break;
		case E820_ACPI:	res->name = "ACPI Tables"; break;
		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
		default:	res->name = "reserved";
		}
		res->start = e820.map[i].addr;
		res->end = res->start + e820.map[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
		if (e820.map[i].type == E820_RAM) {
			/*
			 * We don't know which RAM region contains kernel data,
			 * so we try it repeatedly and let the resource manager
			 * test it.
			 */
			request_resource(res, &code_resource);
			request_resource(res, &data_resource);
#ifdef CONFIG_KEXEC
			request_resource(res, &crashk_res);
#endif
		}
	}
}
/*
 * Add a memory region to the kernel e820 map.
 */
void __init add_memory_region(unsigned long start, unsigned long size, int type)
{
	int x = e820.nr_map;

	if (x == E820MAX) {
		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
		return;
	}

	e820.map[x].addr = start;
	e820.map[x].size = size;
	e820.map[x].type = type;
	e820.nr_map++;
}
void __init e820_print_map(char *who)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		printk(" %s: %016Lx - %016Lx ", who,
			(unsigned long long) e820.map[i].addr,
			(unsigned long long) (e820.map[i].addr + e820.map[i].size));
		switch (e820.map[i].type) {
		case E820_RAM:	printk("(usable)\n");
				break;
		case E820_RESERVED:
				printk("(reserved)\n");
				break;
		case E820_ACPI:
				printk("(ACPI data)\n");
				break;
		case E820_NVS:
				printk("(ACPI NVS)\n");
				break;
		default:	printk("type %u\n", e820.map[i].type);
				break;
		}
	}
}
/*
 * Sanitize the BIOS e820 map.
 *
 * Some e820 responses include overlapping entries.  The following
 * replaces the original e820 map with a new one, removing overlaps.
 */
static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
{
	struct change_member {
		struct e820entry *pbios; /* pointer to original bios entry */
		unsigned long long addr; /* address for this change point */
	};
	static struct change_member change_point_list[2*E820MAX] __initdata;
	static struct change_member *change_point[2*E820MAX] __initdata;
	static struct e820entry *overlap_list[E820MAX] __initdata;
	static struct e820entry new_bios[E820MAX] __initdata;
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_bios_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/*
		Visually we're performing the following (1,2,3,4 = memory types)...

		Sample memory map (w/overlaps):
		   ____22__________________
		   ______________________4_
		   ____1111________________
		   _44_____________________
		   11111111________________
		   ____________________33__
		   ___________44___________
		   __________33333_________
		   ______________22________
		   ___________________2222_
		   _________111111111______
		   _____________________11_
		   _________________4______

		Sanitized equivalent (no overlap):
		   1_______________________
		   _44_____________________
		   ___1____________________
		   ____22__________________
		   ______11________________
		   _________1______________
		   __________3_____________
		   ___________44___________
		   _____________33_________
		   _______________2________
		   ________________1_______
		   _________________4______
		   ___________________2____
		   ____________________33__
		   ______________________4_
	*/

	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in bios map */
	for (i = 0; i < old_nr; i++)
		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2*old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (biosmap[i].size != 0) {
			change_point[chgidx]->addr = biosmap[i].addr;
			change_point[chgidx++]->pbios = &biosmap[i];
			change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
			change_point[chgidx++]->pbios = &biosmap[i];
		}
	}
	chg_nr = chgidx;

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			/* if <current_addr> > <last_addr>, swap */
			/* or, if current=<start_addr> & last=<end_addr>, swap */
			if ((change_point[i]->addr < change_point[i-1]->addr) ||
			    ((change_point[i]->addr == change_point[i-1]->addr) &&
			     (change_point[i]->addr == change_point[i]->pbios->addr) &&
			     (change_point[i-1]->addr != change_point[i-1]->pbios->addr))) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}

	/* create a new bios memory map, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_bios_entry = 0;	/* index for creating new bios map entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */
	/* loop through change-points, determining effect on the new bios map */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping bios entries */
		if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr) {
			/* add map entry to overlap list (> 1 entry implies an overlap) */
			overlap_list[overlap_entries++] = change_point[chgidx]->pbios;
		} else {
			/* remove entry from list (order independent, so swap with last) */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] == change_point[chgidx]->pbios)
					overlap_list[i] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/* if there are overlapping entries, decide which "type" to use */
		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/* continue building up new bios map based on this information */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_bios[new_bios_entry].size =
					change_point[chgidx]->addr - last_addr;
				/* move forward only if the new size was non-zero */
				if (new_bios[new_bios_entry].size != 0)
					if (++new_bios_entry >= E820MAX)
						break;	/* no more space left for new bios entries */
			}
			if (current_type != 0) {
				new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
				new_bios[new_bios_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	new_nr = new_bios_entry;	/* retain count for new bios entries */

	/* copy new bios mapping into original location */
	memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
	*pnr_map = new_nr;

	return 0;
}
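
/*
 * A minimal illustration of the algorithm above (made-up map, illustrative
 * only): two overlapping entries, RAM 0-1MB and reserved 640KB-1MB, come out
 * as RAM 0-640KB followed by reserved 640KB-1MB, because the higher-valued
 * type wins wherever entries overlap.
 */
static void __init example_sanitize(void)
{
	static struct e820entry map[E820MAX] = {
		{ .addr = 0x00000000, .size = 0x00100000, .type = E820_RAM },
		{ .addr = 0x000a0000, .size = 0x00060000, .type = E820_RESERVED },
	};
	char nr = 2;
	int i;

	sanitize_e820_map(map, &nr);
	for (i = 0; i < nr; i++)
		printk(KERN_DEBUG "sanitized: %016Lx - %016Lx type %u\n",
		       (unsigned long long) map[i].addr,
		       (unsigned long long) (map[i].addr + map[i].size),
		       map[i].type);
}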
/*
 * Copy the BIOS e820 map into a safe place.
 *
 * Sanity-check it while we're at it..
 *
 * If we're lucky and live on a modern system, the setup code
 * will have given us a memory map that we can use to properly
 * set up memory.  If we aren't, we'll fake a memory map.
 *
 * We check to see that the memory map contains at least 2 elements
 * before we'll use it, because the detection code in setup.S may
 * not be perfect and most every PC known to man has two memory
 * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
 * thinkpad 560x, for example, does not cooperate with the memory
 * detection code.)
 */
static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
{
	/* Only one memory region (or negative)? Ignore it */
	if (nr_map < 2)
		return -1;

	do {
		unsigned long start = biosmap->addr;
		unsigned long size = biosmap->size;
		unsigned long end = start + size;
		unsigned long type = biosmap->type;

		/* Overflow in 64 bits? Ignore the memory map. */
		if (start > end)
			return -1;

		/*
		 * Some BIOSes claim RAM in the 640k - 1M region.
		 * Not right. Fix it up.
		 *
		 * This should be removed on Hammer which is supposed to not
		 * have non e820 covered ISA mappings there, but I had some strange
		 * problems so it stays for now.  -AK
		 */
		if (type == E820_RAM) {
			if (start < 0x100000ULL && end > 0xA0000ULL) {
				if (start < 0xA0000ULL)
					add_memory_region(start, 0xA0000ULL-start, type);
				if (end <= 0x100000ULL)
					continue;
				start = 0x100000ULL;
				size = end - start;
			}
		}

		add_memory_region(start, size, type);
	} while (biosmap++, --nr_map);
	return 0;
}
void __init setup_memory_region(void)
{
	char *who = "BIOS-e820";

	/*
	 * Try to copy the BIOS-supplied E820-map.
	 *
	 * Otherwise fake a memory map; one section from 0k->640k,
	 * the next section from 1mb->appropriate_mem_k
	 */
	sanitize_e820_map(E820_MAP, &E820_MAP_NR);
	if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
		unsigned long mem_size;

		/* compare results from other methods and take the greater */
		if (ALT_MEM_K < EXT_MEM_K) {
			mem_size = EXT_MEM_K;
			who = "BIOS-88";
		} else {
			mem_size = ALT_MEM_K;
			who = "BIOS-e801";
		}

		e820.nr_map = 0;
		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
	}
	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
	e820_print_map(who);
}
void __init parse_memopt(char *p, char **from)
{
	end_user_pfn = memparse(p, from);
	end_user_pfn >>= PAGE_SHIFT;
}
void __init parse_memmapopt(char *p, char **from)
{
	unsigned long long start_at, mem_size;

	mem_size = memparse(p, from);
	p = *from;
	if (*p == '@') {
		start_at = memparse(p+1, from);
		add_memory_region(start_at, mem_size, E820_RAM);
	} else if (*p == '#') {
		start_at = memparse(p+1, from);
		add_memory_region(start_at, mem_size, E820_ACPI);
	} else if (*p == '$') {
		start_at = memparse(p+1, from);
		add_memory_region(start_at, mem_size, E820_RESERVED);
	} else {
		end_user_pfn = (mem_size >> PAGE_SHIFT);
	}
	p = *from;
}
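
/*
 * A minimal illustration (made-up values): a "memmap=64M$0x10000000" option
 * would typically reach this function with p pointing at "64M$0x10000000"
 * and marks 64MB at 256MB as E820_RESERVED; the '@' and '#' forms add
 * E820_RAM and E820_ACPI regions, and a bare size only caps end_user_pfn,
 * much like "mem=".
 */
static void __init example_parse_memmap(void)
{
	char *from = "64M$0x10000000";

	parse_memmapopt(from, &from);
}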
unsigned long pci_mem_start = 0xaeedbabe;
EXPORT_SYMBOL(pci_mem_start);
/*
 * Search for the biggest gap in the low 32 bits of the e820
 * memory space.  We pass this space to the PCI subsystem so it can
 * assign MMIO resources there for hotplug or unconfigured devices.
 * Hopefully the BIOS left enough space.
 */
__init void e820_setup_gap(void)
{
	unsigned long gapstart, gapsize, round;
	unsigned long last;
	int i;
	int found = 0;

	last = 0x100000000ull;
	gapstart = 0x10000000;
	gapsize = 0x400000;
	i = e820.nr_map;
	while (--i >= 0) {
		unsigned long long start = e820.map[i].addr;
		unsigned long long end = start + e820.map[i].size;

		/*
		 * Since "last" is at most 4GB, we know we'll
		 * fit in 32 bits if this condition is true
		 */
		if (last > end) {
			unsigned long gap = last - end;

			if (gap > gapsize) {
				gapsize = gap;
				gapstart = end;
				found = 1;
			}
		}
		if (start < last)
			last = start;
	}

	if (!found) {
		gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024;
		printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n"
		       KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n");
	}

	/*
	 * See how much we want to round up: start off with
	 * rounding to the next 1MB area.
	 */
	round = 0x100000;
	while ((gapsize >> 4) > round)
		round += round;
	/* Fun with two's complement */
	pci_mem_start = (gapstart + round) & -round;

	printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
		pci_mem_start, gapstart, gapsize);
}
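
/*
 * Rounding illustration (made-up numbers): the loop above doubles "round",
 * starting at 1MB, until it is at least gapsize/16, and
 * (gapstart + round) & -round then pushes gapstart up to the next multiple
 * of "round".  For example, with gapstart = 0xe0b12345 and round = 0x200000,
 * (0xe0b12345 + 0x200000) & -0x200000 == 0xe0c00000, so PCI allocations
 * would start at the next 2MB boundary above the gap.
 */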