/*
 * Handle the memory map.
 * The functions here do the job until bootmem takes over.
 *
 * Getting sanitize_e820_map() in sync with i386 version by applying change:
 * - Provisions for empty E820 memory regions (reported by certain BIOSes).
 *   Alex Achenbach <xela@slit.de>, December 2002.
 *   Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/suspend.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/trampoline.h>
struct e820map e820;

/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0xaeedbabe;

#ifdef CONFIG_PCI
EXPORT_SYMBOL(pci_mem_start);
#endif
/*
 * This function checks if any part of the range <start,end> is mapped
 * with type.
 */
int
e820_any_mapped(u64 start, u64 end, unsigned type)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (type && ei->type != type)
			continue;
		if (ei->addr >= end || ei->addr + ei->size <= start)
			continue;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(e820_any_mapped);
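
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * can probe whether the BIOS map says anything about a physical range
 * before claiming it.  The address range below is hypothetical.
 *
 *	if (e820_any_mapped(0xfed00000ULL, 0xfed00000ULL + PAGE_SIZE, 0))
 *		printk(KERN_INFO "range already described by the BIOS\n");
 *
 * Passing type == 0 matches entries of any type; a non-zero type such
 * as E820_RESERVED restricts the check to entries of that type.
 */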
/*
 * This function checks if the entire range <start,end> is mapped with type.
 *
 * Note: this function only works correctly if the e820 table is sorted and
 * non-overlapping, which is the case.
 */
int __init e820_all_mapped(u64 start, u64 end, unsigned type)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (type && ei->type != type)
			continue;
		/* is the region (part) in overlap with the current region? */
		if (ei->addr >= end || ei->addr + ei->size <= start)
			continue;

		/* if the region is at the beginning of <start,end> we move
		 * start to the end of the region since it's ok until there
		 */
		if (ei->addr <= start)
			start = ei->addr + ei->size;
		/*
		 * if start is now at or beyond end, we're done, full
		 * coverage is ensured
		 */
		if (start >= end)
			return 1;
	}
	return 0;
}
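
/*
 * Usage sketch (illustrative, not from the original file): verifying
 * that a whole candidate range is usable RAM before treating it as
 * such; the range below is hypothetical.
 *
 *	if (e820_all_mapped(16ULL << 20, 17ULL << 20, E820_RAM))
 *		printk(KERN_INFO "16MB-17MB is entirely RAM\n");
 */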
/*
 * Add a memory region to the kernel e820 map.
 */
void __init e820_add_region(u64 start, u64 size, int type)
{
	int x = e820.nr_map;

	if (x == ARRAY_SIZE(e820.map)) {
		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
		return;
	}

	e820.map[x].addr = start;
	e820.map[x].size = size;
	e820.map[x].type = type;
	e820.nr_map++;
}
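
/*
 * Usage sketch (illustrative): platform code that knows about a chunk
 * of memory the BIOS did not report could append it like this; the
 * numbers are made up.
 *
 *	e820_add_region(0x100000000ULL, 0x40000000ULL, E820_RAM);
 *
 * Entries added this way are not sorted or merged; callers that need a
 * canonical table run sanitize_e820_map() afterwards.
 */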
void __init e820_print_map(char *who)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		printk(KERN_INFO " %s: %016Lx - %016Lx ", who,
		       (unsigned long long) e820.map[i].addr,
		       (unsigned long long)
		       (e820.map[i].addr + e820.map[i].size));
		switch (e820.map[i].type) {
		case E820_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case E820_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		case E820_ACPI:
			printk(KERN_CONT "(ACPI data)\n");
			break;
		case E820_NVS:
			printk(KERN_CONT "(ACPI NVS)\n");
			break;
		default:
			printk(KERN_CONT "type %u\n", e820.map[i].type);
			break;
		}
	}
}
/*
 * Sanitize the BIOS e820 map.
 *
 * Some e820 responses include overlapping entries. The following
 * replaces the original e820 map with a new one, removing overlaps,
 * and resolving conflicting memory types in favor of the highest
 * numbered type.
 *
 * The input parameter biosmap points to an array of 'struct
 * e820entry' which on entry has elements in the range [0, *pnr_map)
 * valid, and which has space for up to max_nr_map entries.
 * On return, the resulting sanitized e820 map entries will be
 * overwritten in the same location, starting at biosmap.
 *
 * The integer pointed to by pnr_map must be valid on entry (the
 * current number of valid entries located at biosmap) and will
 * be updated on return, with the new number of valid entries
 * (something no more than max_nr_map.)
 *
 * The return value from sanitize_e820_map() is zero if it
 * successfully 'sanitized' the map entries passed in, and is -1
 * if it did nothing, which can happen if either (1) it was
 * only passed one map entry, or (2) any of the input map entries
 * were invalid (start + size < start, meaning that the size was
 * so big the described memory range wrapped around through zero.)
 *
 * Visually we're performing the following
 * (1,2,3,4 = memory types)...
 *
 * Sample memory map (w/overlaps):
 *	   ____22__________________
 *	   ______________________4_
 *	   ____1111________________
 *	   _44_____________________
 *	   11111111________________
 *	   ____________________33__
 *	   ___________44___________
 *	   __________33333_________
 *	   ______________22________
 *	   ___________________2222_
 *	   _________111111111______
 *	   _____________________11_
 *	   _________________4______
 *
 * Sanitized equivalent (no overlap):
 *	   1_______________________
 *	   _44_____________________
 *	   ___1____________________
 *	   ____22__________________
 *	   ______11________________
 *	   _________1______________
 *	   __________3_____________
 *	   ___________44___________
 *	   _____________33_________
 *	   _______________2________
 *	   ________________1_______
 *	   _________________4______
 *	   ___________________2____
 *	   ____________________33__
 *	   ______________________4_
 */
int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
			     int *pnr_map)
{
	struct change_member {
		struct e820entry *pbios; /* pointer to original bios entry */
		unsigned long long addr; /* address for this change point */
	};
	static struct change_member change_point_list[2*E820_X_MAX] __initdata;
	static struct change_member *change_point[2*E820_X_MAX] __initdata;
	static struct e820entry *overlap_list[E820_X_MAX] __initdata;
	static struct e820entry new_bios[E820_X_MAX] __initdata;
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_bios_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;
	BUG_ON(old_nr > max_nr_map);

	/* bail out if we find any unreasonable addresses in bios map */
	for (i = 0; i < old_nr; i++)
		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2 * old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (biosmap[i].size != 0) {
			change_point[chgidx]->addr = biosmap[i].addr;
			change_point[chgidx++]->pbios = &biosmap[i];
			change_point[chgidx]->addr = biosmap[i].addr +
				biosmap[i].size;
			change_point[chgidx++]->pbios = &biosmap[i];
		}
	}
	chg_nr = chgidx;

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			unsigned long long curaddr, lastaddr;
			unsigned long long curpbaddr, lastpbaddr;

			curaddr = change_point[i]->addr;
			lastaddr = change_point[i - 1]->addr;
			curpbaddr = change_point[i]->pbios->addr;
			lastpbaddr = change_point[i - 1]->pbios->addr;

			/*
			 * swap entries, when:
			 *
			 * curaddr < lastaddr or
			 * curaddr == lastaddr and curaddr == curpbaddr and
			 * lastaddr != lastpbaddr
			 */
			if (curaddr < lastaddr ||
			    (curaddr == lastaddr && curaddr == curpbaddr &&
			     lastaddr != lastpbaddr)) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}

	/* create a new bios memory map, removing overlaps */
	overlap_entries = 0;	 /* number of entries in the overlap table */
	new_bios_entry = 0;	 /* index for creating new bios map entries */
	last_type = 0;		 /* start with undefined memory type */
	last_addr = 0;		 /* start with 0 as last starting address */

	/* loop through change-points, determining effect on the new bios map */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping bios entries */
		if (change_point[chgidx]->addr ==
		    change_point[chgidx]->pbios->addr) {
			/*
			 * add map entry to overlap list (> 1 entry
			 * implies an overlap)
			 */
			overlap_list[overlap_entries++] =
				change_point[chgidx]->pbios;
		} else {
			/*
			 * remove entry from list (order independent,
			 * so swap with last)
			 */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] ==
				    change_point[chgidx]->pbios)
					overlap_list[i] =
						overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/*
		 * if there are overlapping entries, decide which
		 * "type" to use (larger value takes precedence --
		 * 1=usable, 2,3,4,4+=unusable)
		 */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/*
		 * continue building up new bios map based on this
		 * information
		 */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_bios[new_bios_entry].size =
					change_point[chgidx]->addr - last_addr;
				/*
				 * move forward only if the new size
				 * was non-zero
				 */
				if (new_bios[new_bios_entry].size != 0)
					/*
					 * no more space left for new
					 * bios entries ?
					 */
					if (++new_bios_entry >= max_nr_map)
						break;
			}
			if (current_type != 0) {
				new_bios[new_bios_entry].addr =
					change_point[chgidx]->addr;
				new_bios[new_bios_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	/* retain count for new bios entries */
	new_nr = new_bios_entry;

	/* copy new bios mapping into original location */
	memcpy(biosmap, new_bios, new_nr * sizeof(struct e820entry));
	*pnr_map = new_nr;

	return 0;
}
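
/*
 * Worked example (illustrative): given the two overlapping input
 * entries
 *
 *	{ addr = 0x0000, size = 0x8000, type = E820_RAM      }
 *	{ addr = 0x4000, size = 0x4000, type = E820_RESERVED }
 *
 * the algorithm above records change points at 0x0000, 0x4000 and
 * 0x8000 and, because the higher-numbered type wins inside the
 * overlap, produces
 *
 *	{ addr = 0x0000, size = 0x4000, type = E820_RAM      }
 *	{ addr = 0x4000, size = 0x4000, type = E820_RESERVED }
 */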
static int __init __copy_e820_map(struct e820entry *biosmap, int nr_map)
{
	while (nr_map) {
		u64 start = biosmap->addr;
		u64 size = biosmap->size;
		u64 end = start + size;
		u32 type = biosmap->type;

		/* Overflow in 64 bits? Ignore the memory map. */
		if (start > end)
			return -1;

		e820_add_region(start, size, type);

		biosmap++;
		nr_map--;
	}
	return 0;
}
/*
 * Copy the BIOS e820 map into a safe place.
 *
 * Sanity-check it while we're at it..
 *
 * If we're lucky and live on a modern system, the setup code
 * will have given us a memory map that we can use to properly
 * set up memory.  If we aren't, we'll fake a memory map.
 */
int __init copy_e820_map(struct e820entry *biosmap, int nr_map)
{
	/* Only one memory region (or negative)? Ignore it */
	if (nr_map < 2)
		return -1;

	return __copy_e820_map(biosmap, nr_map);
}
/*
 * Change all e820 entries of type old_type within [start, start + size)
 * to new_type and return how many bytes were actually retyped.
 */
u64 __init e820_update_range(u64 start, u64 size, unsigned old_type,
			     unsigned new_type)
{
	int i;
	u64 real_updated_size = 0;

	BUG_ON(old_type == new_type);

	if (size > (ULLONG_MAX - start))
		size = ULLONG_MAX - start;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		u64 final_start, final_end;

		if (ei->type != old_type)
			continue;
		/* totally covered? */
		if (ei->addr >= start &&
		    (ei->addr + ei->size) <= (start + size)) {
			ei->type = new_type;
			real_updated_size += ei->size;
			continue;
		}
		/* partially covered */
		final_start = max(start, ei->addr);
		final_end = min(start + size, ei->addr + ei->size);
		if (final_start >= final_end)
			continue;
		e820_add_region(final_start, final_end - final_start,
				new_type);
		real_updated_size += final_end - final_start;

		ei->size -= final_end - final_start;
		if (ei->addr < final_start)
			continue;
		ei->addr = final_end;
	}
	return real_updated_size;
}
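
/*
 * Usage sketch (illustrative): retyping the low megabyte from RAM to
 * reserved, much as the mem= handling below does for high memory; the
 * return value is how many bytes actually changed type.
 *
 *	u64 updated = e820_update_range(0, 0x100000, E820_RAM,
 *					E820_RESERVED);
 */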
/* make e820 not cover the range */
u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
			     int checktype)
{
	int i;
	u64 real_removed_size = 0;

	if (size > (ULLONG_MAX - start))
		size = ULLONG_MAX - start;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		u64 final_start, final_end;

		if (checktype && ei->type != old_type)
			continue;
		/* totally covered? */
		if (ei->addr >= start &&
		    (ei->addr + ei->size) <= (start + size)) {
			real_removed_size += ei->size;
			memset(ei, 0, sizeof(struct e820entry));
			continue;
		}
		/* partially covered */
		final_start = max(start, ei->addr);
		final_end = min(start + size, ei->addr + ei->size);
		if (final_start >= final_end)
			continue;
		real_removed_size += final_end - final_start;

		ei->size -= final_end - final_start;
		if (ei->addr < final_start)
			continue;
		ei->addr = final_end;
	}
	return real_removed_size;
}
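
/*
 * Usage sketch (illustrative): dropping a hypothetical firmware hole
 * from the map entirely; checktype == 0 removes matching ranges
 * regardless of their current type.
 *
 *	e820_remove_range(0xa0000, 0x60000, 0, 0);
 */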
void __init update_e820(void)
{
	int nr_map;

	nr_map = e820.nr_map;
	if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
		return;
	e820.nr_map = nr_map;
	printk(KERN_INFO "modified physical RAM map:\n");
	e820_print_map("modified");
}
/*
 * Search for a gap in the e820 memory space from start_addr to 2^32.
 */
__init int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
		unsigned long start_addr)
{
	unsigned long long last = 0x100000000ull;
	int i = e820.nr_map;
	int found = 0;

	while (--i >= 0) {
		unsigned long long start = e820.map[i].addr;
		unsigned long long end = start + e820.map[i].size;

		if (end < start_addr)
			continue;

		/*
		 * Since "last" is at most 4GB, we know we'll
		 * fit in 32 bits if this condition is true
		 */
		if (last > end) {
			unsigned long gap = last - end;

			if (gap >= *gapsize) {
				*gapsize = gap;
				*gapstart = end;
				found = 1;
			}
		}
		if (start < last)
			last = start;
	}
	return found;
}
/*
 * Search for the biggest gap in the low 32 bits of the e820
 * memory space.  We pass this space to PCI to assign MMIO resources
 * for hotplug or unconfigured devices in.
 * Hopefully the BIOS left enough space for us.
 */
__init void e820_setup_gap(void)
{
	unsigned long gapstart, gapsize, round;
	int found;

	gapstart = 0x10000000;
	gapsize = 0x400000;
	found = e820_search_gap(&gapstart, &gapsize, 0);

#ifdef CONFIG_X86_64
	if (!found) {
		gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024;
		printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit "
		       "address range\n"
		       KERN_ERR "PCI: Unassigned devices with 32bit resource "
		       "registers may break!\n");
	}
#endif

	/*
	 * See how much we want to round up: start off with
	 * rounding to the next 1MB area.
	 */
	round = 0x100000;
	while ((gapsize >> 4) > round)
		round += round;
	/* Fun with two's complement */
	pci_mem_start = (gapstart + round) & -round;

	printk(KERN_INFO
	       "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
	       pci_mem_start, gapstart, gapsize);
}
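
/*
 * Worked example (illustrative) of the rounding above: with
 * gapsize = 0x3000000 (48MB) at gapstart = 0xdfd00000, round grows
 * from 0x100000 until (gapsize >> 4) <= round, i.e. to 0x400000, so
 *
 *	pci_mem_start = (0xdfd00000 + 0x400000) & ~(0x400000 - 1)
 *		      = 0xe0000000
 *
 * which is a 4MB-aligned base inside the gap.
 */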
/*
 * Because of the size limitation of struct boot_params, only the first
 * 128 E820 memory entries are passed to the kernel via
 * boot_params.e820_map; the rest are passed via the SETUP_E820_EXT node
 * of the linked list of struct setup_data, which is parsed here.
 */
void __init parse_e820_ext(struct setup_data *sdata, unsigned long pa_data)
{
	u32 map_len;
	int entries;
	struct e820entry *extmap;

	entries = sdata->len / sizeof(struct e820entry);
	map_len = sdata->len + sizeof(struct setup_data);
	if (map_len > PAGE_SIZE)
		sdata = early_ioremap(pa_data, map_len);
	extmap = (struct e820entry *)(sdata->data);
	__copy_e820_map(extmap, entries);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	if (map_len > PAGE_SIZE)
		early_iounmap(sdata, map_len);
	printk(KERN_INFO "extended physical RAM map:\n");
	e820_print_map("extended");
}
#if defined(CONFIG_X86_64) || \
	(defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
/*
 * Find the ranges of physical addresses that do not correspond to
 * e820 RAM areas and mark the corresponding pages as nosave for
 * hibernation (32 bit) or software suspend and suspend to RAM (64 bit).
 *
 * This function requires the e820 map to be sorted and without any
 * overlapping entries and assumes the first e820 area to be RAM.
 */
void __init e820_mark_nosave_regions(unsigned long limit_pfn)
{
	int i;
	unsigned long pfn;

	pfn = PFN_DOWN(e820.map[0].addr + e820.map[0].size);
	for (i = 1; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (pfn < PFN_UP(ei->addr))
			register_nosave_region(pfn, PFN_UP(ei->addr));

		pfn = PFN_DOWN(ei->addr + ei->size);
		if (ei->type != E820_RAM)
			register_nosave_region(PFN_UP(ei->addr), pfn);

		if (pfn >= limit_pfn)
			break;
	}
}
#endif
/*
 * Early reserved memory areas.
 */
#define MAX_EARLY_RES 20

struct early_res {
	u64 start, end;
	char name[16];
	char overlap_ok;
};
static struct early_res early_res[MAX_EARLY_RES] __initdata = {
	{ 0, PAGE_SIZE, "BIOS data page" },	/* BIOS data page */
#if defined(CONFIG_X86_64) && defined(CONFIG_X86_TRAMPOLINE)
	{ TRAMPOLINE_BASE, TRAMPOLINE_BASE + 2 * PAGE_SIZE, "TRAMPOLINE" },
#endif
#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	{ PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE" },
	/*
	 * Has to be in very low memory so we can execute
	 * real-mode AP code.
	 */
	{ TRAMPOLINE_BASE, TRAMPOLINE_BASE + PAGE_SIZE, "TRAMPOLINE" },
#endif
	{}
};
static int __init find_overlapped_early(u64 start, u64 end)
{
	int i;
	struct early_res *r;

	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
		r = &early_res[i];
		if (end > r->start && start < r->end)
			break;
	}

	return i;
}
/*
 * Drop the i-th range from the early reservation map,
 * by copying any higher ranges down one over it, and
 * clearing what had been the last slot.
 */
static void __init drop_range(int i)
{
	int j;

	for (j = i + 1; j < MAX_EARLY_RES && early_res[j].end; j++)
		;

	memmove(&early_res[i], &early_res[i + 1],
		(j - 1 - i) * sizeof(struct early_res));

	early_res[j - 1].end = 0;
}
/*
 * Split any existing ranges that:
 *  1) are marked 'overlap_ok', and
 *  2) overlap with the stated range [start, end)
 * into whatever portion (if any) of the existing range is entirely
 * below or entirely above the stated range.  Drop the portion
 * of the existing range that overlaps with the stated range,
 * which will allow the caller of this routine to then add that
 * stated range without conflicting with any existing range.
 */
static void __init drop_overlaps_that_are_ok(u64 start, u64 end)
{
	int i;
	struct early_res *r;
	u64 lower_start, lower_end;
	u64 upper_start, upper_end;
	char name[16];

	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
		r = &early_res[i];

		/* Continue past non-overlapping ranges */
		if (end <= r->start || start >= r->end)
			continue;

		/*
		 * Leave non-ok overlaps as is; let caller
		 * panic "Overlapping early reservations"
		 * when it hits this overlap.
		 */
		if (!r->overlap_ok)
			return;

		/*
		 * We have an ok overlap.  We will drop it from the early
		 * reservation map, and add back in any non-overlapping
		 * portions (lower or upper) as separate, overlap_ok,
		 * non-overlapping ranges.
		 */

		/* 1. Note any non-overlapping (lower or upper) ranges. */
		strncpy(name, r->name, sizeof(name) - 1);

		lower_start = lower_end = 0;
		upper_start = upper_end = 0;
		if (r->start < start) {
			lower_start = r->start;
			lower_end = start;
		}
		if (r->end > end) {
			upper_start = end;
			upper_end = r->end;
		}

		/* 2. Drop the original ok overlapping range */
		drop_range(i);

		i--;		/* resume for-loop on copied down entry */

		/* 3. Add back in any non-overlapping ranges. */
		if (lower_end)
			reserve_early_overlap_ok(lower_start, lower_end, name);
		if (upper_end)
			reserve_early_overlap_ok(upper_start, upper_end, name);
	}
}
static void __init __reserve_early(u64 start, u64 end, char *name,
						int overlap_ok)
{
	int i;
	struct early_res *r;

	i = find_overlapped_early(start, end);
	if (i >= MAX_EARLY_RES)
		panic("Too many early reservations");
	r = &early_res[i];
	if (r->end)
		panic("Overlapping early reservations "
		      "%llx-%llx %s to %llx-%llx %s\n",
		      start, end - 1, name ? name : "", r->start,
		      r->end - 1, r->name);
	r->start = start;
	r->end = end;
	r->overlap_ok = overlap_ok;
	if (name)
		strncpy(r->name, name, sizeof(r->name) - 1);
}
/*
 * A few early reservations come here.
 *
 * The 'overlap_ok' in the name of this routine does -not- mean it
 * is ok for these reservations to overlap an earlier reservation.
 * Rather it means that it is ok for subsequent reservations to
 * overlap these.
 *
 * Use this entry point to reserve early ranges when you are doing
 * so out of "Paranoia", reserving perhaps more memory than you need,
 * just in case, and don't mind a subsequent overlapping reservation
 * that is known to be needed.
 *
 * The drop_overlaps_that_are_ok() call here isn't really needed.
 * It would be needed if we had two colliding 'overlap_ok'
 * reservations, so that the second such would not panic on the
 * overlap with the first.  We don't have any such as of this
 * writing, but might as well tolerate such if it happens in
 * the future.
 */
void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
{
	drop_overlaps_that_are_ok(start, end);
	__reserve_early(start, end, name, 1);
}
/*
 * Most early reservations come here.
 *
 * We first have drop_overlaps_that_are_ok() drop any pre-existing
 * 'overlap_ok' ranges, so that we can then reserve this memory
 * range without risk of panic'ing on an overlapping overlap_ok
 * early reservation.
 */
void __init reserve_early(u64 start, u64 end, char *name)
{
	drop_overlaps_that_are_ok(start, end);
	__reserve_early(start, end, name, 0);
}

void __init free_early(u64 start, u64 end)
{
	struct early_res *r;
	int i;

	i = find_overlapped_early(start, end);
	r = &early_res[i];
	if (i >= MAX_EARLY_RES || r->end != end || r->start != start)
		panic("free_early on not reserved area: %llx-%llx!",
			start, end);

	drop_range(i);
}
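
/*
 * Usage sketch (illustrative): early boot code reserves a range before
 * bootmem exists, and may release it again once the data has been
 * consumed; the range below is hypothetical.
 *
 *	reserve_early(0x8000, 0x9000, "EXAMPLE TABLE");
 *	...
 *	free_early(0x8000, 0x9000);
 *
 * free_early() must be handed the exact range that was reserved, or it
 * panics.
 */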
void __init early_res_to_bootmem(u64 start, u64 end)
{
	int i, count;
	u64 final_start, final_end;

	count = 0;
	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++)
		count++;

	printk(KERN_INFO "(%d early reservations) ==> bootmem\n", count);
	for (i = 0; i < count; i++) {
		struct early_res *r = &early_res[i];
		printk(KERN_INFO "  #%d [ %010llx - %010llx ] %16s", i,
			r->start, r->end, r->name);
		final_start = max(start, r->start);
		final_end = min(end, r->end);
		if (final_start >= final_end) {
			printk(KERN_CONT "\n");
			continue;
		}
		printk(KERN_CONT " ===> [ %010llx - %010llx ]\n",
			final_start, final_end);
		reserve_bootmem_generic(final_start, final_end - final_start,
				BOOTMEM_DEFAULT);
	}
}
/* Check for already reserved areas */
static inline int __init bad_addr(u64 *addrp, u64 size, u64 align)
{
	int i;
	u64 addr = *addrp;
	int changed = 0;
	struct early_res *r;
again:
	i = find_overlapped_early(addr, addr + size);
	r = &early_res[i];
	if (i < MAX_EARLY_RES && r->end) {
		*addrp = addr = round_up(r->end, align);
		changed = 1;
		goto again;
	}
	return changed;
}
/* Check for already reserved areas */
static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
{
	int i;
	u64 addr = *addrp, last;
	u64 size = *sizep;
	int changed = 0;
again:
	last = addr + size;
	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
		struct early_res *r = &early_res[i];
		if (last > r->start && addr < r->start) {
			size = r->start - addr;
			changed = 1;
			goto again;
		}
		if (last > r->end && addr < r->end) {
			addr = round_up(r->end, align);
			size = last - addr;
			changed = 1;
			goto again;
		}
		if (last <= r->end && addr >= r->start) {
			(*sizep)++;
			return 0;
		}
	}
	if (changed) {
		*addrp = addr;
		*sizep = size;
	}
	return changed;
}
/*
 * Find a free area with specified alignment in a specific range.
 */
u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		u64 addr, last;
		u64 ei_last;

		if (ei->type != E820_RAM)
			continue;
		addr = round_up(ei->addr, align);
		ei_last = ei->addr + ei->size;
		if (addr < start)
			addr = round_up(start, align);
		if (addr >= ei_last)
			continue;
		while (bad_addr(&addr, size, align) && addr+size <= ei_last)
			;
		last = addr + size;
		if (last > ei_last)
			continue;
		if (last > end)
			continue;
		return addr;
	}
	return -1ULL;
}
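
/*
 * Usage sketch (illustrative): grabbing 16KB of page-aligned RAM below
 * 16MB for an early buffer; -1ULL signals failure.  The range and name
 * are hypothetical.
 *
 *	u64 buf = find_e820_area(0, 16<<20, 4*PAGE_SIZE, PAGE_SIZE);
 *	if (buf != -1ULL)
 *		reserve_early(buf, buf + 4*PAGE_SIZE, "EARLY BUF");
 */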
/*
 * Find next free range after *start
 */
u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		u64 addr, last;
		u64 ei_last;

		if (ei->type != E820_RAM)
			continue;
		addr = round_up(ei->addr, align);
		ei_last = ei->addr + ei->size;
		if (addr < start)
			addr = round_up(start, align);
		if (addr >= ei_last)
			continue;
		*sizep = ei_last - addr;
		while (bad_addr_size(&addr, sizep, align) &&
			addr + *sizep <= ei_last)
			;
		last = addr + *sizep;
		if (last > ei_last)
			continue;
		return addr;
	}
	return -1ULL;
}
/*
 * Pre-allocate an area (historically 4K) and reserve it in the e820 map.
 */
u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
{
	u64 size = 0;
	u64 addr;
	u64 start;

	for (start = startt; ; start += size) {
		start = find_e820_area_size(start, &size, align);
		if (!(start + 1))
			return 0;
		if (size >= sizet)
			break;
	}

	addr = round_down(start + size - sizet, align);
	e820_update_range(addr, sizet, E820_RAM, E820_RESERVED);
	printk(KERN_INFO "update e820 for early_reserve_e820\n");
	update_e820();

	return addr;
}
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_PAE
#  define MAX_ARCH_PFN		(1ULL<<(36-PAGE_SHIFT))
# else
#  define MAX_ARCH_PFN		(1ULL<<(32-PAGE_SHIFT))
# endif
#else /* CONFIG_X86_32 */
# define MAX_ARCH_PFN MAXMEM>>PAGE_SHIFT
#endif

/*
 * Last pfn which the user wants to use.
 */
unsigned long __initdata end_user_pfn = MAX_ARCH_PFN;
/*
 * Find the highest page frame number we have available
 */
unsigned long __init e820_end_of_ram(void)
{
	unsigned long last_pfn;
	unsigned long max_arch_pfn = MAX_ARCH_PFN;

	last_pfn = find_max_pfn_with_active_regions();

	if (last_pfn > max_arch_pfn)
		last_pfn = max_arch_pfn;
	if (last_pfn > end_user_pfn)
		last_pfn = end_user_pfn;

	printk(KERN_INFO "last_pfn = %#lx max_arch_pfn = %#lx\n",
			 last_pfn, max_arch_pfn);
	return last_pfn;
}
/*
 * Finds an active region in the address range from start_pfn to last_pfn and
 * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
 */
int __init e820_find_active_region(const struct e820entry *ei,
				  unsigned long start_pfn,
				  unsigned long last_pfn,
				  unsigned long *ei_startpfn,
				  unsigned long *ei_endpfn)
{
	u64 align = PAGE_SIZE;

	*ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT;
	*ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT;

	/* Skip map entries smaller than a page */
	if (*ei_startpfn >= *ei_endpfn)
		return 0;

	/* Skip if map is outside the node */
	if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
				    *ei_startpfn >= last_pfn)
		return 0;

	/* Check for overlaps */
	if (*ei_startpfn < start_pfn)
		*ei_startpfn = start_pfn;
	if (*ei_endpfn > last_pfn)
		*ei_endpfn = last_pfn;

	/* Obey end_user_pfn to save on memmap */
	if (*ei_startpfn >= end_user_pfn)
		return 0;
	if (*ei_endpfn > end_user_pfn)
		*ei_endpfn = end_user_pfn;

	return 1;
}
/* Walk the e820 map and register active regions within a node */
void __init e820_register_active_regions(int nid, unsigned long start_pfn,
					 unsigned long last_pfn)
{
	unsigned long ei_startpfn;
	unsigned long ei_endpfn;
	int i;

	for (i = 0; i < e820.nr_map; i++)
		if (e820_find_active_region(&e820.map[i],
					    start_pfn, last_pfn,
					    &ei_startpfn, &ei_endpfn))
			add_active_range(nid, ei_startpfn, ei_endpfn);
}
/*
 * Find the hole size (in bytes) in the memory range.
 * @start: starting address of the memory range to scan
 * @end: ending address of the memory range to scan
 */
u64 __init e820_hole_size(u64 start, u64 end)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long last_pfn = end >> PAGE_SHIFT;
	unsigned long ei_startpfn, ei_endpfn, ram = 0;
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		if (e820_find_active_region(&e820.map[i],
					    start_pfn, last_pfn,
					    &ei_startpfn, &ei_endpfn))
			ram += ei_endpfn - ei_startpfn;
	}
	return end - start - ((u64)ram << PAGE_SHIFT);
}
static void early_panic(char *msg)
{
	early_printk(msg);
	panic(msg);
}

/* "mem=nopentium" disables the 4MB page tables. */
static int __init parse_memopt(char *p)
{
	u64 mem_size;

	if (!p)
		return -EINVAL;

#ifdef CONFIG_X86_32
	if (!strcmp(p, "nopentium")) {
		setup_clear_cpu_cap(X86_FEATURE_PSE);
		return 0;
	}
#endif

	mem_size = memparse(p, &p);
	end_user_pfn = mem_size >> PAGE_SHIFT;
	e820_update_range(mem_size, ULLONG_MAX - mem_size,
			E820_RAM, E820_RESERVED);

	return 0;
}
early_param("mem", parse_memopt);
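
/*
 * Example command lines (illustrative) handled by parse_memopt():
 *
 *	mem=512M	caps usable RAM at 512MB; everything above is
 *			retyped from E820_RAM to E820_RESERVED
 *	mem=nopentium	(32 bit only) just disables 4MB pages
 */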
static int userdef __initdata;

static int __init parse_memmap_opt(char *p)
{
	char *oldp;
	u64 start_at, mem_size;

	if (!strcmp(p, "exactmap")) {
#ifdef CONFIG_CRASH_DUMP
		/*
		 * If we are doing a crash dump, we still need to know
		 * the real mem size before the original memory map is
		 * reset.
		 */
		e820_register_active_regions(0, 0, -1UL);
		saved_max_pfn = e820_end_of_ram();
		remove_all_active_ranges();
#endif
		e820.nr_map = 0;
		userdef = 1;
		return 0;
	}

	oldp = p;
	mem_size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	userdef = 1;
	if (*p == '@') {
		start_at = memparse(p+1, &p);
		e820_add_region(start_at, mem_size, E820_RAM);
	} else if (*p == '#') {
		start_at = memparse(p+1, &p);
		e820_add_region(start_at, mem_size, E820_ACPI);
	} else if (*p == '$') {
		start_at = memparse(p+1, &p);
		e820_add_region(start_at, mem_size, E820_RESERVED);
	} else {
		end_user_pfn = (mem_size >> PAGE_SHIFT);
		e820_update_range(mem_size, ULLONG_MAX - mem_size,
			E820_RAM, E820_RESERVED);
	}
	return *p == '\0' ? 0 : -EINVAL;
}
early_param("memmap", parse_memmap_opt);
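
/*
 * Example command lines (illustrative) handled by parse_memmap_opt():
 *
 *	memmap=exactmap		throw away the BIOS map and start empty
 *	memmap=64M@16M		add 64MB of RAM starting at 16MB
 *	memmap=4M#0xfec00000	add 4MB of ACPI data at 0xfec00000
 *	memmap=1M$0xa0000	reserve 1MB at 0xa0000
 *	memmap=512M		like mem=512M, cap usable RAM
 */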
void __init finish_e820_parsing(void)
{
	if (userdef) {
		int nr = e820.nr_map;

		if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr) < 0)
			early_panic("Invalid user supplied memory map");
		e820.nr_map = nr;

		printk(KERN_INFO "user-defined physical RAM map:\n");
		e820_print_map("user");
	}
}
/*
 * Mark e820 reserved areas as busy for the resource manager.
 */
void __init e820_reserve_resources(void)
{
	int i;
	struct resource *res;

	res = alloc_bootmem_low(sizeof(struct resource) * e820.nr_map);
	for (i = 0; i < e820.nr_map; i++) {
		switch (e820.map[i].type) {
		case E820_RAM:	res->name = "System RAM"; break;
		case E820_ACPI:	res->name = "ACPI Tables"; break;
		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
		default:	res->name = "reserved";
		}
		res->start = e820.map[i].addr;
		res->end = res->start + e820.map[i].size - 1;
#ifndef CONFIG_RESOURCES_64BIT
		/* resource_size_t is 32 bits here; skip entries above 4GB */
		if (res->end > 0x100000000ULL) {
			res++;
			continue;
		}
#endif
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		insert_resource(&iomem_resource, res);
		res++;
	}
}
char *__init default_machine_specific_memory_setup(void)
{
	char *who = "BIOS-e820";
	int new_nr;
	/*
	 * Try to copy the BIOS-supplied E820-map.
	 *
	 * Otherwise fake a memory map; one section from 0k->640k,
	 * the next section from 1mb->appropriate_mem_k
	 */
	new_nr = boot_params.e820_entries;
	sanitize_e820_map(boot_params.e820_map,
			ARRAY_SIZE(boot_params.e820_map),
			&new_nr);
	boot_params.e820_entries = new_nr;
	if (copy_e820_map(boot_params.e820_map, boot_params.e820_entries) < 0) {
		u64 mem_size;

		/* compare results from other methods and take the greater */
		if (boot_params.alt_mem_k
		    < boot_params.screen_info.ext_mem_k) {
			mem_size = boot_params.screen_info.ext_mem_k;
			who = "BIOS-88";
		} else {
			mem_size = boot_params.alt_mem_k;
			who = "BIOS-e801";
		}

		e820.nr_map = 0;
		e820_add_region(0, LOWMEMSIZE(), E820_RAM);
		e820_add_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
	}

	/* In case someone cares... */
	return who;
}
char *__init __attribute__((weak)) machine_specific_memory_setup(void)
{
	return default_machine_specific_memory_setup();
}

/* Overridden in paravirt.c if CONFIG_PARAVIRT */
char * __init __attribute__((weak)) memory_setup(void)
{
	return machine_specific_memory_setup();
}

void __init setup_memory_map(void)
{
	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
	e820_print_map(memory_setup());
}
#ifdef CONFIG_X86_64
int __init arch_get_ram_range(int slot, u64 *addr, u64 *size)
{
	int i;

	if (slot < 0 || slot >= e820.nr_map)
		return -1;
	for (i = slot; i < e820.nr_map; i++) {
		if (e820.map[i].type != E820_RAM)
			continue;
		break;
	}
	if (i == e820.nr_map || e820.map[i].addr > (max_pfn << PAGE_SHIFT))
		return -1;

	*addr = e820.map[i].addr;
	*size = min_t(u64, e820.map[i].size + e820.map[i].addr,
		max_pfn << PAGE_SHIFT) - *addr;
	return i + 1;
}
#endif