/*
 * Handle the memory map.
 * The functions here do the job until bootmem takes over.
 *
 *  Getting sanitize_e820_map() in sync with i386 version by applying change:
 *  -  Provisions for empty E820 memory regions (reported by certain BIOSes).
 *     Alex Achenbach <xela@slit.de>, December 2002.
 *  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/suspend.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/trampoline.h>
struct e820map e820;

/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0xaeedbabe;

#ifdef CONFIG_PCI
EXPORT_SYMBOL(pci_mem_start);
#endif
/*
 * This function checks if any part of the range <start,end> is mapped
 * with type.
 */
int
e820_any_mapped(u64 start, u64 end, unsigned type)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (type && ei->type != type)
			continue;
		if (ei->addr >= end || ei->addr + ei->size <= start)
			continue;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(e820_any_mapped);
/*
 * This function checks if the entire range <start,end> is mapped with type.
 *
 * Note: this function only works correctly if the e820 table is sorted and
 * non-overlapping, which is the case.
 */
int __init e820_all_mapped(u64 start, u64 end, unsigned type)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (type && ei->type != type)
			continue;
		/* is the region (part) in overlap with the current region ? */
		if (ei->addr >= end || ei->addr + ei->size <= start)
			continue;

		/* if the region is at the beginning of <start,end> we move
		 * start to the end of the region since it's ok until there
		 */
		if (ei->addr <= start)
			start = ei->addr + ei->size;
		/*
		 * if start is now at or beyond end, we're done, full
		 * coverage
		 */
		if (start >= end)
			return 1;
	}
	return 0;
}
/*
 * Add a memory region to the kernel e820 map.
 */
void __init e820_add_region(u64 start, u64 size, int type)
{
	int x = e820.nr_map;

	if (x == ARRAY_SIZE(e820.map)) {
		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
		return;
	}

	e820.map[x].addr = start;
	e820.map[x].size = size;
	e820.map[x].type = type;
	e820.nr_map++;
}
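
/*
 * Print the e820 map to the kernel log, one entry per line; 'who'
 * names the source of the map (e.g. "BIOS-e820", "modified", "user").
 */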
void __init e820_print_map(char *who)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		printk(KERN_INFO " %s: %016Lx - %016Lx ", who,
		       (unsigned long long) e820.map[i].addr,
		       (unsigned long long)
		       (e820.map[i].addr + e820.map[i].size));
		switch (e820.map[i].type) {
		case E820_RAM:
		case E820_RESERVED_KERN:
			printk(KERN_CONT "(usable)\n");
			break;
		case E820_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		case E820_ACPI:
			printk(KERN_CONT "(ACPI data)\n");
			break;
		case E820_NVS:
			printk(KERN_CONT "(ACPI NVS)\n");
			break;
		default:
			printk(KERN_CONT "type %u\n", e820.map[i].type);
			break;
		}
	}
}
/*
 * Sanitize the BIOS e820 map.
 *
 * Some e820 responses include overlapping entries. The following
 * replaces the original e820 map with a new one, removing overlaps,
 * and resolving conflicting memory types in favor of the highest
 * numbered type.
 *
 * The input parameter biosmap points to an array of 'struct
 * e820entry' which on entry has elements in the range [0, *pnr_map)
 * valid, and which has space for up to max_nr_map entries.
 * On return, the resulting sanitized e820 map entries will be
 * overwritten in the same location, starting at biosmap.
 *
 * The integer pointed to by pnr_map must be valid on entry (the
 * current number of valid entries located at biosmap) and will
 * be updated on return, with the new number of valid entries
 * (something no more than max_nr_map.)
 *
 * The return value from sanitize_e820_map() is zero if it
 * successfully 'sanitized' the map entries passed in, and is -1
 * if it did nothing, which can happen if either of (1) it was
 * only passed one map entry, or (2) any of the input map entries
 * were invalid (start + size < start, meaning that the size was
 * so big the described memory range wrapped around through zero.)
 *
 * Visually we're performing the following
 * (1,2,3,4 = memory types)...
 *
 * Sample memory map (w/overlaps):
 *	   ____22__________________
 *	   ______________________4_
 *	   ____1111________________
 *	   _44_____________________
 *	   11111111________________
 *	   ____________________33__
 *	   ___________44___________
 *	   __________33333_________
 *	   ______________22________
 *	   ___________________2222_
 *	   _________111111111______
 *	   _____________________11_
 *	   _________________4______
 *
 * Sanitized equivalent (no overlap):
 *	   1_______________________
 *	   _44_____________________
 *	   ___1____________________
 *	   ____22__________________
 *	   ______11________________
 *	   _________1______________
 *	   __________3_____________
 *	   ___________44___________
 *	   _____________33_________
 *	   _______________2________
 *	   ________________1_______
 *	   _________________4______
 *	   ___________________2____
 *	   ____________________33__
 *	   ______________________4_
 */
int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
			     int *pnr_map)
{
	struct change_member {
		struct e820entry *pbios; /* pointer to original bios entry */
		unsigned long long addr; /* address for this change point */
	};
	static struct change_member change_point_list[2*E820_X_MAX] __initdata;
	static struct change_member *change_point[2*E820_X_MAX] __initdata;
	static struct e820entry *overlap_list[E820_X_MAX] __initdata;
	static struct e820entry new_bios[E820_X_MAX] __initdata;
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_bios_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;
	BUG_ON(old_nr > max_nr_map);

	/* bail out if we find any unreasonable addresses in bios map */
	for (i = 0; i < old_nr; i++)
		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2 * old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (biosmap[i].size != 0) {
			change_point[chgidx]->addr = biosmap[i].addr;
			change_point[chgidx++]->pbios = &biosmap[i];
			change_point[chgidx]->addr = biosmap[i].addr +
				biosmap[i].size;
			change_point[chgidx++]->pbios = &biosmap[i];
		}
	}
	chg_nr = chgidx;

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			unsigned long long curaddr, lastaddr;
			unsigned long long curpbaddr, lastpbaddr;

			curaddr = change_point[i]->addr;
			lastaddr = change_point[i - 1]->addr;
			curpbaddr = change_point[i]->pbios->addr;
			lastpbaddr = change_point[i - 1]->pbios->addr;

			/*
			 * swap entries, when:
			 *
			 * curaddr < lastaddr or
			 * curaddr == lastaddr and curaddr == curpbaddr and
			 * lastaddr != lastpbaddr
			 */
			if (curaddr < lastaddr ||
			    (curaddr == lastaddr && curaddr == curpbaddr &&
			     lastaddr != lastpbaddr)) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}
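
	/*
	 * Walk the sorted change-points left to right.  overlap_list
	 * holds the original entries whose [addr, addr+size) range covers
	 * the current sweep position; the highest type among them decides
	 * what the output map says about the span up to the next
	 * change-point.
	 */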
	/* create a new bios memory map, removing overlaps */
	overlap_entries = 0;	 /* number of entries in the overlap table */
	new_bios_entry = 0;	 /* index for creating new bios map entries */
	last_type = 0;		 /* start with undefined memory type */
	last_addr = 0;		 /* start with 0 as last starting address */

	/* loop through change-points, determining effect on the new bios map */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping bios entries */
		if (change_point[chgidx]->addr ==
		    change_point[chgidx]->pbios->addr) {
			/*
			 * add map entry to overlap list (> 1 entry
			 * implies an overlap)
			 */
			overlap_list[overlap_entries++] =
				change_point[chgidx]->pbios;
		} else {
			/*
			 * remove entry from list (order independent,
			 * so swap with last)
			 */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] ==
				    change_point[chgidx]->pbios)
					overlap_list[i] =
						overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/*
		 * if there are overlapping entries, decide which
		 * "type" to use (larger value takes precedence --
		 * 1=usable, 2,3,4,4+=unusable)
		 */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/*
		 * continue building up new bios map based on this
		 * information
		 */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_bios[new_bios_entry].size =
					change_point[chgidx]->addr - last_addr;
				/*
				 * move forward only if the new size
				 * was non-zero
				 */
				if (new_bios[new_bios_entry].size != 0)
					/*
					 * no more space left for new
					 * bios entries ?
					 */
					if (++new_bios_entry >= max_nr_map)
						break;
			}
			if (current_type != 0) {
				new_bios[new_bios_entry].addr =
					change_point[chgidx]->addr;
				new_bios[new_bios_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	/* retain count for new bios entries */
	new_nr = new_bios_entry;

	/* copy new bios mapping into original location */
	memcpy(biosmap, new_bios, new_nr * sizeof(struct e820entry));
	*pnr_map = new_nr;

	return 0;
}
static int __init __copy_e820_map(struct e820entry *biosmap, int nr_map)
{
	while (nr_map) {
		u64 start = biosmap->addr;
		u64 size = biosmap->size;
		u64 end = start + size;
		u32 type = biosmap->type;

		/* Overflow in 64 bits? Ignore the memory map. */
		if (start > end)
			return -1;

		e820_add_region(start, size, type);

		biosmap++;
		nr_map--;
	}
	return 0;
}
/*
 * Copy the BIOS e820 map into a safe place.
 *
 * Sanity-check it while we're at it..
 *
 * If we're lucky and live on a modern system, the setup code
 * will have given us a memory map that we can use to properly
 * set up memory.  If we aren't, we'll fake a memory map.
 */
int __init copy_e820_map(struct e820entry *biosmap, int nr_map)
{
	/* Only one memory region (or negative)? Ignore it */
	if (nr_map < 2)
		return -1;

	return __copy_e820_map(biosmap, nr_map);
}
u64 __init e820_update_range(u64 start, u64 size, unsigned old_type,
			     unsigned new_type)
{
	int i;
	u64 real_updated_size = 0;

	BUG_ON(old_type == new_type);

	if (size > (ULLONG_MAX - start))
		size = ULLONG_MAX - start;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		u64 final_start, final_end;
		if (ei->type != old_type)
			continue;
		/* totally covered? */
		if (ei->addr >= start &&
		    (ei->addr + ei->size) <= (start + size)) {
			ei->type = new_type;
			real_updated_size += ei->size;
			continue;
		}
		/* partially covered */
		final_start = max(start, ei->addr);
		final_end = min(start + size, ei->addr + ei->size);
		if (final_start >= final_end)
			continue;
		e820_add_region(final_start, final_end - final_start,
				new_type);
		real_updated_size += final_end - final_start;

		ei->size -= final_end - final_start;
		if (ei->addr < final_start)
			continue;
		ei->addr = final_end;
	}
	return real_updated_size;
}
/* make e820 not cover the range */
u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
			     int checktype)
{
	int i;
	u64 real_removed_size = 0;

	if (size > (ULLONG_MAX - start))
		size = ULLONG_MAX - start;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		u64 final_start, final_end;

		if (checktype && ei->type != old_type)
			continue;
		/* totally covered? */
		if (ei->addr >= start &&
		    (ei->addr + ei->size) <= (start + size)) {
			real_removed_size += ei->size;
			memset(ei, 0, sizeof(struct e820entry));
			continue;
		}
		/* partially covered */
		final_start = max(start, ei->addr);
		final_end = min(start + size, ei->addr + ei->size);
		if (final_start >= final_end)
			continue;
		real_removed_size += final_end - final_start;

		ei->size -= final_end - final_start;
		if (ei->addr < final_start)
			continue;
		ei->addr = final_end;
	}
	return real_removed_size;
}
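
/*
 * Re-sanitize the e820 map after it has been modified (for example by
 * e820_update_range()) and print the result.
 */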
void __init update_e820(void)
{
	int nr_map;

	nr_map = e820.nr_map;
	if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
		return;
	e820.nr_map = nr_map;
	printk(KERN_INFO "modified physical RAM map:\n");
	e820_print_map("modified");
}
/*
 * Search for a gap in the e820 memory space from start_addr to 2^32.
 */
__init int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
		unsigned long start_addr)
{
	unsigned long long last = 0x100000000ull;
	int i = e820.nr_map;
	int found = 0;

	while (--i >= 0) {
		unsigned long long start = e820.map[i].addr;
		unsigned long long end = start + e820.map[i].size;

		if (end < start_addr)
			continue;

		/*
		 * Since "last" is at most 4GB, we know we'll
		 * fit in 32 bits if this condition is true
		 */
		if (last > end) {
			unsigned long gap = last - end;

			if (gap >= *gapsize) {
				*gapsize = gap;
				*gapstart = end;
				found = 1;
			}
		}
		if (start < last)
			last = start;
	}
	return found;
}

/*
 * Search for the biggest gap in the low 32 bits of the e820
 * memory space.  We pass this space to PCI to assign MMIO resources
 * for hotplug or unconfigured devices in.
 * Hopefully the BIOS left enough space for us.
 */
__init void e820_setup_gap(void)
{
	unsigned long gapstart, gapsize, round;
	int found;

	gapstart = 0x10000000;
	gapsize = 0x400000;
	found = e820_search_gap(&gapstart, &gapsize, 0);

#ifdef CONFIG_X86_64
	if (!found) {
		gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024;
		printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit "
		       "address range\n"
		       KERN_ERR "PCI: Unassigned devices with 32bit resource "
		       "registers may break!\n");
	}
#endif

	/*
	 * See how much we want to round up: start off with
	 * rounding to the next 1MB area.
	 */
	round = 0x100000;
	while ((gapsize >> 4) > round)
		round += round;
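	/*
	 * round is a power of two, so -round is its two's complement: a
	 * mask with all bits at and above log2(round) set.  Stepping past
	 * gapstart by one full round and then ANDing with -round aligns
	 * the start up to the next round-sized boundary.
	 */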
	/* Fun with two's complement */
	pci_mem_start = (gapstart + round) & -round;

	printk(KERN_INFO
	       "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
	       pci_mem_start, gapstart, gapsize);
}
/*
 * Because of the size limitation of struct boot_params, only the first
 * 128 E820 memory entries are passed to the kernel via
 * boot_params.e820_map; the others are passed via the SETUP_E820_EXT
 * node of the linked list of struct setup_data, which is parsed here.
 */
void __init parse_e820_ext(struct setup_data *sdata, unsigned long pa_data)
{
	u32 map_len;
	int entries;
	struct e820entry *extmap;

	entries = sdata->len / sizeof(struct e820entry);
	map_len = sdata->len + sizeof(struct setup_data);
	if (map_len > PAGE_SIZE)
		sdata = early_ioremap(pa_data, map_len);
	extmap = (struct e820entry *)(sdata->data);
	__copy_e820_map(extmap, entries);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	if (map_len > PAGE_SIZE)
		early_iounmap(sdata, map_len);
	printk(KERN_INFO "extended physical RAM map:\n");
	e820_print_map("extended");
}
#if defined(CONFIG_X86_64) || \
	(defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
/*
 * Find the ranges of physical addresses that do not correspond to
 * e820 RAM areas and mark the corresponding pages as nosave for
 * hibernation (32 bit) or software suspend and suspend to RAM (64 bit).
 *
 * This function requires the e820 map to be sorted and without any
 * overlapping entries and assumes the first e820 area to be RAM.
 */
void __init e820_mark_nosave_regions(unsigned long limit_pfn)
{
	int i;
	unsigned long pfn;

	pfn = PFN_DOWN(e820.map[0].addr + e820.map[0].size);
	for (i = 1; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (pfn < PFN_UP(ei->addr))
			register_nosave_region(pfn, PFN_UP(ei->addr));

		pfn = PFN_DOWN(ei->addr + ei->size);
		if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN)
			register_nosave_region(PFN_UP(ei->addr), pfn);

		if (pfn >= limit_pfn)
			break;
	}
}
#endif
/*
 * Early reserved memory areas.
 */
#define MAX_EARLY_RES 20

struct early_res {
	u64 start, end;
	char name[16];
	char overlap_ok;
};
static struct early_res early_res[MAX_EARLY_RES] __initdata = {
	{ 0, PAGE_SIZE, "BIOS data page" },	/* BIOS data page */
#if defined(CONFIG_X86_64) && defined(CONFIG_X86_TRAMPOLINE)
	{ TRAMPOLINE_BASE, TRAMPOLINE_BASE + 2 * PAGE_SIZE, "TRAMPOLINE" },
#endif
#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	{ PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE" },
	/*
	 * Has to be in very low memory so we can execute
	 * real-mode AP code.
	 */
	{ TRAMPOLINE_BASE, TRAMPOLINE_BASE + PAGE_SIZE, "TRAMPOLINE" },
#endif
	{}
};
static int __init find_overlapped_early(u64 start, u64 end)
{
	int i;
	struct early_res *r;

	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
		r = &early_res[i];
		if (end > r->start && start < r->end)
			break;
	}

	return i;
}
/*
 * Drop the i-th range from the early reservation map,
 * by copying any higher ranges down one over it, and
 * clearing what had been the last slot.
 */
static void __init drop_range(int i)
{
	int j;

	for (j = i + 1; j < MAX_EARLY_RES && early_res[j].end; j++)
		;

	memmove(&early_res[i], &early_res[i + 1],
	       (j - 1 - i) * sizeof(struct early_res));

	early_res[j - 1].end = 0;
}
/*
 * Split any existing ranges that:
 *  1) are marked 'overlap_ok', and
 *  2) overlap with the stated range [start, end)
 * into whatever portion (if any) of the existing range is entirely
 * below or entirely above the stated range.  Drop the portion
 * of the existing range that overlaps with the stated range,
 * which will allow the caller of this routine to then add that
 * stated range without conflicting with any existing range.
 */
static void __init drop_overlaps_that_are_ok(u64 start, u64 end)
{
	int i;
	struct early_res *r;
	u64 lower_start, lower_end;
	u64 upper_start, upper_end;
	char name[16];

	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
		r = &early_res[i];

		/* Continue past non-overlapping ranges */
		if (end <= r->start || start >= r->end)
			continue;

		/*
		 * Leave non-ok overlaps as is; let caller
		 * panic "Overlapping early reservations"
		 * when it hits this overlap.
		 */
		if (!r->overlap_ok)
			return;

		/*
		 * We have an ok overlap.  We will drop it from the early
		 * reservation map, and add back in any non-overlapping
		 * portions (lower or upper) as separate, overlap_ok,
		 * non-overlapping ranges.
		 */

		/* 1. Note any non-overlapping (lower or upper) ranges. */
		strncpy(name, r->name, sizeof(name) - 1);

		lower_start = lower_end = 0;
		upper_start = upper_end = 0;
		if (r->start < start) {
			lower_start = r->start;
			lower_end = start;
		}
		if (r->end > end) {
			upper_start = end;
			upper_end = r->end;
		}

		/* 2. Drop the original ok overlapping range */
		drop_range(i);

		i--;		/* resume for-loop on copied down entry */

		/* 3. Add back in any non-overlapping ranges. */
		if (lower_end)
			reserve_early_overlap_ok(lower_start, lower_end, name);
		if (upper_end)
			reserve_early_overlap_ok(upper_start, upper_end, name);
	}
}
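
/*
 * Core reservation helper: claim the first free early_res slot for
 * [start, end).  Panics if the table is full or if the range collides
 * with an existing reservation; the caller is expected to have dropped
 * any 'overlap_ok' collisions first.
 */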
static void __init __reserve_early(u64 start, u64 end, char *name,
						int overlap_ok)
{
	int i;
	struct early_res *r;

	i = find_overlapped_early(start, end);
	if (i >= MAX_EARLY_RES)
		panic("Too many early reservations");
	r = &early_res[i];
	if (r->end)
		panic("Overlapping early reservations "
		      "%llx-%llx %s to %llx-%llx %s\n",
		      start, end - 1, name?name:"", r->start,
		      r->end - 1, r->name);
	r->start = start;
	r->end = end;
	r->overlap_ok = overlap_ok;
	if (name)
		strncpy(r->name, name, sizeof(r->name) - 1);
}
/*
 * A few early reservations come here.
 *
 * The 'overlap_ok' in the name of this routine does -not- mean it
 * is ok for these reservations to overlap an earlier reservation.
 * Rather it means that it is ok for subsequent reservations to
 * overlap this one.
 *
 * Use this entry point to reserve early ranges when you are doing
 * so out of "Paranoia", reserving perhaps more memory than you need,
 * just in case, and don't mind a subsequent overlapping reservation
 * that is known to be needed.
 *
 * The drop_overlaps_that_are_ok() call here isn't really needed.
 * It would be needed if we had two colliding 'overlap_ok'
 * reservations, so that the second such would not panic on the
 * overlap with the first.  We don't have any such as of this
 * writing, but might as well tolerate such if it happens in
 * the future.
 */
void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
{
	drop_overlaps_that_are_ok(start, end);
	__reserve_early(start, end, name, 1);
}
/*
 * Most early reservations come here.
 *
 * We first have drop_overlaps_that_are_ok() drop any pre-existing
 * 'overlap_ok' ranges, so that we can then reserve this memory
 * range without risk of panic'ing on an overlapping overlap_ok
 * early reservation.
 */
void __init reserve_early(u64 start, u64 end, char *name)
{
	drop_overlaps_that_are_ok(start, end);
	__reserve_early(start, end, name, 0);
}
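
/*
 * Release an early reservation.  start/end must exactly match a
 * recorded reservation; anything else is a bug and panics.
 */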
void __init free_early(u64 start, u64 end)
{
	struct early_res *r;
	int i;

	i = find_overlapped_early(start, end);
	r = &early_res[i];
	if (i >= MAX_EARLY_RES || r->end != end || r->start != start)
		panic("free_early on not reserved area: %llx-%llx!",
			 start, end - 1);

	drop_range(i);
}
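
/*
 * Hand the early reservations over to the bootmem allocator: every
 * recorded range is clipped to [start, end) and, if anything remains,
 * reserved via reserve_bootmem_generic().
 */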
void __init early_res_to_bootmem(u64 start, u64 end)
{
	int i, count;
	u64 final_start, final_end;

	count = 0;
	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++)
		count++;

	printk(KERN_INFO "(%d early reservations) ==> bootmem\n", count);
	for (i = 0; i < count; i++) {
		struct early_res *r = &early_res[i];
		printk(KERN_INFO " #%d [ %010llx - %010llx ] %16s", i,
			r->start, r->end, r->name);
		final_start = max(start, r->start);
		final_end = min(end, r->end);
		if (final_start >= final_end) {
			printk(KERN_CONT "\n");
			continue;
		}
		printk(KERN_CONT " ===> [ %010llx - %010llx ]\n",
			final_start, final_end);
		reserve_bootmem_generic(final_start, final_end - final_start,
				BOOTMEM_DEFAULT);
	}
}
/* Check for already reserved areas */
static inline int __init bad_addr(u64 *addrp, u64 size, u64 align)
{
	int i;
	u64 addr = *addrp;
	int changed = 0;
	struct early_res *r;
again:
	i = find_overlapped_early(addr, addr + size);
	r = &early_res[i];
	if (i < MAX_EARLY_RES && r->end) {
		*addrp = addr = round_up(r->end, align);
		changed = 1;
		goto again;
	}
	return changed;
}
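
/*
 * Like bad_addr(), but may also shrink *sizep so that the candidate
 * range [*addrp, *addrp + *sizep) stops short of the next early
 * reservation instead of only skipping past it.
 */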
/* Check for already reserved areas */
static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
{
	int i;
	u64 addr = *addrp, last;
	u64 size = *sizep;
	int changed = 0;
again:
	last = addr + size;
	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
		struct early_res *r = &early_res[i];
		if (last > r->start && addr < r->start) {
			size = r->start - addr;
			changed = 1;
			goto again;
		}
		if (last > r->end && addr < r->end) {
			addr = round_up(r->end, align);
			size = last - addr;
			changed = 1;
			goto again;
		}
		/* range completely covered by this reservation: unusable */
		if (last <= r->end && addr >= r->start) {
			(*sizep)++;
			return 0;
		}
	}
	if (changed) {
		*addrp = addr;
		*sizep = size;
	}
	return changed;
}
/*
 * Find a free area with specified alignment in a specific range.
 */
u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		u64 addr, last;
		u64 ei_last;

		if (ei->type != E820_RAM)
			continue;
		addr = round_up(ei->addr, align);
		ei_last = ei->addr + ei->size;
		if (addr < start)
			addr = round_up(start, align);
		if (addr >= ei_last)
			continue;
		while (bad_addr(&addr, size, align) && addr+size <= ei_last)
			;
		last = addr + size;
		if (last > ei_last)
			continue;
		if (last > end)
			continue;
		return addr;
	}
	return -1ULL;
}
/*
 * Find next free range after *start
 */
u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		u64 addr, last;
		u64 ei_last;

		if (ei->type != E820_RAM)
			continue;
		addr = round_up(ei->addr, align);
		ei_last = ei->addr + ei->size;
		if (addr < start)
			addr = round_up(start, align);
		if (addr >= ei_last)
			continue;
		*sizep = ei_last - addr;
		while (bad_addr_size(&addr, sizep, align) &&
			addr + *sizep <= ei_last)
			;
		last = addr + *sizep;
		if (last > ei_last)
			continue;
		return addr;
	}
	return -1UL;
}
/*
 * Pre-allocate a block of RAM and mark it reserved in the e820 map.
 */
u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
{
	u64 size = 0;
	u64 addr;
	u64 start;

	start = startt;
	while (size < sizet) {
		start = find_e820_area_size(start, &size, align);
		/* bail out if no suitable area was found */
		if (!(start + 1))
			return 0;
	}

	if (size < sizet)
		return 0;

	addr = round_down(start + size - sizet, align);
	e820_update_range(addr, sizet, E820_RAM, E820_RESERVED);
	printk(KERN_INFO "update e820 for early_reserve_e820\n");
	update_e820();

	return addr;
}
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_PAE
#  define MAX_ARCH_PFN		(1ULL<<(36-PAGE_SHIFT))
# else
#  define MAX_ARCH_PFN		(1ULL<<(32-PAGE_SHIFT))
# endif
#else /* CONFIG_X86_32 */
# define MAX_ARCH_PFN MAXMEM>>PAGE_SHIFT
#endif

/*
 * Last pfn which the user wants to use.
 */
unsigned long __initdata end_user_pfn = MAX_ARCH_PFN;
/*
 * Find the highest page frame number we have available
 */
unsigned long __init e820_end_of_ram(void)
{
	unsigned long last_pfn;
	unsigned long max_arch_pfn = MAX_ARCH_PFN;

	last_pfn = find_max_pfn_with_active_regions();

	if (last_pfn > max_arch_pfn)
		last_pfn = max_arch_pfn;
	if (last_pfn > end_user_pfn)
		last_pfn = end_user_pfn;

	printk(KERN_INFO "last_pfn = %#lx max_arch_pfn = %#lx\n",
			 last_pfn, max_arch_pfn);
	return last_pfn;
}
/*
 * Finds an active region in the address range from start_pfn to last_pfn and
 * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
 */
int __init e820_find_active_region(const struct e820entry *ei,
				  unsigned long start_pfn,
				  unsigned long last_pfn,
				  unsigned long *ei_startpfn,
				  unsigned long *ei_endpfn)
{
	u64 align = PAGE_SIZE;

	*ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT;
	*ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT;

	/* Skip map entries smaller than a page */
	if (*ei_startpfn >= *ei_endpfn)
		return 0;

	/* Skip if map is outside the node */
	if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
				    *ei_startpfn >= last_pfn)
		return 0;

	/* Check for overlaps */
	if (*ei_startpfn < start_pfn)
		*ei_startpfn = start_pfn;
	if (*ei_endpfn > last_pfn)
		*ei_endpfn = last_pfn;

	/* Obey end_user_pfn to save on memmap */
	if (*ei_startpfn >= end_user_pfn)
		return 0;
	if (*ei_endpfn > end_user_pfn)
		*ei_endpfn = end_user_pfn;

	return 1;
}
/* Walk the e820 map and register active regions within a node */
void __init e820_register_active_regions(int nid, unsigned long start_pfn,
					 unsigned long last_pfn)
{
	unsigned long ei_startpfn;
	unsigned long ei_endpfn;
	int i;

	for (i = 0; i < e820.nr_map; i++)
		if (e820_find_active_region(&e820.map[i],
					    start_pfn, last_pfn,
					    &ei_startpfn, &ei_endpfn))
			add_active_range(nid, ei_startpfn, ei_endpfn);
}
/*
 * Find the hole size (in bytes) in the memory range.
 * @start: starting address of the memory range to scan
 * @end: ending address of the memory range to scan
 */
u64 __init e820_hole_size(u64 start, u64 end)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long last_pfn = end >> PAGE_SHIFT;
	unsigned long ei_startpfn, ei_endpfn, ram = 0;
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		if (e820_find_active_region(&e820.map[i],
					    start_pfn, last_pfn,
					    &ei_startpfn, &ei_endpfn))
			ram += ei_endpfn - ei_startpfn;
	}
	return end - start - ((u64)ram << PAGE_SHIFT);
}
static void early_panic(char *msg)
{
	early_printk(msg);
	panic(msg);
}
1114 /* "mem=nopentium" disables the 4MB page tables. */
1115 static int __init
parse_memopt(char *p
)
1122 #ifdef CONFIG_X86_32
1123 if (!strcmp(p
, "nopentium")) {
1124 setup_clear_cpu_cap(X86_FEATURE_PSE
);
1129 mem_size
= memparse(p
, &p
);
1130 end_user_pfn
= mem_size
>>PAGE_SHIFT
;
1131 e820_update_range(mem_size
, ULLONG_MAX
- mem_size
,
1132 E820_RAM
, E820_RESERVED
);
1136 early_param("mem", parse_memopt
);
static int userdef __initdata;
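
/*
 * "memmap=exactmap" throws away the firmware-provided map (saving the
 * crash-dump size first when CONFIG_CRASH_DUMP); "memmap=nn[KMG]@ss[KMG]"
 * adds a RAM region, "#" an ACPI-data region, "$" a reserved region, and
 * a plain "memmap=nn[KMG]" limits memory like "mem=".
 */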
static int __init parse_memmap_opt(char *p)
{
	char *oldp;
	u64 start_at, mem_size;

	if (!strcmp(p, "exactmap")) {
#ifdef CONFIG_CRASH_DUMP
		/*
		 * If we are doing a crash dump, we still need to know
		 * the real mem size before the original memory map is
		 * reset.
		 */
		e820_register_active_regions(0, 0, -1UL);
		saved_max_pfn = e820_end_of_ram();
		remove_all_active_ranges();
#endif
		e820.nr_map = 0;
		userdef = 1;
		return 0;
	}

	oldp = p;
	mem_size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	userdef = 1;
	if (*p == '@') {
		start_at = memparse(p+1, &p);
		e820_add_region(start_at, mem_size, E820_RAM);
	} else if (*p == '#') {
		start_at = memparse(p+1, &p);
		e820_add_region(start_at, mem_size, E820_ACPI);
	} else if (*p == '$') {
		start_at = memparse(p+1, &p);
		e820_add_region(start_at, mem_size, E820_RESERVED);
	} else {
		end_user_pfn = (mem_size >> PAGE_SHIFT);
		e820_update_range(mem_size, ULLONG_MAX - mem_size,
					E820_RAM, E820_RESERVED);
	}
	return *p == '\0' ? 0 : -EINVAL;
}
early_param("memmap", parse_memmap_opt);
void __init finish_e820_parsing(void)
{
	if (userdef) {
		int nr = e820.nr_map;

		if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr) < 0)
			early_panic("Invalid user supplied memory map");
		e820.nr_map = nr;

		printk(KERN_INFO "user-defined physical RAM map:\n");
		e820_print_map("user");
	}
}
/*
 * Mark e820 reserved areas as busy for the resource manager.
 */
void __init e820_reserve_resources(void)
{
	int i;
	struct resource *res;
	u64 end;

	res = alloc_bootmem_low(sizeof(struct resource) * e820.nr_map);
	for (i = 0; i < e820.nr_map; i++) {
		switch (e820.map[i].type) {
		case E820_RESERVED_KERN:
		case E820_RAM:	res->name = "System RAM"; break;
		case E820_ACPI:	res->name = "ACPI Tables"; break;
		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
		default:	res->name = "reserved";
		}
		end = e820.map[i].addr + e820.map[i].size - 1;
#ifndef CONFIG_RESOURCES_64BIT
		if (end > 0x100000000ULL) {
			res++;
			continue;
		}
#endif
		res->start = e820.map[i].addr;
		res->end = end;

		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		insert_resource(&iomem_resource, res);
		res++;
	}
}
char *__init default_machine_specific_memory_setup(void)
{
	char *who = "BIOS-e820";
	int new_nr;
	/*
	 * Try to copy the BIOS-supplied E820-map.
	 *
	 * Otherwise fake a memory map; one section from 0k->640k,
	 * the next section from 1mb->appropriate_mem_k
	 */
	new_nr = boot_params.e820_entries;
	sanitize_e820_map(boot_params.e820_map,
			ARRAY_SIZE(boot_params.e820_map),
			&new_nr);
	boot_params.e820_entries = new_nr;
	if (copy_e820_map(boot_params.e820_map, boot_params.e820_entries) < 0) {
		u64 mem_size;

		/* compare results from other methods and take the greater */
		if (boot_params.alt_mem_k
		    < boot_params.screen_info.ext_mem_k) {
			mem_size = boot_params.screen_info.ext_mem_k;
			who = "BIOS-88";
		} else {
			mem_size = boot_params.alt_mem_k;
			who = "BIOS-e801";
		}

		e820.nr_map = 0;
		e820_add_region(0, LOWMEMSIZE(), E820_RAM);
		e820_add_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
	}

	/* In case someone cares... */
	return who;
}

char *__init __attribute__((weak)) machine_specific_memory_setup(void)
{
	return default_machine_specific_memory_setup();
}

/* Overridden in paravirt.c if CONFIG_PARAVIRT */
char * __init __attribute__((weak)) memory_setup(void)
{
	return machine_specific_memory_setup();
}

void __init setup_memory_map(void)
{
	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
	e820_print_map(memory_setup());
}
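
/*
 * Iterate over the E820_RAM ranges, clipped to max_pfn: fill in *addr
 * and *size for the first RAM entry at or after 'slot' and return the
 * slot index to pass in next, or -1 when no RAM range remains.
 */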
#ifdef CONFIG_X86_64
int __init arch_get_ram_range(int slot, u64 *addr, u64 *size)
{
	int i;

	if (slot < 0 || slot >= e820.nr_map)
		return -1;
	for (i = slot; i < e820.nr_map; i++) {
		if (e820.map[i].type != E820_RAM)
			continue;
		break;
	}
	if (i == e820.nr_map || e820.map[i].addr > (max_pfn << PAGE_SHIFT))
		return -1;
	*addr = e820.map[i].addr;
	*size = min_t(u64, e820.map[i].size + e820.map[i].addr,
		max_pfn << PAGE_SHIFT) - *addr;
	return i + 1;
}
#endif