2 * early_res, could be used to replace bootmem
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/init.h>
7 #include <linux/bootmem.h>
10 #include <asm/early_res.h>
13 * Early reserved memory areas.
 * need to make sure this one is big enough before
17 * find_fw_memmap_area could be used
19 #define MAX_EARLY_RES_X 32
/* Static bootstrap array of early reservations; a slot is in use while .end != 0. */
static struct early_res early_res_x[MAX_EARLY_RES_X] __initdata;
/* Current capacity of the reservation map; doubled on demand. */
static int max_early_res __initdata = MAX_EARLY_RES_X;
/* Points at the active map: early_res_x until the map is grown into found memory. */
static struct early_res *early_res __initdata = &early_res_x[0];
/* Number of slots currently reserved. */
static int early_res_count __initdata;
32 static int __init
find_overlapped_early(u64 start
, u64 end
)
37 for (i
= 0; i
< max_early_res
&& early_res
[i
].end
; i
++) {
39 if (end
> r
->start
&& start
< r
->end
)
47 * Drop the i-th range from the early reservation map,
48 * by copying any higher ranges down one over it, and
49 * clearing what had been the last slot.
51 static void __init
drop_range(int i
)
55 for (j
= i
+ 1; j
< max_early_res
&& early_res
[j
].end
; j
++)
58 memmove(&early_res
[i
], &early_res
[i
+ 1],
59 (j
- 1 - i
) * sizeof(struct early_res
));
61 early_res
[j
- 1].end
= 0;
66 * Split any existing ranges that:
67 * 1) are marked 'overlap_ok', and
68 * 2) overlap with the stated range [start, end)
69 * into whatever portion (if any) of the existing range is entirely
70 * below or entirely above the stated range. Drop the portion
71 * of the existing range that overlaps with the stated range,
72 * which will allow the caller of this routine to then add that
73 * stated range without conflicting with any existing range.
75 static void __init
drop_overlaps_that_are_ok(u64 start
, u64 end
)
79 u64 lower_start
, lower_end
;
80 u64 upper_start
, upper_end
;
83 for (i
= 0; i
< max_early_res
&& early_res
[i
].end
; i
++) {
86 /* Continue past non-overlapping ranges */
87 if (end
<= r
->start
|| start
>= r
->end
)
91 * Leave non-ok overlaps as is; let caller
92 * panic "Overlapping early reservations"
93 * when it hits this overlap.
99 * We have an ok overlap. We will drop it from the early
100 * reservation map, and add back in any non-overlapping
101 * portions (lower or upper) as separate, overlap_ok,
102 * non-overlapping ranges.
105 /* 1. Note any non-overlapping (lower or upper) ranges. */
106 strncpy(name
, r
->name
, sizeof(name
) - 1);
108 lower_start
= lower_end
= 0;
109 upper_start
= upper_end
= 0;
110 if (r
->start
< start
) {
111 lower_start
= r
->start
;
119 /* 2. Drop the original ok overlapping range */
122 i
--; /* resume for-loop on copied down entry */
124 /* 3. Add back in any non-overlapping ranges. */
126 reserve_early_overlap_ok(lower_start
, lower_end
, name
);
128 reserve_early_overlap_ok(upper_start
, upper_end
, name
);
132 static void __init
__reserve_early(u64 start
, u64 end
, char *name
,
138 i
= find_overlapped_early(start
, end
);
139 if (i
>= max_early_res
)
140 panic("Too many early reservations");
143 panic("Overlapping early reservations "
144 "%llx-%llx %s to %llx-%llx %s\n",
145 start
, end
- 1, name
? name
: "", r
->start
,
146 r
->end
- 1, r
->name
);
149 r
->overlap_ok
= overlap_ok
;
151 strncpy(r
->name
, name
, sizeof(r
->name
) - 1);
 * A few early reservations come here.
158 * The 'overlap_ok' in the name of this routine does -not- mean it
159 * is ok for these reservations to overlap an earlier reservation.
160 * Rather it means that it is ok for subsequent reservations to
163 * Use this entry point to reserve early ranges when you are doing
164 * so out of "Paranoia", reserving perhaps more memory than you need,
165 * just in case, and don't mind a subsequent overlapping reservation
166 * that is known to be needed.
168 * The drop_overlaps_that_are_ok() call here isn't really needed.
169 * It would be needed if we had two colliding 'overlap_ok'
170 * reservations, so that the second such would not panic on the
171 * overlap with the first. We don't have any such as of this
172 * writing, but might as well tolerate such if it happens in
175 void __init
reserve_early_overlap_ok(u64 start
, u64 end
, char *name
)
177 drop_overlaps_that_are_ok(start
, end
);
178 __reserve_early(start
, end
, name
, 1);
181 u64 __init __weak
find_fw_memmap_area(u64 start
, u64 end
, u64 size
, u64 align
)
183 panic("should have find_fw_memmap_area defined with arch");
188 static void __init
__check_and_double_early_res(u64 ex_start
, u64 ex_end
)
190 u64 start
, end
, size
, mem
;
191 struct early_res
*new;
193 /* do we have enough slots left ? */
194 if ((max_early_res
- early_res_count
) > max(max_early_res
/8, 2))
199 size
= sizeof(struct early_res
) * max_early_res
* 2;
200 if (early_res
== early_res_x
)
203 start
= early_res
[0].end
;
205 if (start
+ size
< end
)
206 mem
= find_fw_memmap_area(start
, end
, size
,
207 sizeof(struct early_res
));
210 end
= max_pfn_mapped
<< PAGE_SHIFT
;
211 if (start
+ size
< end
)
212 mem
= find_fw_memmap_area(start
, end
, size
,
213 sizeof(struct early_res
));
216 panic("can not find more space for early_res array");
219 /* save the first one for own */
221 new[0].end
= mem
+ size
;
222 new[0].overlap_ok
= 0;
223 /* copy old to new */
224 if (early_res
== early_res_x
) {
225 memcpy(&new[1], &early_res
[0],
226 sizeof(struct early_res
) * max_early_res
);
227 memset(&new[max_early_res
+1], 0,
228 sizeof(struct early_res
) * (max_early_res
- 1));
231 memcpy(&new[1], &early_res
[1],
232 sizeof(struct early_res
) * (max_early_res
- 1));
233 memset(&new[max_early_res
], 0,
234 sizeof(struct early_res
) * max_early_res
);
236 memset(&early_res
[0], 0, sizeof(struct early_res
) * max_early_res
);
239 printk(KERN_DEBUG
"early_res array is doubled to %d at [%llx - %llx]\n",
240 max_early_res
, mem
, mem
+ size
- 1);
244 * Most early reservations come here.
246 * We first have drop_overlaps_that_are_ok() drop any pre-existing
247 * 'overlap_ok' ranges, so that we can then reserve this memory
248 * range without risk of panic'ing on an overlapping overlap_ok
251 void __init
reserve_early(u64 start
, u64 end
, char *name
)
256 __check_and_double_early_res(start
, end
);
258 drop_overlaps_that_are_ok(start
, end
);
259 __reserve_early(start
, end
, name
, 0);
262 void __init
reserve_early_without_check(u64 start
, u64 end
, char *name
)
269 __check_and_double_early_res(start
, end
);
271 r
= &early_res
[early_res_count
];
277 strncpy(r
->name
, name
, sizeof(r
->name
) - 1);
281 void __init
free_early(u64 start
, u64 end
)
286 i
= find_overlapped_early(start
, end
);
288 if (i
>= max_early_res
|| r
->end
!= end
|| r
->start
!= start
)
289 panic("free_early on not reserved area: %llx-%llx!",
295 #ifdef CONFIG_NO_BOOTMEM
296 static void __init
subtract_early_res(struct range
*range
, int az
)
299 u64 final_start
, final_end
;
303 for (i
= 0; i
< max_early_res
&& early_res
[i
].end
; i
++)
306 /* need to skip first one ?*/
307 if (early_res
!= early_res_x
)
310 #define DEBUG_PRINT_EARLY_RES 1
312 #if DEBUG_PRINT_EARLY_RES
313 printk(KERN_INFO
"Subtract (%d early reservations)\n", count
);
315 for (i
= idx
; i
< count
; i
++) {
316 struct early_res
*r
= &early_res
[i
];
317 #if DEBUG_PRINT_EARLY_RES
318 printk(KERN_INFO
" #%d [%010llx - %010llx] %15s\n", i
,
319 r
->start
, r
->end
, r
->name
);
321 final_start
= PFN_DOWN(r
->start
);
322 final_end
= PFN_UP(r
->end
);
323 if (final_start
>= final_end
)
325 subtract_range(range
, az
, final_start
, final_end
);
330 int __init
get_free_all_memory_range(struct range
**rangep
, int nodeid
)
340 for (i
= 0; i
< max_early_res
&& early_res
[i
].end
; i
++)
345 size
= sizeof(struct range
) * count
;
347 if (max_pfn_mapped
> MAX_DMA32_PFN
)
348 start
= MAX_DMA32_PFN
<< PAGE_SHIFT
;
350 end
= max_pfn_mapped
<< PAGE_SHIFT
;
351 mem
= find_fw_memmap_area(start
, end
, size
, sizeof(struct range
));
353 panic("can not find more space for range free");
356 /* use early_node_map[] and early_res to get range array at first */
357 memset(range
, 0, size
);
360 /* need to go over early_node_map to find out good range for node */
361 nr_range
= add_from_early_node_map(range
, count
, nr_range
, nodeid
);
363 subtract_range(range
, count
, max_low_pfn
, -1ULL);
365 subtract_early_res(range
, count
);
366 nr_range
= clean_sort_range(range
, count
);
368 /* need to clear it ? */
369 if (nodeid
== MAX_NUMNODES
) {
370 memset(&early_res
[0], 0,
371 sizeof(struct early_res
) * max_early_res
);
380 void __init
early_res_to_bootmem(u64 start
, u64 end
)
383 u64 final_start
, final_end
;
387 for (i
= 0; i
< max_early_res
&& early_res
[i
].end
; i
++)
390 /* need to skip first one ?*/
391 if (early_res
!= early_res_x
)
394 printk(KERN_INFO
"(%d/%d early reservations) ==> bootmem [%010llx - %010llx]\n",
395 count
- idx
, max_early_res
, start
, end
);
396 for (i
= idx
; i
< count
; i
++) {
397 struct early_res
*r
= &early_res
[i
];
398 printk(KERN_INFO
" #%d [%010llx - %010llx] %16s", i
,
399 r
->start
, r
->end
, r
->name
);
400 final_start
= max(start
, r
->start
);
401 final_end
= min(end
, r
->end
);
402 if (final_start
>= final_end
) {
403 printk(KERN_CONT
"\n");
406 printk(KERN_CONT
" ==> [%010llx - %010llx]\n",
407 final_start
, final_end
);
408 reserve_bootmem_generic(final_start
, final_end
- final_start
,
412 memset(&early_res
[0], 0, sizeof(struct early_res
) * max_early_res
);
419 /* Check for already reserved areas */
420 static inline int __init
bad_addr(u64
*addrp
, u64 size
, u64 align
)
427 i
= find_overlapped_early(addr
, addr
+ size
);
429 if (i
< max_early_res
&& r
->end
) {
430 *addrp
= addr
= round_up(r
->end
, align
);
437 /* Check for already reserved areas */
438 static inline int __init
bad_addr_size(u64
*addrp
, u64
*sizep
, u64 align
)
441 u64 addr
= *addrp
, last
;
446 for (i
= 0; i
< max_early_res
&& early_res
[i
].end
; i
++) {
447 struct early_res
*r
= &early_res
[i
];
448 if (last
> r
->start
&& addr
< r
->start
) {
449 size
= r
->start
- addr
;
453 if (last
> r
->end
&& addr
< r
->end
) {
454 addr
= round_up(r
->end
, align
);
459 if (last
<= r
->end
&& addr
>= r
->start
) {
472 * Find a free area with specified alignment in a specific range.
 * Only the area between start and end is an active range from early_node_map,
474 * so they are good as RAM
476 u64 __init
find_early_area(u64 ei_start
, u64 ei_last
, u64 start
, u64 end
,
481 addr
= round_up(ei_start
, align
);
483 addr
= round_up(start
, align
);
486 while (bad_addr(&addr
, size
, align
) && addr
+size
<= ei_last
)
500 u64 __init
find_early_area_size(u64 ei_start
, u64 ei_last
, u64 start
,
501 u64
*sizep
, u64 align
)
505 addr
= round_up(ei_start
, align
);
507 addr
= round_up(start
, align
);
510 *sizep
= ei_last
- addr
;
511 while (bad_addr_size(&addr
, sizep
, align
) && addr
+ *sizep
<= ei_last
)
513 last
= addr
+ *sizep
;