/*
 * early_res, could be used to replace bootmem
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/early_res.h>
/*
 * Early reserved memory areas.
 */
/*
 * need to make sure this one is big enough before
 * find_fw_memmap_area could be used
 */
18 #define MAX_EARLY_RES_X 32
25 static struct early_res early_res_x
[MAX_EARLY_RES_X
] __initdata
;
27 static int max_early_res __initdata
= MAX_EARLY_RES_X
;
28 static struct early_res
*early_res __initdata
= &early_res_x
[0];
29 static int early_res_count __initdata
;
31 static int __init
find_overlapped_early(u64 start
, u64 end
)
36 for (i
= 0; i
< max_early_res
&& early_res
[i
].end
; i
++) {
38 if (end
> r
->start
&& start
< r
->end
)
46 * Drop the i-th range from the early reservation map,
47 * by copying any higher ranges down one over it, and
48 * clearing what had been the last slot.
50 static void __init
drop_range(int i
)
54 for (j
= i
+ 1; j
< max_early_res
&& early_res
[j
].end
; j
++)
57 memmove(&early_res
[i
], &early_res
[i
+ 1],
58 (j
- 1 - i
) * sizeof(struct early_res
));
60 early_res
[j
- 1].end
= 0;
64 static void __init
drop_range_partial(int i
, u64 start
, u64 end
)
66 u64 common_start
, common_end
;
67 u64 old_start
, old_end
;
69 old_start
= early_res
[i
].start
;
70 old_end
= early_res
[i
].end
;
71 common_start
= max(old_start
, start
);
72 common_end
= min(old_end
, end
);
75 if (common_start
>= common_end
)
78 if (old_start
< common_start
) {
79 /* make head segment */
80 early_res
[i
].end
= common_start
;
81 if (old_end
> common_end
) {
82 /* add another for left over on tail */
83 reserve_early_without_check(common_end
, old_end
,
88 if (old_end
> common_end
) {
89 /* reuse the entry for tail left */
90 early_res
[i
].start
= common_end
;
99 * Split any existing ranges that:
100 * 1) are marked 'overlap_ok', and
101 * 2) overlap with the stated range [start, end)
102 * into whatever portion (if any) of the existing range is entirely
103 * below or entirely above the stated range. Drop the portion
104 * of the existing range that overlaps with the stated range,
105 * which will allow the caller of this routine to then add that
106 * stated range without conflicting with any existing range.
108 static void __init
drop_overlaps_that_are_ok(u64 start
, u64 end
)
112 u64 lower_start
, lower_end
;
113 u64 upper_start
, upper_end
;
116 for (i
= 0; i
< max_early_res
&& early_res
[i
].end
; i
++) {
119 /* Continue past non-overlapping ranges */
120 if (end
<= r
->start
|| start
>= r
->end
)
124 * Leave non-ok overlaps as is; let caller
125 * panic "Overlapping early reservations"
126 * when it hits this overlap.
132 * We have an ok overlap. We will drop it from the early
133 * reservation map, and add back in any non-overlapping
134 * portions (lower or upper) as separate, overlap_ok,
135 * non-overlapping ranges.
138 /* 1. Note any non-overlapping (lower or upper) ranges. */
139 strncpy(name
, r
->name
, sizeof(name
) - 1);
141 lower_start
= lower_end
= 0;
142 upper_start
= upper_end
= 0;
143 if (r
->start
< start
) {
144 lower_start
= r
->start
;
152 /* 2. Drop the original ok overlapping range */
155 i
--; /* resume for-loop on copied down entry */
157 /* 3. Add back in any non-overlapping ranges. */
159 reserve_early_overlap_ok(lower_start
, lower_end
, name
);
161 reserve_early_overlap_ok(upper_start
, upper_end
, name
);
165 static void __init
__reserve_early(u64 start
, u64 end
, char *name
,
171 i
= find_overlapped_early(start
, end
);
172 if (i
>= max_early_res
)
173 panic("Too many early reservations");
176 panic("Overlapping early reservations "
177 "%llx-%llx %s to %llx-%llx %s\n",
178 start
, end
- 1, name
? name
: "", r
->start
,
179 r
->end
- 1, r
->name
);
182 r
->overlap_ok
= overlap_ok
;
184 strncpy(r
->name
, name
, sizeof(r
->name
) - 1);
189 * A few early reservtations come here.
191 * The 'overlap_ok' in the name of this routine does -not- mean it
192 * is ok for these reservations to overlap an earlier reservation.
193 * Rather it means that it is ok for subsequent reservations to
196 * Use this entry point to reserve early ranges when you are doing
197 * so out of "Paranoia", reserving perhaps more memory than you need,
198 * just in case, and don't mind a subsequent overlapping reservation
199 * that is known to be needed.
201 * The drop_overlaps_that_are_ok() call here isn't really needed.
202 * It would be needed if we had two colliding 'overlap_ok'
203 * reservations, so that the second such would not panic on the
204 * overlap with the first. We don't have any such as of this
205 * writing, but might as well tolerate such if it happens in
208 void __init
reserve_early_overlap_ok(u64 start
, u64 end
, char *name
)
210 drop_overlaps_that_are_ok(start
, end
);
211 __reserve_early(start
, end
, name
, 1);
214 static void __init
__check_and_double_early_res(u64 ex_start
, u64 ex_end
)
216 u64 start
, end
, size
, mem
;
217 struct early_res
*new;
219 /* do we have enough slots left ? */
220 if ((max_early_res
- early_res_count
) > max(max_early_res
/8, 2))
225 size
= sizeof(struct early_res
) * max_early_res
* 2;
226 if (early_res
== early_res_x
)
229 start
= early_res
[0].end
;
231 if (start
+ size
< end
)
232 mem
= find_fw_memmap_area(start
, end
, size
,
233 sizeof(struct early_res
));
236 end
= get_max_mapped();
237 if (start
+ size
< end
)
238 mem
= find_fw_memmap_area(start
, end
, size
,
239 sizeof(struct early_res
));
242 panic("can not find more space for early_res array");
245 /* save the first one for own */
247 new[0].end
= mem
+ size
;
248 new[0].overlap_ok
= 0;
249 /* copy old to new */
250 if (early_res
== early_res_x
) {
251 memcpy(&new[1], &early_res
[0],
252 sizeof(struct early_res
) * max_early_res
);
253 memset(&new[max_early_res
+1], 0,
254 sizeof(struct early_res
) * (max_early_res
- 1));
257 memcpy(&new[1], &early_res
[1],
258 sizeof(struct early_res
) * (max_early_res
- 1));
259 memset(&new[max_early_res
], 0,
260 sizeof(struct early_res
) * max_early_res
);
262 memset(&early_res
[0], 0, sizeof(struct early_res
) * max_early_res
);
265 printk(KERN_DEBUG
"early_res array is doubled to %d at [%llx - %llx]\n",
266 max_early_res
, mem
, mem
+ size
- 1);
270 * Most early reservations come here.
272 * We first have drop_overlaps_that_are_ok() drop any pre-existing
273 * 'overlap_ok' ranges, so that we can then reserve this memory
274 * range without risk of panic'ing on an overlapping overlap_ok
277 void __init
reserve_early(u64 start
, u64 end
, char *name
)
282 __check_and_double_early_res(start
, end
);
284 drop_overlaps_that_are_ok(start
, end
);
285 __reserve_early(start
, end
, name
, 0);
288 void __init
reserve_early_without_check(u64 start
, u64 end
, char *name
)
295 __check_and_double_early_res(start
, end
);
297 r
= &early_res
[early_res_count
];
303 strncpy(r
->name
, name
, sizeof(r
->name
) - 1);
307 void __init
free_early(u64 start
, u64 end
)
312 i
= find_overlapped_early(start
, end
);
314 if (i
>= max_early_res
|| r
->end
!= end
|| r
->start
!= start
)
315 panic("free_early on not reserved area: %llx-%llx!",
321 void __init
free_early_partial(u64 start
, u64 end
)
327 i
= find_overlapped_early(start
, end
);
328 if (i
>= max_early_res
)
333 if (r
->end
>= end
&& r
->start
<= start
) {
334 drop_range_partial(i
, start
, end
);
338 drop_range_partial(i
, start
, end
);
342 #ifdef CONFIG_NO_BOOTMEM
343 static void __init
subtract_early_res(struct range
*range
, int az
)
346 u64 final_start
, final_end
;
350 for (i
= 0; i
< max_early_res
&& early_res
[i
].end
; i
++)
353 /* need to skip first one ?*/
354 if (early_res
!= early_res_x
)
357 #define DEBUG_PRINT_EARLY_RES 1
359 #if DEBUG_PRINT_EARLY_RES
360 printk(KERN_INFO
"Subtract (%d early reservations)\n", count
);
362 for (i
= idx
; i
< count
; i
++) {
363 struct early_res
*r
= &early_res
[i
];
364 #if DEBUG_PRINT_EARLY_RES
365 printk(KERN_INFO
" #%d [%010llx - %010llx] %15s\n", i
,
366 r
->start
, r
->end
, r
->name
);
368 final_start
= PFN_DOWN(r
->start
);
369 final_end
= PFN_UP(r
->end
);
370 if (final_start
>= final_end
)
372 subtract_range(range
, az
, final_start
, final_end
);
377 int __init
get_free_all_memory_range(struct range
**rangep
, int nodeid
)
387 for (i
= 0; i
< max_early_res
&& early_res
[i
].end
; i
++)
392 size
= sizeof(struct range
) * count
;
393 end
= get_max_mapped();
395 if (end
> (MAX_DMA32_PFN
<< PAGE_SHIFT
))
396 start
= MAX_DMA32_PFN
<< PAGE_SHIFT
;
398 mem
= find_fw_memmap_area(start
, end
, size
, sizeof(struct range
));
400 panic("can not find more space for range free");
403 /* use early_node_map[] and early_res to get range array at first */
404 memset(range
, 0, size
);
407 /* need to go over early_node_map to find out good range for node */
408 nr_range
= add_from_early_node_map(range
, count
, nr_range
, nodeid
);
410 subtract_range(range
, count
, max_low_pfn
, -1ULL);
412 subtract_early_res(range
, count
);
413 nr_range
= clean_sort_range(range
, count
);
415 /* need to clear it ? */
416 if (nodeid
== MAX_NUMNODES
) {
417 memset(&early_res
[0], 0,
418 sizeof(struct early_res
) * max_early_res
);
427 void __init
early_res_to_bootmem(u64 start
, u64 end
)
430 u64 final_start
, final_end
;
434 for (i
= 0; i
< max_early_res
&& early_res
[i
].end
; i
++)
437 /* need to skip first one ?*/
438 if (early_res
!= early_res_x
)
441 printk(KERN_INFO
"(%d/%d early reservations) ==> bootmem [%010llx - %010llx]\n",
442 count
- idx
, max_early_res
, start
, end
);
443 for (i
= idx
; i
< count
; i
++) {
444 struct early_res
*r
= &early_res
[i
];
445 printk(KERN_INFO
" #%d [%010llx - %010llx] %16s", i
,
446 r
->start
, r
->end
, r
->name
);
447 final_start
= max(start
, r
->start
);
448 final_end
= min(end
, r
->end
);
449 if (final_start
>= final_end
) {
450 printk(KERN_CONT
"\n");
453 printk(KERN_CONT
" ==> [%010llx - %010llx]\n",
454 final_start
, final_end
);
455 reserve_bootmem_generic(final_start
, final_end
- final_start
,
459 memset(&early_res
[0], 0, sizeof(struct early_res
) * max_early_res
);
466 /* Check for already reserved areas */
467 static inline int __init
bad_addr(u64
*addrp
, u64 size
, u64 align
)
474 i
= find_overlapped_early(addr
, addr
+ size
);
476 if (i
< max_early_res
&& r
->end
) {
477 *addrp
= addr
= round_up(r
->end
, align
);
484 /* Check for already reserved areas */
485 static inline int __init
bad_addr_size(u64
*addrp
, u64
*sizep
, u64 align
)
488 u64 addr
= *addrp
, last
;
493 for (i
= 0; i
< max_early_res
&& early_res
[i
].end
; i
++) {
494 struct early_res
*r
= &early_res
[i
];
495 if (last
> r
->start
&& addr
< r
->start
) {
496 size
= r
->start
- addr
;
500 if (last
> r
->end
&& addr
< r
->end
) {
501 addr
= round_up(r
->end
, align
);
506 if (last
<= r
->end
&& addr
>= r
->start
) {
519 * Find a free area with specified alignment in a specific range.
520 * only with the area.between start to end is active range from early_node_map
521 * so they are good as RAM
523 u64 __init
find_early_area(u64 ei_start
, u64 ei_last
, u64 start
, u64 end
,
528 addr
= round_up(ei_start
, align
);
530 addr
= round_up(start
, align
);
533 while (bad_addr(&addr
, size
, align
) && addr
+size
<= ei_last
)
547 u64 __init
find_early_area_size(u64 ei_start
, u64 ei_last
, u64 start
,
548 u64
*sizep
, u64 align
)
552 addr
= round_up(ei_start
, align
);
554 addr
= round_up(start
, align
);
557 *sizep
= ei_last
- addr
;
558 while (bad_addr_size(&addr
, sizep
, align
) && addr
+ *sizep
<= ei_last
)
560 last
= addr
+ *sizep
;