#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/range.h>
10 /* Check for already reserved areas */
11 bool __init
memblock_x86_check_reserved_size(u64
*addrp
, u64
*sizep
, u64 align
)
13 struct memblock_region
*r
;
14 u64 addr
= *addrp
, last
;
20 for_each_memblock(reserved
, r
) {
21 if (last
> r
->base
&& addr
< r
->base
) {
22 size
= r
->base
- addr
;
26 if (last
> (r
->base
+ r
->size
) && addr
< (r
->base
+ r
->size
)) {
27 addr
= round_up(r
->base
+ r
->size
, align
);
32 if (last
<= (r
->base
+ r
->size
) && addr
>= r
->base
) {
45 * Find next free range after start, and size is returned in *sizep
47 u64 __init
memblock_x86_find_in_range_size(u64 start
, u64
*sizep
, u64 align
)
49 struct memblock_region
*r
;
51 for_each_memblock(memory
, r
) {
52 u64 ei_start
= r
->base
;
53 u64 ei_last
= ei_start
+ r
->size
;
56 addr
= round_up(ei_start
, align
);
58 addr
= round_up(start
, align
);
61 *sizep
= ei_last
- addr
;
62 while (memblock_x86_check_reserved_size(&addr
, sizep
, align
))
72 static __init
struct range
*find_range_array(int count
)
77 size
= sizeof(struct range
) * count
;
78 end
= memblock
.current_limit
;
80 mem
= memblock_find_in_range(0, end
, size
, sizeof(struct range
));
82 panic("can not find more space for range array");
85 * This range is tempoaray, so don't reserve it, it will not be
86 * overlapped because We will not alloccate new buffer before
90 memset(range
, 0, size
);
95 static void __init
memblock_x86_subtract_reserved(struct range
*range
, int az
)
97 u64 final_start
, final_end
;
98 struct memblock_region
*r
;
100 /* Take out region array itself at first*/
101 memblock_free_reserved_regions();
103 memblock_dbg("Subtract (%ld early reservations)\n", memblock
.reserved
.cnt
);
105 for_each_memblock(reserved
, r
) {
106 memblock_dbg(" [%010llx-%010llx]\n", (u64
)r
->base
, (u64
)r
->base
+ r
->size
- 1);
107 final_start
= PFN_DOWN(r
->base
);
108 final_end
= PFN_UP(r
->base
+ r
->size
);
109 if (final_start
>= final_end
)
111 subtract_range(range
, az
, final_start
, final_end
);
114 /* Put region array back ? */
115 memblock_reserve_reserved_regions();
118 static int __init
count_early_node_map(int nodeid
)
122 for_each_mem_pfn_range(i
, nodeid
, NULL
, NULL
, NULL
)
127 int __init
__get_free_all_memory_range(struct range
**rangep
, int nodeid
,
128 unsigned long start_pfn
, unsigned long end_pfn
)
134 count
= (memblock
.reserved
.cnt
+ count_early_node_map(nodeid
)) * 2;
136 range
= find_range_array(count
);
140 * Use early_node_map[] and memblock.reserved.region to get range array
143 nr_range
= add_from_early_node_map(range
, count
, nr_range
, nodeid
);
144 subtract_range(range
, count
, 0, start_pfn
);
145 subtract_range(range
, count
, end_pfn
, -1ULL);
147 memblock_x86_subtract_reserved(range
, count
);
148 nr_range
= clean_sort_range(range
, count
);
154 int __init
get_free_all_memory_range(struct range
**rangep
, int nodeid
)
156 unsigned long end_pfn
= -1UL;
159 end_pfn
= max_low_pfn
;
161 return __get_free_all_memory_range(rangep
, nodeid
, 0, end_pfn
);
164 static u64 __init
__memblock_x86_memory_in_range(u64 addr
, u64 limit
, bool get_free
)
169 u64 final_start
, final_end
;
171 struct memblock_region
*r
;
173 count
= (memblock
.reserved
.cnt
+ memblock
.memory
.cnt
) * 2;
175 range
= find_range_array(count
);
179 limit
= PFN_DOWN(limit
);
181 for_each_memblock(memory
, r
) {
182 final_start
= PFN_UP(r
->base
);
183 final_end
= PFN_DOWN(r
->base
+ r
->size
);
184 if (final_start
>= final_end
)
186 if (final_start
>= limit
|| final_end
<= addr
)
189 nr_range
= add_range(range
, count
, nr_range
, final_start
, final_end
);
191 subtract_range(range
, count
, 0, addr
);
192 subtract_range(range
, count
, limit
, -1ULL);
194 /* Subtract memblock.reserved.region in range ? */
196 goto sort_and_count_them
;
197 for_each_memblock(reserved
, r
) {
198 final_start
= PFN_DOWN(r
->base
);
199 final_end
= PFN_UP(r
->base
+ r
->size
);
200 if (final_start
>= final_end
)
202 if (final_start
>= limit
|| final_end
<= addr
)
205 subtract_range(range
, count
, final_start
, final_end
);
209 nr_range
= clean_sort_range(range
, count
);
212 for (i
= 0; i
< nr_range
; i
++)
213 free_size
+= range
[i
].end
- range
[i
].start
;
215 return free_size
<< PAGE_SHIFT
;
218 u64 __init
memblock_x86_free_memory_in_range(u64 addr
, u64 limit
)
220 return __memblock_x86_memory_in_range(addr
, limit
, true);
223 u64 __init
memblock_x86_memory_in_range(u64 addr
, u64 limit
)
225 return __memblock_x86_memory_in_range(addr
, limit
, false);
228 void __init
memblock_x86_reserve_range(u64 start
, u64 end
, char *name
)
233 if (WARN_ONCE(start
> end
, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start
, end
))
236 memblock_dbg(" memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start
, end
- 1, name
);
238 memblock_reserve(start
, end
- start
);
241 void __init
memblock_x86_free_range(u64 start
, u64 end
)
246 if (WARN_ONCE(start
> end
, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start
, end
))
249 memblock_dbg(" memblock_x86_free_range: [%#010llx-%#010llx]\n", start
, end
- 1);
251 memblock_free(start
, end
- start
);
255 * Finds an active region in the address range from start_pfn to last_pfn and
256 * returns its range in ei_startpfn and ei_endpfn for the memblock entry.
258 static int __init
memblock_x86_find_active_region(const struct memblock_region
*ei
,
259 unsigned long start_pfn
,
260 unsigned long last_pfn
,
261 unsigned long *ei_startpfn
,
262 unsigned long *ei_endpfn
)
264 u64 align
= PAGE_SIZE
;
266 *ei_startpfn
= round_up(ei
->base
, align
) >> PAGE_SHIFT
;
267 *ei_endpfn
= round_down(ei
->base
+ ei
->size
, align
) >> PAGE_SHIFT
;
269 /* Skip map entries smaller than a page */
270 if (*ei_startpfn
>= *ei_endpfn
)
273 /* Skip if map is outside the node */
274 if (*ei_endpfn
<= start_pfn
|| *ei_startpfn
>= last_pfn
)
277 /* Check for overlaps */
278 if (*ei_startpfn
< start_pfn
)
279 *ei_startpfn
= start_pfn
;
280 if (*ei_endpfn
> last_pfn
)
281 *ei_endpfn
= last_pfn
;
287 * Find the hole size (in bytes) in the memory range.
288 * @start: starting address of the memory range to scan
289 * @end: ending address of the memory range to scan
291 u64 __init
memblock_x86_hole_size(u64 start
, u64 end
)
293 unsigned long start_pfn
= start
>> PAGE_SHIFT
;
294 unsigned long last_pfn
= end
>> PAGE_SHIFT
;
295 unsigned long ei_startpfn
, ei_endpfn
, ram
= 0;
296 struct memblock_region
*r
;
298 for_each_memblock(memory
, r
)
299 if (memblock_x86_find_active_region(r
, start_pfn
, last_pfn
,
300 &ei_startpfn
, &ei_endpfn
))
301 ram
+= ei_endpfn
- ei_startpfn
;
303 return end
- start
- ((u64
)ram
<< PAGE_SHIFT
);