#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/range.h>
/* Check for already reserved areas */
bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
{
        struct memblock_region *r;
        u64 addr = *addrp, last;
        u64 size = *sizep;
        bool changed = false;

again:
        last = addr + size;
        for_each_memblock(reserved, r) {
                if (last > r->base && addr < r->base) {
                        /* Tail of the candidate overlaps: clip the size */
                        size = r->base - addr;
                        changed = true;
                        goto again;
                }
                if (last > (r->base + r->size) && addr < (r->base + r->size)) {
                        /* Head of the candidate overlaps: move past the reservation */
                        addr = round_up(r->base + r->size, align);
                        size = last - addr;
                        changed = true;
                        goto again;
                }
                if (last <= (r->base + r->size) && addr >= r->base) {
                        /* Candidate lies entirely inside a reservation */
                        *sizep = 0;
                        return false;
                }
        }
        if (changed) {
                *addrp = addr;
                *sizep = size;
        }
        return changed;
}
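/*
 * Note: memblock_x86_check_reserved_size() returns true for as long as it
 * had to clip or move the candidate range, so callers are expected to
 * invoke it in a loop (as memblock_x86_find_in_range_size() below does)
 * until the range stops changing.
 */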
/*
 * Find the next free range after start; its size is returned in *sizep.
 */
u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
{
        struct memblock_region *r;

        for_each_memblock(memory, r) {
                u64 ei_start = r->base;
                u64 ei_last = ei_start + r->size;
                u64 addr;

                addr = round_up(ei_start, align);
                if (addr < start)
                        addr = round_up(start, align);
                if (addr >= ei_last)
                        continue;
                *sizep = ei_last - addr;
                while (memblock_x86_check_reserved_size(&addr, sizep, align))
                        ;

                if (*sizep)
                        return addr;
        }

        return MEMBLOCK_ERROR;
}
static __init struct range *find_range_array(int count)
{
        u64 end, size, mem;
        struct range *range;

        size = sizeof(struct range) * count;
        end = memblock.current_limit;

        mem = memblock_find_in_range(0, end, size, sizeof(struct range));
        if (mem == MEMBLOCK_ERROR)
                panic("can not find more space for range array");

        /*
         * This range is temporary, so don't reserve it: it will not be
         * overlapped because we will not allocate a new buffer before
         * we discard this one.
         */
        range = __va(mem);
        memset(range, 0, size);

        return range;
}
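/*
 * Design note: the scratch array above lives in memory that memblock
 * still considers free. That is safe only because this boot-time code is
 * single-threaded and performs no other allocation while the array is in
 * use, which is exactly what the comment in find_range_array() relies on.
 */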
static void __init memblock_x86_subtract_reserved(struct range *range, int az)
{
        u64 final_start, final_end;
        struct memblock_region *r;

        /* Take out the region array itself first */
        memblock_free_reserved_regions();

        memblock_dbg("Subtract (%ld early reservations)\n", memblock.reserved.cnt);

        for_each_memblock(reserved, r) {
                memblock_dbg("  [%010llx-%010llx]\n",
                             (u64)r->base, (u64)r->base + r->size - 1);
                final_start = PFN_DOWN(r->base);
                final_end = PFN_UP(r->base + r->size);
                if (final_start >= final_end)
                        continue;
                subtract_range(range, az, final_start, final_end);
        }

        /* Put the region array back */
        memblock_reserve_reserved_regions();
}
struct count_data {
        int nr;
};

static int __init count_work_fn(unsigned long start_pfn,
                                unsigned long end_pfn, void *datax)
{
        struct count_data *data = datax;

        data->nr++;
        return 0;
}

static int __init count_early_node_map(int nodeid)
{
        struct count_data data;

        data.nr = 0;
        work_with_active_regions(nodeid, count_work_fn, &data);

        return data.nr;
}
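/*
 * count_early_node_map() only needs the number of active ranges on the
 * node, so the callback ignores start_pfn/end_pfn and simply counts how
 * many times work_with_active_regions() invokes it.
 */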
int __init __get_free_all_memory_range(struct range **rangep, int nodeid,
                         unsigned long start_pfn, unsigned long end_pfn)
{
        int count;
        struct range *range;
        int nr_range;

        count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2;

        range = find_range_array(count);
        nr_range = 0;

        /*
         * Use early_node_map[] and memblock.reserved.region to get the
         * range array first, then clip it to [start_pfn, end_pfn).
         */
        nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
        subtract_range(range, count, 0, start_pfn);
        subtract_range(range, count, end_pfn, -1ULL);

        memblock_x86_subtract_reserved(range, count);
        nr_range = clean_sort_range(range, count);

        *rangep = range;
        return nr_range;
}
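/*
 * The "* 2" in the count estimate above is a worst-case bound: every
 * subtracted reservation can split one existing range into two, so the
 * array is sized for twice the number of source entries.
 */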
int __init get_free_all_memory_range(struct range **rangep, int nodeid)
{
        unsigned long end_pfn = -1UL;

#ifdef CONFIG_X86_32
        end_pfn = max_low_pfn;
#endif
        return __get_free_all_memory_range(rangep, nodeid, 0, end_pfn);
}
static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free)
{
        int i, count;
        struct range *range;
        int nr_range;
        u64 final_start, final_end;
        u64 free_size;
        struct memblock_region *r;

        count = (memblock.reserved.cnt + memblock.memory.cnt) * 2;

        range = find_range_array(count);
        nr_range = 0;

        addr = PFN_UP(addr);
        limit = PFN_DOWN(limit);

        for_each_memblock(memory, r) {
                final_start = PFN_UP(r->base);
                final_end = PFN_DOWN(r->base + r->size);
                if (final_start >= final_end)
                        continue;
                if (final_start >= limit || final_end <= addr)
                        continue;

                nr_range = add_range(range, count, nr_range,
                                     final_start, final_end);
        }
        subtract_range(range, count, 0, addr);
        subtract_range(range, count, limit, -1ULL);

        /* Subtract memblock.reserved.region in range? */
        if (!get_free)
                goto sort_and_count_them;
        for_each_memblock(reserved, r) {
                final_start = PFN_DOWN(r->base);
                final_end = PFN_UP(r->base + r->size);
                if (final_start >= final_end)
                        continue;
                if (final_start >= limit || final_end <= addr)
                        continue;

                subtract_range(range, count, final_start, final_end);
        }

sort_and_count_them:
        nr_range = clean_sort_range(range, count);

        free_size = 0;
        for (i = 0; i < nr_range; i++)
                free_size += range[i].end - range[i].start;

        return free_size << PAGE_SHIFT;
}
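/*
 * Rounding direction matters above: memory regions are shrunk to whole
 * pages (PFN_UP on the base, PFN_DOWN on the end) while reservations are
 * grown to whole pages (PFN_DOWN on the base, PFN_UP on the end), so a
 * partially usable page is never counted as free.
 */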
u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit)
{
        return __memblock_x86_memory_in_range(addr, limit, true);
}
u64 __init memblock_x86_memory_in_range(u64 addr, u64 limit)
{
        return __memblock_x86_memory_in_range(addr, limit, false);
}
void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
{
        if (start == end)
                return;

        if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start, end))
                return;

        memblock_dbg("    memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start, end - 1, name);

        memblock_reserve(start, end - start);
}
void __init memblock_x86_free_range(u64 start, u64 end)
{
        if (start == end)
                return;

        if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start, end))
                return;

        memblock_dbg("       memblock_x86_free_range: [%#010llx-%#010llx]\n", start, end - 1);

        memblock_free(start, end - start);
}
/*
 * Need to call this function after memblock_x86_register_active_regions,
 * so early_node_map[] is filled already.
 */
u64 __init memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align)
{
        u64 addr;

        addr = find_memory_core_early(nid, size, align, start, end);
        if (addr != MEMBLOCK_ERROR)
                return addr;

        /* Fallback: start and end should already be within the node's range */
        return memblock_find_in_range(start, end, size, align);
}
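/*
 * find_memory_core_early() searches only the node's active ranges; the
 * memblock_find_in_range() fallback ignores the node entirely. Per the
 * comment above, that is tolerable only because callers pass a
 * [start, end) window already limited to the node.
 */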
/*
 * Finds an active region in the address range from start_pfn to last_pfn and
 * returns its range in ei_startpfn and ei_endpfn for the memblock entry.
 */
static int __init memblock_x86_find_active_region(const struct memblock_region *ei,
                                  unsigned long start_pfn,
                                  unsigned long last_pfn,
                                  unsigned long *ei_startpfn,
                                  unsigned long *ei_endpfn)
{
        u64 align = PAGE_SIZE;

        *ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
        *ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;

        /* Skip map entries smaller than a page */
        if (*ei_startpfn >= *ei_endpfn)
                return 0;

        /* Skip if map is outside the node */
        if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
                return 0;

        /* Check for overlaps and clamp to the node's window */
        if (*ei_startpfn < start_pfn)
                *ei_startpfn = start_pfn;
        if (*ei_endpfn > last_pfn)
                *ei_endpfn = last_pfn;

        return 1;
}
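/*
 * The two callers below use this helper for different purposes:
 * memblock_x86_register_active_regions() feeds the clamped ranges into
 * add_active_range(), while memblock_x86_hole_size() merely accumulates
 * their lengths to compute how much of [start, end) is not backed by RAM.
 */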
/* Walk the memblock.memory map and register active regions within a node */
void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
                                         unsigned long last_pfn)
{
        unsigned long ei_startpfn;
        unsigned long ei_endpfn;
        struct memblock_region *r;

        for_each_memblock(memory, r)
                if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
                                                    &ei_startpfn, &ei_endpfn))
                        add_active_range(nid, ei_startpfn, ei_endpfn);
}
/*
 * Find the hole size (in bytes) in the memory range.
 * @start: starting address of the memory range to scan
 * @end: ending address of the memory range to scan
 */
u64 __init memblock_x86_hole_size(u64 start, u64 end)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long last_pfn = end >> PAGE_SHIFT;
        unsigned long ei_startpfn, ei_endpfn, ram = 0;
        struct memblock_region *r;

        for_each_memblock(memory, r)
                if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
                                                    &ei_startpfn, &ei_endpfn))
                        ram += ei_endpfn - ei_startpfn;

        return end - start - ((u64)ram << PAGE_SHIFT);
}