/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>

#include "mm.h"

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __read_mostly = -1;
phys_addr_t arm64_dma_phys_limit __read_mostly;

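/*
 * Parse the "initrd=<start>,<size>" early parameter, e.g.
 * "initrd=0x82000000,16M" (an illustrative address). Both values go
 * through memparse(), so the usual K/M/G suffixes are accepted.
 */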
#ifdef CONFIG_BLK_DEV_INITRD
static int __init early_initrd(char *p)
{
        unsigned long start, size;
        char *endp;

        start = memparse(p, &endp);
        if (*endp == ',') {
                size = memparse(endp + 1, NULL);

                initrd_start = start;
                initrd_end = start + size;
        }
        return 0;
}
early_param("initrd", early_initrd);
#endif

/*
 * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
 * currently assumes that for memory starting above 4G, 32-bit devices will
 * use a DMA offset.
 */
static phys_addr_t __init max_zone_dma_phys(void)
{
        phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
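        /*
         * Worked example: with DRAM starting at 0x80_0000_0000, offset is
         * 0x80_0000_0000 and the limit returned below is the lower of
         * 0x81_0000_0000 (offset + 4 GiB) and the end of DRAM.
         */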
        return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
        struct memblock_region *reg;
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
        unsigned long max_dma = min;

        memset(zone_size, 0, sizeof(zone_size));

        /* 4GB maximum for 32-bit only capable devices */
#ifdef CONFIG_ZONE_DMA
        max_dma = PFN_DOWN(arm64_dma_phys_limit);
        zone_size[ZONE_DMA] = max_dma - min;
#endif
        zone_size[ZONE_NORMAL] = max - max_dma;

        memcpy(zhole_size, zone_size, sizeof(zhole_size));

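        /*
         * zhole_size[] starts out as a copy of zone_size[]; the loop below
         * subtracts every present memblock range from it, so that only the
         * holes remain for free_area_init_node() to account per zone.
         */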
        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (start >= max)
                        continue;

#ifdef CONFIG_ZONE_DMA
                if (start < max_dma) {
                        unsigned long dma_end = min(end, max_dma);
                        zhole_size[ZONE_DMA] -= dma_end - start;
                }
#endif
                if (end > max_dma) {
                        unsigned long normal_end = min(end, max);
                        unsigned long normal_start = max(start, max_dma);
                        zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
                }
        }

        free_area_init_node(0, zone_size, min, zhole_size);
}

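/*
 * A pfn is considered valid only if it falls in a memblock memory region
 * that is mapped in the linear map (i.e. not marked MEMBLOCK_NOMAP), since
 * only such pages have usable struct page entries.
 */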
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
        return memblock_is_map_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm64_memory_present(void)
{
}
#else
static void __init arm64_memory_present(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg)
                memory_present(0, memblock_region_memory_base_pfn(reg),
                               memblock_region_memory_end_pfn(reg));
}
#endif

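/*
 * ULLONG_MAX acts as a sentinel meaning "no limit was given on the command
 * line"; dump_mem_limit() below relies on the same convention.
 */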
static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
        if (!p)
                return 1;

        memory_limit = memparse(p, &p) & PAGE_MASK;
        pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

        return 0;
}
early_param("mem", early_mem);

void __init arm64_memblock_init(void)
{
        const s64 linear_region_size = -(s64)PAGE_OFFSET;

        /*
         * Ensure that the linear region takes up exactly half of the kernel
         * virtual address space. This way, we can distinguish a linear address
         * from a kernel/module/vmalloc address by testing a single bit.
         */
        BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));

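        /*
         * Worked example, assuming VA_BITS == 48: PAGE_OFFSET is
         * 0xffff_8000_0000_0000, linear_region_size is 1 << 47, and the
         * linear map occupies the upper half of the kernel VA range, so
         * testing bit 47 tells a linear map address apart from a
         * kernel/module/vmalloc one.
         */
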
        /*
         * Select a suitable value for the base of physical memory.
         */
        memstart_addr = round_down(memblock_start_of_DRAM(),
                                   ARM64_MEMSTART_ALIGN);

        /*
         * Remove the memory that we will not be able to cover with the
         * linear mapping. Take care not to clip the kernel which may be
         * high in memory.
         */
        memblock_remove(max_t(u64, memstart_addr + linear_region_size, __pa(_end)),
                        ULLONG_MAX);
        if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
                /* ensure that memstart_addr remains sufficiently aligned */
                memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
                                         ARM64_MEMSTART_ALIGN);
                memblock_remove(0, memstart_addr);
        }

        /*
         * Apply the memory limit if it was set. Since the kernel may be loaded
         * high up in memory, add back the kernel region that must be accessible
         * via the linear mapping.
         */
        if (memory_limit != (phys_addr_t)ULLONG_MAX) {
                memblock_enforce_memory_limit(memory_limit);
                memblock_add(__pa(_text), (u64)(_end - _text));
        }

        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
                /*
                 * Add back the memory we just removed if it results in the
                 * initrd to become inaccessible via the linear mapping.
                 * Otherwise, this is a no-op
                 */
                u64 base = initrd_start & PAGE_MASK;
                u64 size = PAGE_ALIGN(initrd_end) - base;

                /*
                 * We can only add back the initrd memory if we don't end up
                 * with more memory than we can address via the linear mapping.
                 * It is up to the bootloader to position the kernel and the
                 * initrd reasonably close to each other (i.e., within 32 GB of
                 * each other) so that all granule/#levels combinations can
                 * always access both.
                 */
                if (WARN(base < memblock_start_of_DRAM() ||
                         base + size > memblock_start_of_DRAM() +
                                       linear_region_size,
                        "initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
                        initrd_start = 0;
                } else {
                        memblock_remove(base, size); /* clear MEMBLOCK_ flags */
                        memblock_add(base, size);
                        memblock_reserve(base, size);
                }
        }

        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
                extern u16 memstart_offset_seed;
                u64 range = linear_region_size -
                            (memblock_end_of_DRAM() - memblock_start_of_DRAM());

                /*
                 * If the size of the linear region exceeds, by a sufficient
                 * margin, the size of the region that the available physical
                 * memory spans, randomize the linear region as well.
                 */
                if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
                        range = range / ARM64_MEMSTART_ALIGN + 1;
                        memstart_addr -= ARM64_MEMSTART_ALIGN *
                                         ((range * memstart_offset_seed) >> 16);
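                        /*
                         * Example of the arithmetic above, assuming
                         * ARM64_MEMSTART_ALIGN is 1 GiB and 4 GiB of slack
                         * in the linear region: range becomes 5, and scaling
                         * by the 16-bit seed shifts memstart_addr down by
                         * 0 to 4 GiB in 1 GiB steps.
                         */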
                }
        }

        /*
         * Register the kernel text, kernel data, initrd, and initial
         * pagetables with memblock.
         */
        memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_start) {
                memblock_reserve(initrd_start, initrd_end - initrd_start);

                /* the generic initrd code expects virtual addresses */
                initrd_start = __phys_to_virt(initrd_start);
                initrd_end = __phys_to_virt(initrd_end);
        }
#endif

        early_init_fdt_scan_reserved_mem();

        /* 4GB maximum for 32-bit only capable devices */
        if (IS_ENABLED(CONFIG_ZONE_DMA))
                arm64_dma_phys_limit = max_zone_dma_phys();
        else
                arm64_dma_phys_limit = PHYS_MASK + 1;
        dma_contiguous_reserve(arm64_dma_phys_limit);

        memblock_allow_resize();
}

void __init bootmem_init(void)
{
        unsigned long min, max;

        min = PFN_UP(memblock_start_of_DRAM());
        max = PFN_DOWN(memblock_end_of_DRAM());

        early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

        /*
         * Sparsemem tries to allocate bootmem in memory_present(), so must be
         * done after the fixed reservations.
         */
        arm64_memory_present();

        sparse_init();
        zone_sizes_init(min, max);

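        /*
         * __va() is applied to the last byte of the highest page rather than
         * to the end address itself so the conversion stays inside the
         * linear map; adding 1 afterwards gives the conventional
         * one-past-the-end value for high_memory.
         */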
        high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
        max_pfn = max_low_pfn = max;
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        unsigned long pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn - 1) + 1;
        end_pg = pfn_to_page(end_pfn - 1) + 1;

        /*
         * Convert to physical addresses, and round start upwards and end
         * downwards.
         */
        pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
        pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
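        /*
         * Rounding start up and end down means only whole pages of the
         * memmap itself are ever freed, never a page that also holds
         * entries for a neighbouring region.
         */
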
        /*
         * If there are free pages between these, free the section of the
         * memmap array.
         */
        if (pg < pgend)
                free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
        unsigned long start, prev_end = 0;
        struct memblock_region *reg;

        for_each_memblock(memory, reg) {
                start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
                /*
                 * Take care not to free memmap entries that don't exist due
                 * to SPARSEMEM sections which aren't present.
                 */
                start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
                /*
                 * If we had a previous bank, and there is a space between the
                 * current bank and the previous, free it.
                 */
                if (prev_end && prev_end < start)
                        free_memmap(prev_end, start);

                /*
                 * Align up here since the VM subsystem insists that the
                 * memmap entries are valid from the bank end aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
                                 MAX_ORDER_NR_PAGES);
        }

#ifdef CONFIG_SPARSEMEM
        if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
                free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif  /* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free.  This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
        swiotlb_init(1);

        set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
        free_unused_memmap();
#endif
        /* this will put all unused low memory onto the freelists */
        free_all_bootmem();

        mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLG(b, t) b, t, ((t) - (b)) >> 30
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
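
        /*
         * MLK/MLM/MLG expand to "base, top, size" triples for the format
         * strings below, with the size scaled to KB, MB or GB respectively;
         * MLK_ROUNDUP rounds sections smaller than 1K up instead of down.
         */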
        pr_notice("Virtual kernel memory layout:\n");
#ifdef CONFIG_KASAN
        pr_cont("    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n",
                MLG(KASAN_SHADOW_START, KASAN_SHADOW_END));
#endif
        pr_cont("    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n",
                MLM(MODULES_VADDR, MODULES_END));
        pr_cont("    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n",
                MLG(VMALLOC_START, VMALLOC_END));
        pr_cont("      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n"
                "    .rodata : 0x%p" " - 0x%p" "   (%6ld KB)\n"
                "      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n"
                "      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
                MLK_ROUNDUP(_text, __start_rodata),
                MLK_ROUNDUP(__start_rodata, _etext),
                MLK_ROUNDUP(__init_begin, __init_end),
                MLK_ROUNDUP(_sdata, _edata));
        pr_cont("    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n",
                MLK(FIXADDR_START, FIXADDR_TOP));
        pr_cont("    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n",
                MLM(PCI_IO_START, PCI_IO_END));
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        pr_cont("    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n"
                "              0x%16lx - 0x%16lx   (%6ld MB actual)\n",
                MLG(VMEMMAP_START,
                    VMEMMAP_START + VMEMMAP_SIZE),
                MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
                    (unsigned long)virt_to_page(high_memory)));
#endif
        pr_cont("    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
                MLM(__phys_to_virt(memblock_start_of_DRAM()),
                    (unsigned long)high_memory));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

        /*
         * Check boundaries twice: Some fundamental inconsistencies can be
         * detected at build time already.
         */
#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif

        /*
         * Make sure we chose the upper bound of sizeof(struct page)
         * correctly.
         */
        BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));

        if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
                extern int sysctl_overcommit_memory;
                /*
                 * On a machine this small we won't get anywhere without
                 * overcommit, so turn it on by default.
                 */
                sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
        }
}

void free_initmem(void)
{
        free_reserved_area(__va(__pa(__init_begin)), __va(__pa(__init_end)),
                           0, "unused kernel");
        fixup_init();
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd __initdata;

void __init free_initrd_mem(unsigned long start, unsigned long end)
{
        if (!keep_initrd)
                free_reserved_area((void *)start, (void *)end, 0, "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
        keep_initrd = 1;
        return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif

/*
 * Dump out memory limit information on panic.
 */
static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
{
        if (memory_limit != (phys_addr_t)ULLONG_MAX) {
                pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
        } else {
                pr_emerg("Memory Limit: none\n");
        }
        return 0;
}

static struct notifier_block mem_limit_notifier = {
        .notifier_call = dump_mem_limit,
};

static int __init register_mem_limit_dumper(void)
{
        atomic_notifier_chain_register(&panic_notifier_list,
                                       &mem_limit_notifier);
        return 0;
}
__initcall(register_mem_limit_dumper);