/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>

#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"
static phys_addr_t phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

static int __init early_initrd(char *p)
{
	phys_addr_t start;
	unsigned long size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);
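/*
 * The "initrd" early parameter above takes a physical start address and a
 * size from the kernel command line, e.g. "initrd=0x81000000,16M" (the
 * address here is purely illustrative); memparse() accepts the usual
 * K/M/G suffixes for both fields.
 */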
static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
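/*
 * Note the difference between the two tags: the legacy ATAG_INITRD passes
 * a virtual address, which is why parse_tag_initrd() converts it with
 * __virt_to_phys(), while ATAG_INITRD2 already carries a physical address
 * and is used verbatim.
 */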
#ifdef CONFIG_OF_FLATTREE
void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
	phys_initrd_start = start;
	phys_initrd_size = end - start;
}
#endif /* CONFIG_OF_FLATTREE */
/*
 * This keeps memory configuration data used by a couple memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;

void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo *mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas(filter);

	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
		return;

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
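/*
 * show_mem() is typically reached from out-of-memory reports and the
 * SysRq 'm' handler; it walks every page in every meminfo bank and counts
 * reserved, swap-cache, slab, free and shared pages for the dump above.
 */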
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	struct meminfo *mi = &meminfo;
	int i;

	/* This assumes the meminfo array is properly sorted */
	*min = bank_pfn_start(&mi->bank[0]);
	for_each_bank (i, mi)
		if (mi->bank[i].highmem)
			break;
	*max_low = bank_pfn_end(&mi->bank[i - 1]);
	*max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
}
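/*
 * find_limits() assumes the meminfo banks are sorted by address with every
 * lowmem bank before the first highmem bank, so the loop can stop at the
 * first highmem bank and take bank i - 1 as the end of lowmem.
 */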
static void __init arm_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}

	/* Reserve the lowmem memblock reserved regions in bootmem. */
	for_each_memblock(reserved, reg) {
		unsigned long start = memblock_region_reserved_base_pfn(reg);
		unsigned long end = memblock_region_reserved_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		reserve_bootmem(__pfn_to_phys(start),
				(end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
	}
}
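/*
 * At this point memblock is the authoritative map of RAM;
 * arm_bootmem_init() hands the lowmem portion of it over to the bootmem
 * allocator: first every memory region is freed into bootmem, then every
 * memblock reservation is marked reserved again so bootmem cannot hand
 * it out.
 */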
#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
#endif
}
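/*
 * For illustration only (the board name below is made up): a platform
 * whose devices can only DMA into the first 64MB of RAM would describe
 * that in its machine_desc, and setup_dma_zone() then derives
 * arm_dma_limit from PHYS_OFFSET plus that size:
 *
 *	MACHINE_START(EXAMPLE_BOARD, "Example board")
 *		.dma_zone_size	= SZ_64M,
 *		...
 *	MACHINE_END
 *
 * Machines that leave dma_zone_size at zero get the unrestricted 32-bit
 * limit of 0xffffffff instead.
 */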
static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}
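/*
 * Zone index 0 above is ZONE_DMA when CONFIG_ZONE_DMA is enabled and
 * ZONE_NORMAL otherwise, which is why arm_adjust_dma_zone() carves the
 * DMA zone out of zone_size[0] only after the common sizing has been done.
 */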
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif
#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif
static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}
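/*
 * "Stealing" removes the pages from the memblock memory map entirely, so
 * they never reach the zone allocator at all; the BUG_ON() above enforces
 * that this only happens before arm_memblock_init() clears
 * arm_memblock_steal_permitted, i.e. before the memory map is final.
 */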
void __init arm_memblock_init(struct meminfo *mi,
	const struct machine_desc *mdesc)
{
	int i;

	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);

	/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
	memblock_reserve(__pa(_sdata), _end - _sdata);
#else
	memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size &&
	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size &&
	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	arm_mm_memblock_reserve();
	arm_dt_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_dt_scan_reserved_mem();

	/*
	 * reserve memory for DMA contiguous allocations,
	 * must come from DMA area inside low memory
	 */
	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));

	arm_memblock_steal_permitted = false;
	memblock_allow_resize();
	memblock_dump_all();
}
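/*
 * Once arm_memblock_init() has reserved the kernel image, the initrd, the
 * page tables, the DT and any platform or CMA regions, the memblock layout
 * is frozen for stealing purposes and resizing is allowed; bootmem_init()
 * below then builds the bootmem allocator and zone lists from it.
 */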
void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	arm_bootmem_init(min, max_low);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	arm_bootmem_free(min, max_low, max_high);

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}
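/*
 * Summary of the ordering enforced in bootmem_init(): find_limits()
 * computes the lowmem/highmem PFN boundaries, arm_bootmem_init() brings up
 * bootmem for lowmem, arm_memory_present()/sparse_init() build the sparse
 * memory model (allocating from bootmem), and only then can
 * arm_bootmem_free() initialise the zones via free_area_init_node().
 */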
/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}
/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		bank_start = min(bank_start,
				 ALIGN(prev_bank_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
		free_memmap(prev_bank_end,
			    ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
}
#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}
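/*
 * free_area_high() hands each highmem page to the page allocator with
 * free_highmem_page(), which also updates the total RAM/highmem page
 * counters, so free_highpages() must be careful to skip every page that
 * memblock still has reserved.
 */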
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap(&meminfo);

	free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_TOP),
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
			MLM(MODULES_VADDR, MODULES_END),
#endif

			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
	BUG_ON(TASK_SIZE				> MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE	> PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}
void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}
#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		free_reserved_area((void *)start, (void *)end, -1, "initrd");
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif