/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
static void __init *early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	BUG_ON(!phys);
	ptr = __va(phys);
	memset(ptr, 0, PAGE_SIZE);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return ptr;
}
/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * Need to have the least restrictive permissions available
		 * permissions will be fixed up later
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, unsigned long pfn,
			   pgprot_t prot,
			   void *(*pgtable_alloc)(void))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		pte = pgtable_alloc();
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
		flush_tlb_all();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
			   unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   void *(*pgtable_alloc)(void))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		pmd = pgtable_alloc();
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		pud_populate(mm, pud, pmd);
		flush_tlb_all();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, pgtable_alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}
static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}
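/*
 * Note (explanatory, not from the original source): 1GB blocks only exist at
 * the PUD level with a 4KB granule (PAGE_SHIFT == 12). The second check
 * requires addr, next and phys to share 1GB alignment: e.g. a 1GB-aligned
 * physical region mapped at a 1GB-aligned VA for a full gigabyte qualifies,
 * while any sub-1GB offset in either address falls back to PMD/PTE mappings
 * in alloc_init_pmd().
 */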
static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
			   unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   void *(*pgtable_alloc)(void))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pud = pgtable_alloc();
		pgd_populate(mm, pgd, pud);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys)) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(mm, pud, addr, next, phys, prot,
				       pgtable_alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}
/*
 * Create the page directory entries and any necessary page tables for the
 * mapping specified by 'phys', 'virt' and 'size'.
 */
static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
			     phys_addr_t phys, unsigned long virt,
			     phys_addr_t size, pgprot_t prot,
			     void *(*pgtable_alloc)(void))
{
	unsigned long addr, length, end, next;

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(mm, pgd, addr, next, phys, prot, pgtable_alloc);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}
static void *late_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return ptr;
}
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_mapping(&init_mm, pgd_offset_k(virt), phys, virt,
			 size, prot, early_pgtable_alloc);
}
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
			 late_pgtable_alloc);
}
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	return __create_mapping(&init_mm, pgd_offset_k(virt),
				phys, virt, size, prot, late_pgtable_alloc);
}
#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	/*
	 * Set up the executable regions using the existing section mappings
	 * for now. This will get more fine grained later once all memory
	 * is mapped
	 */
	unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
	unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);

	if (end < kernel_x_start) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else if (start >= kernel_x_end) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else {
		if (start < kernel_x_start)
			create_mapping(start, __phys_to_virt(start),
				kernel_x_start - start,
				PAGE_KERNEL);
		create_mapping(kernel_x_start,
				__phys_to_virt(kernel_x_start),
				kernel_x_end - kernel_x_start,
				PAGE_KERNEL_EXEC);
		if (kernel_x_end < end)
			create_mapping(kernel_x_end,
				__phys_to_virt(kernel_x_end),
				end - kernel_x_end,
				PAGE_KERNEL);
	}
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	create_mapping(start, __phys_to_virt(start), end - start,
			PAGE_KERNEL_EXEC);
}
#endif
static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
	 * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps) of
	 * memory starting from PHYS_OFFSET (which must be aligned to 2MB as
	 * per Documentation/arm64/booting.txt).
	 */
	limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

		if (memblock_is_nomap(reg))
			continue;

		if (ARM64_SWAPPER_USES_SECTION_MAPS) {
			/*
			 * For the first memory bank align the start address and
			 * current memblock limit to prevent create_mapping() from
			 * allocating pte page tables from unmapped memory. With
			 * the section maps, if the first block doesn't end on section
			 * size boundary, create_mapping() will try to allocate a pte
			 * page, which may be returned from an unmapped area.
			 * When section maps are not used, the pte page table for the
			 * current limit is already present in swapper_pg_dir.
			 */
			if (start < limit)
				start = ALIGN(start, SECTION_SIZE);
			if (end < limit) {
				limit = end & SECTION_MASK;
				memblock_set_current_limit(limit);
			}
		}
		__map_memblock(start, end);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
static void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
	/* now that we are actually fully mapped, make the start/end more fine grained */
	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_start = round_down(__pa(_stext),
							 SWAPPER_BLOCK_SIZE);

		create_mapping(aligned_start, __phys_to_virt(aligned_start),
				__pa(_stext) - aligned_start,
				PAGE_KERNEL);
	}

	if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_end = round_up(__pa(__init_end),
						     SWAPPER_BLOCK_SIZE);
		create_mapping(__pa(__init_end), (unsigned long)__init_end,
				aligned_end - __pa(__init_end),
				PAGE_KERNEL);
	}
#endif
}
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	create_mapping_late(__pa(_stext), (unsigned long)_stext,
				(unsigned long)_etext - (unsigned long)_stext,
				PAGE_KERNEL_ROX);
}
#endif
void fixup_init(void)
{
	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
			(unsigned long)__init_end - (unsigned long)__init_begin,
			PAGE_KERNEL);
}
/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	map_mem();
	fixup_executable();

	bootmem_init();

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();
}
/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif
static inline pud_t * fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t * fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t * fixmap_pte(unsigned long addr)
{
	pmd_t *pmd = fixmap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}
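/*
 * Note (explanatory, not from the original source): a TLB flush is only
 * needed when a fixmap slot is torn down (valid -> invalid). Installing an
 * entry in a previously invalid slot cannot have been cached in the TLB, so
 * the set_pte() path gets away without an invalidation.
 */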
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	pgprot_t prot = PAGE_KERNEL_RO;
	int size, offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the size field of the
	 * FDT header after mapping the first chunk, double check here if that
	 * is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
		       SWAPPER_BLOCK_SIZE, prot);

	if (fdt_check_header(dt_virt) != 0)
		return NULL;

	size = fdt_totalsize(dt_virt);
	if (size > MAX_FDT_SIZE)
		return NULL;

	if (offset + size > SWAPPER_BLOCK_SIZE)
		create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
			       round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);

	memblock_reserve(dt_phys, size);

	return dt_virt;
}