#ifndef _ASM_POWERPC_PGTABLE_4K_H
#define _ASM_POWERPC_PGTABLE_4K_H
/*
 * Entries per page directory level.  The PTE level must use a 64b record
 * for each page table entry.  The PMD and PGD level use a 32b record for
 * each entry by assuming that each entry is page aligned.
 */
#define PTE_INDEX_SIZE  9
#define PMD_INDEX_SIZE  7
#define PUD_INDEX_SIZE  7
#define PGD_INDEX_SIZE  9

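/*
 * Worked example (illustrative note, not part of the original header):
 * with a 4K base page (PAGE_SHIFT == 12) the index sizes above decompose
 * a virtual address as
 *
 *	PAGE_SHIFT + PTE + PMD + PUD + PGD = 12 + 9 + 7 + 7 + 9 = 44 bits
 *
 * so the four levels together cover a 2^44 == 16T address space, with
 * 512 / 128 / 128 / 512 entries per level respectively.
 */
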
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
#define PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif	/* __ASSEMBLY__ */

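/*
 * Example sizes (an assumption for illustration: all four entry types
 * are 8 bytes wide, as on 64-bit builds): PTE_TABLE_SIZE and
 * PGD_TABLE_SIZE come to 8 << 9 == 4K (a full page), while
 * PMD_TABLE_SIZE and PUD_TABLE_SIZE come to 8 << 7 == 1K.
 */
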
#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
#define PTRS_PER_PUD	(1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* With 4k base page size, hugepage PTEs go at the PMD level */
#define MIN_HUGEPTE_SHIFT	PMD_SHIFT

/* PUD_SHIFT determines what a third-level page table entry can map */
#define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))

/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
#define PGDIR_SHIFT	(PUD_SHIFT + PUD_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

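/*
 * Illustrative arithmetic for the shifts above (assumes PAGE_SHIFT == 12):
 *
 *	PMD_SHIFT   = 12 + 9 = 21  ->  each PMD entry maps a 2M region
 *	PUD_SHIFT   = 21 + 7 = 28  ->  each PUD entry maps a 256M region
 *	PGDIR_SHIFT = 28 + 7 = 35  ->  each PGD entry maps a 32G region
 *
 * The corresponding masks round an address down to the start of the
 * region mapped at that level, e.g. (addr & PMD_MASK) is the base of
 * the 2M region containing addr.
 */
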
/* PTE bits */
#define _PAGE_SECONDARY	0x8000	/* software: HPTE is in secondary group */
#define _PAGE_GROUP_IX	0x7000	/* software: HPTE index within group */
#define _PAGE_F_SECOND	_PAGE_SECONDARY
#define _PAGE_F_GIX	_PAGE_GROUP_IX

/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
			 _PAGE_SECONDARY | _PAGE_GROUP_IX)

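/*
 * Sketch of how these bits are consumed (modeled on the hash MMU flush
 * path elsewhere in the tree, not defined here; hpt_hash(),
 * htab_hash_mask and HPTES_PER_GROUP come from the hash MMU headers):
 *
 *	hash = hpt_hash(va, shift);
 *	if (pte_val(pte) & _PAGE_F_SECOND)
 *		hash = ~hash;
 *	slot  = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 *	slot += (pte_val(pte) & _PAGE_F_GIX) >> 12;
 *
 * i.e. the saved bits let the kernel re-locate the HPTE backing a Linux
 * PTE without searching the whole hash group.
 */
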
/* PAGE_MASK gives the right answer below, but only by accident */
/* It should be preserving the high 48 bits and then specifically */
/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
			 _PAGE_HPTEFLAGS)

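/*
 * Illustrative use (a sketch of the companion pgtable header, not a
 * definition made here): _PAGE_CHG_MASK names the bits that must survive
 * a protection change, roughly
 *
 *	#define pte_modify(pte, newprot) \
 *		(__pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
 *
 * so the page number, dirty/accessed state and HPTE tracking bits are
 * preserved while the protection bits are replaced.
 */
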
/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS		0
/* Bits to mask out from a PUD to get to the PMD page */
#define PUD_MASKED_BITS		0
/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

/* shift to put page number into pte */
#define PTE_RPN_SHIFT	(17)

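/*
 * Example (hedged; the real accessors live in the companion pgtable
 * header): the physical page number is recovered by shifting the RPN
 * field back down, along the lines of
 *
 *	#define pte_pfn(x)	((unsigned long)(pte_val(x) >> PTE_RPN_SHIFT))
 */
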
#ifdef STRICT_MM_TYPECHECKS
#define __real_pte(e,p)		((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#else
#define __real_pte(e,p)		(e)
#define __rpte_to_pte(r)	(__pte(r))
#endif
#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> 12)

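/*
 * Note (added for illustration): shifting by 12 moves _PAGE_GROUP_IX
 * (0x7000) and _PAGE_SECONDARY (0x8000) down into bits 0-3, so a PTE in
 * the secondary group with group index 5 yields hidx == 0xd. The index
 * argument is unused here: with a 4K base page there are no subpages to
 * select between.
 */
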
#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
	do {								\
		index = 0;						\
		shift = mmu_psize_defs[psize].shift;			\

#define pte_iterate_hashed_end() } while(0)

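/*
 * Usage sketch (an assumption, modeled on callers in the hash MMU code):
 * on 4K base pages the pair expands to a single-trip do/while, so the
 * body runs exactly once per PTE:
 *
 *	pte_iterate_hashed_subpages(rpte, psize, va, index, shift) {
 *		hash = hpt_hash(va, shift);
 *		hidx = __rpte_to_hidx(rpte, index);
 *		... invalidate or update the HPTE here ...
 *	} pte_iterate_hashed_end();
 */
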
#ifdef CONFIG_PPC_HAS_HASH_64K
#define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr)
#else
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
#endif

/*
 * 4-level page tables related bits
 */

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(pgd_val(pgd) == 0)
#define pgd_present(pgd)	(pgd_val(pgd) != 0)
#define pgd_clear(pgdp)		(pgd_val(*(pgdp)) = 0)
#define pgd_page_vaddr(pgd)	(pgd_val(pgd) & ~PGD_MASKED_BITS)
#define pgd_page(pgd)		virt_to_page(pgd_page_vaddr(pgd))

#define pud_offset(pgdp, addr)	\
	(((pud_t *) pgd_page_vaddr(*(pgdp))) + \
	 (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))

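/*
 * Walk sketch (illustrative; pgd_offset() and the lower-level walkers
 * are provided by the generic and companion headers, not by this file):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	if (pgd_present(*pgd)) {
 *		pud_t *pud = pud_offset(pgd, addr);
 *		... continue down via pmd_offset()/pte_offset_kernel() ...
 *	}
 */
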
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))

#define remap_4k_pfn(vma, addr, pfn, prot)	\
	remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))

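/*
 * Usage sketch (hypothetical driver code; foo_mmap and pfn are made-up
 * names): remap_4k_pfn() lets a caller map a single 4K hardware frame
 * regardless of the kernel base page size, and with a 4K base it simply
 * degenerates to remap_pfn_range():
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_4k_pfn(vma, vma->vm_start, pfn,
 *				    vma->vm_page_prot);
 *	}
 */
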
#endif /* _ASM_POWERPC_PGTABLE_4K_H */