[PATCH] ppc64: Fix bug in SLB miss handler for hugepages
include/asm-ppc64/pgtable-64k.h
#include <asm-generic/pgtable-nopud.h>

#define PTE_INDEX_SIZE	12
#define PMD_INDEX_SIZE	12
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	4

#define PTE_TABLE_SIZE	(sizeof(real_pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
/* With 64k base page size, hugepage PTEs go at the PTE level */
#define MIN_HUGEPTE_SHIFT	PAGE_SHIFT
/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
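
/* Illustrative arithmetic (not part of the original header): with the 64K
 * base page (PAGE_SHIFT == 16) and the index sizes above, these work out to:
 *
 *	PMD_SHIFT   = 16 + 12 = 28	one PMD entry maps 2^28 = 256MB
 *	PGDIR_SHIFT = 28 + 12 = 40	one PGD entry maps 2^40 = 1TB
 *	40 + PGD_INDEX_SIZE (4) = 44	bits of virtual address per page table
 */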
/* Additional PTE bits (don't change without checking asm in hash_low.S) */
#define _PAGE_HPTE_SUB	0x0ffff000 /* combo only: sub pages HPTE bits */
#define _PAGE_HPTE_SUB0	0x08000000 /* combo only: first sub page */
#define _PAGE_COMBO	0x10000000 /* this is a combo 4k page */
#define _PAGE_F_SECOND	0x00008000 /* full page: hidx bits */
#define _PAGE_F_GIX	0x00007000 /* full page: hidx bits */

/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_HPTE_SUB | \
			 _PAGE_COMBO)
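
/* Illustrative sketch (not part of the original header): _PAGE_HPTE_SUB
 * holds one "sub page has an HPTE" bit for each of the sixteen 4K sub pages
 * of a combo page, with _PAGE_HPTE_SUB0 marking sub page 0.  The bit for
 * sub page 'index' is therefore _PAGE_HPTE_SUB0 >> index, which is exactly
 * what __rpte_sub_valid() below tests:
 *
 *	index 0  -> 0x08000000
 *	index 1  -> 0x04000000
 *	...
 *	index 15 -> 0x00001000
 */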
/* Shift to put page number into pte.
 *
 * That gives us a max RPN of 32 bits, which means a max of 48 bits
 * of addressable physical space.
 * We could get 3 more bits here by setting PTE_RPN_SHIFT to 29 but
 * 32 makes PTEs more readable for debugging for now :)
 */
#define PTE_RPN_SHIFT	(32)
#define PTE_RPN_MAX	(1UL << (64 - PTE_RPN_SHIFT))
#define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
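
/* Worked example (illustrative, not part of the original header): with
 * PTE_RPN_SHIFT == 32 the RPN gets the top 64 - 32 = 32 bits of the PTE,
 * and a physical address is the RPN shifted left by PAGE_SHIFT (16), hence
 * the 32 + 16 = 48 bits of addressable physical space quoted above.  A
 * hypothetical helper extracting the RPN would look like:
 *
 *	static inline unsigned long example_pte_rpn(pte_t pte)
 *	{
 *		return pte_val(pte) >> PTE_RPN_SHIFT;
 *	}
 */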
/* _PAGE_CHG_MASK masks off the bits that are to be preserved across
 * pgprot changes
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
			 _PAGE_ACCESSED)
/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS		0x1ff
/* Bits to mask out from a PGD/PUD to get to the PMD page */
#define PUD_MASKED_BITS		0x1ff
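
/* Illustrative sketch (not part of the original header): the low 0x1ff bits
 * of a PMD/PUD entry carry flags rather than address bits, so recovering
 * the page-table page underneath an entry is, schematically:
 *
 *	pte_page = (pte_t *)(pmd_val(pmd) & ~PMD_MASKED_BITS);
 */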
#ifndef __ASSEMBLY__

/* Manipulate "rpte" values */
#define __real_pte(e,p)		((real_pte_t) { \
	(e), pte_val(*((p) + PTRS_PER_PTE)) })
#define __rpte_to_hidx(r,index)	((pte_val((r).pte) & _PAGE_COMBO) ? \
	(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
#define __rpte_to_pte(r)	((r).pte)
#define __rpte_sub_valid(rpte, index) \
	(pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
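
/* Worked example (illustrative, not part of the original header): for a
 * combo page, __rpte_to_hidx() keeps 4 hash-slot bits per 4K sub page in
 * the second (hidx) word, sub page 'index' using bits index*4..index*4+3,
 * e.g. sub page 2 yields ((r).hidx >> 8) & 0xf.  For a full 64K page the
 * single hidx is read out of the PTE itself, from the _PAGE_F_SECOND |
 * _PAGE_F_GIX bits (bits 12..15, hence the '>> 12' above).
 */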
/* Trick: we set __end to va + 64k, which happens to work for
 * a 16M page as well, since we want only one iteration
 */
#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	    \
	do {								    \
		unsigned long __end = va + PAGE_SIZE;			    \
		unsigned __split = (psize == MMU_PAGE_4K ||		    \
				    psize == MMU_PAGE_64K_AP);		    \
		shift = mmu_psize_defs[psize].shift;			    \
		for (index = 0; va < __end; index++, va += (1 << shift)) { \
			if (!__split || __rpte_sub_valid(rpte, index)) do { \

#define pte_iterate_hashed_end() } while(0); } } while(0)
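
/* Usage sketch (illustrative, not part of the original header): the two
 * macros above bracket a loop body, roughly as a hash flush path might use
 * them; 'rpte', 'psize' and 'va' are assumed to come from the caller:
 *
 *	pte_iterate_hashed_subpages(rpte, psize, va, index, shift) {
 *		hidx = __rpte_to_hidx(rpte, index);
 *		... invalidate the HPTE for this (va, hidx) pair ...
 *	} pte_iterate_hashed_end();
 */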
#endif /* __ASSEMBLY__ */