#ifndef _I386_PGTABLE_3LEVEL_H
#define _I386_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

/*
 * PGDIR_SHIFT determines what a top-level page table entry can map
 */
#define PGDIR_SHIFT	30
#define PTRS_PER_PGD	4
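
/*
 * (With PGDIR_SHIFT == 30 each of the four top-level entries maps
 *  2^30 bytes = 1 GB, so together they cover the full 32-bit / 4 GB
 *  address space.)
 */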

/*
 * PMD_SHIFT determines the size of the area a middle-level
 * page table can map
 */
#define PMD_SHIFT	21
#define PTRS_PER_PMD	512

/*
 * entries per page directory level
 */
#define PTRS_PER_PTE	512

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))

/*
 * Subtle, in PAE mode we cannot have zeroes in the top level
 * page directory, the CPU enforces this. (ie. the PGD entry
 * always has to have the present bit set.) The CPU caches
 * the 4 pgd entries internally, so there is no extra memory
 * load on TLB miss, despite one more level of indirection.
 */
#define EMPTY_PGD	(__pa(empty_zero_page) + 1)
#define pgd_none(x)	(pgd_val(x) == EMPTY_PGD)
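/*
 * (empty_zero_page is a page of zeroes; the "+ 1" sets the Present bit,
 *  so an "empty" pgd entry is a present entry pointing at an all-zero
 *  pmd page, and every lookup through it finds a not-present pmd.)
 */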
extern inline int pgd_bad(pgd_t pgd)		{ return 0; }
extern inline int pgd_present(pgd_t pgd)	{ return !pgd_none(pgd); }

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use ptep_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();	/* order the high-word store before the low word, which carries the present bit */
	ptep->pte_low = pte.pte_low;
}
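
/*
 * Illustrative sketch (not from the original header): when the old pte
 * may still be live, the rule above means clearing it first and only
 * then writing the new value, e.g.
 *
 *	pte_t old = ptep_get_and_clear(ptep);
 *	... fold old's accessed/dirty bits into the new pte as needed ...
 *	set_pte(ptep, new);
 *
 * so the MMU never observes a half-written 64-bit entry.
 */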
#define set_pmd(pmdptr,pmdval) \
		set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval))
#define set_pgd(pgdptr,pgdval) \
		set_64bit((unsigned long long *)(pgdptr),pgd_val(pgdval))

/*
 * Pentium-II erratum A13: in PAE mode we explicitly have to flush
 * the TLB via cr3 if the top-level pgd is changed... This was one tough
 * thing to find out - guess I should first read all the documentation.
 */
extern inline void __pgd_clear (pgd_t * pgd)
{
	set_pgd(pgd, __pgd(EMPTY_PGD));
}

extern inline void pgd_clear (pgd_t * pgd)
{
	__pgd_clear(pgd);
	__flush_tlb();		/* reload cr3, as the erratum note above requires */
}

#define pgd_page(pgd) \
((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, address) ((pmd_t *) pgd_page(*(dir)) + \
			__pmd_offset(address))

static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	/* xchg acts as a barrier before the setting of the high bits */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}

static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}
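
/*
 * Under PAE a physical address can exceed 32 bits: address bits 12..31
 * live in pte_low and the bits above 31 in pte_high, so the page frame
 * number below is reassembled from both halves (PAGE_SHIFT == 12).
 */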
#define pte_page(x)	(mem_map+(((x).pte_low >> PAGE_SHIFT) | ((x).pte_high << (32 - PAGE_SHIFT))))
#define pte_none(x)	(!(x).pte_low && !(x).pte_high)

static inline pte_t __mk_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;

	pte.pte_high = page_nr >> (32 - PAGE_SHIFT);
	pte.pte_low = (page_nr << PAGE_SHIFT) | pgprot_val(pgprot);
	return pte;
}
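
/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12): page_nr
 * 0x123456 corresponds to physical address 0x123456000 (above 4 GB),
 * giving pte_high = 0x123456 >> 20 = 0x1 and
 * pte_low = (0x123456 << 12) | pgprot_val(pgprot) = 0x23456000 | prot bits.
 */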

#endif /* _I386_PGTABLE_3LEVEL_H */