#ifndef _I386_PGTABLE_3LEVEL_H
#define _I386_PGTABLE_3LEVEL_H

#include <asm-generic/pgtable-nopud.h>

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
#define pud_none(pud)				0
#define pud_bad(pud)				0
#define pud_present(pud)			1

/*
 * Is the pte executable?
 */
static inline int pte_x(pte_t pte)
{
	return !(pte_val(pte) & _PAGE_NX);
}

/*
 * All present user-pages with !NX bit are user-executable:
 */
static inline int pte_exec(pte_t pte)
{
	return pte_user(pte) && pte_x(pte);
}

/*
 * All present pages with !NX bit are kernel-executable:
 */
static inline int pte_exec_kernel(pte_t pte)
{
	return pte_x(pte);
}

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use pte_get_and_clear to obtain the old pte
 * value and then use set_pte to update it. -ben
 */
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
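/*
 * The store order above matters: the present bit lives in pte_low, so
 * while the old low word still reads as not-present the walker ignores
 * pte_high entirely.  Writing the high half first and the low half last
 * (with smp_wmb() in between) means the entry only ever becomes visible
 * as a complete 64-bit pte, never half-written.
 */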
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/*
 * Since this is only called on user PTEs, and the page fault handler
 * must handle the already racy situation of simultaneous page faults,
 * we are justified in merely clearing the PTE present bit, followed
 * by a set.  The ordering here is important.
 */
static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	/* Knock out the present bit first, then install the new entry: */
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

#define set_pte_atomic(pteptr,pteval) \
		set_64bit((unsigned long long *)(pteptr),pte_val(pteval))
#define set_pmd(pmdptr,pmdval) \
		set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval))
#define set_pud(pudptr,pudval) \
		(*(pudptr) = (pudval))
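/*
 * set_64bit stores all eight bytes atomically (via cmpxchg8b on PAE
 * hardware), which is why set_pte_atomic and set_pmd may be used even
 * on a live, present entry; the two-halves trick above is only safe
 * for entries the hardware is guaranteed not to walk mid-update.
 */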

/*
 * Pentium-II erratum A13: in PAE mode we explicitly have to flush
 * the TLB via cr3 if the top-level pgd is changed...
 * We do not let the generic code free and clear pgd entries due to
 * this erratum.
 */
static inline void pud_clear (pud_t * pud) { }

#define pud_page(pud) \
((struct page *) __va(pud_val(pud) & PAGE_MASK))

#define pud_page_vaddr(pud) \
((unsigned long) __va(pud_val(pud) & PAGE_MASK))

/* Find an entry in the second-level page table.. */
#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
			pmd_index(address))
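/*
 * Under PAE a 32-bit virtual address splits 2/9/9/12:
 *
 *	bits 31-30	pgd (page directory pointer) index
 *	bits 29-21	pmd index
 *	bits 20-12	pte index
 *	bits 11-0	offset within the page
 *
 * e.g. pmd_index(0xc0400000) == (0xc0400000 >> 21) & 0x1ff == 2.
 */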

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t res;

	/* xchg acts as a barrier before the setting of the high bits */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}
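/*
 * The atomic xchg on pte_low clears the present bit first, so from that
 * point on the MMU can no longer set Accessed/Dirty in this entry
 * behind our back; pte_high can then be read and cleared with plain
 * stores.
 */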

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

static inline int pte_none(pte_t pte)
{
	return !pte.pte_low && !pte.pte_high;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte.pte_low >> PAGE_SHIFT) |
		(pte.pte_high << (32 - PAGE_SHIFT));
}
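/*
 * e.g. with PAGE_SHIFT == 12, pte_low 0x12345067 / pte_high 0x00000003
 * yields pfn (0x12345067 >> 12) | (0x3 << 20) = 0x12345 | 0x300000
 * = 0x312345; the low 12 bits of pte_low are the flag bits.
 */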

extern unsigned long long __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;

	pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
					(pgprot_val(pgprot) >> 32);
	pte.pte_high &= (__supported_pte_mask >> 32);
	pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
							__supported_pte_mask;
	return pte;
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) | \
			pgprot_val(pgprot)) & __supported_pte_mask);
}

/*
 * Bits 0, 6 and 7 are taken in the low part of the pte,
 * put the 32 bits of offset into the high part.
 */
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
#define PTE_FILE_MAX_BITS       32
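/*
 * e.g. pgoff_to_pte(0x00ab1234) builds { .pte_low = _PAGE_FILE,
 * .pte_high = 0x00ab1234 }: not present, marked as a file pte, with the
 * full 32-bit page offset recoverable via pte_to_pgoff().
 */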

/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ 0, (x).val })
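/*
 * Five type bits and a 32-bit .val leave 27 bits of swap offset, all
 * kept in pte_high so the pte stays not-present.  e.g. __swp_entry(2,
 * 0x1000) gives val 2 | (0x1000 << 5) = 0x20002; __swp_type() recovers
 * 0x20002 & 0x1f = 2 and __swp_offset() 0x20002 >> 5 = 0x1000.
 */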

#define __pmd_free_tlb(tlb, x)		do { } while (0)

#define vmalloc_sync_all() ((void)0)

#endif /* _I386_PGTABLE_3LEVEL_H */