#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e)						\
	printk("%s:%d: bad pte %p(%08lx%08lx).\n",		\
	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e)						\
	printk("%s:%d: bad pmd %p(%016Lx).\n",			\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e)						\
	printk("%s:%d: bad pgd %p(%016Lx).\n",			\
	       __FILE__, __LINE__, &(e), pgd_val(e))

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use pte_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

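/*
 * Usage sketch (added commentary, not part of the original header; the
 * call site below is hypothetical): when the old pte may still be live
 * in the hardware page tables, the rule above means a caller tears the
 * entry down first and only then installs the new value:
 *
 *	pte_t old = ptep_get_and_clear(mm, addr, ptep);
 *	... compute the new pte from old ...
 *	set_pte(ptep, new_pte);
 *
 * Writing pte_high before pte_low in native_set_pte() is safe because
 * the present bit lives in pte_low, so the entry stays invisible to the
 * CPU until the final low-word store.
 */
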
#define pmd_read_atomic pmd_read_atomic
/*
 * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
 * a "*pmdp" dereference done by gcc. Problem is, in certain places
 * where pte_offset_map_lock is called, concurrent page faults are
 * allowed, if the mmap_sem is held for reading. An example is mincore
 * vs page faults vs MADV_DONTNEED. On the page fault side
 * pmd_populate rightfully does a set_64bit, but if we're reading the
 * pmd_t with a "*pmdp" on the mincore side, an SMP race can happen
 * because gcc will not read the 64 bits of the pmd atomically. To fix
 * this, all places running pte_offset_map_lock() while holding the
 * mmap_sem in read mode shall read the pmdp pointer using this
 * function to know if the pmd is null or not, and in turn to know if
 * they can run pte_offset_map_lock or pmd_trans_huge or other pmd
 * operations.
 *
 * Without THP, if the mmap_sem is held for reading, the
 * pmd can only transition from null to not null while pmd_read_atomic runs.
 * So there's no need to literally read it atomically.
 *
 * With THP, if the mmap_sem is held for reading, the pmd can become
 * THP or null or point to a pte (and in turn become "stable") at any
 * time under pmd_read_atomic, so it's mandatory to read it atomically
 * with cmpxchg8b.
 */
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	pmdval_t ret;
	u32 *tmp = (u32 *)pmdp;

	ret = (pmdval_t) (*tmp);
	if (ret) {
		/*
		 * If the low part is null, we must not read the high part
		 * or we can end up with a partial pmd.
		 */
		smp_rmb();
		ret |= ((pmdval_t)*(tmp + 1)) << 32;
	}

	return (pmd_t) { ret };
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	return (pmd_t) { atomic64_read((atomic64_t *)pmdp) };
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

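/*
 * Usage sketch (added commentary, not part of the original header): a
 * lockless reader that holds mmap_sem only for reading, e.g. a
 * mincore-style page table walker, snapshots the pmd through
 * pmd_read_atomic() instead of dereferencing pmdp directly. The
 * surrounding code is illustrative only:
 *
 *	pmd_t pmdval = pmd_read_atomic(pmdp);
 *	barrier();	// keep gcc from re-reading *pmdp below
 *	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
 *		return 0;	// no stable pte page to walk
 *	pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
 */
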
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

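/*
 * Reasoning note (added commentary, not in the original header): the
 * P-bit is bit 0 and therefore sits in pte_low. Clearing pte_low first
 * means the CPU sees a not-present entry before the high half changes,
 * so it can never walk a half-cleared pte. The smp_wmb() only needs to
 * stop the compiler from reordering the two stores, since x86 does not
 * reorder stores against each other.
 */
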
static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 *
	 * Currently all places where pud_clear() is called either have
	 * flush_tlb_mm() followed or don't need TLB flush (x86_64 code or
	 * pud_clear_bad()), so we don't need TLB flush here.
	 */
}

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	/* xchg acts as a barrier before the setting of the high bits */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

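/*
 * Added commentary (not in the original header): only pte_low is
 * cleared with a locked xchg because it holds the present bit; once the
 * xchg has made the entry not present, the CPU can no longer set the
 * accessed or dirty bits, so the plain read and clear of pte_high that
 * follows cannot race with a hardware update.
 */
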
#ifdef CONFIG_SMP
union split_pmd {
	struct {
		u32 pmd_low;
		u32 pmd_high;
	};
	pmd_t pmd;
};
static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
	union split_pmd res, *orig = (union split_pmd *)pmdp;

	/* xchg acts as a barrier before setting of the high bits */
	res.pmd_low = xchg(&orig->pmd_low, 0);
	res.pmd_high = orig->pmd_high;
	orig->pmd_high = 0;

	return res.pmd;
}
#else
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif

/*
 * Bits 0, 6 and 7 are taken in the low part of the pte,
 * put the 32 bits of offset into the high part.
 */
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off) \
	((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
#define PTE_FILE_MAX_BITS	32

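/*
 * Worked example (added commentary, not in the original header): a file
 * pte for page offset 0x12345 is built as .pte_low = _PAGE_FILE (the
 * present bit stays clear, so the CPU ignores the entry) and
 * .pte_high = 0x12345; pte_to_pgoff() just reads the offset back out of
 * pte_high, which is why PTE_FILE_MAX_BITS is 32.
 */
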
/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })

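/*
 * Worked example (added commentary, not in the original header): with 5
 * type bits, __swp_entry(3, 0x100) gives val = 3 | (0x100 << 5) =
 * 0x2003; __swp_type() masks the low 5 bits back out (3) and
 * __swp_offset() shifts them away (0x100). The value is stored entirely
 * in pte_high, so pte_low (and with it the present bit) stays zero and
 * the CPU treats the entry as not present.
 */
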
#endif /* _ASM_X86_PGTABLE_3LEVEL_H */