#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__
#ifndef __HAVE_ARCH_PTEP_ESTABLISH
/*
 * Establish a new mapping:
 *  - flush the old one
 *  - update the page tables
 *  - inform the TLB about the new one
 *
 * We hold the mm semaphore for reading, and the pte lock.
 *
 * Note: the old pte is known to not be writable, so we don't need to
 * worry about dirty bits etc getting lost.
 */
#define ptep_establish(__vma, __address, __ptep, __entry)		\
do {									\
	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	\
	flush_tlb_page(__vma, __address);				\
} while (0)
#endif
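
/*
 * Illustrative sketch (not part of the original header): the locking
 * protocol the comment above describes, written as a hypothetical
 * caller.  Assumes mmap_sem is already held for reading and that the
 * caller has the pmd for @address at hand; real callers live in
 * mm/memory.c.
 */
#if 0
static void example_establish(struct vm_area_struct *vma, pmd_t *pmd,
			      unsigned long address, pte_t entry)
{
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
	ptep_establish(vma, address, ptep, entry);	/* set, then flush */
	pte_unmap_unlock(ptep, ptl);
}
#endif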
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Largely same as above, but only sets the access flags (dirty,
 * accessed, and writable). Furthermore, we know it always gets set
 * to a "more permissive" setting, which allows most architectures
 * to optimize this.
 */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
do {									  \
	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	  \
	flush_tlb_page(__vma, __address);				  \
} while (0)
#endif
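
/*
 * Illustrative sketch: because __entry is strictly more permissive than
 * the PTE it replaces, an architecture that merely takes a spurious
 * fault on a stale, less-permissive TLB entry could override this and
 * skip the flush.  Hypothetical override, for illustration only:
 */
#if 0
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry)
#endif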
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __address, __ptep)		\
({									\
	pte_t __pte = *(__ptep);					\
	int r = 1;							\
	if (!pte_young(__pte))						\
		r = 0;							\
	else								\
		set_pte_at((__vma)->vm_mm, (__address),			\
			   (__ptep), pte_mkold(__pte));			\
	r;								\
})
#endif
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young = ptep_test_and_clear_young(__vma, __address, __ptep); \
	if (__young)							\
		flush_tlb_page(__vma, __address);			\
	__young;							\
})
#endif
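
/*
 * Illustrative sketch: page aging harvests the referenced bit with the
 * helpers above.  Simplified from the pattern in mm/rmap.c
 * (page_referenced_one()); not verbatim:
 */
#if 0
	if (ptep_clear_flush_young(vma, address, pte))
		referenced++;	/* page was touched since the last scan */
#endif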
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define ptep_test_and_clear_dirty(__vma, __address, __ptep)		\
({									\
	pte_t __pte = *__ptep;						\
	int r = 1;							\
	if (!pte_dirty(__pte))						\
		r = 0;							\
	else								\
		set_pte_at((__vma)->vm_mm, (__address), (__ptep),	\
			   pte_mkclean(__pte));				\
	r;								\
})
#endif
#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define ptep_clear_flush_dirty(__vma, __address, __ptep)		\
({									\
	int __dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep); \
	if (__dirty)							\
		flush_tlb_page(__vma, __address);			\
	__dirty;							\
})
#endif
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	pte_clear((__mm), (__address), (__ptep));			\
	__pte;								\
})
#endif
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define ptep_get_and_clear_full(__mm, __address, __ptep, __full)	\
	ptep_get_and_clear((__mm), (__address), (__ptep))
#endif
/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or in the process of an address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(__mm, __address, __ptep, __full)	\
do {									\
	pte_clear((__mm), (__address), (__ptep));			\
} while (0)
#endif
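
/*
 * Illustrative sketch: an architecture override could elide its usual
 * PTE-update synchronization when __full says the whole address space
 * is being torn down.  arch_sync_pte_clear() below is made up purely
 * for illustration:
 */
#if 0
#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(__mm, __address, __ptep, __full)	\
do {									\
	if (__full)							\
		pte_clear((__mm), (__address), (__ptep));		\
	else								\
		arch_sync_pte_clear((__mm), (__address), (__ptep));	\
} while (0)
#endif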
#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define ptep_clear_flush(__vma, __address, __ptep)			\
({									\
	pte_t __pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep); \
	flush_tlb_page(__vma, __address);				\
	__pte;								\
})
#endif
129 #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
131 static inline void ptep_set_wrprotect(struct mm_struct
*mm
, unsigned long address
, pte_t
*ptep
)
133 pte_t old_pte
= *ptep
;
134 set_pte_at(mm
, address
, ptep
, pte_wrprotect(old_pte
));
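
/*
 * Illustrative sketch: fork-time copy-on-write setup write-protects the
 * parent's PTE so that the next write faults and can copy the page.
 * Simplified from copy_one_pte() in mm/memory.c; not verbatim:
 */
#if 0
	if (is_cow_mapping(vm_flags)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = pte_wrprotect(pte);	/* child gets a clean copy too */
	}
#endif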
#ifndef __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(pte_val(A) == pte_val(B))
#endif
#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define page_test_dirty(page)			(0)
#endif

#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define page_clear_dirty(page)			do { } while (0)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define pte_maybe_dirty(pte)			pte_dirty(pte)
#else
#define pte_maybe_dirty(pte)			(1)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page)		(0)
#endif
#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
#define lazy_mmu_prot_update(pte)	do { } while (0)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif
/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window.  Note that using this
 * interface requires that read hazards be removed from the code.  A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.  This mode can only be entered and left under the protection
 * of the page table locks for all page tables which may be modified.  In
 * the UP case, this is required so that preemption is disabled, and in
 * the SMP case, it must synchronize the delayed page table writes properly
 * on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif
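
/*
 * Illustrative sketch: a batched PTE-update loop brackets its work with
 * the hooks above.  Modeled loosely on zap_pte_range()-style walkers;
 * note the loop must not read back through the raw PTE pointer, per the
 * read hazard described in the comment:
 */
#if 0
	arch_enter_lazy_mmu_mode();
	do {
		/* queue updates via set_pte_at()/pte_clear() only; do not
		 * re-read *pte here, the write may still be pending */
		pte_clear(mm, addr, pte);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();	/* flushes the queued updates */
#endif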
/*
 * A facility to provide batching of the reload of page tables with the
 * actual context switch code for paravirtualized guests.  By convention,
 * only one of the lazy modes (CPU, MMU) should be active at any given
 * time, entry should never be nested, and entry and exits should always
 * be paired.  This is for sanity of maintaining and reasoning about the
 * kernel code.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
#define arch_enter_lazy_cpu_mode()	do {} while (0)
#define arch_leave_lazy_cpu_mode()	do {} while (0)
#define arch_flush_lazy_cpu_mode()	do {} while (0)
#endif
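
/*
 * Illustrative sketch of the pairing convention only: enter/leave
 * bracket a context switch and are never nested.  The placement below
 * is hypothetical; the real call sites are in the scheduler and the
 * architecture's switch code:
 */
#if 0
	arch_enter_lazy_cpu_mode();
	switch_mm(oldmm, mm, next);	/* page table reload may be queued */
	switch_to(prev, next, prev);
	arch_leave_lazy_cpu_mode();
#endif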
/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif
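
/*
 * Worked example of the "- 1" comparison above (added note): on i386
 * without PAE, PGDIR_SIZE is 4MB; for addr = 0xffc00000 the rounded-up
 * boundary wraps to 0, and comparing __boundary - 1 (= ULONG_MAX)
 * against (end) - 1 with unsigned arithmetic correctly selects (end)
 * rather than the wrapped boundary.
 */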
/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);
static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
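
/*
 * Illustrative sketch: the canonical range walk combining p?d_addr_end()
 * with p?d_none_or_clear_bad().  Modeled on the walkers in mm/memory.c;
 * simplified, and the helper name is made up:
 */
#if 0
static void example_walk(struct mm_struct *mm,
			 unsigned long addr, unsigned long end)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;	/* skip empty/corrupt entries */
		/* ... descend into the puds covering [addr, next) ... */
	} while (pgd++, addr = next, addr != end);
}
#endif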
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */