/* include/asm-generic/pgtable.h */
#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed, and writable), which we
 * know always get set to a "more permissive" setting; this allows most
 * architectures to optimize the operation.  We return whether the PTE
 * actually changed, which in turn instructs the caller to do things like
 * update_mmu_cache.  This used to be done in the caller, but sparc needs
 * minor faults to force that call on sun4c, so we changed this macro
 * slightly.
 */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									   \
	int __changed = !pte_same(*(__ptep), __entry);			   \
	if (__changed) {						   \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address);			   \
	}								   \
	__changed;							   \
})
#endif
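
/*
 * Illustrative sketch, not part of the original header: how a generic
 * fault handler might consume the return value.  update_mmu_cache() is
 * the usual arch-provided hook; lazy_mmu_prot_update() is the stub
 * defined further down in this file.
 *
 *	if (ptep_set_access_flags(vma, address, ptep, entry, dirty)) {
 *		update_mmu_cache(vma, address, entry);
 *		lazy_mmu_prot_update(entry);
 *	}
 */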

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __address, __ptep)	\
({								\
	pte_t __pte = *(__ptep);				\
	int r = 1;						\
	if (!pte_young(__pte))					\
		r = 0;						\
	else							\
		set_pte_at((__vma)->vm_mm, (__address),		\
			   (__ptep), pte_mkold(__pte));		\
	r;							\
})
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young;							\
	__young = ptep_test_and_clear_young(__vma, __address, __ptep);	\
	if (__young)							\
		flush_tlb_page(__vma, __address);			\
	__young;							\
})
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)		\
({								\
	pte_t __pte = *(__ptep);				\
	pte_clear((__mm), (__address), (__ptep));		\
	__pte;							\
})
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define ptep_get_and_clear_full(__mm, __address, __ptep, __full)	\
({									\
	pte_t __pte;							\
	__pte = ptep_get_and_clear((__mm), (__address), (__ptep));	\
	__pte;								\
})
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or in the process of an address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(__mm, __address, __ptep, __full)	\
do {									\
	pte_clear((__mm), (__address), (__ptep));			\
} while (0)
#endif
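
/*
 * Illustrative sketch, not part of the original header: a teardown
 * path can forward its "whole address space is going away" flag, so an
 * architecture may skip synchronization for entries it knows nobody
 * else can observe.  The loop shape and the full_mm flag name here are
 * hypothetical.
 *
 *	for (; addr != end; ptep++, addr += PAGE_SIZE) {
 *		pte_t pte = *ptep;
 *		if (pte_none(pte))
 *			continue;
 *		if (!pte_present(pte))
 *			pte_clear_not_present_full(mm, addr, ptep, full_mm);
 *	}
 */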

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define ptep_clear_flush(__vma, __address, __ptep)			\
({									\
	pte_t __pte;							\
	__pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep);	\
	flush_tlb_page(__vma, __address);				\
	__pte;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(pte_val(A) == pte_val(B))
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define page_test_dirty(page)		(0)
#endif

#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define page_clear_dirty(page)		do { } while (0)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define pte_maybe_dirty(pte)		pte_dirty(pte)
#else
#define pte_maybe_dirty(pte)		(1)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page) (0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
#define lazy_mmu_prot_update(pte)	do { } while (0)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window.  Note that using this
 * interface requires that read hazards be removed from the code.  A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.  This mode can only be entered and left under the protection
 * of the page table locks for all page tables which may be modified.  In
 * the UP case, this is required so that preemption is disabled, and in the
 * SMP case, it must synchronize the delayed page table writes properly on
 * other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif
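
/*
 * Illustrative sketch, not part of the original header: the intended
 * calling pattern.  The page table lock is held across the whole
 * window, and no PTE written inside it is re-read through a raw
 * pointer.  The loop bounds here are hypothetical.
 *
 *	spin_lock(&mm->page_table_lock);
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr != end; ptep++, addr += PAGE_SIZE)
 *		set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
 *	arch_leave_lazy_mmu_mode();	(batched updates are issued here)
 *	spin_unlock(&mm->page_table_lock);
 */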

/*
 * A facility to provide batching of the reload of page tables with the
 * actual context switch code for paravirtualized guests.  By convention,
 * only one of the lazy modes (CPU, MMU) should be active at any given
 * time, entry should never be nested, and entries and exits should always
 * be paired.  This is for the sanity of maintaining and reasoning about
 * the kernel code.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
#define arch_enter_lazy_cpu_mode()	do {} while (0)
#define arch_leave_lazy_cpu_mode()	do {} while (0)
#define arch_flush_lazy_cpu_mode()	do {} while (0)
#endif
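
/*
 * Illustrative sketch, not part of the original header: a
 * paravirtualized context switch might bracket its state reloads so
 * the hypervisor can batch them into a single transition.  The two
 * loads in the body merely stand in for whatever per-task state the
 * architecture reloads on a switch.
 *
 *	arch_enter_lazy_cpu_mode();
 *	load_TLS(next, cpu);
 *	load_esp0(tss, next);
 *	arch_leave_lazy_cpu_mode();	(the batch is issued here)
 */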

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, a rounded-up __boundary may wrap to 0 throughout.
 */
#ifndef pgd_addr_end
#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif
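
/*
 * Illustrative sketch, not part of the original header: the canonical
 * walker loop these macros are built for.  Comparing (x - 1) values in
 * the macros keeps a __boundary that wrapped to 0 from being returned,
 * so the caller simply runs until addr == end.
 * example_walk_pud_range() is a hypothetical callee.
 *
 *	unsigned long next;
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	do {
 *		next = pgd_addr_end(addr, end);
 *		example_walk_pud_range(pgd, addr, next);
 *	} while (pgd++, addr = next, addr != end);
 */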

/*
 * When walking page tables, we usually want to skip any p?d_none entries
 * and any p?d_bad entries, reporting the error before resetting the entry
 * to none.  Do the tests inline, but report and clear the bad entry in
 * mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
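
/*
 * Illustrative sketch, not part of the original header: how a walker
 * combines the addr_end macros with these tests to skip empty or
 * corrupt entries, while the actual reporting stays out of line in
 * mm/memory.c.  example_walk_pmd_range() is a hypothetical callee.
 *
 *	do {
 *		next = pud_addr_end(addr, end);
 *		if (pud_none_or_clear_bad(pud))
 *			continue;
 *		example_walk_pmd_range(pud, addr, next);
 *	} while (pud++, addr = next, addr != end);
 */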

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */