/*
 * include/asm-x86/page.h
 *
 * NOTE(review): the lines below were web-viewer residue from the extraction
 * (commit title "KVM: Don't call get_user_pages(.force = 1)", repository path,
 * blob hash d4f1d5791fc186f29a9a60d4fe182d80f05038e4) — preserved here as a
 * comment so the file remains valid C.
 */
1 #ifndef ASM_X86__PAGE_H
2 #define ASM_X86__PAGE_H
4 #include <linux/const.h>
6 /* PAGE_SHIFT determines the page size */
7 #define PAGE_SHIFT 12
8 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
9 #define PAGE_MASK (~(PAGE_SIZE-1))
11 #ifdef __KERNEL__
13 #define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
14 #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
16 /* Cast PAGE_MASK to a signed type so that it is sign-extended if
17 virtual addresses are 32-bits but physical addresses are larger
18 (ie, 32-bit PAE). */
19 #define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
21 /* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
22 #define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
24 /* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
25 #define PTE_FLAGS_MASK (~PTE_PFN_MASK)
27 #define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
28 #define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
30 #define HPAGE_SHIFT PMD_SHIFT
31 #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
32 #define HPAGE_MASK (~(HPAGE_SIZE - 1))
33 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
35 #define HUGE_MAX_HSTATE 2
37 #ifndef __ASSEMBLY__
38 #include <linux/types.h>
39 #endif
41 #ifdef CONFIG_X86_64
42 #include <asm/page_64.h>
43 #else
44 #include <asm/page_32.h>
45 #endif /* CONFIG_X86_64 */
47 #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
49 #define VM_DATA_DEFAULT_FLAGS \
50 (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
51 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
54 #ifndef __ASSEMBLY__
56 typedef struct { pgdval_t pgd; } pgd_t;
57 typedef struct { pgprotval_t pgprot; } pgprot_t;
59 extern int page_is_ram(unsigned long pagenr);
60 extern int pagerange_is_ram(unsigned long start, unsigned long end);
61 extern int devmem_is_allowed(unsigned long pagenr);
62 extern void map_devmem(unsigned long pfn, unsigned long size,
63 pgprot_t vma_prot);
64 extern void unmap_devmem(unsigned long pfn, unsigned long size,
65 pgprot_t vma_prot);
67 extern unsigned long max_low_pfn_mapped;
68 extern unsigned long max_pfn_mapped;
70 struct page;
/*
 * Clear a page destined for user space.  On x86 there is no cache
 * aliasing, so vaddr/pg are unused and this is just clear_page().
 */
static inline void clear_user_page(void *page, unsigned long vaddr,
				   struct page *pg)
{
	clear_page(page);
}
/*
 * Copy a page destined for user space.  On x86 there is no cache
 * aliasing, so vaddr/topage are unused and this is just copy_page().
 */
static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
				  struct page *topage)
{
	copy_page(to, from);
}
/* Allocate an already-zeroed highmem user page; __GFP_ZERO avoids a
 * separate clear_user_highpage() pass. */
#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
88 static inline pgd_t native_make_pgd(pgdval_t val)
90 return (pgd_t) { val };
93 static inline pgdval_t native_pgd_val(pgd_t pgd)
95 return pgd.pgd;
98 #if PAGETABLE_LEVELS >= 3
99 #if PAGETABLE_LEVELS == 4
100 typedef struct { pudval_t pud; } pud_t;
102 static inline pud_t native_make_pud(pmdval_t val)
104 return (pud_t) { val };
107 static inline pudval_t native_pud_val(pud_t pud)
109 return pud.pud;
111 #else /* PAGETABLE_LEVELS == 3 */
112 #include <asm-generic/pgtable-nopud.h>
114 static inline pudval_t native_pud_val(pud_t pud)
116 return native_pgd_val(pud.pgd);
118 #endif /* PAGETABLE_LEVELS == 4 */
120 typedef struct { pmdval_t pmd; } pmd_t;
122 static inline pmd_t native_make_pmd(pmdval_t val)
124 return (pmd_t) { val };
127 static inline pmdval_t native_pmd_val(pmd_t pmd)
129 return pmd.pmd;
131 #else /* PAGETABLE_LEVELS == 2 */
132 #include <asm-generic/pgtable-nopmd.h>
134 static inline pmdval_t native_pmd_val(pmd_t pmd)
136 return native_pgd_val(pmd.pud.pgd);
138 #endif /* PAGETABLE_LEVELS >= 3 */
140 static inline pte_t native_make_pte(pteval_t val)
142 return (pte_t) { .pte = val };
145 static inline pteval_t native_pte_val(pte_t pte)
147 return pte.pte;
150 static inline pteval_t native_pte_flags(pte_t pte)
152 return native_pte_val(pte) & PTE_FLAGS_MASK;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )

/* Under paravirt the pXd_val/__pXd accessors may be hypervisor hooks;
 * otherwise they resolve directly to the native_* inlines above. */
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define pte_flags(x)	native_pte_flags(x)
#define __pte(x)	native_make_pte(x)

#endif	/* CONFIG_PARAVIRT */
181 #define __pa(x) __phys_addr((unsigned long)(x))
182 #define __pa_nodebug(x) __phys_addr_nodebug((unsigned long)(x))
183 /* __pa_symbol should be used for C visible symbols.
184 This seems to be the official gcc blessed way to do such arithmetic. */
185 #define __pa_symbol(x) __pa(__phys_reloc_hide((unsigned long)(x)))
187 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
189 #define __boot_va(x) __va(x)
190 #define __boot_pa(x) __pa(x)
193 * virt_to_page(kaddr) returns a valid pointer if and only if
194 * virt_addr_valid(kaddr) returns true.
196 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
197 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
198 extern bool __virt_addr_valid(unsigned long kaddr);
199 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
201 #endif /* __ASSEMBLY__ */
203 #include <asm-generic/memory_model.h>
204 #include <asm-generic/page.h>
206 #define __HAVE_ARCH_GATE_AREA 1
208 #endif /* __KERNEL__ */
209 #endif /* ASM_X86__PAGE_H */