/*
 * x86: page.h: move remaining bits and pieces
 *
 * Source: include/asm-x86/page.h (linux-2.6/verdex.git,
 * blob f65a2ae6e32347d5a03b841f67a900f5b09431e3)
 */
1 #ifndef _ASM_X86_PAGE_H
2 #define _ASM_X86_PAGE_H
4 #include <linux/const.h>
6 /* PAGE_SHIFT determines the page size */
7 #define PAGE_SHIFT 12
8 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
9 #define PAGE_MASK (~(PAGE_SIZE-1))
11 #define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK)
12 #define PTE_MASK PHYSICAL_PAGE_MASK
14 #define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)
15 #define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
17 #define HPAGE_SHIFT PMD_SHIFT
18 #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
19 #define HPAGE_MASK (~(HPAGE_SIZE - 1))
20 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
22 /* to align the pointer to the (next) page boundary */
23 #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
25 #define __PHYSICAL_MASK _AT(phys_addr_t, (_AC(1,ULL) << __PHYSICAL_MASK_SHIFT) - 1)
26 #define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1)
28 #ifndef __ASSEMBLY__
29 #include <linux/types.h>
30 #endif
#ifdef CONFIG_X86_64
#define PAGETABLE_LEVELS	4

/* Kernel thread stack: two pages on 64-bit */
#define THREAD_ORDER	1
#define THREAD_SIZE	(PAGE_SIZE << THREAD_ORDER)
#define CURRENT_MASK	(~(THREAD_SIZE-1))

#define EXCEPTION_STACK_ORDER 0
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)

#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)

#define IRQSTACK_ORDER 2
#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)

/* IST indices for the per-cpu exception stacks */
#define STACKFAULT_STACK 1
#define DOUBLEFAULT_STACK 2
#define NMI_STACK 3
#define DEBUG_STACK 4
#define MCE_STACK 5
#define N_EXCEPTION_STACKS 5  /* hw limit: 7 */

/* Start of the direct mapping of all physical memory */
#define __PAGE_OFFSET           _AC(0xffff810000000000, UL)

#define __PHYSICAL_START	CONFIG_PHYSICAL_START
#define __KERNEL_ALIGN		0x200000

/*
 * Make sure kernel is aligned to 2MB address. Catching it at compile
 * time is better. Change your config file and compile the kernel
 * for a 2MB aligned address (CONFIG_PHYSICAL_START)
 */
#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
#endif

#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map	_AC(0xffffffff80000000, UL)

/* See Documentation/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT	46
#define __VIRTUAL_MASK_SHIFT	48

#define KERNEL_TEXT_SIZE  (40*1024*1024)
#define KERNEL_TEXT_START _AC(0xffffffff80000000, UL)

#ifndef __ASSEMBLY__
void clear_page(void *page);
void copy_page(void *to, void *from);

extern unsigned long end_pfn;
extern unsigned long end_pfn_map;
extern unsigned long phys_base;

/* Out of line on 64-bit: the kernel-text mapping needs special casing */
extern unsigned long __phys_addr(unsigned long);
#define __phys_reloc_hide(x)	(x)

/*
 * These are used to make use of C type-checking..
 */
typedef unsigned long	pteval_t;
typedef unsigned long	pmdval_t;
typedef unsigned long	pudval_t;
typedef unsigned long	pgdval_t;
typedef unsigned long	pgprotval_t;
typedef unsigned long	phys_addr_t;

typedef struct { pteval_t pte; } pte_t;

#define native_pte_val(x)	((x).pte)
#define native_make_pte(x)	((pte_t) { (x) } )

#define vmemmap ((struct page *)VMEMMAP_START)

#endif	/* !__ASSEMBLY__ */
#endif	/* CONFIG_X86_64 */
111 #ifdef CONFIG_X86_32
114 * This handles the memory map.
116 * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
117 * a virtual address space of one gigabyte, which limits the
118 * amount of physical memory you can use to about 950MB.
120 * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
121 * and CONFIG_HIGHMEM64G options in the kernel configuration.
123 #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#ifdef CONFIG_X86_PAE
#define __PHYSICAL_MASK_SHIFT	36
#define __VIRTUAL_MASK_SHIFT	32
#define PAGETABLE_LEVELS	3

#ifndef __ASSEMBLY__
/* With PAE, page table entries are 64 bits wide */
typedef u64	pteval_t;
typedef u64	pmdval_t;
typedef u64	pudval_t;
typedef u64	pgdval_t;
typedef u64	pgprotval_t;
typedef u64	phys_addr_t;

/* Stored as two 32-bit halves so a pte can be accessed word-wise */
typedef struct { unsigned long pte_low, pte_high; } pte_t;

static inline unsigned long long native_pte_val(pte_t pte)
{
	return pte.pte_low | ((unsigned long long)pte.pte_high << 32);
}

static inline pte_t native_make_pte(unsigned long long val)
{
	return (pte_t) { .pte_low = val, .pte_high = (val >> 32) };
}

#endif	/* !__ASSEMBLY__ */

#else  /* !CONFIG_X86_PAE */
#define __PHYSICAL_MASK_SHIFT	32
#define __VIRTUAL_MASK_SHIFT	32
#define PAGETABLE_LEVELS	2

#ifndef __ASSEMBLY__
/* Non-PAE: 32-bit page table entries */
typedef unsigned long	pteval_t;
typedef unsigned long	pmdval_t;
typedef unsigned long	pudval_t;
typedef unsigned long	pgdval_t;
typedef unsigned long	pgprotval_t;
typedef unsigned long	phys_addr_t;

typedef struct { pteval_t pte_low; } pte_t;
typedef pte_t boot_pte_t;

static inline unsigned long native_pte_val(pte_t pte)
{
	return pte.pte_low;
}

static inline pte_t native_make_pte(unsigned long val)
{
	return (pte_t) { .pte_low = val };
}

#endif	/* !__ASSEMBLY__ */
#endif	/* CONFIG_X86_PAE */
181 #ifdef CONFIG_HUGETLB_PAGE
182 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
183 #endif
185 #ifndef __ASSEMBLY__
186 #define __phys_addr(x) ((x)-PAGE_OFFSET)
187 #define __phys_reloc_hide(x) RELOC_HIDE((x), 0)
189 #ifdef CONFIG_FLATMEM
190 #define pfn_valid(pfn) ((pfn) < max_mapnr)
191 #endif /* CONFIG_FLATMEM */
193 extern int nx_enabled;
196 * This much address space is reserved for vmalloc() and iomap()
197 * as well as fixmap mappings.
199 extern unsigned int __VMALLOC_RESERVE;
200 extern int sysctl_legacy_va_layout;
201 extern int page_is_ram(unsigned long pagenr);
203 #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
204 #define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
206 #ifdef CONFIG_X86_USE_3DNOW
207 #include <asm/mmx.h>
209 static inline void clear_page(void *page)
211 mmx_clear_page(page);
214 static inline void copy_page(void *to, void *from)
216 mmx_copy_page(to, from);
218 #else /* !CONFIG_X86_USE_3DNOW */
219 #include <linux/string.h>
221 static inline void clear_page(void *page)
223 memset(page, 0, PAGE_SIZE);
226 static inline void copy_page(void *to, void *from)
228 memcpy(to, from, PAGE_SIZE);
230 #endif /* CONFIG_X86_3DNOW */
231 #endif /* !__ASSEMBLY__ */
233 #endif /* CONFIG_X86_32 */
235 #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
237 #define VM_DATA_DEFAULT_FLAGS \
238 (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
239 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
242 #ifndef __ASSEMBLY__
243 struct page;
245 static void inline clear_user_page(void *page, unsigned long vaddr,
246 struct page *pg)
248 clear_page(page);
251 static void inline copy_user_page(void *to, void *from, unsigned long vaddr,
252 struct page *topage)
254 copy_page(to, from);
257 #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
258 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
259 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
261 typedef struct { pgdval_t pgd; } pgd_t;
262 typedef struct { pgprotval_t pgprot; } pgprot_t;
264 static inline pgd_t native_make_pgd(pgdval_t val)
266 return (pgd_t) { val };
269 static inline pgdval_t native_pgd_val(pgd_t pgd)
271 return pgd.pgd;
274 #if PAGETABLE_LEVELS >= 3
275 #if PAGETABLE_LEVELS == 4
276 typedef struct { pudval_t pud; } pud_t;
278 static inline pud_t native_make_pud(pmdval_t val)
280 return (pud_t) { val };
283 static inline pudval_t native_pud_val(pud_t pud)
285 return pud.pud;
287 #else /* PAGETABLE_LEVELS == 3 */
288 #include <asm-generic/pgtable-nopud.h>
289 #endif /* PAGETABLE_LEVELS == 4 */
291 typedef struct { pmdval_t pmd; } pmd_t;
293 static inline pmd_t native_make_pmd(pmdval_t val)
295 return (pmd_t) { val };
298 static inline pmdval_t native_pmd_val(pmd_t pmd)
300 return pmd.pmd;
302 #else /* PAGETABLE_LEVELS == 2 */
303 #include <asm-generic/pgtable-nopmd.h>
304 #endif /* PAGETABLE_LEVELS >= 3 */
306 #define pgprot_val(x) ((x).pgprot)
307 #define __pgprot(x) ((pgprot_t) { (x) } )
/* When paravirt is enabled the pte/pgd accessors are hooked; otherwise
 * they resolve directly to the native helpers defined above. */
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#endif	/* CONFIG_PARAVIRT */
331 #define __pa(x) __phys_addr((unsigned long)(x))
332 /* __pa_symbol should be used for C visible symbols.
333 This seems to be the official gcc blessed way to do such arithmetic. */
334 #define __pa_symbol(x) __pa(__phys_reloc_hide((unsigned long)(x)))
336 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
338 #define __boot_va(x) __va(x)
339 #define __boot_pa(x) __pa(x)
341 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
342 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
343 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
345 #endif /* __ASSEMBLY__ */
347 #include <asm-generic/memory_model.h>
348 #include <asm-generic/page.h>
350 #define __HAVE_ARCH_GATE_AREA 1
352 #ifdef CONFIG_X86_32
353 # include "page_32.h"
354 #else
355 # include "page_64.h"
356 #endif
358 #endif /* _ASM_X86_PAGE_H */