linux-2.6/x86.git: include/asm-x86_64/page.h (blob 4d04e2479569e64963f27f4bf5d2f44fe186238b)

#ifndef _X86_64_PAGE_H
#define _X86_64_PAGE_H

#include <asm/const.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
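/*
 * Illustrative note (added commentary, not in the original header): with
 * PAGE_SHIFT = 12, PAGE_SIZE is 4096 bytes and PAGE_MASK clears the low
 * 12 bits, e.g.
 *
 *	0x12345678UL & PAGE_MASK	== 0x12345000
 *	0x12345678UL & ~PAGE_MASK	== 0x678
 *
 * PHYSICAL_PAGE_MASK additionally drops bits above the 46-bit physical
 * address limit (__PHYSICAL_MASK, defined further down).
 */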

#define THREAD_ORDER 1
#define THREAD_SIZE  (PAGE_SIZE << THREAD_ORDER)
#define CURRENT_MASK (~(THREAD_SIZE-1))

#define EXCEPTION_STACK_ORDER 0
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)

#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)

#define IRQSTACK_ORDER 2
#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)

#define STACKFAULT_STACK 1
#define DOUBLEFAULT_STACK 2
#define NMI_STACK 3
#define DEBUG_STACK 4
#define MCE_STACK 5
#define N_EXCEPTION_STACKS 5  /* hw limit: 7 */
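/*
 * Illustrative note (added commentary, not in the original header): the
 * *_STACK constants above are 1-based indexes for the per-CPU exception
 * stacks; they are presumably wired into the TSS IST slots so that NMIs,
 * double faults etc. run on their own known-good stacks. The CPU offers
 * up to 7 IST entries, hence the "hw limit: 7" remark.
 */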

#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)

#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
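/*
 * Illustrative note (added commentary, not in the original header): PMD_SHIFT
 * is defined elsewhere and is 21 on x86-64, so LARGE_PAGE_SIZE and HPAGE_SIZE
 * come out to 2 MB and HUGETLB_PAGE_ORDER to 9 (one huge page = 512 base pages).
 */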

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

extern unsigned long end_pfn;

void clear_page(void *);
void copy_page(void *, void *);

#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)

#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pgd; } pgd_t;
#define PTE_MASK PHYSICAL_PAGE_MASK

typedef struct { unsigned long pgprot; } pgprot_t;

extern unsigned long phys_base;

#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).pmd)
#define pud_val(x) ((x).pud)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)

#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pud(x) ((pud_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
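/*
 * Illustrative sketch (added commentary, not in the original header): the
 * single-member structs above exist purely so the compiler can tell the
 * page-table levels apart; the macros convert between raw and typed forms:
 *
 *	pte_t pte = __pte(0x1000 | 0x1);	// raw bits -> typed pte
 *	unsigned long raw = pte_val(pte);	// typed pte -> raw bits
 *
 * Passing a pmd_t where a pte_t is expected then fails at compile time.
 */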

#endif /* !__ASSEMBLY__ */

#define __PHYSICAL_START CONFIG_PHYSICAL_START
#define __KERNEL_ALIGN 0x200000
#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map 0xffffffff80000000
#define __PAGE_OFFSET 0xffff810000000000

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
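/*
 * Worked example (added commentary, not in the original header), with 4 KB pages:
 *
 *	PAGE_ALIGN(0x1001UL) == 0x2000	// rounded up to the next page
 *	PAGE_ALIGN(0x2000UL) == 0x2000	// already aligned, unchanged
 *
 * As written the macro does integer arithmetic, so 'addr' should be an
 * unsigned long rather than a pointer.
 */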

/* See Documentation/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT 46
#define __PHYSICAL_MASK ((_AC(1,UL) << __PHYSICAL_MASK_SHIFT) - 1)
#define __VIRTUAL_MASK_SHIFT 48
#define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1)

#define KERNEL_TEXT_SIZE  (40*1024*1024)
#define KERNEL_TEXT_START 0xffffffff80000000
#define PAGE_OFFSET __PAGE_OFFSET

#ifndef __ASSEMBLY__

#include <asm/bug.h>

extern unsigned long __phys_addr(unsigned long);

#endif /* __ASSEMBLY__ */

#define __pa(x) __phys_addr((unsigned long)(x))
#define __pa_symbol(x) __phys_addr((unsigned long)(x))

#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define __boot_va(x) __va(x)
#define __boot_pa(x) __pa(x)
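/*
 * Illustrative note (added commentary, not in the original header): __va()
 * turns a physical address into its direct-mapping virtual address by adding
 * PAGE_OFFSET; __pa() goes the other way through __phys_addr(), which is
 * expected to also handle addresses in the kernel text mapping at
 * __START_KERNEL_map. Sketch:
 *
 *	void *v = __va(0x100000UL);	// direct-map address of physical 1 MB
 *	unsigned long p = __pa(v);	// back to 0x100000
 */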
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) ((pfn) < end_pfn)
#endif

#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
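/*
 * Illustrative note (added commentary, not in the original header): these
 * helpers compose the conversions above with page-frame numbers, e.g.
 *
 *	struct page *pg = virt_to_page(v);		// kernel vaddr -> struct page
 *	void *back = pfn_to_kaddr(page_to_pfn(pg));	// pfn -> kernel vaddr
 *
 * pfn_valid() here is the simple FLATMEM bound check against end_pfn.
 */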

#define VM_DATA_DEFAULT_FLAGS \
	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define __HAVE_ARCH_GATE_AREA 1

#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>

#endif /* __KERNEL__ */

#endif /* _X86_64_PAGE_H */