include/asm-powerpc/page_64.h
#ifndef _ASM_POWERPC_PAGE_64_H
#define _ASM_POWERPC_PAGE_64_H
#ifdef __KERNEL__

/*
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * We always define HW_PAGE_SHIFT to 12 as use of 64K pages remains Linux
 * specific, every notion of page number shared with the firmware, TCEs,
 * iommu, etc... still uses a page size of 4K.
 */
#define HW_PAGE_SHIFT		12
#define HW_PAGE_SIZE		(ASM_CONST(1) << HW_PAGE_SHIFT)
#define HW_PAGE_MASK		(~(HW_PAGE_SIZE-1))

/*
 * PAGE_FACTOR is the number of bits by which PAGE_SHIFT exceeds
 * HW_PAGE_SHIFT, i.e. the shift between the Linux page size and the
 * 4K hardware page size.
 */
#define PAGE_FACTOR		(PAGE_SHIFT - HW_PAGE_SHIFT)
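/*
 * Illustrative arithmetic (assuming a CONFIG_PPC_64K_PAGES kernel,
 * where PAGE_SHIFT is 16): PAGE_FACTOR = 16 - 12 = 4, so one Linux
 * page spans 1 << 4 = 16 hardware 4K pages.  With 4K Linux pages,
 * PAGE_SHIFT is 12 and PAGE_FACTOR is 0.
 */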

/* Segment size */
#define SID_SHIFT		28
#define SID_MASK		0xfffffffffUL
#define ESID_MASK		0xfffffffff0000000UL
#define GET_ESID(x)		(((x) >> SID_SHIFT) & SID_MASK)
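/*
 * Illustrative example (address chosen arbitrarily): segments are
 * 256MB (1UL << SID_SHIFT), so GET_ESID(0x123456789UL) == 0x12,
 * i.e. that address lies in segment number 0x12.
 */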

#ifndef __ASSEMBLY__
#include <asm/cache.h>

typedef unsigned long pte_basic_t;
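/*
 * Note on the routine below: clear_page() zeroes a page by issuing
 * dcbz (data cache block zero) once per data cache line; the line
 * count from ppc64_caches.dlines_per_page is loaded into CTR and the
 * address is advanced by ppc64_caches.dline_size on each iteration.
 */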
static __inline__ void clear_page(void *addr)
{
	unsigned long lines, line_size;

	line_size = ppc64_caches.dline_size;
	lines = ppc64_caches.dlines_per_page;

	__asm__ __volatile__(
	"mtctr	%1	# clear_page\n\
1:      dcbz	0,%0\n\
	add	%0,%0,%3\n\
	bdnz+	1b"
	: "=r" (addr)
	: "r" (lines), "0" (addr), "r" (line_size)
	: "ctr", "memory");
}

extern void copy_4K_page(void *to, void *from);
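/*
 * Sketch of the arithmetic below (assuming PAGE_SHIFT is 16 when
 * CONFIG_PPC_64K_PAGES is set): copy_page() then performs
 * 1 << (16 - 12) = 16 copy_4K_page() calls, advancing both pointers
 * by 4096 bytes each time; otherwise a single 4K copy covers the
 * whole Linux page.
 */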

#ifdef CONFIG_PPC_64K_PAGES
static inline void copy_page(void *to, void *from)
{
	unsigned int i;
	for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
		copy_4K_page(to, from);
		to += 4096;
		from += 4096;
	}
}
#else /* CONFIG_PPC_64K_PAGES */
static inline void copy_page(void *to, void *from)
{
	copy_4K_page(to, from);
}
#endif /* CONFIG_PPC_64K_PAGES */

/* Log 2 of page table size */
extern u64 ppc64_pft_size;

/* Large page size */
#ifdef CONFIG_HUGETLB_PAGE
extern unsigned int HPAGE_SHIFT;
#else
#define HPAGE_SHIFT PAGE_SHIFT
#endif
#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
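/*
 * Illustrative numbers: with 16MB huge pages (cf. CPU_FTR_16M_PAGE
 * below) HPAGE_SHIFT is 24, so on a 4K base-page kernel
 * (PAGE_SHIFT == 12) HUGETLB_PAGE_ORDER evaluates to 24 - 12 = 12.
 */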

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_HUGETLB_PAGE

#define HTLB_AREA_SHIFT		40
#define HTLB_AREA_SIZE		(1UL << HTLB_AREA_SHIFT)
#define GET_HTLB_AREA(x)	((x) >> HTLB_AREA_SHIFT)

#define LOW_ESID_MASK(addr, len) \
	(((1U << (GET_ESID(min((addr)+(len)-1, 0x100000000UL))+1)) \
	  - (1U << GET_ESID(min((addr), 0x100000000UL)))) & 0xffff)
#define HTLB_AREA_MASK(addr, len) \
	(((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
	  - (1U << GET_HTLB_AREA(addr))) & 0xffff)
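/*
 * Worked example (values chosen for illustration):
 * LOW_ESID_MASK(0x10000000UL, 0x10000000UL) covers exactly the second
 * 256MB segment and evaluates to (1U << 2) - (1U << 1) == 0x2, i.e.
 * bit 1 of the low_htlb_areas bitmap.  HTLB_AREA_MASK() builds the
 * analogous bitmap for 1TB (1UL << HTLB_AREA_SHIFT) areas above 4GB.
 */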

#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
#define ARCH_HAS_SETCLEAR_HUGE_PTE
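
/*
 * Summary of the helpers below: the touches_hugepage_*_range() macros
 * test whether a range overlaps any segment/area marked for huge pages
 * in the mm context bitmaps, while the within_hugepage_*_range()
 * macros test that the range lies entirely inside such segments/areas.
 */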
#define touches_hugepage_low_range(mm, addr, len) \
	(((addr) < 0x100000000UL) \
	 && (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas))
#define touches_hugepage_high_range(mm, addr, len) \
	((((addr) + (len)) > 0x100000000UL) \
	 && (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas))

#define __within_hugepage_low_range(addr, len, segmask) \
	((((addr)+(len)) <= 0x100000000UL) \
	 && ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask)))
#define within_hugepage_low_range(addr, len) \
	__within_hugepage_low_range((addr), (len), \
				    current->mm->context.low_htlb_areas)
#define __within_hugepage_high_range(addr, len, zonemask) \
	(((addr) >= 0x100000000UL) \
	 && ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask)))
#define within_hugepage_high_range(addr, len) \
	__within_hugepage_high_range((addr), (len), \
				     current->mm->context.high_htlb_areas)

#define is_hugepage_only_range(mm, addr, len) \
	(touches_hugepage_high_range((mm), (addr), (len)) || \
	 touches_hugepage_low_range((mm), (addr), (len)))
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#define in_hugepage_area(context, addr) \
	(cpu_has_feature(CPU_FTR_16M_PAGE) && \
	 (((addr) >= 0x100000000UL) \
	  ? ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) \
	  : ((1 << GET_ESID(addr)) & (context).low_htlb_areas)))
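/*
 * Note: in_hugepage_area() tests a single address against the
 * per-context bitmaps, using high_htlb_areas (1TB areas) for addresses
 * at or above 4GB and low_htlb_areas (256MB segments) below that, and
 * only when the CPU supports 16M pages.
 */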

#else /* !CONFIG_HUGETLB_PAGE */

#define in_hugepage_area(mm, addr)	0

#endif /* !CONFIG_HUGETLB_PAGE */

#ifdef MODULE
#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
#else
#define __page_aligned \
	__attribute__((__aligned__(PAGE_SIZE), \
		       __section__(".data.page_aligned")))
#endif
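/*
 * Hypothetical usage (my_buffer is an invented name, shown only to
 * illustrate the attribute):
 *
 *	static char my_buffer[PAGE_SIZE] __page_aligned;
 *
 * The object gets PAGE_SIZE alignment; built-in code additionally
 * lands in the .data.page_aligned section, while modules only get the
 * alignment attribute.
 */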

#define VM_DATA_DEFAULT_FLAGS \
	(test_thread_flag(TIF_32BIT) ? \
	 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)

/*
 * This is the default if a program doesn't have a PT_GNU_STACK
 * program header entry. The PPC64 ELF ABI has a non-executable stack
 * by default, so in the absence of a PT_GNU_STACK program header we
 * turn execute permission off.
 */
#define VM_STACK_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_STACK_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_STACK_DEFAULT_FLAGS \
	(test_thread_flag(TIF_32BIT) ? \
	 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)

#include <asm-generic/page.h>

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PAGE_64_H */