/* include/asm-ppc64/page.h */
#ifndef _PPC64_PAGE_H
#define _PPC64_PAGE_H
/*
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#ifdef __ASSEMBLY__
#define ASM_CONST(x)   x
#else
#define __ASM_CONST(x) x##UL
#define ASM_CONST(x)   __ASM_CONST(x)
#endif
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (ASM_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE-1))
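/*
 * For example, with PAGE_SHIFT = 12 the page size is 0x1000 (4 KiB) and
 * PAGE_MASK is 0xfffffffffffff000, so (addr & PAGE_MASK) rounds an
 * address down to its page boundary: 0x12345 & PAGE_MASK == 0x12000.
 */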
#define SID_SHIFT   28
#define SID_MASK    0xfffffffffUL
#define ESID_MASK   0xfffffffff0000000UL
#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
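/*
 * Segments are 256 MB (1 << SID_SHIFT), so for example
 * GET_ESID(0x30000000) == 3: the address lies in the fourth 256 MB
 * segment of the address space.
 */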
#define HPAGE_SHIFT 24
#define HPAGE_SIZE  ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK  (~(HPAGE_SIZE - 1))
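/* With HPAGE_SHIFT = 24 a huge page is 16 MB, i.e. 4096 base pages. */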
#ifdef CONFIG_HUGETLB_PAGE

#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)

#define HTLB_AREA_SHIFT  40
#define HTLB_AREA_SIZE   (1UL << HTLB_AREA_SHIFT)
#define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT)
#define LOW_ESID_MASK(addr, len)  (((1U << (GET_ESID(addr+len-1)+1)) \
                                    - (1U << GET_ESID(addr))) & 0xffff)
#define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
                                    - (1U << GET_HTLB_AREA(addr))) & 0xffff)
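/*
 * Worked example: for addr = 0x10000000 and len = 0x20000000,
 * GET_ESID(addr) == 1 and GET_ESID(addr+len-1) == 2, so
 * LOW_ESID_MASK == (1 << 3) - (1 << 1) == 0x6, a bitmap with bits 1
 * and 2 set: one bit per 256 MB segment the range touches.
 */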
#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define touches_hugepage_low_range(mm, addr, len) \
	(LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)
#define touches_hugepage_high_range(mm, addr, len) \
	(HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)

#define __within_hugepage_low_range(addr, len, segmask) \
	((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))
#define within_hugepage_low_range(addr, len) \
	__within_hugepage_low_range((addr), (len), \
				    current->mm->context.low_htlb_areas)
#define __within_hugepage_high_range(addr, len, zonemask) \
	((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask))
#define within_hugepage_high_range(addr, len) \
	__within_hugepage_high_range((addr), (len), \
				     current->mm->context.high_htlb_areas)
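/*
 * Note the asymmetry: "touches" is true if any segment of
 * [addr, addr+len) is a hugepage segment, while "within" is true only
 * if every segment the range covers is already in the mask (ORing the
 * range's mask into it changes nothing).
 */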
#define is_hugepage_only_range(mm, addr, len) \
	(touches_hugepage_high_range((mm), (addr), (len)) || \
	 touches_hugepage_low_range((mm), (addr), (len)))
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#define in_hugepage_area(context, addr) \
	(cpu_has_feature(CPU_FTR_16M_PAGE) && \
	 (((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \
	  (((addr) < 0x100000000L) && \
	   ((1 << GET_ESID(addr)) & (context).low_htlb_areas))))
#else /* !CONFIG_HUGETLB_PAGE */

#define in_hugepage_area(mm, addr) 0

#endif /* !CONFIG_HUGETLB_PAGE */
/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr,size)   (((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size) _ALIGN_UP(addr,size)

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
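/*
 * For example, _ALIGN_UP(0x1234, 0x1000) == 0x2000 and
 * _ALIGN_DOWN(0x1234, 0x1000) == 0x1000; size must be a power of two
 * for the mask arithmetic to hold.
 */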
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <asm/cache.h>

#undef STRICT_MM_TYPECHECKS

#define REGION_SIZE  4UL
#define REGION_SHIFT 60UL
#define REGION_MASK  (((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
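/*
 * Zero a page by issuing "dcbz" (data cache block zero) once per data
 * cache line rather than storing word by word: e.g. with 128-byte
 * lines and a 4 KB page the loop below runs 4096 / 128 = 32 times.
 */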
static __inline__ void clear_page(void *addr)
{
	unsigned long lines, line_size;

	line_size = ppc64_caches.dline_size;
	lines = ppc64_caches.dlines_per_page;

	__asm__ __volatile__(
	"mtctr	%1	# clear_page\n\
1:	dcbz	0,%0\n\
	add	%0,%0,%3\n\
	bdnz+	1b"
	: "=r" (addr)
	: "r" (lines), "0" (addr), "r" (line_size)
	: "ctr", "memory");
}
extern void copy_page(void *to, void *from);
struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p);
#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking.  On ppc64 all page
 * table entries (pte, pmd, pud, pgd) are 64 bits wide.
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)    ((x).pte)
#define pmd_val(x)    ((x).pmd)
#define pud_val(x)    ((x).pud)
#define pgd_val(x)    ((x).pgd)
#define pgprot_val(x) ((x).pgprot)

#define __pte(x)    ((pte_t) { (x) })
#define __pmd(x)    ((pmd_t) { (x) })
#define __pud(x)    ((pud_t) { (x) })
#define __pgd(x)    ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })
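/*
 * The struct wrappers make mixing raw values and typed entries a
 * compile error, e.g.:
 *
 *	pte_t pte = __pte(0x123);
 *	unsigned long v = pte;           <-- error: incompatible types
 *	unsigned long w = pte_val(pte);  <-- ok
 */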
#else

/*
 * .. while these make it easier on the compiler
 */
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pud_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;

#define pte_val(x)    (x)
#define pmd_val(x)    (x)
#define pud_val(x)    (x)
#define pgd_val(x)    (x)
#define pgprot_val(x) (x)

#define __pte(x)    (x)
#define __pmd(x)    (x)
#define __pud(x)    (x)
#define __pgd(x)    (x)
#define __pgprot(x) (x)

#endif
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
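/*
 * The kernel linear mapping is a fixed offset, so for example
 * __pa(0xC000000000001000) == 0x1000 given
 * PAGE_OFFSET = 0xC000000000000000 (defined below).
 */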
extern int page_is_ram(unsigned long pfn);

extern u64 ppc64_pft_size;	/* Log 2 of page table size */

/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA 1
#endif /* __ASSEMBLY__ */
#ifdef MODULE
#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
#else
#define __page_aligned \
	__attribute__((__aligned__(PAGE_SIZE), \
		__section__(".data.page_aligned")))
#endif
/*
 * This must match the -Ttext linker address.
 * Note: tophys & tovirt make assumptions about how KERNELBASE is
 * defined for performance reasons.  When KERNELBASE moves, those
 * macros may have to change!
 */
#define PAGE_OFFSET  ASM_CONST(0xC000000000000000)
#define KERNELBASE   PAGE_OFFSET
#define VMALLOCBASE  ASM_CONST(0xD000000000000000)
#define VMALLOC_REGION_ID (VMALLOCBASE >> REGION_SHIFT)
#define KERNEL_REGION_ID  (KERNELBASE >> REGION_SHIFT)
#define USER_REGION_ID    (0UL)
#define REGION_ID(ea)     (((unsigned long)(ea)) >> REGION_SHIFT)
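/*
 * The top four bits of an effective address select its region:
 * REGION_ID(0xC...) == 0xC == KERNEL_REGION_ID (linear mapping),
 * REGION_ID(0xD...) == 0xD == VMALLOC_REGION_ID, and any user
 * address gives 0 == USER_REGION_ID.
 */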
#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
#ifdef CONFIG_DISCONTIGMEM
#define page_to_pfn(page) discontigmem_page_to_pfn(page)
#define pfn_to_page(pfn)  discontigmem_pfn_to_page(pfn)
#define pfn_valid(pfn)    discontigmem_pfn_valid(pfn)
#endif
#ifdef CONFIG_FLATMEM
#define pfn_to_page(pfn)  (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define pfn_valid(pfn)    ((pfn) < max_mapnr)
#endif
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)   __va((pfn) << PAGE_SHIFT)

#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
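/*
 * Example: under CONFIG_FLATMEM, virt_to_page(0xC000000000002000)
 * computes __pa == 0x2000 and pfn == 2, yielding &mem_map[2].
 */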
/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable.  This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS \
	(test_thread_flag(TIF_32BIT) ? \
	 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
/*
 * This is the default if a program doesn't have a PT_GNU_STACK
 * program header entry.  The PPC64 ELF ABI has a non-executable stack
 * by default, so in the absence of a PT_GNU_STACK program header we
 * turn execute permission off.
 */
#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
				  VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
				  VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_STACK_DEFAULT_FLAGS \
	(test_thread_flag(TIF_32BIT) ? \
	 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
#endif /* __KERNEL__ */

#include <asm-generic/page.h>

#endif /* _PPC64_PAGE_H */