[PATCH] powerpc: Replace VMALLOCBASE with VMALLOC_START
include/asm-powerpc/page_64.h
#ifndef _ASM_POWERPC_PAGE_64_H
#define _ASM_POWERPC_PAGE_64_H
/*
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * We always define HW_PAGE_SHIFT to 12 as use of 64K pages remains Linux
 * specific; every notion of page number shared with the firmware, TCEs,
 * iommu, etc. still uses a page size of 4K.
 */
#define HW_PAGE_SHIFT		12
#define HW_PAGE_SIZE		(ASM_CONST(1) << HW_PAGE_SHIFT)
#define HW_PAGE_MASK		(~(HW_PAGE_SIZE-1))
/*
 * PAGE_FACTOR is the number of bits by which PAGE_SHIFT exceeds
 * HW_PAGE_SHIFT, i.e. the shift between Linux pages and 4K hardware pages.
 */
#define PAGE_FACTOR		(PAGE_SHIFT - HW_PAGE_SHIFT)
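
/*
 * Worked example (illustrative): with CONFIG_PPC_64K_PAGES, PAGE_SHIFT
 * is 16, so PAGE_FACTOR = 16 - 12 = 4 and one Linux page covers
 * 1 << PAGE_FACTOR = 16 hardware 4K pages.
 */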

/* Segment size */
#define SID_SHIFT		28
#define SID_MASK		0xfffffffffUL
#define ESID_MASK		0xfffffffff0000000UL
#define GET_ESID(x)		(((x) >> SID_SHIFT) & SID_MASK)
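
/*
 * Illustrative example: segments are 1UL << SID_SHIFT = 256MB each, so
 * GET_ESID(0x12345678) == 1 (second segment), and masking with ESID_MASK
 * keeps only the segment base: 0x12345678 & ESID_MASK == 0x10000000.
 */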

#ifndef __ASSEMBLY__
#include <asm/cache.h>

typedef unsigned long pte_basic_t;

static __inline__ void clear_page(void *addr)
{
	unsigned long lines, line_size;

	line_size = ppc64_caches.dline_size;
	lines = ppc64_caches.dlines_per_page;

	__asm__ __volatile__(
	"mtctr	%1	# clear_page\n\
1:      dcbz	0,%0\n\
	add	%0,%0,%3\n\
	bdnz+	1b"
	: "=r" (addr)
	: "r" (lines), "0" (addr), "r" (line_size)
	: "ctr", "memory");
}

extern void copy_4K_page(void *to, void *from);

#ifdef CONFIG_PPC_64K_PAGES
static inline void copy_page(void *to, void *from)
{
	unsigned int i;
	for (i = 0; i < (1 << (PAGE_SHIFT - 12)); i++) {
		copy_4K_page(to, from);
		to += 4096;
		from += 4096;
	}
}
#else /* CONFIG_PPC_64K_PAGES */
static inline void copy_page(void *to, void *from)
{
	copy_4K_page(to, from);
}
#endif /* CONFIG_PPC_64K_PAGES */
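
/*
 * Usage note (illustrative): with 64K pages the loop above runs
 * 1 << (16 - 12) = 16 times, copying one 4K hardware page per pass;
 * with 4K pages copy_page() reduces to a single copy_4K_page() call.
 */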

/* Log 2 of page table size */
extern u64 ppc64_pft_size;

/* Large page size */
#ifdef CONFIG_HUGETLB_PAGE
extern unsigned int HPAGE_SHIFT;
#else
#define HPAGE_SHIFT PAGE_SHIFT
#endif
#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
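
/*
 * Worked example (illustrative): for the 16M huge pages selected by
 * CPU_FTR_16M_PAGE below, HPAGE_SHIFT is 24; with 4K base pages
 * (PAGE_SHIFT = 12) this gives HUGETLB_PAGE_ORDER = 12, i.e. one huge
 * page spans 1 << 12 = 4096 base pages.
 */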

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_HUGETLB_PAGE

#define HTLB_AREA_SHIFT		40
#define HTLB_AREA_SIZE		(1UL << HTLB_AREA_SHIFT)
#define GET_HTLB_AREA(x)	((x) >> HTLB_AREA_SHIFT)

#define LOW_ESID_MASK(addr, len)	\
	(((1U << (GET_ESID(min((addr)+(len)-1, 0x100000000UL))+1)) \
	  - (1U << GET_ESID(min((addr), 0x100000000UL)))) & 0xffff)
#define HTLB_AREA_MASK(addr, len)	(((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
					  - (1U << GET_HTLB_AREA(addr))) & 0xffff)

#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
#define ARCH_HAS_SETCLEAR_HUGE_PTE

#define touches_hugepage_low_range(mm, addr, len) \
	(((addr) < 0x100000000UL) \
	 && (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas))
#define touches_hugepage_high_range(mm, addr, len) \
	((((addr) + (len)) > 0x100000000UL) \
	 && (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas))

#define __within_hugepage_low_range(addr, len, segmask) \
	((((addr)+(len)) <= 0x100000000UL) \
	 && ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask)))
#define within_hugepage_low_range(addr, len) \
	__within_hugepage_low_range((addr), (len), \
				    current->mm->context.low_htlb_areas)
#define __within_hugepage_high_range(addr, len, zonemask) \
	(((addr) >= 0x100000000UL) \
	 && ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask)))
#define within_hugepage_high_range(addr, len) \
	__within_hugepage_high_range((addr), (len), \
				     current->mm->context.high_htlb_areas)

#define is_hugepage_only_range(mm, addr, len) \
	(touches_hugepage_high_range((mm), (addr), (len)) || \
	 touches_hugepage_low_range((mm), (addr), (len)))
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#define in_hugepage_area(context, addr) \
	(cpu_has_feature(CPU_FTR_16M_PAGE) && \
	 (((addr) >= 0x100000000UL) \
	  ? ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) \
	  : ((1 << GET_ESID(addr)) & (context).low_htlb_areas)))

#else /* !CONFIG_HUGETLB_PAGE */

#define in_hugepage_area(mm, addr)	0

#endif /* !CONFIG_HUGETLB_PAGE */

#ifdef MODULE
#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
#else
#define __page_aligned \
	__attribute__((__aligned__(PAGE_SIZE), \
		__section__(".data.page_aligned")))
#endif
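
/*
 * Example use (hypothetical variable, for illustration): give a static
 * buffer a whole page to itself; built into the kernel it also lands in
 * the .data.page_aligned section:
 *
 *	static char example_buf[PAGE_SIZE] __page_aligned;
 */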

#define VM_DATA_DEFAULT_FLAGS \
	(test_thread_flag(TIF_32BIT) ? \
	 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)

/*
 * This is the default if a program doesn't have a PT_GNU_STACK
 * program header entry. The PPC64 ELF ABI has a non-executable stack
 * by default, so in the absence of a PT_GNU_STACK program header
 * we turn execute permission off.
 */
#define VM_STACK_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_STACK_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_STACK_DEFAULT_FLAGS \
	(test_thread_flag(TIF_32BIT) ? \
	 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)

#include <asm-generic/page.h>

#endif /* _ASM_POWERPC_PAGE_64_H */