arch/sparc64/mm/hugetlbpage.c

/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff.
 */

#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

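/* Note (added commentary): on the UltraSPARC CPUs this code targets,
 * the virtual address space has a hardware hole between
 * 0x0000080000000000 and 0xfffff7ffffffffff.  VA_EXCLUDE_START/END
 * bracket that hole with an extra 4 GB (1UL << 32) on either side,
 * apparently as a guard band, so the search code below never places a
 * huge-page mapping at or adjacent to the hole.
 */
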
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
                                                        unsigned long addr,
                                                        unsigned long len,
                                                        unsigned long pgoff,
                                                        unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long task_size = TASK_SIZE;
        unsigned long start_addr;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;
        if (unlikely(len >= VA_EXCLUDE_START))
                return -ENOMEM;

        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

        task_size -= len;

full_search:
        addr = ALIGN(addr, HPAGE_SIZE);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (addr < VA_EXCLUDE_START &&
                    (addr + len) >= VA_EXCLUDE_START) {
                        addr = VA_EXCLUDE_END;
                        vma = find_vma(mm, VA_EXCLUDE_END);
                }
                if (unlikely(task_size < addr)) {
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (likely(!vma || addr + len <= vma->vm_start)) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                addr = ALIGN(vma->vm_end, HPAGE_SIZE);
        }
}

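/* Note: ALIGN(addr, HPAGE_SIZE) rounds the candidate address up to the
 * next huge-page boundary.  Assuming the default sparc64 configuration
 * of 4 MB huge pages (the size is Kconfig selectable), a cached
 * free_area_cache of 0x70001000 would be bumped to 0x70400000 before
 * the VMA walk, and every retry after a too-small hole likewise
 * restarts on a 4 MB boundary via ALIGN(vma->vm_end, HPAGE_SIZE).
 */
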
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                                  const unsigned long len,
                                  const unsigned long pgoff,
                                  const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;

        /* This should only ever run for 32-bit processes.  */
        BUG_ON(!test_thread_flag(TIF_32BIT));

        /* check if free_area_cache is useful for us */
        if (len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = mm->mmap_base;
        }

        /* either no address requested or can't fit in requested address hole */
        addr = mm->free_area_cache & HPAGE_MASK;

        /* make sure it can fit in the remaining address space */
        if (likely(addr > len)) {
                vma = find_vma(mm, addr-len);
                if (!vma || addr <= vma->vm_start) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr-len);
                }
        }

        if (unlikely(mm->mmap_base < len))
                goto bottomup;

        addr = (mm->mmap_base-len) & HPAGE_MASK;

        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * else if new region fits below vma->vm_start,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
                if (likely(!vma || addr+len <= vma->vm_start)) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);
                }

                /* remember the largest hole we saw so far */
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = (vma->vm_start-len) & HPAGE_MASK;
        } while (likely(len < vma->vm_start));

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->cached_hole_size = ~0UL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = mm->mmap_base;
        mm->cached_hole_size = ~0UL;

        return addr;
}

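/* Note: the bottomup: fallback above temporarily points free_area_cache
 * at TASK_UNMAPPED_BASE so that arch_get_unmapped_area() performs an
 * ordinary bottom-up search, then restores mmap_base as the top-down
 * cache before returning.  Setting cached_hole_size to ~0UL makes the
 * next top-down request take the cache-reset path at the top of this
 * function instead of trusting a stale hole hint.
 */
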
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long task_size = TASK_SIZE;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (len > task_size)
                return -ENOMEM;

        if (addr) {
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

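/* Illustrative note: a process reaches this hook by mmap()ing a file on
 * a hugetlbfs mount (the path and size below are hypothetical, not taken
 * from this file):
 *
 *     int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0600);
 *     void *p = mmap(NULL, 4UL << 20, PROT_READ | PROT_WRITE,
 *                    MAP_SHARED, fd, 0);
 *
 * A length that is not a multiple of the huge page size fails the
 * (len & ~HPAGE_MASK) check with -EINVAL.  Otherwise the legacy mmap
 * layout (mm->get_unmapped_area == arch_get_unmapped_area) uses the
 * bottom-up search above and the top-down layout uses the 32-bit-only
 * top-down search.
 */
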
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        /* We must align the address, because our caller will run
         * set_huge_pte_at() on whatever we return, which writes out
         * all of the sub-ptes for the hugepage range.  So we have
         * to give it the first such sub-pte.
         */
        addr &= HPAGE_MASK;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud) {
                pmd = pmd_alloc(mm, pud, addr);
                if (pmd)
                        pte = pte_alloc_map(mm, pmd, addr);
        }
        return pte;
}

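/* Note: unlike architectures that map huge pages with a single
 * PMD-level entry, this sparc64 code keeps huge mappings at the
 * ordinary PTE level: huge_pte_alloc()/huge_pte_offset() return a
 * normal pte_t pointer, and set_huge_pte_at() below fills in every
 * base-page-sized sub-PTE covering the huge page.  That is also why
 * pmd_huge() simply returns 0 further down.
 */
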
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        addr &= HPAGE_MASK;

        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd))
                                pte = pte_offset_map(pmd, addr);
                }
        }
        return pte;
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
{
        int i;

        if (!pte_present(*ptep) && pte_present(entry))
                mm->context.huge_pte_count++;

        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                set_pte_at(mm, addr, ptep, entry);
                ptep++;
                addr += PAGE_SIZE;
                pte_val(entry) += PAGE_SIZE;
        }
}

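/* Note: assuming the default 4 MB huge page and 8 KB base page,
 * HUGETLB_PAGE_ORDER is 9, so the loop above writes 512 consecutive
 * PTEs.  Each iteration advances both the PTE slot and the physical
 * address encoded in the entry by PAGE_SIZE, so the sub-PTEs map
 * successive base-page-sized pieces of the huge page.
 */
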
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        pte_t entry;
        int i;

        entry = *ptep;
        if (pte_present(entry))
                mm->context.huge_pte_count--;

        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                pte_clear(mm, addr, ptep);
                addr += PAGE_SIZE;
                ptep++;
        }

        return entry;
}

struct page *follow_huge_addr(struct mm_struct *mm,
                              unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        return NULL;
}

static void context_reload(void *__data)
{
        struct mm_struct *mm = __data;

        if (mm == current->mm)
                load_secondary_context(mm);
}

void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
        struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];

        if (likely(tp->tsb != NULL))
                return;

        tsb_grow(mm, MM_TSB_HUGE, 0);
        tsb_context_switch(mm);
        smp_tsb_sync(mm);

        /* On UltraSPARC-III+ and later, configure the second half of
         * the Data-TLB for huge pages.
         */
        if (tlb_type == cheetah_plus) {
                unsigned long ctx;

                spin_lock(&ctx_alloc_lock);
                ctx = mm->context.sparc64_ctx_val;
                ctx &= ~CTX_PGSZ_MASK;
                ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
                ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

                if (ctx != mm->context.sparc64_ctx_val) {
                        /* When changing the page size fields, we
                         * must perform a context flush so that no
                         * stale entries match.  This flush must
                         * occur with the original context register
                         * settings.
                         */
                        do_flush_tlb_mm(mm);

                        /* Reload the context register of all processors
                         * also executing in this address space.
                         */
                        mm->context.sparc64_ctx_val = ctx;
                        on_each_cpu(context_reload, mm, 0, 0);
                }
                spin_unlock(&ctx_alloc_lock);
        }
}

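/* Note: the early return on tp->tsb makes the body of the hook above
 * effectively run only the first time an mm sets up a huge-page
 * mapping.  tsb_grow() allocates the separate huge-page TSB
 * (MM_TSB_HUGE), tsb_context_switch() installs the new TSB
 * configuration on the calling CPU, and smp_tsb_sync() cross-calls the
 * other CPUs running this address space.  On UltraSPARC-III+
 * ("cheetah_plus") the context register also carries two page-size
 * fields; CTX_PGSZ1 is pointed at the huge page size so the second
 * half of the D-TLB can hold huge-page translations, and the TLB is
 * flushed under the old settings before the new value is made visible.
 */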