/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */

#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
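
/* Bottom-up search used by 64-bit tasks: walk the vma list upward from
 * free_area_cache in HPAGE_SIZE steps, hopping over the unusable VA
 * range bounded by VA_EXCLUDE_START/VA_EXCLUDE_END.
 */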
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;
	unsigned long start_addr;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len >= VA_EXCLUDE_START))
		return -ENOMEM;

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

	task_size -= len;

full_search:
	addr = ALIGN(addr, HPAGE_SIZE);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (addr < VA_EXCLUDE_START &&
		    (addr + len) >= VA_EXCLUDE_START) {
			addr = VA_EXCLUDE_END;
			vma = find_vma(mm, VA_EXCLUDE_END);
		}
		if (unlikely(task_size < addr)) {
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
	}
}
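
/* Top-down variant: search downward from mm->mmap_base, remembering the
 * largest hole seen in cached_hole_size, and fall back to the bottom-up
 * search if nothing fits below the base.
 */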
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache & HPAGE_MASK;

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = (mm->mmap_base-len) & HPAGE_MASK;

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr+len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start-len) & HPAGE_MASK;
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
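
/* Entry point used for hugepage mappings: validate the length and any
 * hinted address, then hand off to the bottom-up or top-down search to
 * match this mm's normal mmap layout.
 */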
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
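
/* A hugepage is represented here by a run of ordinary ptes rather than
 * a single huge pmd/pud entry, so allocation walks all the way down to
 * the pte level.
 */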
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}
	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}

	return pte;
}
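
/* Hugepage pmd sharing is not implemented on sparc64, so there is never
 * anything to unshare.
 */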
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
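
/* Write out every sub-pte of the hugepage: (1 << HUGETLB_PAGE_ORDER)
 * consecutive ptes, advancing both the virtual address and the physical
 * address encoded in 'entry' by PAGE_SIZE each step.
 */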
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	int i;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.huge_pte_count++;

	addr &= HPAGE_MASK;
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte_at(mm, addr, ptep, entry);
		ptep++;
		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}
}
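
/* Clear every sub-pte of the hugepage and return the first one so the
 * caller still sees the old mapping.
 */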
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;
	int i;

	entry = *ptep;
	if (pte_present(entry))
		mm->context.huge_pte_count--;

	addr &= HPAGE_MASK;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		pte_clear(mm, addr, ptep);
		addr += PAGE_SIZE;
		ptep++;
	}

	return entry;
}
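
/* Because hugepages live at the pte level here, the generic page table
 * walkers handle them like runs of normal ptes; these stubs just report
 * that no huge pmd or pud entries exist.
 */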
struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}
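
/* Cross-call target: reload the secondary context register on any cpu
 * currently running this address space.
 */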
static void context_reload(void *__data)
{
	struct mm_struct *mm = __data;

	if (mm == current->mm)
		load_secondary_context(mm);
}
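
/* Make sure the hugepage TSB exists for this mm and, on chips that
 * support it, program the huge page size into the context register.
 */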
void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];

	if (likely(tp->tsb != NULL))
		return;

	tsb_grow(mm, MM_TSB_HUGE, 0);
	tsb_context_switch(mm);
	smp_tsb_sync(mm);

	/* On UltraSPARC-III+ and later, configure the second half of
	 * the Data-TLB for huge pages.
	 */
	if (tlb_type == cheetah_plus) {
		unsigned long ctx;

		spin_lock(&ctx_alloc_lock);
		ctx = mm->context.sparc64_ctx_val;
		ctx &= ~CTX_PGSZ_MASK;
		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

		if (ctx != mm->context.sparc64_ctx_val) {
			/* When changing the page size fields, we
			 * must perform a context flush so that no
			 * stale entries match.  This flush must
			 * occur with the original context register
			 * value.
			 */
			do_flush_tlb_mm(mm);

			/* Reload the context register of all processors
			 * also executing in this address space.
			 */
			mm->context.sparc64_ctx_val = ctx;
			on_each_cpu(context_reload, mm, 0);
		}
		spin_unlock(&ctx_alloc_lock);
	}
}