[PATCH] hugetlb: move stale pte check into huge_pte_alloc()

arch/i386/mm/hugetlbpage.c

/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
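
/*
 * Find or create the "huge pte" (really the pmd slot) covering addr.
 * As the patch title says, the stale-pte check lives here now: if the
 * slot still holds a leftover normal page table rather than a huge
 * mapping, it is torn down via hugetlb_clean_stale_pgtable() before
 * the slot is handed back to the caller.
 */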
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	pmd = pmd_alloc(mm, pud, addr);

	if (!pmd)
		goto out;

	pte = (pte_t *) pmd;
	if (!pte_none(*pte) && !pte_huge(*pte))
		hugetlb_clean_stale_pgtable(pte);
out:
	return pte;
}
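
/*
 * Lookup-only counterpart of huge_pte_alloc(): walk the page tables
 * for addr without allocating anything and return the pmd slot,
 * cast to a pte_t pointer.
 */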
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	return (pte_t *) pmd;
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;
	/* index of the 4K subpage within the huge page backing address */
	unsigned long vpfn = address / PAGE_SIZE;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageCompound(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}

#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
#endif
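
/*
 * A pmd slot that is about to back a huge page may still point at a
 * stale normal page table. Clear the pmd, drop the page-table
 * accounting and release the page that held it.
 */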
void hugetlb_clean_stale_pgtable(pte_t *pte)
{
	pmd_t *pmd = (pmd_t *) pte;
	struct page *page;

	page = pmd_page(*pmd);
	pmd_clear(pmd);
	dec_page_state(nr_page_table_pages);
	page_cache_release(page);
}

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
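
/*
 * Bottom-up search: scan upwards from the cached free-area hint (or
 * TASK_UNMAPPED_BASE) for a HPAGE_SIZE-aligned hole big enough for
 * the requested mapping.
 */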
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > mm->cached_hole_size) {
		start_addr = mm->free_area_cache;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	addr = ALIGN(start_addr, HPAGE_SIZE);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
	}
}
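
/*
 * Top-down search: start just below mm->mmap_base and walk downwards
 * looking for a HPAGE_SIZE-aligned hole between existing vmas,
 * falling back to the bottom-up search if nothing fits.
 */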
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev_vma;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & HPAGE_MASK;
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr + len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			return (mm->free_area_cache = addr);
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & HPAGE_MASK;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
			len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
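
/*
 * Check the length for huge-page alignment, honour an aligned hint if
 * that range is free, then pick the bottom-up or top-down helper
 * depending on which get_unmapped_area the mm is using.
 */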
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/