release/src-rt-6.x.4708/linux/linux-2.6.36/arch/tile/mm/hugetlbpage.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * TILE Huge TLB Page Support for Kernel.
 * Taken from i386 hugetlb implementation:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
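
/*
 * Allocate the page-table slot that will hold a huge PTE for @addr:
 * find the PGD entry, allocate the PUD and PMD levels as needed, and
 * hand back the PMD entry itself as the huge PTE.  Only one huge page
 * size (PMD_SIZE) is supported here.
 */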
pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pte_t *pte = NULL;

        /* We do not yet support multiple huge page sizes. */
        BUG_ON(sz != PMD_SIZE);

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud)
                pte = (pte_t *) pmd_alloc(mm, pud, addr);
        BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

        return pte;
}
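
/*
 * Look up (without allocating) the huge PTE for @addr.  Returns NULL if
 * the PGD or PUD level is not present; otherwise the PMD entry is
 * returned, reinterpreted as a huge PTE.
 */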
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_present(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (pud_present(*pud))
                        pmd = pmd_offset(pud, addr);
        }
        return (pte_t *) pmd;
}
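
/*
 * The HUGETLB_TEST block below provides debug-only variants of the
 * huge-page lookup helpers: pmd_huge()/pud_huge() are stubbed out and
 * follow_huge_addr() resolves the page directly through the huge PTE.
 * The production versions live in the #else branch.
 */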
#ifdef HUGETLB_TEST
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write)
{
        /* Index of the base page within the address space. */
        unsigned long vpfn = address / PAGE_SIZE;
        struct page *page;
        struct vm_area_struct *vma;
        pte_t *pte;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

        WARN_ON(!PageHead(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        return NULL;
}

#else
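
/*
 * Production versions: a huge mapping is flagged by _PAGE_HUGE_PAGE in
 * the PMD/PUD entry, and follow_huge_pmd()/follow_huge_pud() return the
 * struct page of the base-size subpage that contains @address.
 */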
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write)
{
        return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
}

int pud_huge(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pmd);
        if (page)
                page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
        return page;
}

struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                             pud_t *pud, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pud);
        if (page)
                page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
        return page;
}
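
/*
 * PMD sharing of hugetlb page tables is not implemented on this
 * architecture, so huge_pmd_unshare() has nothing to undo.
 */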
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

#endif
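
/*
 * hugetlb_get_unmapped_area() and its helpers choose where a huge-page
 * mapping goes in the address space.  The bottom-up variant scans upward
 * from TASK_UNMAPPED_BASE using the mm's free_area_cache and
 * cached_hole_size hints; the top-down variant works downward from
 * mm->mmap_base.
 */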
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;

        if (len > mm->cached_hole_size) {
                start_addr = mm->free_area_cache;
        } else {
                start_addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        addr = ALIGN(start_addr, huge_page_size(h));

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = ALIGN(vma->vm_end, huge_page_size(h));
        }
}
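
/*
 * Top-down variant: start just below mm->mmap_base (or the cached free
 * area hint) and walk downward through the VMAs looking for a
 * huge-page-aligned hole of at least @len bytes, falling back to the
 * bottom-up search if nothing fits.
 */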
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev_vma;
        unsigned long base = mm->mmap_base, addr = addr0;
        unsigned long largest_hole = mm->cached_hole_size;
        int first_time = 1;

        /* don't allow allocations above current base */
        if (mm->free_area_cache > base)
                mm->free_area_cache = base;

        if (len <= largest_hole) {
                largest_hole = 0;
                mm->free_area_cache = base;
        }

try_again:
        /* make sure it can fit in the remaining address space */
        if (mm->free_area_cache < len)
                goto fail;

        /* either no address requested or can't fit in requested address hole */
        addr = (mm->free_area_cache - len) & huge_page_mask(h);
        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * i.e. return with success:
                 */
                vma = find_vma_prev(mm, addr, &prev_vma);
                if (!vma)
                        return addr;

                /*
                 * new region fits between prev_vma->vm_end and
                 * vma->vm_start, use it:
                 */
                if (addr + len <= vma->vm_start &&
                    (!prev_vma || (addr >= prev_vma->vm_end))) {
                        /* remember the address as a hint for next time */
                        mm->cached_hole_size = largest_hole;
                        mm->free_area_cache = addr;
                        return addr;
                } else {
                        /* pull free_area_cache down to the first hole */
                        if (mm->free_area_cache == vma->vm_end) {
                                mm->free_area_cache = vma->vm_start;
                                mm->cached_hole_size = largest_hole;
                        }
                }

                /* remember the largest hole we saw so far */
                if (addr + largest_hole < vma->vm_start)
                        largest_hole = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = (vma->vm_start - len) & huge_page_mask(h);
        } while (len <= vma->vm_start);

fail:
        /*
         * if hint left us with no space for the requested
         * mapping then try again:
         */
        if (first_time) {
                mm->free_area_cache = base;
                largest_hole = 0;
                first_time = 0;
                goto try_again;
        }

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here.  This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;
        addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
                        len, pgoff, flags);

        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = base;
        mm->cached_hole_size = ~0UL;

        return addr;
}
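
/*
 * Main entry point for mmap() of hugetlbfs files: validate the length,
 * honor MAP_FIXED and explicit address hints, then delegate to the
 * bottom-up or top-down helper depending on the mm's layout.
 */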
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (current->mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}
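
/*
 * Parse the "hugepagesz=" boot parameter; only PMD_SIZE and PUD_SIZE
 * huge pages are accepted, anything else is rejected with an error.
 */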
static __init int setup_hugepagesz(char *opt)
{
        unsigned long ps = memparse(opt, &opt);
        if (ps == PMD_SIZE) {
                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        } else if (ps == PUD_SIZE) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        } else {
                pr_err("hugepagesz: Unsupported page size %lu M\n",
                       ps >> 20);
                return 0;
        }
        return 1;
}
__setup("hugepagesz=", setup_hugepagesz);

#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */