[linux-2.6.9-moxart.git] / arch / sparc64 / mm / hugetlbpage.c
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
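
/*
 * Walk (allocating levels as needed) the pgd/pmd chain down to the
 * base pte for @addr.  On sparc64 a huge page is built from a run of
 * contiguous normal ptes (see set_huge_pte() below), so an ordinary
 * pte_t * is all that is needed.
 */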
static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd) {
		pmd = pmd_alloc(mm, pgd, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}
	return pte;
}
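
/*
 * Lookup-only counterpart of huge_pte_alloc(): find the pte for
 * @addr without allocating any page-table levels.
 */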
static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd) {
		pmd = pmd_offset(pgd, addr);
		if (pmd)
			pte = pte_offset_map(pmd, addr);
	}
	return pte;
}
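
/* Mark a pte as huge by setting the sparc64 _PAGE_SZHUGE size bit. */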
#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
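
/*
 * Install one huge page mapping: write (1 << HUGETLB_PAGE_ORDER)
 * consecutive ptes, each pointing PAGE_SIZE further into the huge
 * page, and account the whole range against mm->rss.
 */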
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			 struct page *page, pte_t *page_table, int write_access)
{
	unsigned long i;
	pte_t entry;

	mm->rss += (HPAGE_SIZE / PAGE_SIZE);

	if (write_access)
		entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
						       vma->vm_page_prot)));
	else
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	entry = pte_mkyoung(entry);
	mk_pte_huge(entry);

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(page_table, entry);
		page_table++;

		pte_val(entry) += PAGE_SIZE;
	}
}

/*
 * This function checks for proper alignment of input addr and len
 * parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}
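
/*
 * Copy the hugepage mappings of @vma from @src to @dst at fork time.
 * The child shares the parent's huge pages: each page's refcount is
 * raised once while all of its component ptes are duplicated.
 */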
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	int i;

	while (addr < end) {
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		src_pte = huge_pte_offset(src, addr);
		BUG_ON(!src_pte || pte_none(*src_pte));
		entry = *src_pte;
		ptepage = pte_page(entry);
		get_page(ptepage);
		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
			set_pte(dst_pte, entry);
			pte_val(entry) += PAGE_SIZE;
			dst_pte++;
		}
		dst->rss += (HPAGE_SIZE / PAGE_SIZE);
		addr += HPAGE_SIZE;
	}
	return 0;

nomem:
	return -ENOMEM;
}
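
/*
 * get_user_pages() back-end for hugepage mappings.  Because the
 * mapping has already been prefaulted, every pte is expected to be
 * present; each base page of the huge page is handed back
 * individually.
 */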
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long vaddr = *position;
	int remainder = *length;

	WARN_ON(!is_vm_hugetlb_page(vma));

	while (vaddr < vma->vm_end && remainder) {
		if (pages) {
			pte_t *pte;
			struct page *page;

			pte = huge_pte_offset(mm, vaddr);

			/* hugetlb should be locked, and hence, prefaulted */
			BUG_ON(!pte || pte_none(*pte));

			page = pte_page(*pte);

			WARN_ON(!PageCompound(page));

			get_page(page);
			pages[i] = page;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		--remainder;
		++i;
	}

	*length = remainder;
	*position = vaddr;

	return i;
}
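
/*
 * Huge pmds are not used on sparc64 (huge pages are made of normal
 * ptes), so the generic follow_page() hooks below are stubs.
 */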
struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}
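
/*
 * Tear down all hugepage mappings in [start, end): drop the page
 * reference once per huge page, clear every component pte, then
 * adjust mm->rss and flush the TLB for the range.
 */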
void unmap_hugepage_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	struct page *page;
	int i;

	BUG_ON(start & (HPAGE_SIZE - 1));
	BUG_ON(end & (HPAGE_SIZE - 1));

	for (address = start; address < end; address += HPAGE_SIZE) {
		pte = huge_pte_offset(mm, address);
		BUG_ON(!pte);
		if (pte_none(*pte))
			continue;
		page = pte_page(*pte);
		put_page(page);
		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
			pte_clear(pte);
			pte++;
		}
	}
	mm->rss -= (end - start) >> PAGE_SHIFT;
	flush_tlb_range(vma, start, end);
}
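
/*
 * Populate an entire hugetlbfs mapping up front under
 * mm->page_table_lock: for each huge page in the vma, find (or
 * allocate and insert into the page cache) the backing page and
 * instantiate its ptes via set_huge_pte().
 */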
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;

	BUG_ON(vma->vm_start & ~HPAGE_MASK);
	BUG_ON(vma->vm_end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		unsigned long idx;
		pte_t *pte = huge_pte_alloc(mm, addr);
		struct page *page;

		if (!pte) {
			ret = -ENOMEM;
			goto out;
		}
		if (!pte_none(*pte))
			continue;

		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
		page = find_get_page(mapping, idx);
		if (!page) {
			/* charge the fs quota first */
			if (hugetlb_get_quota(mapping)) {
				ret = -ENOMEM;
				goto out;
			}
			page = alloc_huge_page();
			if (!page) {
				hugetlb_put_quota(mapping);
				ret = -ENOMEM;
				goto out;
			}
			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
			if (!ret) {
				unlock_page(page);
			} else {
				hugetlb_put_quota(mapping);
				free_huge_page(page);
				goto out;
			}
		}
		set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
	}
out:
	spin_unlock(&mm->page_table_lock);
	return ret;
}