/*
 *	linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
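
/*
 * Structure of this file: sys_mincore() at the bottom validates the
 * arguments and loops over the range, calling do_mincore() under
 * mmap_sem one chunk at a time.  do_mincore() handles at most one VMA
 * per call, and the mincore_*_range() helpers walk the page tables
 * level by level (pgd -> pud -> pmd -> pte), filling in one status
 * byte per PAGE_SIZE page.
 */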

static void mincore_hugetlb_page_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				unsigned char *vec)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct hstate *h;

	h = hstate_vma(vma);
	while (1) {
		unsigned char present;
		pte_t *ptep;
		/*
		 * Huge pages are always in RAM for now, but
		 * theoretically it needs to be checked.
		 */
		ptep = huge_pte_offset(current->mm,
				       addr & huge_page_mask(h));
		present = ptep && !huge_pte_none(huge_ptep_get(ptep));
		while (1) {
			*vec = present;
			vec++;
			addr += PAGE_SIZE;
			if (addr == end)
				return;
			/* check hugepage border */
			if (!(addr & ~huge_page_mask(h)))
				break;
		}
	}
#else
	BUG();
#endif
}
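
/*
 * Note on mincore_hugetlb_page_range(): the result vector carries one
 * byte per PAGE_SIZE page, so the inner loop replicates a huge page's
 * presence bit across every base page it covers and breaks out at each
 * huge-page boundary to look up the next huge pte.
 */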

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
{
	unsigned char present = 0;
	struct page *page;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
	page = find_get_page(mapping, pgoff);
#ifdef CONFIG_SWAP
	/* shmem/tmpfs may return swap: account for swapcache page too. */
	if (radix_tree_exceptional_entry(page)) {
		swp_entry_t swap = radix_to_swp_entry(page);
		page = find_get_page(&swapper_space, swap.val);
	}
#endif
	if (page) {
		present = PageUptodate(page);
		if (present)
			present |= (PageReadaheadUnused(page) << 7);
		page_cache_release(page);
	}

	return present;
}
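
/*
 * Note on the byte built above: bit 0 is the standard mincore()
 * "resident" flag derived from PageUptodate().  The PageReadaheadUnused()
 * test feeding bit 7 is not in the mainline version of this function; it
 * appears to come from a zen-kernel patch that lets userspace spot
 * readahead pages that were never actually referenced.  Callers that only
 * care about residency should mask with 1, e.g. (vec[i] & 1).
 */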

static void mincore_unmapped_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
}
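
/*
 * Note on mincore_unmapped_range(): even when no pte has been set up, a
 * page of a file-backed mapping may already be resident in the page
 * cache, so that case is answered via vma->vm_file's mapping; anonymous
 * memory without a pte can never be resident, hence the zero fill.
 */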

static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		pte_t pte = *ptep;
		pgoff_t pgoff;

		next = addr + PAGE_SIZE;
		if (pte_none(pte))
			mincore_unmapped_range(vma, addr, next, vec);
		else if (pte_present(pte))
			*vec = 1;
		else if (pte_file(pte)) {
			pgoff = pte_to_pgoff(pte);
			*vec = mincore_page(vma->vm_file->f_mapping, pgoff);
		} else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (is_migration_entry(entry)) {
				/* migration entries are always uptodate */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				pgoff = entry.val;
				*vec = mincore_page(&swapper_space, pgoff);
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	} while (ptep++, addr = next, addr != end);
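	/*
	 * The walk above post-increments ptep, so ptep now points one
	 * past the last pte examined; unlock with the last pte that was
	 * actually mapped.
	 */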
	pte_unmap_unlock(ptep - 1, ptl);
}

static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
				vec += (next - addr) >> PAGE_SHIFT;
				continue;
			}
			/* fall through */
		}
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pte_range(vma, pmd, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pmd++, addr = next, addr != end);
}
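
/*
 * Note on the pmd level above: mincore_huge_pmd() returns nonzero when
 * it could handle a transparent huge pmd in one shot; when it returns
 * zero the huge page is in the middle of being split, so we fall
 * through to the regular pte walk.  pmd_none_or_trans_huge_or_clear_bad()
 * then re-checks the pmd, guarding against a concurrent THP split (or
 * MADV_DONTNEED) changing the page tables under us.
 */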

static void mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pmd_range(vma, pud, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pud++, addr = next, addr != end);
}

static void mincore_page_range(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pud_range(vma, pgd, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pgd++, addr = next, addr != end);
}

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;

	vma = find_vma(current->mm, addr);
	if (!vma || addr < vma->vm_start)
		return -ENOMEM;

	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));

	if (is_vm_hugetlb_page(vma)) {
		mincore_hugetlb_page_range(vma, addr, end, vec);
		return (end - addr) >> PAGE_SHIFT;
	}

	end = pmd_addr_end(addr, end);
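
	/*
	 * Clamping end to a pmd boundary above means a single call never
	 * crosses a pmd; sys_mincore() just calls back in for the rest.
	 * The hugetlb test below looks unreachable, since the hugetlb
	 * case already returned early above.
	 */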
	if (is_vm_hugetlb_page(vma))
		mincore_hugetlb_page_range(vma, addr, end, vec);
	else
		mincore_page_range(vma, addr, end, vec);

	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
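
/*
 * Userspace usage sketch (illustrative only; assumes an open file
 * descriptor "fd" and the usual libc wrapper for mincore(2)):
 *
 *	long psz = sysconf(_SC_PAGESIZE);
 *	size_t len = 16 * psz;
 *	unsigned char vec[16];
 *	void *buf = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 *	if (buf != MAP_FAILED && mincore(buf, len, vec) == 0 &&
 *	    (vec[0] & 1))
 *		printf("first page is resident\n");
 */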
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_CACHE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok(VERIFY_READ, (void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_CACHE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (len & ~PAGE_MASK) != 0;

	if (!access_ok(VERIFY_WRITE, vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		down_read(&current->mm->mmap_sem);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		up_read(&current->mm->mmap_sem);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}