/*
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

static void mincore_hugetlb_page_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				unsigned char *vec)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct hstate *h;

	h = hstate_vma(vma);
	while (1) {
		unsigned char present;
		pte_t *ptep;

		/*
		 * Huge pages are always in RAM for now, but
		 * theoretically it needs to be checked.
		 */
		ptep = huge_pte_offset(current->mm,
				       addr & huge_page_mask(h));
		present = ptep && !huge_pte_none(huge_ptep_get(ptep));
		while (1) {
			*vec = present;
			vec++;
			addr += PAGE_SIZE;
			if (addr == end)
				return;
			/* check hugepage border */
			if (!(addr & ~huge_page_mask(h)))
				break;
		}
	}
#else
	BUG();
#endif
}
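
/*
 * For example, with 4 KiB base pages and 2 MiB huge pages, the inner loop
 * above runs 512 times per huge page: a single huge_pte_offset() lookup
 * decides present/absent, and that one answer is replicated into 512
 * consecutive vec[] bytes until (addr & ~huge_page_mask(h)) becomes zero
 * at the next huge page boundary.
 */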

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
{
	unsigned char present = 0;
	struct page *page;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
	page = find_get_page(mapping, pgoff);
#ifdef CONFIG_SWAP
	/* shmem/tmpfs may return swap: account for swapcache page too. */
	if (radix_tree_exceptional_entry(page)) {
		swp_entry_t swap = radix_to_swp_entry(page);
		page = find_get_page(&swapper_space, swap.val);
	}
#endif
	if (page) {
		present = PageUptodate(page);
		page_cache_release(page);
	}

	return present;
}
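
/*
 * For instance, a caller probing file offset 0x3000 on a 4 KiB-page system
 * passes pgoff == 3; the result is 1 only if that page is in the page
 * cache (or, for shmem, in the swap cache) and PageUptodate.
 */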

static void mincore_unmapped_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
}
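
/*
 * Note the asymmetry: an unmapped page of a file-backed vma may still be
 * resident in the page cache, so each offset is probed individually, while
 * an unmapped anonymous page cannot be resident anywhere, so the vector is
 * simply zero-filled.
 */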

static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		pte_t pte = *ptep;
		pgoff_t pgoff;

		next = addr + PAGE_SIZE;
		if (pte_none(pte))
			mincore_unmapped_range(vma, addr, next, vec);
		else if (pte_present(pte))
			*vec = 1;
		else if (pte_file(pte)) {
			pgoff = pte_to_pgoff(pte);
			*vec = mincore_page(vma->vm_file->f_mapping, pgoff);
		} else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (is_migration_entry(entry)) {
				/* migration entries are always uptodate */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				pgoff = entry.val;
				*vec = mincore_page(&swapper_space, pgoff);
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	} while (ptep++, addr = next, addr != end);
	pte_unmap_unlock(ptep - 1, ptl);
}
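
/*
 * The "while (ptep++, addr = next, addr != end)" idiom advances the pte
 * pointer and the address in lockstep; pte_unmap_unlock() is handed
 * "ptep - 1" because the loop has already stepped one entry past the
 * last pte it examined.
 */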

static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
				vec += (next - addr) >> PAGE_SHIFT;
				continue;
			}
			/* fall through */
		}
		if (pmd_none_or_clear_bad(pmd))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pte_range(vma, pmd, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pmd++, addr = next, addr != end);
}
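
/*
 * When mincore_huge_pmd() finds a stable transparent huge page it reports
 * every page in the pmd range as resident in one step and returns nonzero,
 * letting the walk skip the pte level entirely; for a splitting huge pmd
 * it returns zero and the range is retried through the regular pte path.
 */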

static void mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pmd_range(vma, pud, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pud++, addr = next, addr != end);
}

static void mincore_page_range(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pud_range(vma, pgd, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pgd++, addr = next, addr != end);
}
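
/*
 * This is the standard four-level page table walk (pgd -> pud -> pmd ->
 * pte): each level clips its range with *_addr_end(), descends for
 * present entries, and resolves absent entries through
 * mincore_unmapped_range() without walking further down.
 */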

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;

	vma = find_vma(current->mm, addr);
	if (!vma || addr < vma->vm_start)
		return -ENOMEM;

	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));

	if (is_vm_hugetlb_page(vma)) {
		mincore_hugetlb_page_range(vma, addr, end, vec);
		return (end - addr) >> PAGE_SHIFT;
	}

	end = pmd_addr_end(addr, end);

	if (is_vm_hugetlb_page(vma))
		mincore_hugetlb_page_range(vma, addr, end, vec);
	else
		mincore_page_range(vma, addr, end, vec);

	return (end - addr) >> PAGE_SHIFT;
}
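
/*
 * For a non-hugetlb vma the pmd_addr_end() clipping above means a single
 * do_mincore() call never crosses a pmd boundary: e.g. with 4 KiB pages
 * and 2 MiB pmds, a request starting 16 pages below a pmd boundary yields
 * at most 16 vector bytes, and the sys_mincore() loop below simply calls
 * back in for the rest.  hugetlb vmas take the early return above, before
 * the clipping.
 */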

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_CACHE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok(VERIFY_READ, (void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_CACHE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (len & ~PAGE_MASK) != 0;

	if (!access_ok(VERIFY_WRITE, vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		down_read(&current->mm->mmap_sem);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		up_read(&current->mm->mmap_sem);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}
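
/*
 * Typical usage from userspace (a minimal sketch, not part of this file;
 * error handling omitted):
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t page = sysconf(_SC_PAGESIZE);
 *		size_t len = 16 * page;
 *		unsigned char *vec = malloc(len / page);
 *		char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		memset(buf, 0, 4 * page);	(fault in the first 4 pages)
 *		mincore(buf, len, vec);
 *		for (size_t i = 0; i < len / page; i++)
 *			printf("page %zu: %s\n", i,
 *			       vec[i] & 1 ? "resident" : "not resident");
 *		return 0;
 *	}
 *
 * Only the least significant bit of each vec[] byte is meaningful; the
 * remaining bits are reserved.
 */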