/*
 *	linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

static void mincore_hugetlb_page_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				unsigned char *vec)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct hstate *h;

	h = hstate_vma(vma);
	while (1) {
		unsigned char present;
		pte_t *ptep;

		/*
		 * Huge pages are always in RAM for now, but
		 * theoretically they need to be checked.
		 */
		ptep = huge_pte_offset(current->mm,
				       addr & huge_page_mask(h));
		present = ptep && !huge_pte_none(huge_ptep_get(ptep));
		while (1) {
			*vec = present;
			vec++;
			addr += PAGE_SIZE;
			if (addr == end)
				return;
			/* check hugepage border */
			if (!(addr & ~huge_page_mask(h)))
				break;
		}
	}
#else
	BUG();
#endif
}

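/*
 * Illustrative note (added; not from the original source): assuming
 * 2 MiB huge pages, ~huge_page_mask(h) is 0x1fffff, so the border test
 * above becomes zero exactly when addr reaches a huge page boundary,
 * which is the point where the next huge pte must be looked up.
 */
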
/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
{
	unsigned char present = 0;
	struct page *page;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
	page = find_get_page(mapping, pgoff);
#ifdef CONFIG_SWAP
	/* shmem/tmpfs may return swap: account for swapcache page too. */
	if (radix_tree_exceptional_entry(page)) {
		swp_entry_t swap = radix_to_swp_entry(page);
		page = find_get_page(&swapper_space, swap.val);
	}
#endif
	if (page) {
		present = PageUptodate(page);
		page_cache_release(page);
	}

	return present;
}

static void mincore_unmapped_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
}

static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		pte_t pte = *ptep;
		pgoff_t pgoff;

		next = addr + PAGE_SIZE;
		if (pte_none(pte))
			mincore_unmapped_range(vma, addr, next, vec);
		else if (pte_present(pte))
			*vec = 1;
		else if (pte_file(pte)) {
			pgoff = pte_to_pgoff(pte);
			*vec = mincore_page(vma->vm_file->f_mapping, pgoff);
		} else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (is_migration_entry(entry)) {
				/* migration entries are always uptodate */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				pgoff = entry.val;
				*vec = mincore_page(&swapper_space, pgoff);
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	} while (ptep++, addr = next, addr != end);
	pte_unmap_unlock(ptep - 1, ptl);
}

static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
				vec += (next - addr) >> PAGE_SHIFT;
				continue;
			}
			/* fall through */
		}
		if (pmd_none_or_clear_bad(pmd))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pte_range(vma, pmd, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pmd++, addr = next, addr != end);
}

static void mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pmd_range(vma, pud, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pud++, addr = next, addr != end);
}

static void mincore_page_range(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pud_range(vma, pgd, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pgd++, addr = next, addr != end);
}

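/*
 * Note added for clarity: the helpers above mirror the page-table
 * hierarchy. mincore_page_range() walks pgd entries and delegates to
 * mincore_pud_range() -> mincore_pmd_range() -> mincore_pte_range();
 * any range covered by an empty entry is settled in a single
 * mincore_unmapped_range() call, and vec advances by
 * (next - addr) >> PAGE_SHIFT at every level, one status byte per page.
 */
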
/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;

	vma = find_vma(current->mm, addr);
	if (!vma || addr < vma->vm_start)
		return -ENOMEM;

	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));

	if (is_vm_hugetlb_page(vma)) {
		mincore_hugetlb_page_range(vma, addr, end, vec);
		return (end - addr) >> PAGE_SHIFT;
	}

	end = pmd_addr_end(addr, end);

	if (is_vm_hugetlb_page(vma))
		mincore_hugetlb_page_range(vma, addr, end, vec);
	else
		mincore_page_range(vma, addr, end, vec);

	return (end - addr) >> PAGE_SHIFT;
}

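/*
 * Note added for clarity: do_mincore() may report fewer pages than
 * requested, because "end" is clamped both to the enclosing vma and to
 * the next pmd boundary; the syscall loop below simply retries with
 * the remainder.
 */
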
/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_CACHE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok(VERIFY_READ, (void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_CACHE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (len & ~PAGE_MASK) != 0;

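	/*
	 * Worked example (an added note, assuming 4 KiB pages): len = 8192
	 * yields pages = 2, while len = 8193 yields pages = 3, since the
	 * partial trailing page still needs its own status byte.
	 */
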
	if (!access_ok(VERIFY_WRITE, vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		down_read(&current->mm->mmap_sem);
		retval = do_mincore(start, min(pages, (unsigned long) PAGE_SIZE), tmp);
		up_read(&current->mm->mmap_sem);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}

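/*
 * Illustrative userspace sketch (added; not part of this file): one way
 * a caller might use mincore(2). The names "buf" and "vec" and the
 * four-page mapping are hypothetical; assumes glibc's wrapper and a
 * successful mmap().
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		long psz = sysconf(_SC_PAGESIZE);
 *		size_t len = 4 * psz;
 *		unsigned char vec[4];
 *		unsigned char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *					  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		buf[0] = 1;	// fault in only the first page
 *		if (mincore(buf, len, vec) == 0) {
 *			int i;
 *			for (i = 0; i < 4; i++)
 *				printf("page %d: %s\n", i,
 *				       (vec[i] & 1) ? "resident" : "not resident");
 *		}
 *		munmap(buf, len);
 *		return 0;
 *	}
 */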