/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */
#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>

#include "internal.h"
int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);
/*
 * Mlocked pages are marked with the PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked.  So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */
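/*
 * Summary of the lazy mlock state machine implemented below (descriptive
 * note, added for readability):
 *
 *   mlock_vma_page()     - sets PG_mlocked, bumps the NR_MLOCK counter and,
 *                          if the page can be isolated, puts it back so it
 *                          lands on the unevictable LRU.
 *   munlock_vma_page()   - clears PG_mlocked, drops NR_MLOCK and uses
 *                          try_to_munlock() to re-mlock the page if another
 *                          VM_LOCKED vma still maps it.
 *   __clear_page_mlock() - LRU accounting when PG_mlocked is cleared for a
 *                          truncated or freed page.
 */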
/*
 * LRU accounting for clear_page_mlock()
 */
void __clear_page_mlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));

	if (!page->mapping) {	/* truncated ? */
		return;
	}

	dec_zone_page_state(page, NR_MLOCK);
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race. The page already moved to the evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}
/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}
/**
 * munlock_vma_page - munlock a vma page
 * @page - page to be unlocked
 *
 * Called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page locked so that we can leave it on the unevictable lru list and not
 * bother vmscan with it.  However, to walk the page's rmap list in
 * try_to_munlock() we must isolate the page from the LRU.  If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear the PageMlocked as we might not get another chance.  If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
 */
void munlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (TestClearPageMlocked(page)) {
		dec_zone_page_state(page, NR_MLOCK);
		if (!isolate_lru_page(page)) {
			int ret = try_to_munlock(page);
			/*
			 * Did try_to_munlock() succeed or punt?
			 */
			if (ret != SWAP_MLOCK)
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);

			putback_lru_page(page);
		} else {
			/*
			 * Some other task has removed the page from the LRU.
			 * putback_lru_page() will take care of removing the
			 * page from the unevictable list, if necessary.
			 * vmscan [page_referenced()] will move the page back
			 * to the unevictable list if some other vma has it
			 * VM_LOCKED.
			 */
			if (PageUnevictable(page))
				count_vm_event(UNEVICTABLE_PGSTRANDED);
			else
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
		}
	}
}
static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
		(vma->vm_start == addr) &&
		!vma_stack_continue(vma->vm_prev, addr);
}
/**
 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 *
 * This takes care of making the pages present too.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end,
				    int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start;
	int nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON(start < vma->vm_start);
	VM_BUG_ON(end   > vma->vm_end);
	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	gup_flags = FOLL_TOUCH;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	if (vma->vm_flags & VM_LOCKED)
		gup_flags |= FOLL_MLOCK;
	/* We don't try to access the guard page of a stack vma */
	if (stack_guard_page(vma, start)) {
		addr += PAGE_SIZE;
		nr_pages--;
	}

	return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}
/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}
/**
 * mlock_vma_pages_range() - mlock pages in specified vma range.
 * @vma - the vma containing the specified address range
 * @start - starting address in @vma to mlock
 * @end   - end address [+1] in @vma to mlock
 *
 * For mmap()/mremap()/expansion of mlocked vma.
 *
 * return 0 on success for "normal" vmas.
 *
 * return number of pages [> 0] to be removed from locked_vm on success
 * of "special" vmas.
 */
long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	int nr_pages = (end - start) / PAGE_SIZE;
	BUG_ON(!(vma->vm_flags & VM_LOCKED));

	/*
	 * filter unlockable vmas
	 */
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		goto no_mlock;

	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current))) {

		__mlock_vma_pages_range(vma, start, end, NULL);

		/* Hide errors from mmap() and other callers */
		return 0;
	}

	/*
	 * User mapped kernel pages or huge pages:
	 * make these pages present to populate the ptes, but
	 * fall thru' to reset VM_LOCKED--no need to unlock, and
	 * return nr_pages so these don't get counted against task's
	 * locked limit.  Huge pages are already counted against
	 * the locked vm limit.
	 */
	make_pages_present(start, end);

no_mlock:
	vma->vm_flags &= ~VM_LOCKED;	/* and don't come back! */
	return nr_pages;		/* error or pages NOT mlocked */
}
/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;

	lru_add_drain();
	vma->vm_flags &= ~VM_LOCKED;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page;
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
		if (page && !IS_ERR(page)) {
			lock_page(page);
			/*
			 * Like in __mlock_vma_pages_range(),
			 * because we lock page here and migration is
			 * blocked by the elevated reference, we need
			 * only check for file-cache page truncation.
			 */
			if (page->mapping)
				munlock_vma_page(page);
			unlock_page(page);
			put_page(page);
		}
		cond_resched();
	}
}
/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes via make_pages_present().
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = newflags & VM_LOCKED;

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current))
		goto out;	/* don't set VM_LOCKED, don't count */

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
	 */

	if (lock)
		vma->vm_flags = newflags;
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}
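/*
 * Worked example (descriptive note, added for readability): mlocking the
 * middle page of a 3-page anonymous vma first tries vma_merge(); failing
 * that, split_vma() runs twice, leaving three vmas of which only the middle
 * one gets VM_LOCKED, and one page is added to mm->locked_vm.
 */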
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma_prev(current->mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}
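/*
 * Descriptive note, added for readability: the loop above requires the
 * [start, start+len) range to be covered by a contiguous chain of vmas;
 * any hole makes do_mlock() return -ENOMEM, matching the mlock(2) ENOMEM
 * case of "some of the specified range is not mapped".
 */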
static int do_mlock_pages(unsigned long start, size_t len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. __mlock_vma_pages_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			ret = __mlock_posix_error_return(ret);
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}
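/*
 * Descriptive note, added for readability: mlock() work is split into two
 * phases.  do_mlock() flips VM_LOCKED on the vmas with mmap_sem held for
 * write, while do_mlock_pages() faults the pages in afterwards with
 * mmap_sem held only for read, so populating a large range does not block
 * other users of the address space for its whole duration.
 */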
SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	if (!error)
		error = do_mlock_pages(start, len, 0);
	return error;
}
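/*
 * Illustrative userspace usage (not part of this file; error values are
 * those documented for mlock(2)):
 *
 *	void *buf = malloc(len);
 *	if (mlock(buf, len) != 0) {
 *		// EPERM:  no CAP_IPC_LOCK and RLIMIT_MEMLOCK is 0
 *		// ENOMEM: over RLIMIT_MEMLOCK, or part of the range unmapped
 *		// EAGAIN: some pages could not be locked
 *		perror("mlock");
 *	}
 *	...
 *	munlock(buf, len);
 */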
SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
static int do_mlockall(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}
SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
	if (!ret && (flags & MCL_CURRENT)) {
		/* Ignore errors */
		do_mlock_pages(0, TASK_SIZE, 1);
	}
out:
	return ret;
}
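/*
 * Illustrative userspace usage (not part of this file): a real-time task
 * typically locks everything it has mapped and everything it will map:
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
 *		perror("mlockall");
 *
 * With MCL_FUTURE, new mappings inherit VM_LOCKED via mm->def_flags, as
 * set in do_mlockall() above.
 */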
SYSCALL_DEFINE0(munlockall)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
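/*
 * Descriptive note, added for readability: user_shm_lock() is the hook used
 * by the SysV shm code when a segment is locked with shmctl(id, SHM_LOCK, 0)
 * and when SHM_HUGETLB segments are set up; the pages are charged to
 * user->locked_shm rather than to any single mm's locked_vm.
 */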
static DEFINE_SPINLOCK(shmlock_user_lock);
int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}
void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}