/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */
#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>

#include "internal.h"
int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);
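
/*
 * Illustrative userspace counterpart of the check above (example only,
 * not part of this file): a task passes can_do_mlock() either by holding
 * CAP_IPC_LOCK or by having a nonzero RLIMIT_MEMLOCK soft limit.
 */
#if 0	/* userspace example */
#include <stdio.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;
	static char buf[4096];

	getrlimit(RLIMIT_MEMLOCK, &rl);
	printf("RLIMIT_MEMLOCK soft limit: %lu bytes\n",
	       (unsigned long)rl.rlim_cur);

	/* fails with EPERM when the limit is 0 and we lack CAP_IPC_LOCK */
	if (mlock(buf, sizeof(buf)) != 0)
		perror("mlock");
	else
		munlock(buf, sizeof(buf));
	return 0;
}
#endif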
#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * Mlocked pages are marked with the PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified; otherwise we
 * may have mlocked a page that is being munlocked.  So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */
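
/*
 * Sketch of the revalidation pattern described above (an assumed example,
 * not code from this file): a lazy-mlock site must take mmap_sem for read
 * and re-check VM_LOCKED before marking a page mlocked.
 */
#if 0
static int example_lazy_mlock(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!down_read_trylock(&mm->mmap_sem))
		return 0;	/* cannot verify VM_LOCKED; skip lazy mlock */
	/*
	 * With mmap_sem held for read, munlock() cannot clear VM_LOCKED
	 * underneath us, so the check below is stable.
	 */
	if (vma->vm_flags & VM_LOCKED)
		mlock_vma_page(page);	/* caller holds the page lock */
	up_read(&mm->mmap_sem);
	return 1;
}
#endif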
/*
 * LRU accounting for clear_page_mlock()
 */
void __clear_page_mlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));

	if (!page->mapping) {	/* truncated ? */
		return;
	}

	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * Page not on the LRU yet.  Flush all pagevecs and retry.
		 */
		lru_add_drain_all();
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}
/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestSetPageMlocked(page) && !isolate_lru_page(page))
		putback_lru_page(page);
}
/*
 * called from munlock()/munmap() path with page supposedly on the LRU.
 *
 * Note:  unlike mlock_vma_page(), we can't just clear the PageMlocked
 * [in try_to_munlock()] and then attempt to isolate the page.  We must
 * isolate the page to keep others from messing with its unevictable
 * and mlocked state while trying to munlock.  However, we pre-clear the
 * mlocked state anyway as we might lose the isolation race, and we might
 * not get another chance to clear PageMlocked.  If we successfully
 * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
 * mapping the page, it will restore the PageMlocked state, unless the page
 * is mapped in a non-linear vma.  So, we go ahead and SetPageMlocked(),
 * perhaps redundantly.
 * If we lose the isolation race, and the page is mapped by other VM_LOCKED
 * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
 * either of which will restore the PageMlocked state by calling
 * mlock_vma_page() above, if it can grab the vma's mmap sem.
 */
static void munlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (TestClearPageMlocked(page) && !isolate_lru_page(page)) {
		try_to_munlock(page);
		putback_lru_page(page);
	}
}
/*
 * mlock a range of pages in the vma.
 *
 * This takes care of making the pages present too.
 *
 * vma->vm_mm->mmap_sem must be held for write.
 */
static int __mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start;
	struct page *pages[16];		/* 16 gives a reasonable batch */
	int write = !!(vma->vm_flags & VM_WRITE);
	int nr_pages = (end - start) / PAGE_SIZE;
	int ret;

	VM_BUG_ON(start & ~PAGE_MASK || end & ~PAGE_MASK);
	VM_BUG_ON(start < vma->vm_start || end > vma->vm_end);
	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));

	lru_add_drain_all();	/* push cached pages to LRU */

	while (nr_pages > 0) {
		int i;

		/*
		 * get_user_pages makes pages present if we are
		 * setting mlock, and this extra reference count will
		 * disable migration of this page.  However, page may
		 * still be truncated out from under us.
		 */
		ret = get_user_pages(current, mm, addr,
				min_t(int, nr_pages, ARRAY_SIZE(pages)),
				write, 0, pages, NULL);
		/*
		 * This can happen for, e.g., VM_NONLINEAR regions before
		 * a page has been allocated and mapped at a given offset,
		 * or for addresses that map beyond end of a file.
		 * We'll mlock the pages if/when they get faulted in.
		 */
		if (ret < 0)
			break;
		if (ret == 0) {
			/*
			 * We know the vma is there, so the only time
			 * we cannot get a single page should be an
			 * error (ret < 0) case.
			 */
			break;
		}

		lru_add_drain();	/* push cached pages to LRU */

		for (i = 0; i < ret; i++) {
			struct page *page = pages[i];

			lock_page(page);
			/*
			 * Because we lock page here and migration is blocked
			 * by the elevated reference, we need only check for
			 * page truncation (file-cache only).
			 */
			if (page->mapping)
				mlock_vma_page(page);
			unlock_page(page);
			put_page(page);		/* ref from get_user_pages() */

			/*
			 * here we assume that get_user_pages() has given us
			 * a list of virtually contiguous pages.
			 */
			addr += PAGE_SIZE;	/* for next get_user_pages() */
			nr_pages--;
		}
	}

	lru_add_drain_all();	/* to update stats */

	return 0;	/* count entire vma as locked_vm */
}
/*
 * private structure for munlock page table walk
 */
struct munlock_page_walk {
	struct vm_area_struct *vma;
	pmd_t *pmd;	/* for migration_entry_wait() */
};
/*
 * munlock normal pages for present ptes
 */
static int __munlock_pte_handler(pte_t *ptep, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct munlock_page_walk *mpw = walk->private;
	swp_entry_t entry;
	struct page *page;
	pte_t pte;

retry:
	pte = *ptep;
	/*
	 * If it's a swap pte, we might be racing with page migration.
	 */
	if (unlikely(!pte_present(pte))) {
		if (!is_swap_pte(pte))
			goto out;
		entry = pte_to_swp_entry(pte);
		if (is_migration_entry(entry)) {
			migration_entry_wait(mpw->vma->vm_mm, mpw->pmd, addr);
			goto retry;
		}
		goto out;
	}

	page = vm_normal_page(mpw->vma, addr, pte);
	if (!page)
		goto out;

	lock_page(page);
	if (!page->mapping) {	/* truncated ? */
		unlock_page(page);
		goto retry;
	}
	munlock_vma_page(page);
	unlock_page(page);

out:
	return 0;
}
/*
 * Save pmd for pte handler for waiting on migration entries
 */
static int __munlock_pmd_handler(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct munlock_page_walk *mpw = walk->private;

	mpw->pmd = pmd;
	return 0;
}
/*
 * munlock a range of pages in the vma using standard page table walk.
 *
 * vma->vm_mm->mmap_sem must be held for write.
 */
static void __munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct munlock_page_walk mpw = {
		.vma = vma,
	};
	struct mm_walk munlock_page_walk = {
		.pmd_entry = __munlock_pmd_handler,
		.pte_entry = __munlock_pte_handler,
		.mm = mm,
		.private = &mpw,
	};

	VM_BUG_ON(start & ~PAGE_MASK || end & ~PAGE_MASK);
	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
	VM_BUG_ON(start < vma->vm_start);
	VM_BUG_ON(end > vma->vm_end);

	lru_add_drain_all();	/* push cached pages to LRU */
	walk_page_range(start, end, &munlock_page_walk);
	lru_add_drain_all();	/* to update stats */
}
#else /* CONFIG_UNEVICTABLE_LRU */

/*
 * Just make pages present if VM_LOCKED.  No-op if unlocking.
 */
static int __mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	if (vma->vm_flags & VM_LOCKED)
		make_pages_present(start, end);
	return 0;
}

/*
 * munlock a range of pages in the vma -- no-op.
 */
static void __munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
}
#endif /* CONFIG_UNEVICTABLE_LRU */
/*
 * mlock all pages in this vma range.  For mmap()/mremap()/...
 */
int mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int nr_pages = (end - start) / PAGE_SIZE;

	BUG_ON(!(vma->vm_flags & VM_LOCKED));

	/*
	 * filter unlockable vmas
	 */
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		goto no_mlock;

	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current))) {
		downgrade_write(&mm->mmap_sem);
		nr_pages = __mlock_vma_pages_range(vma, start, end);

		up_read(&mm->mmap_sem);
		/* vma can change or disappear */
		down_write(&mm->mmap_sem);
		vma = find_vma(mm, start);
		/* non-NULL vma must contain @start, but need to check @end */
		if (!vma || end > vma->vm_end)
			return -EAGAIN;

		return nr_pages;
	}

	/*
	 * User mapped kernel pages or huge pages:
	 * make these pages present to populate the ptes, but
	 * fall thru' to reset VM_LOCKED--no need to unlock, and
	 * return nr_pages so these don't get counted against task's
	 * locked limit.  huge pages are already counted against
	 * locked vm limit.
	 */
	make_pages_present(start, end);

no_mlock:
	vma->vm_flags &= ~VM_LOCKED;	/* and don't come back! */
	return nr_pages;		/* pages NOT mlocked */
}
/*
 * munlock all pages in vma.  For munmap() and exit().
 */
void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	vma->vm_flags &= ~VM_LOCKED;
	__munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}
/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes via make_pages_present().
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = newflags & VM_LOCKED;

	if (newflags == vma->vm_flags ||
			(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out;	/* don't set VM_LOCKED, don't count */

	if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current)) {
		if (lock)
			make_pages_present(start, end);
		goto out;	/* don't set VM_LOCKED, don't count */
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
	 */
	vma->vm_flags = newflags;

	if (lock) {
		/*
		 * mmap_sem is currently held for write.  Downgrade the write
		 * lock to a read lock so that other faults, mmap scans, etc.
		 * can proceed while we fault in all pages.
		 */
		downgrade_write(&mm->mmap_sem);

		ret = __mlock_vma_pages_range(vma, start, end);
		if (ret > 0) {
			mm->locked_vm -= ret;
			ret = 0;
		}
		/*
		 * Need to reacquire mmap sem in write mode, as our callers
		 * expect this.  We have no support for atomically upgrading
		 * a sem to write, so we need to check for ranges while sem
		 * is unlocked.
		 */
		up_read(&mm->mmap_sem);
		/* vma can change or disappear */
		down_write(&mm->mmap_sem);
		*prev = find_vma(mm, start);
		/* non-NULL *prev must contain @start, but need to check @end */
		if (!(*prev) || end > (*prev)->vm_end)
			ret = -EAGAIN;
	} else {
		/*
		 * TODO:  for unlocking, pages will already be resident, so
		 * we don't need to wait for allocations/reclaim/pagein, ...
		 * However, unlocking a very large region can still take a
		 * while.  Should we downgrade the semaphore for both lock
		 * AND unlock?
		 */
		__munlock_vma_pages_range(vma, start, end);
	}

out:
	*prev = vma;
	return ret;
}
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *prev;
	int error;

	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma_prev(current->mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}
asmlinkage long sys_mlock(unsigned long start, size_t len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	return error;
}
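
/*
 * Userspace view of the rounding above (illustrative example only): the
 * kernel computes len = PAGE_ALIGN(len + (start & ~PAGE_MASK)) and
 * start &= PAGE_MASK, so an unaligned request locks every page the byte
 * range touches, and that rounded size is what is checked against
 * RLIMIT_MEMLOCK.
 */
#if 0	/* userspace example */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	static char buf[8192];
	char *p = buf + 100;		/* deliberately unaligned */

	/* locks the whole page(s) overlapping [p, p + 3000) */
	if (mlock(p, 3000) != 0)
		perror("mlock");
	else
		munlock(p, 3000);
	return 0;
}
#endif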
asmlinkage long sys_munlock(unsigned long start, size_t len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
static int do_mlockall(int flags)
{
	struct vm_area_struct *vma, *prev = NULL;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma; vma = prev->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}
asmlinkage long sys_mlockall(int flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	down_write(&current->mm->mmap_sem);

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}
asmlinkage long sys_munlockall(void)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
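
/*
 * Typical userspace use of the mlockall()/munlockall() pair above
 * (example only): a latency-sensitive task locks current and future
 * mappings, subject to the RLIMIT_MEMLOCK check in sys_mlockall().
 */
#if 0	/* userspace example */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
		perror("mlockall");
		return 1;
	}
	/* ... latency-critical work: no major faults on locked pages ... */
	munlockall();
	return 0;
}
#endif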
/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}
void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}
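
/*
 * Userspace path that reaches user_shm_lock()/user_shm_unlock() above
 * (example only): SHM_LOCK charges the segment's pages to the calling
 * user's locked_shm, so the accounting outlives any single process.
 */
#if 0	/* userspace example */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 65536, IPC_CREAT | 0600);

	if (id < 0) {
		perror("shmget");
		return 1;
	}
	if (shmctl(id, SHM_LOCK, NULL) != 0)	/* charges user->locked_shm */
		perror("shmctl(SHM_LOCK)");
	else
		shmctl(id, SHM_UNLOCK, NULL);	/* uncharges it */
	shmctl(id, IPC_RMID, NULL);
	return 0;
}
#endif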