/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

/*
 * For a prot_numa update we only hold mmap_sem for read so there is a
 * potential race with faulting where a pmd was temporarily none. This
 * function checks for a transhuge pmd under the appropriate lock. It
 * returns a pte if it was successfully locked or NULL if it raced with
 * a transhuge insertion.
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, int prot_numa, spinlock_t **ptl)
{
	pte_t *pte;
	spinlock_t *pmdl;

	/* !prot_numa is protected by mmap_sem held for write */
	if (!prot_numa)
		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

	pmdl = pmd_lock(vma->vm_mm, pmd);
	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
		spin_unlock(pmdl);
		return NULL;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
	spin_unlock(pmdl);
	return pte;
}
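
/*
 * Change the protection of every pte in [addr, end) under a single
 * page-table lock. Returns the number of ptes actually updated, which
 * lets callers skip the TLB flush when nothing changed.
 */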
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
	if (!pte)
		return 0;

	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;
			}

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);
			if (preserve_write)
				ptent = pte_mkwrite(ptent);

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(mm, addr, pte, ptent);
			pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}
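
/*
 * Walk the pmds in [addr, end). A transparent huge pmd is changed in one
 * go when it spans the whole range, otherwise it is split and handled
 * pte by pte; the mmu notifier range is only started once a populated
 * pmd is seen.
 */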
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	unsigned long mni_start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
				&& pmd_none_or_clear_bad(pmd))
			continue;

		/* invoke the mmu notifier if the pmd is populated */
		if (!mni_start) {
			mni_start = addr;
			mmu_notifier_invalidate_range_start(mm, mni_start, end);
		}

		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE) {
				split_huge_pmd(vma, pmd, addr);
				if (pmd_trans_unstable(pmd))
					continue;
			} else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					continue;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (mni_start)
		mmu_notifier_invalidate_range_end(mm, mni_start, end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	set_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);

	return pages;
}

unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot,
						dirty_accountable, prot_numa);

	return pages;
}
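
/*
 * Apply newflags to the [start, end) slice of *vma, merging with the
 * neighbouring VMAs or splitting *vma as needed. Called with mmap_sem
 * held for write; on success *pprev points at the VMA covering the range.
 */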
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
				may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx);
	if (*pprev) {
		vma = *pprev;
		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

/*
 * pkey==-1 when doing a legacy mprotect()
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
		unsigned long prot, int pkey)
{
	unsigned long nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
				(prot & PROT_READ);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	/*
	 * If userspace did not allocate the pkey, do not let
	 * them use it here.
	 */
	error = -EINVAL;
	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
		goto out;

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long mask_off_old_flags;
		unsigned long newflags;
		int new_vma_pkey;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		/* Does the application expect PROT_READ to imply PROT_EXEC */
		if (rier && (vma->vm_flags & VM_MAYEXEC))
			prot |= PROT_EXEC;

		/*
		 * Each mprotect() call explicitly passes r/w/x permissions.
		 * If a permission is not passed to mprotect(), it must be
		 * cleared from the VMA.
		 */
		mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
					ARCH_VM_PKEY_FLAGS;

		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
		newflags |= (vma->vm_flags & ~mask_off_old_flags);
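
		/*
		 * In vm_flags, VM_MAYREAD, VM_MAYWRITE and VM_MAYEXEC sit
		 * exactly four bits above VM_READ, VM_WRITE and VM_EXEC,
		 * which is what lets the shift below line each VM_MAY* bit
		 * up with its VM_* counterpart.
		 */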
		/* newflags >> 4 shift VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
		prot = reqprot;
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}

SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	return do_mprotect_pkey(start, len, prot, -1);
}

SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
		unsigned long, prot, int, pkey)
{
	return do_mprotect_pkey(start, len, prot, pkey);
}
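
/*
 * Userspace view (an illustrative sketch, not kernel code; "addr" and
 * "len" are hypothetical):
 *
 *	mprotect(addr, len, PROT_READ);             -> do_mprotect_pkey(..., -1)
 *	pkey_mprotect(addr, len, PROT_READ, pkey);  -> do_mprotect_pkey(..., pkey)
 */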

SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
	int pkey;
	int ret;

	/* No flags supported yet. */
	if (flags)
		return -EINVAL;
	/* check for unsupported init values */
	if (init_val & ~PKEY_ACCESS_MASK)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	pkey = mm_pkey_alloc(current->mm);

	ret = -ENOSPC;
	if (pkey == -1)
		goto out;

	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(current->mm, pkey);
		goto out;
	}

	ret = pkey;
out:
	up_write(&current->mm->mmap_sem);
	return ret;
}

SYSCALL_DEFINE1(pkey_free, int, pkey)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = mm_pkey_free(current->mm, pkey);
	up_write(&current->mm->mmap_sem);

	/*
	 * We could provide warnings or errors if any VMA still
	 * has the pkey set here.
	 */
	return ret;
}
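
/*
 * Illustrative userspace lifecycle (a sketch, assuming a kernel and CPU
 * with protection-key support; "buf" and "len" are hypothetical):
 *
 *	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
 *	if (pkey < 0)
 *		... no key available, or pkeys unsupported ...
 *	pkey_mprotect(buf, len, PROT_READ | PROT_WRITE, pkey);
 *	... loads/stores are now also gated by the per-thread PKRU value ...
 *	pkey_free(pkey);
 */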