/*
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@redhat.com>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#ifndef pgprot_modify
/* Fallback for architectures that do not provide their own pgprot_modify() */
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif
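/*
 * Rewrite the protection bits of every pte mapped in [addr, end) under one
 * pmd.  Runs under the pte lock taken by pte_offset_map_lock(); present
 * ptes are cleared and rewritten so a racing hardware dirty/accessed update
 * cannot be lost, and write migration entries are downgraded to read.
 */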
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;

			/* Avoid an SMP race with hardware updated dirty/clean
			 * bits by wiping the pte and then setting the new pte
			 * into place.
			 */
			ptent = ptep_get_and_clear(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);
			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent))
				ptent = pte_mkwrite(ptent);
			set_pte_at(mm, addr, pte, ptent);
#ifdef CONFIG_MIGRATION
		} else if (!pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
					swp_entry_to_pte(entry));
			}
#endif
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}
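/*
 * change_pmd_range() and change_pud_range() walk the intermediate levels of
 * the page-table hierarchy for [addr, end), skipping empty or bad entries
 * and descending one level for each populated range.
 */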
static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
	} while (pmd++, addr = next, addr != end);
}
static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
	} while (pud++, addr = next, addr != end);
}
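/*
 * Top level of the walk: flush caches for the range, update every populated
 * entry beneath each pgd to the new protection, then flush the TLB for the
 * whole range.
 */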
static void change_protection(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_range(vma, start, end);
}
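/*
 * Apply newflags to the range [start, end) of one vma: charge newly
 * writable private memory against the commit limit, try to merge with the
 * neighbouring vmas or split this one so the boundaries line up, then
 * rewrite vm_flags, vm_page_prot and the page tables.  Expects mmap_sem
 * held for write.
 */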
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.
	 *
	 * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting
	 * a MAP_NORESERVE private mapping to writable will now reserve.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) {
			charged = nrpages;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	if (is_vm_hugetlb_page(vma))
		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
	else
		change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}
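/*
 * The mprotect(2) entry point: validate the request, translate PROT_* bits
 * into VM_* flags, then walk every vma overlapping [start, start+len) and
 * apply mprotect_fixup() to each piece, all under mmap_sem held for write.
 * PROT_GROWSDOWN and PROT_GROWSUP extend the request towards the growable
 * end of a stack-style vma.
 */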
asmlinkage long
sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma_prev(current->mm, start, &prev);
	error = -ENOMEM;
	if (!vma)
		goto out;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	}
	else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shift VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}