/*
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@redhat.com>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/security.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
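/*
 * Lowest level of the walk: rewrite the protection bits of every
 * present pte covered by one pmd entry.
 */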
static inline void
change_pte_range(pmd_t *pmd, unsigned long address,
		unsigned long size, pgprot_t newprot)
{
	pte_t *pte;
	unsigned long end;

	pte = pte_offset_map(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		if (pte_present(*pte)) {
			pte_t entry;

			/* Avoid an SMP race with hardware updated dirty/clean
			 * bits by wiping the pte and then setting the new pte
			 * into place.
			 */
			entry = ptep_get_and_clear(pte);
			set_pte(pte, pte_modify(entry, newprot));
		}
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	pte_unmap(pte - 1);
}
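/*
 * Middle level: clamp the range to one pgd entry and hand each pmd
 * slice of it to change_pte_range().
 */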
static inline void
change_pmd_range(pgd_t *pgd, unsigned long address,
		unsigned long size, pgprot_t newprot)
{
	pmd_t *pmd;
	unsigned long end;

	pmd = pmd_offset(pgd, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		change_pte_range(pmd, address, end - address, newprot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
}
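/*
 * Top level: flush the cache for the range, rewrite the protections
 * pgd by pgd under page_table_lock, then flush the TLB.
 */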
static void
change_protection(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, pgprot_t newprot)
{
	pgd_t *dir;
	unsigned long beg = start;

	dir = pgd_offset(current->mm, start);
	flush_cache_range(vma, beg, end);
	if (start >= end)
		BUG();
	spin_lock(&current->mm->page_table_lock);
	do {
		change_pmd_range(dir, start, end - start, newprot);
		start = (start + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (start && (start < end));
	flush_tlb_range(vma, beg, end);
	spin_unlock(&current->mm->page_table_lock);
}
/*
 * Try to merge the vma into the previous vma under the new flags;
 * return 1 if successful or 0 if it was impossible.
 */
static int
mprotect_attempt_merge(struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long end, int newflags)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!prev || !vma)
		return 0;
	if (prev->vm_end != vma->vm_start)
		return 0;
	if (!can_vma_merge(prev, newflags))
		return 0;
	if (vma->vm_file || (vma->vm_flags & VM_SHARED))
		return 0;
	/*
	 * If the whole area changes to the protection of the previous one
	 * we can just get rid of it.
	 */
	if (end == vma->vm_end) {
		spin_lock(&mm->page_table_lock);
		prev->vm_end = end;
		__vma_unlink(mm, vma, prev);
		spin_unlock(&mm->page_table_lock);

		kmem_cache_free(vm_area_cachep, vma);
		mm->map_count--;
		return 1;
	}
	/*
	 * Otherwise extend it.
	 */
	spin_lock(&mm->page_table_lock);
	prev->vm_end = end;
	vma->vm_start = end;
	spin_unlock(&mm->page_table_lock);
	return 1;
}
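/*
 * Apply the new flags to [start, end) within one vma: charge the
 * memory accounting if a private mapping becomes writable, split the
 * vma where the range covers only part of it, then update the flags
 * and rewrite the hardware protection bits.
 */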
static int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long charged = 0;
	pgprot_t newprot;
	int error;
	if (newflags == vma->vm_flags) {
		*pprev = vma;
		return 0;
	}
	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.
	 *
	 * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting
	 * a MAP_NORESERVE private mapping to writable will now reserve.
	 */
	if (newflags & VM_WRITE) {
		if (!(vma->vm_flags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) {
			charged = (end - start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}
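	/* The low four flag bits (read/write/exec/shared) index the
	 * arch-specific protection_map[] to get the hardware pgprot_t. */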
	newprot = protection_map[newflags & 0xf];
	if (start == vma->vm_start) {
		/*
		 * Try to merge with the previous vma.
		 */
		if (mprotect_attempt_merge(vma, *pprev, end, newflags)) {
			vma = *pprev;
			goto success;
		}
	} else {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}
	/*
	 * Unless it returns an error, this function always sets *pprev to
	 * the first vma for which vma->vm_end >= end.
	 */
	*pprev = vma;
	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}
	spin_lock(&mm->page_table_lock);
	vma->vm_flags = newflags;
	vma->vm_page_prot = newprot;
	spin_unlock(&mm->page_table_lock);
success:
	change_protection(vma, start, end, newprot);
	return 0;
fail:
	vm_unacct_memory(charged);
	return error;
}
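/*
 * mprotect(2) entry point: validate the arguments, then walk every
 * vma overlapping [start, start+len) and fix each one up in turn.
 */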
asmlinkage long
sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *next, *prev;
	int error = -EINVAL;
	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -ENOMEM;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
		return -EINVAL;
	if (end == start)
		return 0;

	down_write(&current->mm->mmap_sem);
	vma = find_vma_prev(current->mm, start, &prev);
	error = -ENOMEM;
	if (!vma || vma->vm_start > start)
		goto out;
	for (nstart = start ; ; ) {
		unsigned int newflags;
		int last = 0;
		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
		if (is_vm_hugetlb_page(vma)) {
			error = -EACCES;
			goto out;
		}
		newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
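		/* Each VM_MAY* bit sits four bits above its VM_* counterpart,
		 * so this rejects a request for any protection whose
		 * corresponding VM_MAY* bit is clear. */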
		if ((newflags & ~(newflags >> 4)) & 0xf) {
			error = -EACCES;
			goto out;
		}
		error = security_file_mprotect(vma, prot);
		if (error)
			goto out;
		if (vma->vm_end > end) {
			error = mprotect_fixup(vma, &prev, nstart, end, newflags);
			goto out;
		}
		if (vma->vm_end == end)
			last = 1;
		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		if (last)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
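	/*
	 * After the loop, try to fold the vma following the last
	 * fixed-up region into prev if the two are now compatible.
	 */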
	if (next && prev->vm_end == next->vm_start &&
			can_vma_merge(next, prev->vm_flags) &&
			!prev->vm_file && !(prev->vm_flags & VM_SHARED)) {
		spin_lock(&prev->vm_mm->page_table_lock);
		prev->vm_end = next->vm_end;
		__vma_unlink(prev->vm_mm, next, prev);
		spin_unlock(&prev->vm_mm->page_table_lock);

		kmem_cache_free(vm_area_cachep, next);
		prev->vm_mm->map_count--;
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
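/*
 * Illustrative userspace sketch (not part of this file) of how a
 * caller reaches sys_mprotect(); assumes a 4096-byte page size:
 *
 *	char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	buf[0] = 1;			   write is allowed
 *	mprotect(buf, 4096, PROT_READ);	   enters sys_mprotect() above
 *	buf[0] = 2;			   now faults with SIGSEGV
 */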