/*
 *  (C) Copyright 1994  Linus Torvalds
 */

#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/mman.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
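
/*
 * Walk the page table entries covered by one pmd entry and rewrite the
 * protection bits of every page that is currently present.
 */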
static inline void change_pte_range(pmd_t * pmd, unsigned long address,
        unsigned long size, pgprot_t newprot)
{
        pte_t * pte;
        unsigned long end;

        pte = pte_offset(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t entry = *pte;
                if (pte_present(entry))
                        set_pte(pte, pte_modify(entry, newprot));
                address += PAGE_SIZE;
                pte++;
        } while (address && (address < end));
}
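
/*
 * Walk the pmd entries covered by one pgd entry, handing each sub-range
 * down to change_pte_range().
 */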
static inline void change_pmd_range(pgd_t * pgd, unsigned long address,
        unsigned long size, pgprot_t newprot)
{
        pmd_t * pmd;
        unsigned long end;

        pmd = pmd_offset(pgd, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                change_pte_range(pmd, address, end - address, newprot);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
}
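
/*
 * Rewrite the protection bits for the whole [start, end) range of the
 * current process: flush the cache first, walk the page tables under
 * page_table_lock, then flush the TLB for the range.
 */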
static void change_protection(unsigned long start, unsigned long end, pgprot_t newprot)
{
        pgd_t *dir;
        unsigned long beg = start;

        dir = pgd_offset(current->mm, start);
        flush_cache_range(current->mm, beg, end);
        spin_lock(&current->mm->page_table_lock);
        do {
                change_pmd_range(dir, start, end - start, newprot);
                start = (start + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (start && (start < end));
        spin_unlock(&current->mm->page_table_lock);
        flush_tlb_range(current->mm, beg, end);
}
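
/*
 * Simplest case: the new protection covers the whole vma, so only its
 * flags and page protection need to be updated in place.
 */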
static inline int mprotect_fixup_all(struct vm_area_struct * vma,
        int newflags, pgprot_t prot)
{
        vmlist_modify_lock(vma->vm_mm);
        vma->vm_flags = newflags;
        vma->vm_page_prot = prot;
        vmlist_modify_unlock(vma->vm_mm);
        return 0;
}
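
/*
 * The change starts at vma->vm_start but ends before vma->vm_end: split
 * off a new vma for the head of the area and shrink the original.
 */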
static inline int mprotect_fixup_start(struct vm_area_struct * vma,
        unsigned long end,
        int newflags, pgprot_t prot)
{
        struct vm_area_struct * n;

        n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!n)
                return -ENOMEM;
        *n = *vma;
        n->vm_end = end;
        n->vm_flags = newflags;
        n->vm_page_prot = prot;
        if (n->vm_file)
                get_file(n->vm_file);
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
        vmlist_modify_lock(vma->vm_mm);
        vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
        vma->vm_start = end;
        insert_vm_struct(current->mm, n);
        vmlist_modify_unlock(vma->vm_mm);
        return 0;
}
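
/*
 * The change ends at vma->vm_end but starts after vma->vm_start: split
 * off a new vma for the tail of the area and shrink the original.
 */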
static inline int mprotect_fixup_end(struct vm_area_struct * vma,
        unsigned long start,
        int newflags, pgprot_t prot)
{
        struct vm_area_struct * n;

        n = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!n)
                return -ENOMEM;
        *n = *vma;
        n->vm_start = start;
        n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
        n->vm_flags = newflags;
        n->vm_page_prot = prot;
        if (n->vm_file)
                get_file(n->vm_file);
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
        vmlist_modify_lock(vma->vm_mm);
        vma->vm_end = start;
        insert_vm_struct(current->mm, n);
        vmlist_modify_unlock(vma->vm_mm);
        return 0;
}
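
/*
 * The change lies strictly inside the vma: split it into three pieces,
 * with the middle one taking the new flags.  The backing file gains two
 * references, one for each newly created vma.
 */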
static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
        unsigned long start, unsigned long end,
        int newflags, pgprot_t prot)
{
        struct vm_area_struct * left, * right;

        left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!left)
                return -ENOMEM;
        right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!right) {
                kmem_cache_free(vm_area_cachep, left);
                return -ENOMEM;
        }
        *left = *vma;
        *right = *vma;
        left->vm_end = start;
        right->vm_start = end;
        right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
        if (vma->vm_file)
                atomic_add(2, &vma->vm_file->f_count);
        if (vma->vm_ops && vma->vm_ops->open) {
                vma->vm_ops->open(left);
                vma->vm_ops->open(right);
        }
        vmlist_modify_lock(vma->vm_mm);
        vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
        vma->vm_start = start;
        vma->vm_end = end;
        vma->vm_flags = newflags;
        vma->vm_page_prot = prot;
        insert_vm_struct(current->mm, left);
        insert_vm_struct(current->mm, right);
        vmlist_modify_unlock(vma->vm_mm);
        return 0;
}
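
/*
 * Pick the right splitting helper for the [start, end) range, then
 * rewrite the page tables.  protection_map[] translates the low four
 * vm_flags bits into the pgprot_t used in the page tables.
 */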
static int mprotect_fixup(struct vm_area_struct * vma,
        unsigned long start, unsigned long end, unsigned int newflags)
{
        pgprot_t newprot;
        int error;

        if (newflags == vma->vm_flags)
                return 0;
        newprot = protection_map[newflags & 0xf];
        if (start == vma->vm_start) {
                if (end == vma->vm_end)
                        error = mprotect_fixup_all(vma, newflags, newprot);
                else
                        error = mprotect_fixup_start(vma, end, newflags, newprot);
        } else if (end == vma->vm_end)
                error = mprotect_fixup_end(vma, start, newflags, newprot);
        else
                error = mprotect_fixup_middle(vma, start, end, newflags, newprot);

        if (error)
                return error;

        change_protection(start, end, newprot);
        return 0;
}
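
/*
 * mprotect(2): change the protection of the pages in [start, start+len).
 * The requested PROT_* bits are only accepted when the underlying mapping
 * allows them; the newflags >> 4 test relies on the VM_MAY* bits sitting
 * four places above the corresponding VM_* bits.
 */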
asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
        unsigned long nstart, end, tmp;
        struct vm_area_struct * vma, * next;
        int error = -EINVAL;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        len = PAGE_ALIGN(len);
        end = start + len;
        if (end < start)
                return -EINVAL;
        if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
                return -EINVAL;
        if (end == start)
                return 0;

        down(&current->mm->mmap_sem);

        vma = find_vma(current->mm, start);
        error = -EFAULT;
        if (!vma || vma->vm_start > start)
                goto out;

        for (nstart = start ; ; ) {
                unsigned int newflags;

                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */

                newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
                if ((newflags & ~(newflags >> 4)) & 0xf) {
                        error = -EACCES;
                        break;
                }

                if (vma->vm_end >= end) {
                        error = mprotect_fixup(vma, nstart, end, newflags);
                        break;
                }

                tmp = vma->vm_end;
                next = vma->vm_next;
                error = mprotect_fixup(vma, nstart, tmp, newflags);
                if (error)
                        break;
                nstart = tmp;
                vma = next;
                if (!vma || vma->vm_start != nstart) {
                        error = -EFAULT;
                        break;
                }
        }
        vmlist_modify_lock(current->mm);
        merge_segments(current->mm, start, end);
        vmlist_modify_unlock(current->mm);
out:
        up(&current->mm->mmap_sem);
        return error;
}