/*
 *  linux/mm/mprotect.c
 *
 *  (C) Copyright 1994  Linus Torvalds
 */
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/mman.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
static inline void change_pte_range(pmd_t * pmd, unsigned long address,
	unsigned long size, pgprot_t newprot)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		if (pte_present(*pte)) {
			pte_t entry;

			/* Avoid an SMP race with hardware updated dirty/clean
			 * bits by wiping the pte and then setting the new pte
			 * into place..
			 */
			entry = ptep_get_and_clear(pte);
			set_pte(pte, pte_modify(entry, newprot));
		}
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
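/*
 * change_pte_range() is the leaf of the usual three-level walk:
 * change_protection() steps the pgd entries, change_pmd_range() the
 * pmd entries, and the routine above rewrites the individual ptes.
 * The "address && (address < end)" form of the loop condition also
 * stops the walk if the address wraps to zero at the top of the
 * address space.
 */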
static inline void change_pmd_range(pgd_t * pgd, unsigned long address,
	unsigned long size, pgprot_t newprot)
{
	pmd_t * pmd;
	unsigned long end;

	if (pgd_none(*pgd))
		return;
	if (pgd_bad(*pgd)) {
		pgd_ERROR(*pgd);
		pgd_clear(pgd);
		return;
	}
	pmd = pmd_offset(pgd, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		change_pte_range(pmd, address, end - address, newprot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
}
static void change_protection(unsigned long start, unsigned long end, pgprot_t newprot)
{
	pgd_t *dir;
	unsigned long beg = start;

	dir = pgd_offset(current->mm, start);
	flush_cache_range(current->mm, beg, end);
	if (start >= end)
		BUG();
	spin_lock(&current->mm->page_table_lock);
	do {
		change_pmd_range(dir, start, end - start, newprot);
		start = (start + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (start && (start < end));
	spin_unlock(&current->mm->page_table_lock);
	flush_tlb_range(current->mm, beg, end);
}
static inline int mprotect_fixup_all(struct vm_area_struct * vma,
	int newflags, pgprot_t prot)
{
	spin_lock(&vma->vm_mm->page_table_lock);
	vma->vm_flags = newflags;
	vma->vm_page_prot = prot;
	spin_unlock(&vma->vm_mm->page_table_lock);
	return 0;
}
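/*
 * Whole-vma changes need no split at all: only the flags and the
 * cached page protection are rewritten, under page_table_lock.
 */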
static inline int mprotect_fixup_start(struct vm_area_struct * vma,
	unsigned long end,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * n;

	n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!n)
		return -ENOMEM;
	*n = *vma;
	n->vm_end = end;
	n->vm_flags = newflags;
	n->vm_raend = 0;
	n->vm_page_prot = prot;
	if (n->vm_file)
		get_file(n->vm_file);
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	lock_vma_mappings(vma);
	spin_lock(&vma->vm_mm->page_table_lock);
	vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
	vma->vm_start = end;
	__insert_vm_struct(current->mm, n);
	spin_unlock(&vma->vm_mm->page_table_lock);
	unlock_vma_mappings(vma);
	return 0;
}
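/*
 * Changing only the front of a vma needs one split: the new vma "n"
 * takes [vm_start, end) with the new flags, and the old vma is
 * trimmed to begin at "end", so its vm_pgoff has to be advanced by
 * the pages cut off its front.  mprotect_fixup_end() below is the
 * mirror image and needs no vm_pgoff adjustment on the old vma.
 */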
static inline int mprotect_fixup_end(struct vm_area_struct * vma,
	unsigned long start,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * n;

	n = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!n)
		return -ENOMEM;
	*n = *vma;
	n->vm_start = start;
	n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
	n->vm_flags = newflags;
	n->vm_raend = 0;
	n->vm_page_prot = prot;
	if (n->vm_file)
		get_file(n->vm_file);
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	lock_vma_mappings(vma);
	spin_lock(&vma->vm_mm->page_table_lock);
	vma->vm_end = start;
	__insert_vm_struct(current->mm, n);
	spin_unlock(&vma->vm_mm->page_table_lock);
	unlock_vma_mappings(vma);
	return 0;
}
static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
	unsigned long start, unsigned long end,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * left, * right;

	left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!left)
		return -ENOMEM;
	right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!right) {
		kmem_cache_free(vm_area_cachep, left);
		return -ENOMEM;
	}
	*left = *vma;
	*right = *vma;
	left->vm_end = start;
	right->vm_start = end;
	right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
	left->vm_raend = 0;
	right->vm_raend = 0;
	if (vma->vm_file)
		atomic_add(2, &vma->vm_file->f_count);
	if (vma->vm_ops && vma->vm_ops->open) {
		vma->vm_ops->open(left);
		vma->vm_ops->open(right);
	}
	lock_vma_mappings(vma);
	spin_lock(&vma->vm_mm->page_table_lock);
	vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_flags = newflags;
	vma->vm_raend = 0;
	vma->vm_page_prot = prot;
	__insert_vm_struct(current->mm, left);
	__insert_vm_struct(current->mm, right);
	spin_unlock(&vma->vm_mm->page_table_lock);
	unlock_vma_mappings(vma);
	return 0;
}
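/*
 * The middle case turns one vma into three: "left" and "right" keep
 * the old protection and the original vma is narrowed to [start, end)
 * with the new one.  Both new vmas reference the same file, which is
 * why a single atomic_add(2, ...) bumps f_count twice.
 */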
static int mprotect_fixup(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	pgprot_t newprot;
	int error;

	if (newflags == vma->vm_flags)
		return 0;
	newprot = protection_map[newflags & 0xf];
	if (start == vma->vm_start) {
		if (end == vma->vm_end)
			error = mprotect_fixup_all(vma, newflags, newprot);
		else
			error = mprotect_fixup_start(vma, end, newflags, newprot);
	} else if (end == vma->vm_end)
		error = mprotect_fixup_end(vma, start, newflags, newprot);
	else
		error = mprotect_fixup_middle(vma, start, end, newflags, newprot);

	if (error)
		return error;

	change_protection(start, end, newprot);
	return 0;
}
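/*
 * protection_map[] (defined in mm/mmap.c in this kernel generation)
 * translates the low four vm flag bits -- VM_READ, VM_WRITE, VM_EXEC,
 * VM_SHARED -- into the architecture's pgprot_t, which is why
 * "newflags & 0xf" is a valid index.  A private writable mapping, for
 * instance, maps to a non-writable pte protection so that the first
 * write faults and can be handled as copy-on-write.
 */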
asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * next;
	int error = -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
		return -EINVAL;
	if (end == start)
		return 0;

	down(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -EFAULT;
	if (!vma || vma->vm_start > start)
		goto out;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
		/* new flags must be a subset of the VM_MAY* bits four above */
		if ((newflags & ~(newflags >> 4)) & 0xf) {
			error = -EACCES;
			break;
		}

		if (vma->vm_end >= end) {
			error = mprotect_fixup(vma, nstart, end, newflags);
			break;
		}

		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mprotect_fixup(vma, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			error = -EFAULT;
			break;
		}
	}
	spin_lock(&current->mm->page_table_lock);
	merge_segments(current->mm, start, end);
	spin_unlock(&current->mm->page_table_lock);
out:
	up(&current->mm->mmap_sem);
	return error;
}
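/*
 * A minimal user-space sketch of the call implemented above (not part
 * of the kernel source; error handling is illustrative only): write
 * to a private anonymous page, drop write permission with mprotect(),
 * then restore it.
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int demo(void)
 *	{
 *		long ps = sysconf(_SC_PAGESIZE);
 *		char *p = mmap(0, ps, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (p == MAP_FAILED)
 *			return -1;
 *		p[0] = 1;
 *		if (mprotect(p, ps, PROT_READ))
 *			return -1;
 *		if (mprotect(p, ps, PROT_READ | PROT_WRITE))
 *			return -1;
 *		p[0] = 2;
 *		return munmap(p, ps);
 *	}
 */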
);