/*
 * (C) Copyright 1994 Linus Torvalds
 */
6 #include <linux/slab.h>
7 #include <linux/smp_lock.h>
9 #include <linux/mman.h>
11 #include <asm/uaccess.h>
12 #include <asm/pgtable.h>
14 static inline void change_pte_range(pmd_t
* pmd
, unsigned long address
,
15 unsigned long size
, pgprot_t newprot
)
23 printk("change_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd
));
27 pte
= pte_offset(pmd
, address
);
34 if (pte_present(entry
))
35 set_pte(pte
, pte_modify(entry
, newprot
));
38 } while (address
< end
);
41 static inline void change_pmd_range(pgd_t
* pgd
, unsigned long address
,
42 unsigned long size
, pgprot_t newprot
)
50 printk("change_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd
));
54 pmd
= pmd_offset(pgd
, address
);
55 address
&= ~PGDIR_MASK
;
60 change_pte_range(pmd
, address
, end
- address
, newprot
);
61 address
= (address
+ PMD_SIZE
) & PMD_MASK
;
63 } while (address
< end
);
66 static void change_protection(unsigned long start
, unsigned long end
, pgprot_t newprot
)
69 unsigned long beg
= start
;
71 dir
= pgd_offset(current
->mm
, start
);
72 flush_cache_range(current
->mm
, beg
, end
);
74 change_pmd_range(dir
, start
, end
- start
, newprot
);
75 start
= (start
+ PGDIR_SIZE
) & PGDIR_MASK
;
78 flush_tlb_range(current
->mm
, beg
, end
);
82 static inline int mprotect_fixup_all(struct vm_area_struct
* vma
,
83 int newflags
, pgprot_t prot
)
85 vma
->vm_flags
= newflags
;
86 vma
->vm_page_prot
= prot
;
90 static inline int mprotect_fixup_start(struct vm_area_struct
* vma
,
92 int newflags
, pgprot_t prot
)
94 struct vm_area_struct
* n
;
96 n
= kmem_cache_alloc(vm_area_cachep
, SLAB_KERNEL
);
102 vma
->vm_offset
+= vma
->vm_start
- n
->vm_start
;
103 n
->vm_flags
= newflags
;
104 n
->vm_page_prot
= prot
;
106 get_file(n
->vm_file
);
107 if (n
->vm_ops
&& n
->vm_ops
->open
)
109 insert_vm_struct(current
->mm
, n
);
113 static inline int mprotect_fixup_end(struct vm_area_struct
* vma
,
115 int newflags
, pgprot_t prot
)
117 struct vm_area_struct
* n
;
119 n
= kmem_cache_alloc(vm_area_cachep
, GFP_KERNEL
);
125 n
->vm_offset
+= n
->vm_start
- vma
->vm_start
;
126 n
->vm_flags
= newflags
;
127 n
->vm_page_prot
= prot
;
129 get_file(n
->vm_file
);
130 if (n
->vm_ops
&& n
->vm_ops
->open
)
132 insert_vm_struct(current
->mm
, n
);
136 static inline int mprotect_fixup_middle(struct vm_area_struct
* vma
,
137 unsigned long start
, unsigned long end
,
138 int newflags
, pgprot_t prot
)
140 struct vm_area_struct
* left
, * right
;
142 left
= kmem_cache_alloc(vm_area_cachep
, SLAB_KERNEL
);
145 right
= kmem_cache_alloc(vm_area_cachep
, SLAB_KERNEL
);
147 kmem_cache_free(vm_area_cachep
, left
);
152 left
->vm_end
= start
;
153 vma
->vm_start
= start
;
155 right
->vm_start
= end
;
156 vma
->vm_offset
+= vma
->vm_start
- left
->vm_start
;
157 right
->vm_offset
+= right
->vm_start
- left
->vm_start
;
158 vma
->vm_flags
= newflags
;
159 vma
->vm_page_prot
= prot
;
161 atomic_add(2,&vma
->vm_file
->f_count
);
162 if (vma
->vm_ops
&& vma
->vm_ops
->open
) {
163 vma
->vm_ops
->open(left
);
164 vma
->vm_ops
->open(right
);
166 insert_vm_struct(current
->mm
, left
);
167 insert_vm_struct(current
->mm
, right
);
171 static int mprotect_fixup(struct vm_area_struct
* vma
,
172 unsigned long start
, unsigned long end
, unsigned int newflags
)
177 if (newflags
== vma
->vm_flags
)
179 newprot
= protection_map
[newflags
& 0xf];
180 if (start
== vma
->vm_start
) {
181 if (end
== vma
->vm_end
)
182 error
= mprotect_fixup_all(vma
, newflags
, newprot
);
184 error
= mprotect_fixup_start(vma
, end
, newflags
, newprot
);
185 } else if (end
== vma
->vm_end
)
186 error
= mprotect_fixup_end(vma
, start
, newflags
, newprot
);
188 error
= mprotect_fixup_middle(vma
, start
, end
, newflags
, newprot
);
193 change_protection(start
, end
, newprot
);
197 asmlinkage
int sys_mprotect(unsigned long start
, size_t len
, unsigned long prot
)
199 unsigned long nstart
, end
, tmp
;
200 struct vm_area_struct
* vma
, * next
;
203 if (start
& ~PAGE_MASK
)
205 len
= (len
+ ~PAGE_MASK
) & PAGE_MASK
;
209 if (prot
& ~(PROT_READ
| PROT_WRITE
| PROT_EXEC
))
214 down(¤t
->mm
->mmap_sem
);
216 vma
= find_vma(current
->mm
, start
);
218 if (!vma
|| vma
->vm_start
> start
)
221 for (nstart
= start
; ; ) {
222 unsigned int newflags
;
224 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
226 newflags
= prot
| (vma
->vm_flags
& ~(PROT_READ
| PROT_WRITE
| PROT_EXEC
));
227 if ((newflags
& ~(newflags
>> 4)) & 0xf) {
232 if (vma
->vm_end
>= end
) {
233 error
= mprotect_fixup(vma
, nstart
, end
, newflags
);
239 error
= mprotect_fixup(vma
, nstart
, tmp
, newflags
);
244 if (!vma
|| vma
->vm_start
!= nstart
) {
249 merge_segments(current
->mm
, start
, end
);
251 up(¤t
->mm
->mmap_sem
);