[davej-history.git] / mm / mprotect.c
/*
 *  linux/mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 */
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/shm.h>
#include <linux/mman.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
/* Rewrite the protection bits of every present pte under one pmd entry,
 * for the range [address, address + size). */
static inline void change_pte_range(pmd_t * pmd, unsigned long address,
        unsigned long size, pgprot_t newprot)
{
        pte_t * pte;
        unsigned long end;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return;
        }
        pte = pte_offset(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                if (pte_present(*pte)) {
                        pte_t entry;

                        /* Avoid an SMP race with hardware updated dirty/clean
                         * bits by wiping the pte and then setting the new pte
                         * into place.
                         */
                        entry = ptep_get_and_clear(pte);
                        set_pte(pte, pte_modify(entry, newprot));
                }
                address += PAGE_SIZE;
                pte++;
        } while (address && (address < end));
}
/* Loop over the pmd entries under one pgd entry, handing each piece of the
 * range to change_pte_range(). */
static inline void change_pmd_range(pgd_t * pgd, unsigned long address,
        unsigned long size, pgprot_t newprot)
{
        pmd_t * pmd;
        unsigned long end;

        if (pgd_none(*pgd))
                return;
        if (pgd_bad(*pgd)) {
                pgd_ERROR(*pgd);
                pgd_clear(pgd);
                return;
        }
        pmd = pmd_offset(pgd, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                change_pte_range(pmd, address, end - address, newprot);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
}
/* Walk the current mm's page tables for [start, end) and install the new
 * protection, flushing the caches before and the TLB afterwards. */
static void change_protection(unsigned long start, unsigned long end, pgprot_t newprot)
{
        pgd_t *dir;
        unsigned long beg = start;

        dir = pgd_offset(current->mm, start);
        flush_cache_range(current->mm, beg, end);
        if (start >= end)
                BUG();
        spin_lock(&current->mm->page_table_lock);
        do {
                change_pmd_range(dir, start, end - start, newprot);
                start = (start + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (start && (start < end));
        spin_unlock(&current->mm->page_table_lock);
        flush_tlb_range(current->mm, beg, end);
        return;
}
/* The whole vma gets the new protection: no split needed, just update it. */
static inline int mprotect_fixup_all(struct vm_area_struct * vma,
        int newflags, pgprot_t prot)
{
        spin_lock(&vma->vm_mm->page_table_lock);
        vma->vm_flags = newflags;
        vma->vm_page_prot = prot;
        spin_unlock(&vma->vm_mm->page_table_lock);
        return 0;
}
/* The head [vma->vm_start, end) changes protection: split it off into a new
 * vma 'n' and shrink the original vma from the front. */
static inline int mprotect_fixup_start(struct vm_area_struct * vma,
        unsigned long end,
        int newflags, pgprot_t prot)
{
        struct vm_area_struct * n;

        n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!n)
                return -ENOMEM;
        *n = *vma;
        n->vm_end = end;
        n->vm_flags = newflags;
        n->vm_raend = 0;
        n->vm_page_prot = prot;
        if (n->vm_file)
                get_file(n->vm_file);
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
        lock_vma_mappings(vma);
        spin_lock(&vma->vm_mm->page_table_lock);
        vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
        vma->vm_start = end;
        __insert_vm_struct(current->mm, n);
        spin_unlock(&vma->vm_mm->page_table_lock);
        unlock_vma_mappings(vma);
        return 0;
}
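The vm_pgoff adjustment above is the part that is easiest to get wrong when a mapping is split, so here is a stand-alone sketch of the same arithmetic. It is illustrative only and not part of mprotect.c: plain user-space C, made-up addresses, and an assumed 4 KiB page size (PAGE_SHIFT = 12).

/* Illustrative sketch, not part of mprotect.c: the file-offset fixup done
 * by mprotect_fixup_start() when the head [vm_start, end) of a mapping is
 * split off.  Names and values are made up; assumes 4 KiB pages. */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned long vm_start = 0x10000;  /* old start of the mapping */
        unsigned long vm_pgoff = 5;        /* file offset of vm_start, in pages */
        unsigned long end      = 0x14000;  /* split point: new start of the tail */

        /* The tail now begins (end - vm_start) bytes further into the file,
         * i.e. four pages further, so its page offset moves from 5 to 9. */
        vm_pgoff += (end - vm_start) >> PAGE_SHIFT;
        vm_start = end;

        printf("tail starts at %#lx, file page offset %lu\n", vm_start, vm_pgoff);
        return 0;
}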
/* The tail [start, vma->vm_end) changes protection: split it off into a new
 * vma 'n' and shrink the original vma from the back. */
static inline int mprotect_fixup_end(struct vm_area_struct * vma,
        unsigned long start,
        int newflags, pgprot_t prot)
{
        struct vm_area_struct * n;

        n = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!n)
                return -ENOMEM;
        *n = *vma;
        n->vm_start = start;
        n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
        n->vm_flags = newflags;
        n->vm_raend = 0;
        n->vm_page_prot = prot;
        if (n->vm_file)
                get_file(n->vm_file);
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
        lock_vma_mappings(vma);
        spin_lock(&vma->vm_mm->page_table_lock);
        vma->vm_end = start;
        __insert_vm_struct(current->mm, n);
        spin_unlock(&vma->vm_mm->page_table_lock);
        unlock_vma_mappings(vma);
        return 0;
}
/* The middle [start, end) changes protection: the original vma becomes the
 * middle piece, and two new vmas, 'left' and 'right', keep the old flags. */
static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
        unsigned long start, unsigned long end,
        int newflags, pgprot_t prot)
{
        struct vm_area_struct * left, * right;

        left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!left)
                return -ENOMEM;
        right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!right) {
                kmem_cache_free(vm_area_cachep, left);
                return -ENOMEM;
        }
        *left = *vma;
        *right = *vma;
        left->vm_end = start;
        right->vm_start = end;
        right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
        left->vm_raend = 0;
        right->vm_raend = 0;
        if (vma->vm_file)
                atomic_add(2, &vma->vm_file->f_count);
        if (vma->vm_ops && vma->vm_ops->open) {
                vma->vm_ops->open(left);
                vma->vm_ops->open(right);
        }
        lock_vma_mappings(vma);
        spin_lock(&vma->vm_mm->page_table_lock);
        vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
        vma->vm_start = start;
        vma->vm_end = end;
        vma->vm_flags = newflags;
        vma->vm_raend = 0;
        vma->vm_page_prot = prot;
        __insert_vm_struct(current->mm, left);
        __insert_vm_struct(current->mm, right);
        spin_unlock(&vma->vm_mm->page_table_lock);
        unlock_vma_mappings(vma);
        return 0;
}
/* Pick the right helper depending on whether [start, end) covers the whole
 * vma, its head, its tail or its middle, then rewrite the page tables. */
static int mprotect_fixup(struct vm_area_struct * vma,
        unsigned long start, unsigned long end, unsigned int newflags)
{
        pgprot_t newprot;
        int error;

        if (newflags == vma->vm_flags)
                return 0;
        newprot = protection_map[newflags & 0xf];
        if (start == vma->vm_start) {
                if (end == vma->vm_end)
                        error = mprotect_fixup_all(vma, newflags, newprot);
                else
                        error = mprotect_fixup_start(vma, end, newflags, newprot);
        } else if (end == vma->vm_end)
                error = mprotect_fixup_end(vma, start, newflags, newprot);
        else
                error = mprotect_fixup_middle(vma, start, end, newflags, newprot);

        if (error)
                return error;

        change_protection(start, end, newprot);
        return 0;
}
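For reference, the indexing into protection_map[] above can be made concrete. The sketch below is illustrative and not part of mprotect.c; it assumes the 2.4-era flag values from <linux/mm.h> (VM_READ 0x1, VM_WRITE 0x2, VM_EXEC 0x4, VM_SHARED 0x8), under which the low four bits of newflags select one of the 16 architecture-supplied pgprot entries.

/* Illustrative sketch, not part of mprotect.c: how the low four bits of
 * newflags form the protection_map[] index.  Flag values assumed from the
 * 2.4-era <linux/mm.h>; the 16-entry table itself lives in arch code. */
#include <stdio.h>

#define VM_READ   0x1
#define VM_WRITE  0x2
#define VM_EXEC   0x4
#define VM_SHARED 0x8

int main(void)
{
        unsigned int newflags = VM_READ | VM_WRITE | VM_SHARED;

        /* mprotect_fixup() does: newprot = protection_map[newflags & 0xf]; */
        printf("protection_map index = %u\n", newflags & 0xf);  /* prints 11 */
        return 0;
}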
asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
        unsigned long nstart, end, tmp;
        struct vm_area_struct * vma, * next;
        int error = -EINVAL;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        len = PAGE_ALIGN(len);
        end = start + len;
        if (end < start)
                return -EINVAL;
        if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
                return -EINVAL;
        if (end == start)
                return 0;

        down(&current->mm->mmap_sem);

        vma = find_vma(current->mm, start);
        error = -EFAULT;
        if (!vma || vma->vm_start > start)
                goto out;

        for (nstart = start ; ; ) {
                unsigned int newflags;

                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */

                newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));

                /* The VM_MAY* bits sit four bits above the VM_READ/WRITE/EXEC
                 * bits, so shifting newflags right by 4 lines them up: any
                 * requested protection whose VM_MAY* counterpart is clear is
                 * refused. */
                if ((newflags & ~(newflags >> 4)) & 0xf) {
                        error = -EACCES;
                        break;
                }

                if (vma->vm_end >= end) {
                        error = mprotect_fixup(vma, nstart, end, newflags);
                        break;
                }

                tmp = vma->vm_end;
                next = vma->vm_next;
                error = mprotect_fixup(vma, nstart, tmp, newflags);
                if (error)
                        break;
                nstart = tmp;
                vma = next;
                if (!vma || vma->vm_start != nstart) {
                        error = -EFAULT;
                        break;
                }
        }
        spin_lock(&current->mm->page_table_lock);
        merge_segments(current->mm, start, end);
        spin_unlock(&current->mm->page_table_lock);
out:
        up(&current->mm->mmap_sem);
        return error;
}
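For context, a minimal user-space caller that ends up in sys_mprotect() above. This is an illustrative example, not part of mprotect.c; it uses the standard POSIX mmap()/mprotect() interface and omits most error handling.

/* Illustrative user-space example, not part of mprotect.c: map an anonymous
 * read-write page, then drop write permission with mprotect().  After the
 * call, reads still work but a write would fault with SIGSEGV. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        size_t len = getpagesize();
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;

        strcpy(p, "hello");                     /* still writable here */

        if (mprotect(p, len, PROT_READ) != 0)   /* enters sys_mprotect() */
                return 1;

        printf("%s\n", p);                      /* reading is still allowed */
        munmap(p, len);
        return 0;
}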