/*
 *	linux/mm/mprotect.c
 *
 *	(C) Copyright 1994 Linus Torvalds
 */
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/shm.h>
#include <linux/mman.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
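
/*
 * Protection changes are applied by walking the three-level page
 * tables top-down: change_protection() iterates pgd entries,
 * change_pmd_range() the pmds under one pgd, and change_pte_range()
 * rewrites the individual ptes.  This function is the leaf of that
 * walk.
 */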
static inline void change_pte_range(pmd_t * pmd, unsigned long address,
	unsigned long size, pgprot_t newprot)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		printk("change_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t entry = *pte;
		if (pte_present(entry))
			set_pte(pte, pte_modify(entry, newprot));
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
}
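
/*
 * Middle level of the walk: iterate over the pmd entries covering
 * [address, address+size) within one pgd entry, handing each pmd
 * to change_pte_range().  Not-present ptes are skipped there; they
 * pick up the new protection from the vma when faulted back in.
 */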
static inline void change_pmd_range(pgd_t * pgd, unsigned long address,
	unsigned long size, pgprot_t newprot)
{
	pmd_t * pmd;
	unsigned long end;

	if (pgd_none(*pgd))
		return;
	if (pgd_bad(*pgd)) {
		printk("change_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
		pgd_clear(pgd);
		return;
	}
	pmd = pmd_offset(pgd, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		change_pte_range(pmd, address, end - address, newprot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}
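
/*
 * Top level: flush the caches for the range, walk the pgd entries
 * covering [start, end) in the current mm, then flush the TLB once
 * after all the ptes have been rewritten.
 */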
static void change_protection(unsigned long start, unsigned long end, pgprot_t newprot)
{
	pgd_t *dir;
	unsigned long beg = start;

	dir = pgd_offset(current->mm, start);
	flush_cache_range(current->mm, beg, end);
	while (start < end) {
		change_pmd_range(dir, start, end - start, newprot);
		start = (start + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	flush_tlb_range(current->mm, beg, end);
	return;
}
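
/*
 * Simplest fixup case: the mprotect() range covers the whole vma,
 * so just update its flags and page protection in place.
 */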
static inline int mprotect_fixup_all(struct vm_area_struct * vma,
	int newflags, pgprot_t prot)
{
	vma->vm_flags = newflags;
	vma->vm_page_prot = prot;
	return 0;
}
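
/*
 * The range covers the start of the vma but not the end: split off
 * a new vma [vm_start, end) carrying the new protection, and shrink
 * the original to begin at 'end'.  The extra vm_file reference and
 * the vm_ops->open() call keep the mapping's bookkeeping consistent
 * across the split.
 */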
static inline int mprotect_fixup_start(struct vm_area_struct * vma,
	unsigned long end,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * n;

	n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!n)
		return -ENOMEM;
	*n = *vma;
	vma->vm_start = end;
	n->vm_end = end;
	vma->vm_offset += vma->vm_start - n->vm_start;
	n->vm_flags = newflags;
	n->vm_page_prot = prot;
	if (n->vm_file)
		n->vm_file->f_count++;
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	insert_vm_struct(current->mm, n);
	return 0;
}
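
/*
 * Mirror image of mprotect_fixup_start(): the range covers the tail
 * of the vma, so split off [start, vm_end) as a new vma with the
 * new protection and truncate the original at 'start'.
 */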
static inline int mprotect_fixup_end(struct vm_area_struct * vma,
	unsigned long start,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * n;

	n = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);	/* same as SLAB_KERNEL */
	if (!n)
		return -ENOMEM;
	*n = *vma;
	vma->vm_end = start;
	n->vm_start = start;
	n->vm_offset += n->vm_start - vma->vm_start;
	n->vm_flags = newflags;
	n->vm_page_prot = prot;
	if (n->vm_file)
		n->vm_file->f_count++;
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	insert_vm_struct(current->mm, n);
	return 0;
}
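
/*
 * The range lies strictly inside the vma: split into three.  'left'
 * and 'right' keep the old protection; the original vma is narrowed
 * to [start, end) and gets the new flags.  Two new vmas means two
 * extra file references and two open() calls.
 */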
static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
	unsigned long start, unsigned long end,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * left, * right;

	left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!left)
		return -ENOMEM;
	right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!right) {
		kmem_cache_free(vm_area_cachep, left);
		return -ENOMEM;
	}
	*left = *vma;
	*right = *vma;
	left->vm_end = start;
	vma->vm_start = start;
	vma->vm_end = end;
	right->vm_start = end;
	vma->vm_offset += vma->vm_start - left->vm_start;
	right->vm_offset += right->vm_start - left->vm_start;
	vma->vm_flags = newflags;
	vma->vm_page_prot = prot;
	if (vma->vm_file)
		vma->vm_file->f_count += 2;
	if (vma->vm_ops && vma->vm_ops->open) {
		vma->vm_ops->open(left);
		vma->vm_ops->open(right);
	}
	insert_vm_struct(current->mm, left);
	insert_vm_struct(current->mm, right);
	return 0;
}
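
/*
 * Dispatch to the right splitting routine for this vma, then rewrite
 * the page tables for the affected range.  Note that the page tables
 * are only touched after the vma surgery has succeeded.
 */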
static int mprotect_fixup(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	pgprot_t newprot;
	int error;

	if (newflags == vma->vm_flags)
		return 0;
	newprot = protection_map[newflags & 0xf];
	if (start == vma->vm_start) {
		if (end == vma->vm_end)
			error = mprotect_fixup_all(vma, newflags, newprot);
		else
			error = mprotect_fixup_start(vma, end, newflags, newprot);
	} else if (end == vma->vm_end)
		error = mprotect_fixup_end(vma, start, newflags, newprot);
	else
		error = mprotect_fixup_middle(vma, start, end, newflags, newprot);

	if (error)
		return error;

	change_protection(start, end, newprot);
	return 0;
}
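
/*
 * The mprotect() system call: validate the arguments, then walk the
 * vmas covering [start, start+len), fixing up each one in turn.  The
 * walk fails with -EFAULT if it hits a hole between vmas.  Finally
 * merge_segments() coalesces any adjacent vmas that ended up with
 * identical attributes.
 */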
asmlinkage int sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * next;
	int error = -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len + ~PAGE_MASK) & PAGE_MASK;	/* round len up to a whole page */
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
		return -EINVAL;
	if (end == start)
		return 0;

	down(&current->mm->mmap_sem);
	lock_kernel();

	vma = find_vma(current->mm, start);
	error = -EFAULT;
	if (!vma || vma->vm_start > start)
		goto out;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
		if ((newflags & ~(newflags >> 4)) & 0xf) {
			/* every requested PROT_* bit must have the matching
			   VM_MAY* bit (PROT_* << 4) set in the vma */
			error = -EACCES;
			break;
		}

		if (vma->vm_end >= end) {
			error = mprotect_fixup(vma, nstart, end, newflags);
			break;
		}

		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mprotect_fixup(vma, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			error = -EFAULT;
			break;
		}
	}
	merge_segments(current->mm, start, end);
out:
	unlock_kernel();
	up(&current->mm->mmap_sem);
	return error;
}
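
For context, here is a minimal user-space sketch of the call this entry
point implements. It assumes POSIX mmap()/mprotect() on an anonymous
private mapping; the buffer name and the 4096-byte length are
illustrative, not taken from the kernel source above.

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		/* map one anonymous read/write page (size is illustrative) */
		size_t len = 4096;
		char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (buf == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		strcpy(buf, "hello");

		/* drop write permission; a later store to buf would fault */
		if (mprotect(buf, len, PROT_READ) != 0) {
			perror("mprotect");
			return 1;
		}
		printf("%s\n", buf);	/* reading is still allowed */
		munmap(buf, len);
		return 0;
	}

Because the kernel rounds len up to a page and requires start to be
page aligned, passing the mmap() result directly (as above) always
satisfies the -EINVAL checks at the top of sys_mprotect().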