/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994  Linus Torvalds
 *  (C) Copyright 2002  Christoph Hellwig
 *
 *  Address space accounting code	<alan@redhat.com>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
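
/*
 * Protection changes are applied with a three-level page-table walk:
 * change_protection() steps through the pgd entries covering the range,
 * change_pmd_range() walks the pmds under one pgd entry, and
 * change_pte_range() rewrites the individual ptes.  The
 * "while (address && (address < end))" loop form guards against the
 * address wrapping to zero at the top of the address space.
 */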
static inline void
change_pte_range(pmd_t *pmd, unsigned long address,
		unsigned long size, pgprot_t newprot)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset_map(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		if (pte_present(*pte)) {
			pte_t entry;

			/* Avoid an SMP race with hardware updated dirty/clean
			 * bits by wiping the pte and then setting the new pte
			 * into place.
			 */
			entry = ptep_get_and_clear(pte);
			set_pte(pte, pte_modify(entry, newprot));
		}
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	pte_unmap(pte - 1);
}

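/*
 * Walk the pmds spanned by one pgd entry, applying change_pte_range()
 * to each one that is present.
 */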
static inline void
change_pmd_range(pgd_t *pgd, unsigned long address,
		unsigned long size, pgprot_t newprot)
{
	pmd_t * pmd;
	unsigned long end;

	if (pgd_none(*pgd))
		return;
	if (pgd_bad(*pgd)) {
		pgd_ERROR(*pgd);
		pgd_clear(pgd);
		return;
	}
	pmd = pmd_offset(pgd, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		change_pte_range(pmd, address, end - address, newprot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
}

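/*
 * Rewrite the page protections for [start, end) in the current mm.
 * The caller holds mmap_sem; page_table_lock is taken here for the
 * duration of the walk, and the TLB is flushed for the range once the
 * new protections are in place.
 */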
static void
change_protection(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, pgprot_t newprot)
{
	pgd_t *dir;
	unsigned long beg = start;

	dir = pgd_offset(current->mm, start);
	flush_cache_range(vma, beg, end);
	if (start >= end)
		BUG();
	spin_lock(&current->mm->page_table_lock);
	do {
		change_pmd_range(dir, start, end - start, newprot);
		start = (start + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (start && (start < end));
	flush_tlb_range(vma, beg, end);
	spin_unlock(&current->mm->page_table_lock);
	return;
}

/*
 * Try to merge the vma with the previous one; returns 1 on success or
 * 0 if the merge was impossible.
 */
static int
mprotect_attempt_merge(struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long end, int newflags)
{
	struct mm_struct * mm = vma->vm_mm;

	if (!prev || !vma)
		return 0;
	if (prev->vm_end != vma->vm_start)
		return 0;
	if (!can_vma_merge(prev, newflags))
		return 0;
	if (vma->vm_file || (vma->vm_flags & VM_SHARED))
		return 0;

	/*
	 * If the whole area changes to the protection of the previous one
	 * we can just get rid of it.
	 */
	if (end == vma->vm_end) {
		spin_lock(&mm->page_table_lock);
		prev->vm_end = end;
		__vma_unlink(mm, vma, prev);
		spin_unlock(&mm->page_table_lock);

		kmem_cache_free(vm_area_cachep, vma);
		mm->map_count--;
		return 1;
	}

	/*
	 * Otherwise extend it.
	 */
	spin_lock(&mm->page_table_lock);
	prev->vm_end = end;
	vma->vm_start = end;
	spin_unlock(&mm->page_table_lock);
	return 1;
}

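/*
 * Apply newflags to the [start, end) portion of vma: merge with the
 * previous vma where possible, otherwise split off the affected range
 * with split_vma() and update its flags and page protections.
 */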
static int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct * mm = vma->vm_mm;
	unsigned long charged = 0;
	pgprot_t newprot;
	int error;

	if (newflags == vma->vm_flags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.
	 *
	 * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting
	 * a MAP_NORESERVE private mapping to writable will now reserve.
	 */
	if (newflags & VM_WRITE) {
		if (!(vma->vm_flags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) {
			charged = (end - start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	newprot = protection_map[newflags & 0xf];

	if (start == vma->vm_start) {
		/*
		 * Try to merge with the previous vma.
		 */
		if (mprotect_attempt_merge(vma, *pprev, end, newflags)) {
			vma = *pprev;
			goto success;
		}
	} else {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}
	/*
	 * Unless it returns an error, this function always sets *pprev to
	 * the first vma for which vma->vm_end >= end.
	 */
	*pprev = vma;

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

	spin_lock(&mm->page_table_lock);
	vma->vm_flags = newflags;
	vma->vm_page_prot = newprot;
	spin_unlock(&mm->page_table_lock);
success:
	change_protection(vma, start, end, newprot);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

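/*
 * For reference, a minimal userspace sketch of the syscall implemented
 * below (illustrative only, not part of this file): revoking write
 * access to a private anonymous mapping after it has been initialized.
 *
 *	#include <sys/mman.h>
 *
 *	char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	// ... fill buf ...
 *	if (mprotect(buf, 4096, PROT_READ) < 0)
 *		perror("mprotect");
 */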
asmlinkage long
sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * next, * prev;
	int error = -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
		return -EINVAL;
	if (end == start)
		return 0;

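	/*
	 * mmap_sem is held for writing across the whole operation:
	 * vmas may be split, merged, or freed below.
	 */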
	down_write(&current->mm->mmap_sem);

	vma = find_vma_prev(current->mm, start, &prev);
	error = -ENOMEM;
	if (!vma || vma->vm_start > start)
		goto out;

	for (nstart = start ; ; ) {
		unsigned int newflags;
		int last = 0;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		if (is_vm_hugetlb_page(vma)) {
			error = -EACCES;
			goto out;
		}

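		/*
		 * PROT_READ/PROT_WRITE/PROT_EXEC have the same values as
		 * VM_READ/VM_WRITE/VM_EXEC, so prot can be or'ed straight
		 * into the vma flags.  Each requested permission bit must
		 * be backed by its VM_MAY* counterpart, which sits exactly
		 * four bits higher; anything else is -EACCES.
		 */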
		newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
		if ((newflags & ~(newflags >> 4)) & 0xf) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, prot);
		if (error)
			goto out;

		if (vma->vm_end > end) {
			error = mprotect_fixup(vma, &prev, nstart, end, newflags);
			goto out;
		}
		if (vma->vm_end == end)
			last = 1;

		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		if (last)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}

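	/*
	 * The fixups above may have left prev and next adjacent with
	 * identical flags; fold next into prev if so.
	 */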
	if (next && prev->vm_end == next->vm_start &&
			can_vma_merge(next, prev->vm_flags) &&
			!prev->vm_file && !(prev->vm_flags & VM_SHARED)) {
		spin_lock(&prev->vm_mm->page_table_lock);
		prev->vm_end = next->vm_end;
		__vma_unlink(prev->vm_mm, next, prev);
		spin_unlock(&prev->vm_mm->page_table_lock);

		kmem_cache_free(vm_area_cachep, next);
		prev->vm_mm->map_count--;
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}