/*
 *      linux/mm/mmap.c
 *
 * Written by obz.
 */
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/file.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware.  The expected
 * behavior is in parens:
 *
 * map_type     prot
 *              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
 * MAP_SHARED   r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
 *              w: (no) no      w: (no) no      w: (yes) yes    w: (no) no
 *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
 *
 * MAP_PRIVATE  r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
 *              w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
 *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
 *
 */
pgprot_t protection_map[16] = {
        __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
        __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
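
/* The array above is indexed by the low four bits of a vma's vm_flags
 * (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED), as done below with
 * "protection_map[vma->vm_flags & 0x0f]".  For example, assuming the usual
 * bit values from <linux/mm.h> (VM_READ 0x1, VM_WRITE 0x2, VM_EXEC 0x4,
 * VM_SHARED 0x8), a private PROT_READ|PROT_WRITE mapping uses index 0x3,
 * i.e. __P011, while the shared equivalent uses index 0xb, i.e. __S011.
 */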

/* SLAB cache for vm_area_struct's. */
kmem_cache_t *vm_area_cachep;

int sysctl_overcommit_memory;

/* Check that a process has enough memory to allocate a
 * new virtual mapping.
 */
int vm_enough_memory(long pages)
{
        /* Stupid algorithm to decide if we have enough memory: while
         * simple, it hopefully works in most obvious cases.. Easy to
         * fool it, but this should catch most mistakes.
         */
        /* 23/11/98 NJC: Somewhat less stupid version of algorithm,
         * which tries to do "TheRightThing".  Instead of using half of
         * (buffers+cache), use the minimum values.  Allow an extra 2%
         * of num_physpages for safety margin.
         */
        long free;

        /* Sometimes we want to use more memory than we have. */
        if (sysctl_overcommit_memory)
                return 1;

        free = buffermem >> PAGE_SHIFT;
        free += page_cache_size;
        free += nr_free_pages;
        free += nr_swap_pages;
        free -= (page_cache.min_percent + buffer_mem.min_percent + 2)*num_physpages/100;
        return free > pages;
}
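
/* In other words, the request succeeds if
 *
 *      buffer pages + page-cache pages + free pages + free swap pages
 *        - (page_cache.min_percent + buffer_mem.min_percent + 2)% of RAM
 *
 * still exceeds the number of pages asked for.  As a rough illustration
 * (numbers made up): with 32768 physical pages, 1000 buffer pages, 4000
 * page-cache pages, 2000 free pages, 8000 free swap pages and both
 * min_percent tunables at 2%, the reserve is 6% of 32768 = 1966 pages,
 * so free = 15000 - 1966 = 13034 and any request of up to 13033 pages
 * would be allowed.
 */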

/* Remove one vm structure from the inode's i_mmap ring. */
static inline void remove_shared_vm_struct(struct vm_area_struct *vma)
{
        struct file * file = vma->vm_file;

        if (file) {
                if (vma->vm_flags & VM_DENYWRITE)
                        file->f_dentry->d_inode->i_writecount++;
                if (vma->vm_next_share)
                        vma->vm_next_share->vm_pprev_share = vma->vm_pprev_share;
                *vma->vm_pprev_share = vma->vm_next_share;
        }
}
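
/* The share list is a singly linked list with a back-pointer to the
 * previous link's "next" field (vm_pprev_share), so unlinking needs no
 * list head and no search: point the previous "next" at our successor,
 * and, if there is a successor, point its vm_pprev_share back at our
 * previous "next" field.  The i_writecount increment undoes the decrement
 * made when a VM_DENYWRITE mapping was inserted (see insert_vm_struct()
 * below).
 */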

/*
 * sys_brk() for the most part doesn't need the global kernel
 * lock, except when an application is doing something nasty
 * like trying to un-brk an area that has already been mapped
 * to a regular file.  in this case, the unmapping will need
 * to invoke file system routines that need the global lock.
 */
asmlinkage unsigned long sys_brk(unsigned long brk)
{
        unsigned long rlim, retval;
        unsigned long newbrk, oldbrk;
        struct mm_struct *mm = current->mm;

        down(&mm->mmap_sem);

        if (brk < mm->end_code)
                goto out;
        newbrk = PAGE_ALIGN(brk);
        oldbrk = PAGE_ALIGN(mm->brk);
        if (oldbrk == newbrk)
                goto set_brk;

        /* Always allow shrinking brk. */
        if (brk <= mm->brk) {
                if (!do_munmap(newbrk, oldbrk-newbrk))
                        goto set_brk;
                goto out;
        }

        /* Check against rlimit and stack.. */
        rlim = current->rlim[RLIMIT_DATA].rlim_cur;
        if (rlim < RLIM_INFINITY && brk - mm->end_code > rlim)
                goto out;

        /* Check against existing mmap mappings. */
        if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
                goto out;

        /* Check if we have enough memory.. */
        if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT))
                goto out;

        /* Ok, looks good - let it rip. */
        if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
                goto out;
set_brk:
        mm->brk = brk;
out:
        retval = mm->brk;
        up(&mm->mmap_sem);
        return retval;
}
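
/* Note that sys_brk() always returns the resulting mm->brk, never an error
 * code; on any failure it simply falls through to "out" and reports the
 * unchanged break, which is how the C library detects a failed brk()/sbrk().
 * Growing the break maps new pages with do_brk(); shrinking it unmaps the
 * now-unused page-aligned tail with do_munmap().
 */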

/* Combine the mmap "prot" and "flags" argument into one "vm_flags" used
 * internally. Essentially, translate the "PROT_xxx" and "MAP_xxx" bits
 * into "VM_xxx".
 */
static inline unsigned long vm_flags(unsigned long prot, unsigned long flags)
{
#define _trans(x,bit1,bit2) \
((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)

        unsigned long prot_bits, flag_bits;
        prot_bits =
                _trans(prot, PROT_READ, VM_READ) |
                _trans(prot, PROT_WRITE, VM_WRITE) |
                _trans(prot, PROT_EXEC, VM_EXEC);
        flag_bits =
                _trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) |
                _trans(flags, MAP_DENYWRITE, VM_DENYWRITE) |
                _trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE);
        return prot_bits | flag_bits;
#undef _trans
}
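
/* _trans(x, bit1, bit2) moves bit1 of x to bit2: when the two constants
 * happen to have the same value (as PROT_READ/VM_READ etc. usually do) it
 * compiles down to a plain mask, otherwise it tests bit1 and substitutes
 * bit2.  So, for instance, vm_flags(PROT_READ|PROT_WRITE, MAP_PRIVATE)
 * yields VM_READ|VM_WRITE, and adding MAP_GROWSDOWN to the flags would
 * also set VM_GROWSDOWN in the result.
 */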

unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags, unsigned long off)
{
        struct mm_struct * mm = current->mm;
        struct vm_area_struct * vma;
        int error;

        if (file && (!file->f_op || !file->f_op->mmap))
                return -ENODEV;

        if ((len = PAGE_ALIGN(len)) == 0)
                return addr;

        if (len > TASK_SIZE || addr > TASK_SIZE-len)
                return -EINVAL;

        if (off & ~PAGE_MASK)
                return -EINVAL;

        /* offset overflow? */
        if (off + len < off)
                return -EINVAL;

        /* Too many mappings? */
        if (mm->map_count > MAX_MAP_COUNT)
                return -ENOMEM;

        /* mlock MCL_FUTURE? */
        if (mm->def_flags & VM_LOCKED) {
                unsigned long locked = mm->locked_vm << PAGE_SHIFT;
                locked += len;
                if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
                        return -EAGAIN;
        }

        /* Do simple checking here so the lower-level routines won't have
         * to. we assume access permissions have been handled by the open
         * of the memory object, so we don't do any here.
         */
        if (file != NULL) {
                switch (flags & MAP_TYPE) {
                case MAP_SHARED:
                        if ((prot & PROT_WRITE) && !(file->f_mode & 2))
                                return -EACCES;

                        /* Make sure we don't allow writing to an append-only file.. */
                        if (IS_APPEND(file->f_dentry->d_inode) && (file->f_mode & 2))
                                return -EACCES;

                        /* make sure there are no mandatory locks on the file. */
                        if (locks_verify_locked(file->f_dentry->d_inode))
                                return -EAGAIN;

                        /* fall through */
                case MAP_PRIVATE:
                        if (!(file->f_mode & 1))
                                return -EACCES;
                        break;

                default:
                        return -EINVAL;
                }
        } else if ((flags & MAP_TYPE) != MAP_PRIVATE)
                return -EINVAL;

        /* Obtain the address to map to. we verify (or select) it and ensure
         * that it represents a valid section of the address space.
         */
        if (flags & MAP_FIXED) {
                if (addr & ~PAGE_MASK)
                        return -EINVAL;
        } else {
                addr = get_unmapped_area(addr, len);
                if (!addr)
                        return -ENOMEM;
        }

        /* Determine the object being mapped and call the appropriate
         * specific mapper.  The address has already been validated, but
         * not unmapped; any old mappings covering the range are removed
         * from the list further below.
         */
        vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!vma)
                return -ENOMEM;

        vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_flags = vm_flags(prot,flags) | mm->def_flags;

        if (file) {
                if (file->f_mode & 1)
                        vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
                if (flags & MAP_SHARED) {
                        vma->vm_flags |= VM_SHARED | VM_MAYSHARE;

                        /* This looks strange, but when we don't have the file open
                         * for writing, we can demote the shared mapping to a simpler
                         * private mapping. That also takes care of a security hole
                         * with ptrace() writing to a shared mapping without write
                         * permissions.
                         *
                         * We leave the VM_MAYSHARE bit on, just to get correct output
                         * from /proc/xxx/maps..
                         */
                        if (!(file->f_mode & 2))
                                vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
                }
        } else
                vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
        vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
        vma->vm_ops = NULL;
        vma->vm_offset = off;
        vma->vm_file = NULL;
        vma->vm_pte = 0;

        /* Clear old maps */
        error = -ENOMEM;
        if (do_munmap(addr, len))
                goto free_vma;

        /* Check against address space limit. */
        if ((mm->total_vm << PAGE_SHIFT) + len
            > current->rlim[RLIMIT_AS].rlim_cur)
                goto free_vma;

        /* Private writable mapping? Check memory availability.. */
        if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
            !(flags & MAP_NORESERVE) &&
            !vm_enough_memory(len >> PAGE_SHIFT))
                goto free_vma;

        if (file) {
                int correct_wcount = 0;
                if (vma->vm_flags & VM_DENYWRITE) {
                        if (file->f_dentry->d_inode->i_writecount > 0) {
                                error = -ETXTBSY;
                                goto free_vma;
                        }
                        /* f_op->mmap might possibly sleep
                         * (generic_file_mmap doesn't, but other code
                         * might). In any case, this takes care of any
                         * race that this might cause.
                         */
                        file->f_dentry->d_inode->i_writecount--;
                        correct_wcount = 1;
                }
                error = file->f_op->mmap(file, vma);
                /* Fix up the count if necessary, then check for an error */
                if (correct_wcount)
                        file->f_dentry->d_inode->i_writecount++;
                if (error)
                        goto unmap_and_free_vma;
                vma->vm_file = file;
                file->f_count++;
        }

        /*
         * merge_segments may merge our vma, so we can't refer to it
         * after the call.  Save the values we need now ...
         */
        flags = vma->vm_flags;
        addr = vma->vm_start;           /* can addr have changed?? */
        insert_vm_struct(mm, vma);
        merge_segments(mm, vma->vm_start, vma->vm_end);

        mm->total_vm += len >> PAGE_SHIFT;
        if (flags & VM_LOCKED) {
                mm->locked_vm += len >> PAGE_SHIFT;
                make_pages_present(addr, addr + len);
        }
        return addr;

unmap_and_free_vma:
        /* Undo any partial mapping done by a device driver. */
        flush_cache_range(mm, vma->vm_start, vma->vm_end);
        zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start);
        flush_tlb_range(mm, vma->vm_start, vma->vm_end);
free_vma:
        kmem_cache_free(vm_area_cachep, vma);
        return error;
}
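
/* Summary of the do_mmap() sequence above: validate the arguments, allocate
 * and fill in a vm_area_struct, unmap whatever previously occupied the
 * range, do the rlimit and memory accounting checks, let the file's mmap
 * operation set up vm_ops for file-backed mappings, then insert the vma and
 * let merge_segments() coalesce it with its neighbours.  On success the
 * (possibly kernel-chosen) mapping address is returned; on failure a
 * negative errno is returned and the half-built vma is torn down again.
 */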

/* Get an address range which is currently unmapped.
 * For mmap() without MAP_FIXED and shmat() with addr=0.
 * Return value 0 means ENOMEM.
 */
unsigned long get_unmapped_area(unsigned long addr, unsigned long len)
{
        struct vm_area_struct * vmm;

        if (len > TASK_SIZE)
                return 0;
        if (!addr)
                addr = TASK_UNMAPPED_BASE;
        addr = PAGE_ALIGN(addr);

        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
                /* At this point:  (!vmm || addr < vmm->vm_end). */
                if (TASK_SIZE - len < addr)
                        return 0;
                if (!vmm || addr + len <= vmm->vm_start)
                        return addr;
                addr = vmm->vm_end;
        }
}
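
/* This is a simple first-fit search: starting from the hint (or from
 * TASK_UNMAPPED_BASE when none is given), walk the ordered vma list and
 * return the first gap of at least "len" bytes, or 0 if the search runs
 * past TASK_SIZE.  Each iteration either finds the gap before the current
 * vma big enough or bumps the candidate address to that vma's end.
 */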

#define vm_avl_empty    (struct vm_area_struct *) NULL

#include "mmap_avl.c"

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
{
        struct vm_area_struct *vma = NULL;

        if (mm) {
                /* Check the cache first. */
                /* (Cache hit rate is typically around 35%.) */
                vma = mm->mmap_cache;
                if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
                        if (!mm->mmap_avl) {
                                /* Go through the linear list. */
                                vma = mm->mmap;
                                while (vma && vma->vm_end <= addr)
                                        vma = vma->vm_next;
                        } else {
                                /* Then go through the AVL tree quickly. */
                                struct vm_area_struct * tree = mm->mmap_avl;
                                vma = NULL;
                                for (;;) {
                                        if (tree == vm_avl_empty)
                                                break;
                                        if (tree->vm_end > addr) {
                                                vma = tree;
                                                if (tree->vm_start <= addr)
                                                        break;
                                                tree = tree->vm_avl_left;
                                        } else
                                                tree = tree->vm_avl_right;
                                }
                        }
                        if (vma)
                                mm->mmap_cache = vma;
                }
        }
        return vma;
}
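
/* Note the exact semantics: find_vma() returns the lowest vma whose vm_end
 * lies above addr, which is not necessarily a vma that contains addr --
 * addr may still fall in the gap below vma->vm_start.  Callers such as
 * find_extend_vma() and do_munmap() below check vm_start themselves.
 */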

/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
                                      struct vm_area_struct **pprev)
{
        if (mm) {
                if (!mm->mmap_avl) {
                        /* Go through the linear list. */
                        struct vm_area_struct * prev = NULL;
                        struct vm_area_struct * vma = mm->mmap;
                        while (vma && vma->vm_end <= addr) {
                                prev = vma;
                                vma = vma->vm_next;
                        }
                        *pprev = prev;
                        return vma;
                } else {
                        /* Go through the AVL tree quickly. */
                        struct vm_area_struct * vma = NULL;
                        struct vm_area_struct * last_turn_right = NULL;
                        struct vm_area_struct * prev = NULL;
                        struct vm_area_struct * tree = mm->mmap_avl;
                        for (;;) {
                                if (tree == vm_avl_empty)
                                        break;
                                if (tree->vm_end > addr) {
                                        vma = tree;
                                        prev = last_turn_right;
                                        if (tree->vm_start <= addr)
                                                break;
                                        tree = tree->vm_avl_left;
                                } else {
                                        last_turn_right = tree;
                                        tree = tree->vm_avl_right;
                                }
                        }
                        if (vma) {
                                if (vma->vm_avl_left != vm_avl_empty) {
                                        prev = vma->vm_avl_left;
                                        while (prev->vm_avl_right != vm_avl_empty)
                                                prev = prev->vm_avl_right;
                                }
                                if ((prev ? prev->vm_next : mm->mmap) != vma)
                                        printk("find_vma_prev: tree inconsistent with list\n");
                                *pprev = prev;
                                return vma;
                        }
                }
        }
        *pprev = NULL;
        return NULL;
}

struct vm_area_struct * find_extend_vma(struct task_struct * tsk, unsigned long addr)
{
        struct vm_area_struct * vma;
        unsigned long start;

        addr &= PAGE_MASK;
        vma = find_vma(tsk->mm, addr);
        if (!vma)
                return NULL;
        if (vma->vm_start <= addr)
                return vma;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                return NULL;
        start = vma->vm_start;
        if (expand_stack(vma, addr))
                return NULL;
        if (vma->vm_flags & VM_LOCKED) {
                make_pages_present(addr, start);
        }
        return vma;
}
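
/* If addr falls just below a VM_GROWSDOWN vma (typically the stack), this
 * extends that vma downwards via expand_stack() instead of failing the
 * lookup, and for mlock'd stacks it faults the newly covered pages in
 * right away with make_pages_present().
 */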

/* Normal function to fix up a mapping
 * This function is the default for when an area has no specific
 * function.  This may be used as part of a more specific routine.
 * This function works out what part of an area is affected and
 * adjusts the mapping information.  Since the actual page
 * manipulation is done in do_mmap(), none need be done here,
 * though it would probably be more appropriate.
 *
 * By the time this function is called, the area struct has been
 * removed from the process mapping list, so it needs to be
 * reinserted if necessary.
 *
 * The 4 main cases are:
 *    Unmapping the whole area
 *    Unmapping from the start of the segment to a point in it
 *    Unmapping from an intermediate point to the end
 *    Unmapping between two intermediate points, making a hole.
 *
 * Case 4 involves the creation of 2 new areas, for each side of
 * the hole.  If possible, we reuse the existing area rather than
 * allocate a new one, and the return indicates whether the old
 * area was reused.
 */
static struct vm_area_struct * unmap_fixup(struct vm_area_struct *area,
        unsigned long addr, size_t len, struct vm_area_struct *extra)
{
        struct vm_area_struct *mpnt;
        unsigned long end = addr + len;

        area->vm_mm->total_vm -= len >> PAGE_SHIFT;
        if (area->vm_flags & VM_LOCKED)
                area->vm_mm->locked_vm -= len >> PAGE_SHIFT;

        /* Unmapping the whole area. */
        if (addr == area->vm_start && end == area->vm_end) {
                if (area->vm_ops && area->vm_ops->close)
                        area->vm_ops->close(area);
                if (area->vm_file)
                        fput(area->vm_file);
                kmem_cache_free(vm_area_cachep, area);
                return extra;
        }

        /* Work out to one of the ends. */
        if (end == area->vm_end)
                area->vm_end = addr;
        else if (addr == area->vm_start) {
                area->vm_offset += (end - area->vm_start);
                area->vm_start = end;
        } else {
                /* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */
                /* Add end mapping -- leave beginning for below */
                mpnt = extra;
                extra = NULL;

                mpnt->vm_mm = area->vm_mm;
                mpnt->vm_start = end;
                mpnt->vm_end = area->vm_end;
                mpnt->vm_page_prot = area->vm_page_prot;
                mpnt->vm_flags = area->vm_flags;
                mpnt->vm_ops = area->vm_ops;
                mpnt->vm_offset = area->vm_offset + (end - area->vm_start);
                mpnt->vm_file = area->vm_file;
                mpnt->vm_pte = area->vm_pte;
                if (mpnt->vm_file)
                        mpnt->vm_file->f_count++;
                if (mpnt->vm_ops && mpnt->vm_ops->open)
                        mpnt->vm_ops->open(mpnt);
                area->vm_end = addr;    /* Truncate area */
                insert_vm_struct(current->mm, mpnt);
        }

        insert_vm_struct(current->mm, area);
        return extra;
}
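
/* Worked example of the "hole" case: unmapping [addr, addr+len) from the
 * middle of an area [vm_start, vm_end) keeps the original struct for the
 * low part (its vm_end is truncated to addr) and reuses the caller-supplied
 * "extra" struct for the high part [addr+len, old vm_end), with its
 * vm_offset advanced by (addr+len - vm_start) so file offsets stay in sync.
 * The spare struct is returned only when it was NOT consumed, which is how
 * do_munmap() below knows whether it can free the spare vma afterwards.
 */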

/*
 * Try to free as many page directory entries as we can,
 * without having to work very hard at actually scanning
 * the page tables themselves.
 *
 * Right now we try to free page tables if we have a nice
 * PGDIR-aligned area that got free'd up.  We could be more
 * granular if we want to, but this is fast and simple,
 * and covers the bad cases.
 *
 * "prev", if it exists, points to a vma before the one
 * we just free'd - but there's no telling how much before.
 */
static void free_pgtables(struct mm_struct * mm, struct vm_area_struct *prev,
        unsigned long start, unsigned long end)
{
        unsigned long first = start & PGDIR_MASK;
        unsigned long last = (end + PGDIR_SIZE - 1) & PGDIR_MASK;

        if (!prev) {
                prev = mm->mmap;
                if (!prev)
                        goto no_mmaps;
                if (prev->vm_end > start) {
                        if (last > prev->vm_start)
                                last = prev->vm_start;
                        goto no_mmaps;
                }
        }
        for (;;) {
                struct vm_area_struct *next = prev->vm_next;

                if (next) {
                        if (next->vm_start < start) {
                                prev = next;
                                continue;
                        }
                        if (last > next->vm_start)
                                last = next->vm_start;
                }
                if (prev->vm_end > first)
                        first = prev->vm_end + PGDIR_SIZE - 1;
                break;
        }
no_mmaps:
        first = first >> PGDIR_SHIFT;
        last = last >> PGDIR_SHIFT;
        if (last > first)
                clear_page_tables(mm, first, last-first);
}
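
/* The idea: "first" and "last" start out as the PGDIR-aligned bounds of the
 * freed range and are then pulled inwards so they never overlap a
 * neighbouring vma -- "first" is rounded up past the end of the vma below
 * the hole, "last" is pulled down to the start of the vma above it.
 * Whatever whole page-directory slots remain between them can be handed to
 * clear_page_tables() without scanning the page tables themselves.
 */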

/* Munmap is split into 2 main parts -- this part which finds
 * what needs doing, and the areas themselves, which do the
 * work.  This now handles partial unmappings.
 * Jeremy Fitzhardine <jeremy@sw.oz.au>
 */
int do_munmap(unsigned long addr, size_t len)
{
        struct mm_struct * mm;
        struct vm_area_struct *mpnt, *prev, **npp, *free, *extra;

        if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
                return -EINVAL;

        if ((len = PAGE_ALIGN(len)) == 0)
                return -EINVAL;

        /* Check if this memory area is ok - put it on the temporary
         * list if so..  The checks here are pretty simple --
         * every area affected in some way (by any overlap) is put
         * on the list.  If nothing is put on, nothing is affected.
         */
        mm = current->mm;
        mpnt = find_vma_prev(mm, addr, &prev);
        if (!mpnt)
                return 0;
        /* we have  addr < mpnt->vm_end  */

        if (mpnt->vm_start >= addr+len)
                return 0;

        /* If we'll make "hole", check the vm areas limit */
        if ((mpnt->vm_start < addr && mpnt->vm_end > addr+len)
            && mm->map_count >= MAX_MAP_COUNT)
                return -ENOMEM;

        /*
         * We may need one additional vma to fix up the mappings ...
         * and this is the last chance for an easy error exit.
         */
        extra = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!extra)
                return -ENOMEM;

        npp = (prev ? &prev->vm_next : &mm->mmap);
        free = NULL;
        for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
                *npp = mpnt->vm_next;
                mpnt->vm_next = free;
                free = mpnt;
                if (mm->mmap_avl)
                        avl_remove(mpnt, &mm->mmap_avl);
        }

        /* Ok - we have the memory areas we should free on the 'free' list,
         * so release them, and unmap the page range..
         * If one of the segments is only being partially unmapped,
         * it will put new vm_area_struct(s) into the address space.
         */
        while ((mpnt = free) != NULL) {
                unsigned long st, end, size;

                free = free->vm_next;

                st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
                end = addr+len;
                end = end > mpnt->vm_end ? mpnt->vm_end : end;
                size = end - st;

                lock_kernel();

                if (mpnt->vm_ops && mpnt->vm_ops->unmap)
                        mpnt->vm_ops->unmap(mpnt, st, size);

                remove_shared_vm_struct(mpnt);
                mm->map_count--;

                flush_cache_range(mm, st, end);
                zap_page_range(mm, st, size);
                flush_tlb_range(mm, st, end);

                /*
                 * Fix the mapping, and free the old area if it wasn't reused.
                 */
                extra = unmap_fixup(mpnt, st, size, extra);

                unlock_kernel();
        }

        /* Release the extra vma struct if it wasn't used */
        if (extra)
                kmem_cache_free(vm_area_cachep, extra);

        free_pgtables(mm, prev, addr, addr+len);

        mm->mmap_cache = NULL;  /* Kill the cache. */
        return 0;
}

asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
        int ret;

        down(&current->mm->mmap_sem);
        ret = do_munmap(addr, len);
        up(&current->mm->mmap_sem);
        return ret;
}

/*
 * this is really a simplified "do_mmap".  it only handles
 * anonymous maps.  eventually we may be able to do some
 * brk-specific accounting here.
 */
unsigned long do_brk(unsigned long addr, unsigned long len)
{
        struct mm_struct * mm = current->mm;
        struct vm_area_struct * vma;
        unsigned long flags, retval;

        /*
         * mlock MCL_FUTURE?
         */
        if (mm->def_flags & VM_LOCKED) {
                unsigned long locked = mm->locked_vm << PAGE_SHIFT;
                locked += len;
                if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
                        return -EAGAIN;
        }

        /*
         * Clear old maps.  this also does some error checking for us
         */
        retval = do_munmap(addr, len);
        if (retval != 0)
                return retval;

        /* Check against address space limits *after* clearing old maps... */
        if ((mm->total_vm << PAGE_SHIFT) + len
            > current->rlim[RLIMIT_AS].rlim_cur)
                return -ENOMEM;

        if (mm->map_count > MAX_MAP_COUNT)
                return -ENOMEM;

        if (!vm_enough_memory(len >> PAGE_SHIFT))
                return -ENOMEM;

        /*
         * create a vma struct for an anonymous mapping
         */
        vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!vma)
                return -ENOMEM;

        vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_flags = vm_flags(PROT_READ|PROT_WRITE|PROT_EXEC,
                                 MAP_FIXED|MAP_PRIVATE) | mm->def_flags;

        vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
        vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
        vma->vm_ops = NULL;
        vma->vm_offset = 0;
        vma->vm_file = NULL;
        vma->vm_pte = 0;

        /*
         * merge_segments may merge our vma, so we can't refer to it
         * after the call.  Save the values we need now ...
         */
        flags = vma->vm_flags;
        addr = vma->vm_start;
        insert_vm_struct(mm, vma);
        merge_segments(mm, vma->vm_start, vma->vm_end);

        mm->total_vm += len >> PAGE_SHIFT;
        if (flags & VM_LOCKED) {
                mm->locked_vm += len >> PAGE_SHIFT;
                make_pages_present(addr, addr + len);
        }
        return addr;
}
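
/* Callers treat do_brk() like do_mmap(): it returns the mapping address on
 * success and a negative errno otherwise, which is why sys_brk() above
 * checks "do_brk(oldbrk, newbrk-oldbrk) != oldbrk" rather than testing for
 * zero.  The mapping is always anonymous, private and read/write/exec.
 */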

/* Build the AVL tree corresponding to the VMA list. */
void build_mmap_avl(struct mm_struct * mm)
{
        struct vm_area_struct * vma;

        mm->mmap_avl = NULL;
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                avl_insert(vma, &mm->mmap_avl);
}

/* Release all mmaps. */
void exit_mmap(struct mm_struct * mm)
{
        struct vm_area_struct * mpnt;

        mpnt = mm->mmap;
        mm->mmap = mm->mmap_avl = mm->mmap_cache = NULL;
        mm->rss = 0;
        mm->total_vm = 0;
        mm->locked_vm = 0;
        while (mpnt) {
                struct vm_area_struct * next = mpnt->vm_next;
                unsigned long start = mpnt->vm_start;
                unsigned long end = mpnt->vm_end;
                unsigned long size = end - start;

                if (mpnt->vm_ops) {
                        if (mpnt->vm_ops->unmap)
                                mpnt->vm_ops->unmap(mpnt, start, size);
                        if (mpnt->vm_ops->close)
                                mpnt->vm_ops->close(mpnt);
                }
                mm->map_count--;
                remove_shared_vm_struct(mpnt);
                zap_page_range(mm, start, size);
                if (mpnt->vm_file)
                        fput(mpnt->vm_file);
                kmem_cache_free(vm_area_cachep, mpnt);
                mpnt = next;
        }

        /* This is just debugging */
        if (mm->map_count)
                printk("exit_mmap: map count is %d\n", mm->map_count);

        clear_page_tables(mm, 0, USER_PTRS_PER_PGD);
}

/* Insert vm structure into process list sorted by address
 * and into the inode's i_mmap ring.
 */
void insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vmp)
{
        struct vm_area_struct **pprev;
        struct file * file;

        if (!mm->mmap_avl) {
                pprev = &mm->mmap;
                while (*pprev && (*pprev)->vm_start <= vmp->vm_start)
                        pprev = &(*pprev)->vm_next;
        } else {
                struct vm_area_struct *prev, *next;
                avl_insert_neighbours(vmp, &mm->mmap_avl, &prev, &next);
                pprev = (prev ? &prev->vm_next : &mm->mmap);
                if (*pprev != next)
                        printk("insert_vm_struct: tree inconsistent with list\n");
        }
        vmp->vm_next = *pprev;
        *pprev = vmp;

        mm->map_count++;
        if (mm->map_count >= AVL_MIN_MAP_COUNT && !mm->mmap_avl)
                build_mmap_avl(mm);

        file = vmp->vm_file;
        if (file) {
                struct inode * inode = file->f_dentry->d_inode;
                if (vmp->vm_flags & VM_DENYWRITE)
                        inode->i_writecount--;

                /* insert vmp into inode's share list */
                if ((vmp->vm_next_share = inode->i_mmap) != NULL)
                        inode->i_mmap->vm_pprev_share = &vmp->vm_next_share;
                inode->i_mmap = vmp;
                vmp->vm_pprev_share = &inode->i_mmap;
        }
}

/* Merge the list of memory segments if possible.
 * Redundant vm_area_structs are freed.
 * This assumes that the list is ordered by address.
 * We don't need to traverse the entire list, only those segments
 * which intersect or are adjacent to a given interval.
 *
 * We must already hold the mm semaphore when we get here..
 */
void merge_segments (struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
        struct vm_area_struct *prev, *mpnt, *next, *prev1;

        mpnt = find_vma_prev(mm, start_addr, &prev1);
        if (!mpnt)
                return;

        if (prev1) {
                prev = prev1;
        } else {
                prev = mpnt;
                mpnt = mpnt->vm_next;
        }

        /* prev and mpnt cycle through the list, as long as
         * start_addr < mpnt->vm_end && prev->vm_start < end_addr
         */
        for ( ; mpnt && prev->vm_start < end_addr ; prev = mpnt, mpnt = next) {
                next = mpnt->vm_next;

                /* To share, we must have the same file, operations.. */
                if ((mpnt->vm_file != prev->vm_file) ||
                    (mpnt->vm_pte != prev->vm_pte) ||
                    (mpnt->vm_ops != prev->vm_ops) ||
                    (mpnt->vm_flags != prev->vm_flags) ||
                    (prev->vm_end != mpnt->vm_start))
                        continue;

                /*
                 * If we have a file or it's a shared memory area
                 * the offsets must be contiguous..
                 */
                if ((mpnt->vm_file != NULL) || (mpnt->vm_flags & VM_SHM)) {
                        unsigned long off = prev->vm_offset+prev->vm_end-prev->vm_start;
                        if (off != mpnt->vm_offset)
                                continue;
                }

                /* merge prev with mpnt and set up pointers so the new
                 * big segment can possibly merge with the next one.
                 * The old unused mpnt is freed.
                 */
                if (mm->mmap_avl)
                        avl_remove(mpnt, &mm->mmap_avl);
                prev->vm_end = mpnt->vm_end;
                prev->vm_next = mpnt->vm_next;
                if (mpnt->vm_ops && mpnt->vm_ops->close) {
                        mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
                        mpnt->vm_start = mpnt->vm_end;
                        mpnt->vm_ops->close(mpnt);
                }
                mm->map_count--;
                remove_shared_vm_struct(mpnt);
                if (mpnt->vm_file)
                        fput(mpnt->vm_file);
                kmem_cache_free(vm_area_cachep, mpnt);
                mpnt = prev;
        }
        mm->mmap_cache = NULL;          /* Kill the cache. */
}

void __init vma_init(void)
{
        vm_area_cachep = kmem_cache_create("vm_area_struct",
                                           sizeof(struct vm_area_struct),
                                           0, SLAB_HWCACHE_ALIGN,
                                           NULL, NULL);
        if (!vm_area_cachep)
                panic("vma_init: Cannot alloc vm_area_struct cache.");

        mm_cachep = kmem_cache_create("mm_struct",
                                      sizeof(struct mm_struct),
                                      0, SLAB_HWCACHE_ALIGN,
                                      NULL, NULL);
        if (!mm_cachep)
                panic("vma_init: Cannot alloc mm_struct cache.");
}