/*
 *	linux/mm/mmap.c
 *
 *	Written by obz.
 */

#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/file.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware.  The expected
 * behavior is in parens:
 *
 * map_type	prot
 *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
 * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
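
/* The table is indexed by the low four bits of vm_flags
 * (VM_READ 0x1 | VM_WRITE 0x2 | VM_EXEC 0x4 | VM_SHARED 0x8); see the
 * protection_map[vma->vm_flags & 0x0f] lookup in do_mmap() below.
 * For example, a shared read-write mapping (0x0b) picks up __S011,
 * while the equivalent private mapping (0x03) picks up __P011.
 */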

/* SLAB cache for vm_area_struct's. */
kmem_cache_t *vm_area_cachep;

int sysctl_overcommit_memory;

/* Check that a process has enough memory to allocate a
 * new virtual mapping.
 */
int vm_enough_memory(long pages)
{
	/* Stupid algorithm to decide if we have enough memory: while
	 * simple, it hopefully works in most obvious cases.. Easy to
	 * fool it, but this should catch most mistakes.
	 */
	/* 23/11/98 NJC: Somewhat less stupid version of algorithm,
	 * which tries to do "TheRightThing".  Instead of using half of
	 * (buffers+cache), use the minimum values.  Allow an extra 2%
	 * of num_physpages for safety margin.
	 */

	long free;

	/* Sometimes we want to use more memory than we have. */
	if (sysctl_overcommit_memory)
		return 1;

	free = buffermem >> PAGE_SHIFT;
	free += page_cache_size;
	free += nr_free_pages;
	free += nr_swap_pages;
	free -= (page_cache.min_percent + buffer_mem.min_percent + 2)*num_physpages/100;
	return free > pages;
}
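
/* Worked example (illustrative numbers): on a 64MB machine,
 * num_physpages is 16384 4kB pages.  Assuming both min_percent
 * values sit at their usual default of 2, the reserve subtracted
 * above is (2+2+2)% of 16384 = 983 pages (~3.8MB), held back from
 * the buffers+cache+free+swap total before comparing against the
 * request.
 */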

/* Remove one vm structure from the inode's i_mmap ring. */
static inline void remove_shared_vm_struct(struct vm_area_struct *vma)
{
	struct file * file = vma->vm_file;

	if (file) {
		if (vma->vm_flags & VM_DENYWRITE)
			file->f_dentry->d_inode->i_writecount++;
		if(vma->vm_next_share)
			vma->vm_next_share->vm_pprev_share = vma->vm_pprev_share;
		*vma->vm_pprev_share = vma->vm_next_share;
	}
}
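
/* The share ring uses the next/pprev unlink idiom: vm_pprev_share
 * points back at whatever pointer currently points at this vma
 * (the previous entry's vm_next_share, or inode->i_mmap itself for
 * the first entry), so the removal above is O(1) with no head
 * pointer or backward scan needed.  The matching link-in is done in
 * insert_vm_struct() below.
 */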

asmlinkage unsigned long sys_brk(unsigned long brk)
{
	unsigned long rlim, retval;
	unsigned long newbrk, oldbrk;
	struct mm_struct *mm = current->mm;

	down(&mm->mmap_sem);

	/*
	 * This lock-kernel is one of the main contention points for
	 * certain normal loads.  And it really should not be here: almost
	 * everything in brk()/mmap()/munmap() is protected sufficiently by
	 * the mmap semaphore that we got above.
	 *
	 * We should move this into the few things that really want the
	 * lock, namely anything that actually touches a file descriptor
	 * etc.  We can do all the normal anonymous mapping cases without
	 * ever getting the lock at all - the actual memory management
	 * code is already completely thread-safe.
	 */
	lock_kernel();

	if (brk < mm->end_code)
		goto out;
	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk)
		goto set_brk;

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		if (!do_munmap(newbrk, oldbrk-newbrk))
			goto set_brk;
		goto out;
	}

	/* Check against rlimit and stack.. */
	rlim = current->rlim[RLIMIT_DATA].rlim_cur;
	if (rlim < RLIM_INFINITY && brk - mm->end_code > rlim)
		goto out;

	/* Check against existing mmap mappings. */
	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;

	/* Check if we have enough memory.. */
	if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_mmap(NULL, oldbrk, newbrk-oldbrk,
		    PROT_READ|PROT_WRITE|PROT_EXEC,
		    MAP_FIXED|MAP_PRIVATE, 0) != oldbrk)
		goto out;
set_brk:
	mm->brk = brk;
out:
	retval = mm->brk;
	unlock_kernel();
	up(&mm->mmap_sem);
	return retval;
}
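
/* Note that any request below mm->end_code (brk(0), say) falls
 * straight through to "out" and returns the current break
 * unchanged; C libraries rely on exactly that to query the break
 * before computing a new one for sbrk().
 */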

/* Combine the mmap "prot" and "flags" argument into one "vm_flags" used
 * internally. Essentially, translate the "PROT_xxx" and "MAP_xxx" bits
 * into "VM_xxx".
 */
static inline unsigned long vm_flags(unsigned long prot, unsigned long flags)
{
#define _trans(x,bit1,bit2) \
((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)

	unsigned long prot_bits, flag_bits;
	prot_bits =
		_trans(prot, PROT_READ, VM_READ) |
		_trans(prot, PROT_WRITE, VM_WRITE) |
		_trans(prot, PROT_EXEC, VM_EXEC);
	flag_bits =
		_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) |
		_trans(flags, MAP_DENYWRITE, VM_DENYWRITE) |
		_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE);
	return prot_bits | flag_bits;
#undef _trans
}
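
/* _trans() mostly compiles away: when a PROT_x or MAP_x bit has the
 * same value as its VM_x counterpart (true for all of these on i386,
 * e.g. PROT_READ == VM_READ == 0x1), the constant test (bit1==bit2)
 * reduces the whole thing to a plain mask (x & bit1); only on an
 * architecture where the values differ does the translating
 * conditional ((x & bit1) ? bit2 : 0) remain.
 */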

unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	struct mm_struct * mm = current->mm;
	struct vm_area_struct * vma;
	int error;

	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/* offset overflow? */
	if (off + len < off)
		return -EINVAL;

	/* Too many mappings? */
	if (mm->map_count > MAX_MAP_COUNT)
		return -ENOMEM;

	/* mlock MCL_FUTURE? */
	if (mm->def_flags & VM_LOCKED) {
		unsigned long locked = mm->locked_vm << PAGE_SHIFT;
		locked += len;
		if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
			return -EAGAIN;
	}

	/* Do simple checking here so the lower-level routines won't have
	 * to. we assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */
	if (file != NULL) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;

			/* Make sure we don't allow writing to an append-only file.. */
			if (IS_APPEND(file->f_dentry->d_inode) && (file->f_mode & 2))
				return -EACCES;

			/* make sure there are no mandatory locks on the file. */
			if (locks_verify_locked(file->f_dentry->d_inode))
				return -EAGAIN;

			/* fall through */
		case MAP_PRIVATE:
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}
	} else if ((flags & MAP_TYPE) != MAP_PRIVATE)
		return -EINVAL;

	/* Obtain the address to map to. we verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
	} else {
		addr = get_unmapped_area(addr, len);
		if (!addr)
			return -ENOMEM;
	}

	/* Determine the object being mapped and call the appropriate
	 * specific mapper. the address has already been validated, but
	 * not unmapped, but the maps are removed from the list.
	 */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;

	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->vm_mm = mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = vm_flags(prot,flags) | mm->def_flags;

	if (file) {
		if (file->f_mode & 1)
			vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (flags & MAP_SHARED) {
			vma->vm_flags |= VM_SHARED | VM_MAYSHARE;

			/* This looks strange, but when we don't have the file open
			 * for writing, we can demote the shared mapping to a simpler
			 * private mapping. That also takes care of a security hole
			 * with ptrace() writing to a shared mapping without write
			 * permissions.
			 *
			 * We leave the VM_MAYSHARE bit on, just to get correct output
			 * from /proc/xxx/maps..
			 */
			if (!(file->f_mode & 2))
				vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
		}
	} else
		vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
	vma->vm_ops = NULL;
	vma->vm_offset = off;
	vma->vm_file = NULL;
	vma->vm_pte = 0;

	/* Clear old maps */
	error = -ENOMEM;
	if (do_munmap(addr, len))
		goto free_vma;

	/* Check against address space limit. */
	if ((mm->total_vm << PAGE_SHIFT) + len
	    > current->rlim[RLIMIT_AS].rlim_cur)
		goto free_vma;

	/* Private writable mapping? Check memory availability.. */
	if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
	    !(flags & MAP_NORESERVE) &&
	    !vm_enough_memory(len >> PAGE_SHIFT))
		goto free_vma;

	if (file) {
		int correct_wcount = 0;
		if (vma->vm_flags & VM_DENYWRITE) {
			if (file->f_dentry->d_inode->i_writecount > 0) {
				error = -ETXTBSY;
				goto free_vma;
			}
			/* f_op->mmap might possibly sleep
			 * (generic_file_mmap doesn't, but other code
			 * might). In any case, this takes care of any
			 * race that this might cause.
			 */
			file->f_dentry->d_inode->i_writecount--;
			correct_wcount = 1;
		}
		error = file->f_op->mmap(file, vma);
		/* Fix up the count if necessary, then check for an error */
		if (correct_wcount)
			file->f_dentry->d_inode->i_writecount++;
		if (error)
			goto unmap_and_free_vma;
		vma->vm_file = file;
		file->f_count++;
	}

	/*
	 * merge_segments may merge our vma, so we can't refer to it
	 * after the call.  Save the values we need now ...
	 */
	flags = vma->vm_flags;
	addr = vma->vm_start;		/* can addr have changed?? */
	insert_vm_struct(mm, vma);
	merge_segments(mm, vma->vm_start, vma->vm_end);

	mm->total_vm += len >> PAGE_SHIFT;
	if (flags & VM_LOCKED) {
		mm->locked_vm += len >> PAGE_SHIFT;
		make_pages_present(addr, addr + len);
	}
	return addr;

unmap_and_free_vma:
	/* Undo any partial mapping done by a device driver. */
	flush_cache_range(mm, vma->vm_start, vma->vm_end);
	zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start);
	flush_tlb_range(mm, vma->vm_start, vma->vm_end);
free_vma:
	kmem_cache_free(vm_area_cachep, vma);
	return error;
}
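
/* Callers are expected to hold mm->mmap_sem across this call, as
 * sys_brk() above and the arch mmap() syscall wrappers do.  A
 * typical anonymous-mapping call (sketch only) looks like:
 *
 *	down(&current->mm->mmap_sem);
 *	addr = do_mmap(NULL, 0, len, PROT_READ|PROT_WRITE,
 *		       MAP_PRIVATE, 0);
 *	up(&current->mm->mmap_sem);
 *
 * with addr == 0 letting get_unmapped_area() below pick the range.
 */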

/* Get an address range which is currently unmapped.
 * For mmap() without MAP_FIXED and shmat() with addr=0.
 * Return value 0 means ENOMEM.
 */
unsigned long get_unmapped_area(unsigned long addr, unsigned long len)
{
	struct vm_area_struct * vmm;

	if (len > TASK_SIZE)
		return 0;
	if (!addr)
		addr = TASK_UNMAPPED_BASE;
	addr = PAGE_ALIGN(addr);

	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (TASK_SIZE - len < addr)
			return 0;
		if (!vmm || addr + len <= vmm->vm_start)
			return addr;
		addr = vmm->vm_end;
	}
}
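
/* This is a plain first-fit scan.  For example, with
 * TASK_UNMAPPED_BASE at 0x40000000 (i386) and an existing vma
 * covering [0x40000000, 0x40010000), a hint-less request for two
 * pages steps past that vma in one iteration and returns
 * 0x40010000, provided the next vma starts at or after 0x40012000.
 */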

#define vm_avl_empty	(struct vm_area_struct *) NULL

#include "mmap_avl.c"

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
{
	struct vm_area_struct *vma = NULL;

	if (mm) {
		/* Check the cache first. */
		/* (Cache hit rate is typically around 35%.) */
		vma = mm->mmap_cache;
		if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
			if (!mm->mmap_avl) {
				/* Go through the linear list. */
				vma = mm->mmap;
				while (vma && vma->vm_end <= addr)
					vma = vma->vm_next;
			} else {
				/* Then go through the AVL tree quickly. */
				struct vm_area_struct * tree = mm->mmap_avl;
				vma = NULL;
				for (;;) {
					if (tree == vm_avl_empty)
						break;
					if (tree->vm_end > addr) {
						vma = tree;
						if (tree->vm_start <= addr)
							break;
						tree = tree->vm_avl_left;
					} else
						tree = tree->vm_avl_right;
				}
			}
			if (vma)
				mm->mmap_cache = vma;
		}
	}
	return vma;
}
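
/* The AVL descent above maintains the invariant that "vma" is the
 * lowest vma seen so far with vm_end > addr: going left may find a
 * lower candidate, going right discards nodes whose vm_end is too
 * small, and the walk stops early once the candidate actually
 * contains addr (vm_start <= addr < vm_end).
 */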

/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
				      struct vm_area_struct **pprev)
{
	if (mm) {
		if (!mm->mmap_avl) {
			/* Go through the linear list. */
			struct vm_area_struct * prev = NULL;
			struct vm_area_struct * vma = mm->mmap;
			while (vma && vma->vm_end <= addr) {
				prev = vma;
				vma = vma->vm_next;
			}
			*pprev = prev;
			return vma;
		} else {
			/* Go through the AVL tree quickly. */
			struct vm_area_struct * vma = NULL;
			struct vm_area_struct * last_turn_right = NULL;
			struct vm_area_struct * prev = NULL;
			struct vm_area_struct * tree = mm->mmap_avl;
			for (;;) {
				if (tree == vm_avl_empty)
					break;
				if (tree->vm_end > addr) {
					vma = tree;
					prev = last_turn_right;
					if (tree->vm_start <= addr)
						break;
					tree = tree->vm_avl_left;
				} else {
					last_turn_right = tree;
					tree = tree->vm_avl_right;
				}
			}
			if (vma) {
				if (vma->vm_avl_left != vm_avl_empty) {
					prev = vma->vm_avl_left;
					while (prev->vm_avl_right != vm_avl_empty)
						prev = prev->vm_avl_right;
				}
				if ((prev ? prev->vm_next : mm->mmap) != vma)
					printk("find_vma_prev: tree inconsistent with list\n");
				*pprev = prev;
				return vma;
			}
		}
	}
	*pprev = NULL;
	return NULL;
}

/* Normal function to fix up a mapping
 * This function is the default for when an area has no specific
 * function.  This may be used as part of a more specific routine.
 * This function works out what part of an area is affected and
 * adjusts the mapping information.  Since the actual page
 * manipulation is done in do_mmap(), none need be done here,
 * though it would probably be more appropriate.
 *
 * By the time this function is called, the area struct has been
 * removed from the process mapping list, so it needs to be
 * reinserted if necessary.
 *
 * The 4 main cases are:
 *    Unmapping the whole area
 *    Unmapping from the start of the segment to a point in it
 *    Unmapping from an intermediate point to the end
 *    Unmapping between two intermediate points, making a hole.
 *
 * Case 4 involves the creation of 2 new areas, for each side of
 * the hole.  If possible, we reuse the existing area rather than
 * allocate a new one, and the return indicates whether the old
 * area was reused.
 */
static struct vm_area_struct * unmap_fixup(struct vm_area_struct *area,
	unsigned long addr, size_t len, struct vm_area_struct *extra)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	area->vm_mm->total_vm -= len >> PAGE_SHIFT;
	if (area->vm_flags & VM_LOCKED)
		area->vm_mm->locked_vm -= len >> PAGE_SHIFT;

	/* Unmapping the whole area. */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		if (area->vm_file)
			fput(area->vm_file);
		kmem_cache_free(vm_area_cachep, area);
		return extra;
	}

	/* Work out to one of the ends. */
	if (end == area->vm_end)
		area->vm_end = addr;
	else if (addr == area->vm_start) {
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;
	} else {
		/* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */
		/* Add end mapping -- leave beginning for below */
		mpnt = extra;
		extra = NULL;

		mpnt->vm_mm = area->vm_mm;
		mpnt->vm_start = end;
		mpnt->vm_end = area->vm_end;
		mpnt->vm_page_prot = area->vm_page_prot;
		mpnt->vm_flags = area->vm_flags;
		mpnt->vm_ops = area->vm_ops;
		mpnt->vm_offset = area->vm_offset + (end - area->vm_start);
		mpnt->vm_file = area->vm_file;
		mpnt->vm_pte = area->vm_pte;
		if (mpnt->vm_file)
			mpnt->vm_file->f_count++;
		if (mpnt->vm_ops && mpnt->vm_ops->open)
			mpnt->vm_ops->open(mpnt);
		area->vm_end = addr;	/* Truncate area */
		insert_vm_struct(current->mm, mpnt);
	}

	insert_vm_struct(current->mm, area);
	return extra;
}
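
/* Example of case 4: unmapping [0x40002000, 0x40003000) from an
 * area covering [0x40000000, 0x40005000) truncates "area" to
 * [0x40000000, 0x40002000) and turns "extra" into the tail
 * [0x40003000, 0x40005000), with vm_offset advanced by 0x3000 so a
 * file mapping stays lined up.  Returning NULL then tells the
 * caller that the spare vma was consumed.
 */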

/*
 * Try to free as many page directory entries as we can,
 * without having to work very hard at actually scanning
 * the page tables themselves.
 *
 * Right now we try to free page tables if we have a nice
 * PGDIR-aligned area that got free'd up.  We could be more
 * granular if we want to, but this is fast and simple,
 * and covers the bad cases.
 *
 * "prev", if it exists, points to a vma before the one
 * we just free'd - but there's no telling how much before.
 */
static void free_pgtables(struct mm_struct * mm, struct vm_area_struct *prev,
			  unsigned long start, unsigned long end)
{
	unsigned long first = start & PGDIR_MASK;
	unsigned long last = (end + PGDIR_SIZE - 1) & PGDIR_MASK;

	if (!prev) {
		prev = mm->mmap;
		if (!prev)
			goto no_mmaps;
		if (prev->vm_end > start) {
			if (last > prev->vm_start)
				last = prev->vm_start;
			goto no_mmaps;
		}
	}
	for (;;) {
		struct vm_area_struct *next = prev->vm_next;

		if (next) {
			if (next->vm_start < start) {
				prev = next;
				continue;
			}
			if (last > next->vm_start)
				last = next->vm_start;
		}
		if (prev->vm_end > first)
			first = prev->vm_end + PGDIR_SIZE - 1;
		break;
	}
no_mmaps:
	first = first >> PGDIR_SHIFT;
	last = last >> PGDIR_SHIFT;
	if (last > first)
		clear_page_tables(mm, first, last-first);
}
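
/* On i386, PGDIR_SHIFT is 22, so each page table maps a 4MB slice.
 * The clamping against the neighbouring vmas means a slot is only
 * cleared once no mapping at all remains inside its 4MB: unmapping
 * exactly [0x08400000, 0x08800000) with the neighbours ending and
 * starting on those boundaries frees that one page table, while a
 * small hole next to a live mapping in the same slot frees nothing.
 */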

/* Munmap is split into 2 main parts -- this part which finds
 * what needs doing, and the areas themselves, which do the
 * work.  This now handles partial unmappings.
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
int do_munmap(unsigned long addr, size_t len)
{
	struct mm_struct * mm;
	struct vm_area_struct *mpnt, *prev, **npp, *free, *extra;

	if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
		return -EINVAL;

	if ((len = PAGE_ALIGN(len)) == 0)
		return -EINVAL;

	/* Check if this memory area is ok - put it on the temporary
	 * list if so..  The checks here are pretty simple --
	 * every area affected in some way (by any overlap) is put
	 * on the list.  If nothing is put on, nothing is affected.
	 */
	mm = current->mm;
	mpnt = find_vma_prev(mm, addr, &prev);
	if (!mpnt)
		return 0;
	/* we have  addr < mpnt->vm_end  */

	if (mpnt->vm_start >= addr+len)
		return 0;

	/* If we'll make "hole", check the vm areas limit */
	if ((mpnt->vm_start < addr && mpnt->vm_end > addr+len)
	    && mm->map_count >= MAX_MAP_COUNT)
		return -ENOMEM;

	/*
	 * We may need one additional vma to fix up the mappings ...
	 * and this is the last chance for an easy error exit.
	 */
	extra = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!extra)
		return -ENOMEM;

	npp = (prev ? &prev->vm_next : &mm->mmap);
	free = NULL;
	for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
		*npp = mpnt->vm_next;
		mpnt->vm_next = free;
		free = mpnt;
		if (mm->mmap_avl)
			avl_remove(mpnt, &mm->mmap_avl);
	}

	/* Ok - we have the memory areas we should free on the 'free' list,
	 * so release them, and unmap the page range..
	 * If one of the segments is only being partially unmapped,
	 * it will put new vm_area_struct(s) into the address space.
	 */
	while ((mpnt = free) != NULL) {
		unsigned long st, end, size;

		free = free->vm_next;

		st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
		end = addr+len;
		end = end > mpnt->vm_end ? mpnt->vm_end : end;
		size = end - st;

		if (mpnt->vm_ops && mpnt->vm_ops->unmap)
			mpnt->vm_ops->unmap(mpnt, st, size);

		remove_shared_vm_struct(mpnt);
		mm->map_count--;

		flush_cache_range(mm, st, end);
		zap_page_range(mm, st, size);
		flush_tlb_range(mm, st, end);

		/*
		 * Fix the mapping, and free the old area if it wasn't reused.
		 */
		extra = unmap_fixup(mpnt, st, size, extra);
	}

	/* Release the extra vma struct if it wasn't used */
	if (extra)
		kmem_cache_free(vm_area_cachep, extra);

	free_pgtables(mm, prev, addr, addr+len);

	mm->mmap_cache = NULL;	/* Kill the cache. */
	return 0;
}

asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
	int ret;

	down(&current->mm->mmap_sem);
	lock_kernel();
	ret = do_munmap(addr, len);
	unlock_kernel();
	up(&current->mm->mmap_sem);
	return ret;
}

/* Build the AVL tree corresponding to the VMA list. */
void build_mmap_avl(struct mm_struct * mm)
{
	struct vm_area_struct * vma;

	mm->mmap_avl = NULL;
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		avl_insert(vma, &mm->mmap_avl);
}

/* Release all mmaps. */
void exit_mmap(struct mm_struct * mm)
{
	struct vm_area_struct * mpnt;

	mpnt = mm->mmap;
	mm->mmap = mm->mmap_avl = mm->mmap_cache = NULL;
	mm->rss = 0;
	mm->total_vm = 0;
	mm->locked_vm = 0;
	while (mpnt) {
		struct vm_area_struct * next = mpnt->vm_next;
		unsigned long start = mpnt->vm_start;
		unsigned long end = mpnt->vm_end;
		unsigned long size = end - start;

		if (mpnt->vm_ops) {
			if (mpnt->vm_ops->unmap)
				mpnt->vm_ops->unmap(mpnt, start, size);
			if (mpnt->vm_ops->close)
				mpnt->vm_ops->close(mpnt);
		}
		mm->map_count--;
		remove_shared_vm_struct(mpnt);
		zap_page_range(mm, start, size);
		if (mpnt->vm_file)
			fput(mpnt->vm_file);
		kmem_cache_free(vm_area_cachep, mpnt);
		mpnt = next;
	}

	/* This is just debugging */
	if (mm->map_count)
		printk("exit_mmap: map count is %d\n", mm->map_count);

	clear_page_tables(mm, 0, USER_PTRS_PER_PGD);
}

/* Insert vm structure into process list sorted by address
 * and into the inode's i_mmap ring.
 */
void insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vmp)
{
	struct vm_area_struct **pprev;
	struct file * file;

	if (!mm->mmap_avl) {
		pprev = &mm->mmap;
		while (*pprev && (*pprev)->vm_start <= vmp->vm_start)
			pprev = &(*pprev)->vm_next;
	} else {
		struct vm_area_struct *prev, *next;
		avl_insert_neighbours(vmp, &mm->mmap_avl, &prev, &next);
		pprev = (prev ? &prev->vm_next : &mm->mmap);
		if (*pprev != next)
			printk("insert_vm_struct: tree inconsistent with list\n");
	}
	vmp->vm_next = *pprev;
	*pprev = vmp;

	mm->map_count++;
	if (mm->map_count >= AVL_MIN_MAP_COUNT && !mm->mmap_avl)
		build_mmap_avl(mm);

	file = vmp->vm_file;
	if (file) {
		struct inode * inode = file->f_dentry->d_inode;
		if (vmp->vm_flags & VM_DENYWRITE)
			inode->i_writecount--;

		/* insert vmp into inode's share list */
		if((vmp->vm_next_share = inode->i_mmap) != NULL)
			inode->i_mmap->vm_pprev_share = &vmp->vm_next_share;
		inode->i_mmap = vmp;
		vmp->vm_pprev_share = &inode->i_mmap;
	}
}

/* Merge the list of memory segments if possible.
 * Redundant vm_area_structs are freed.
 * This assumes that the list is ordered by address.
 * We don't need to traverse the entire list, only those segments
 * which intersect or are adjacent to a given interval.
 *
 * We must already hold the mm semaphore when we get here..
 */
void merge_segments (struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct *prev, *mpnt, *next, *prev1;

	mpnt = find_vma_prev(mm, start_addr, &prev1);
	if (!mpnt)
		return;

	if (prev1) {
		prev = prev1;
	} else {
		prev = mpnt;
		mpnt = mpnt->vm_next;
	}

	/* prev and mpnt cycle through the list, as long as
	 * start_addr < mpnt->vm_end && prev->vm_start < end_addr
	 */
	for ( ; mpnt && prev->vm_start < end_addr ; prev = mpnt, mpnt = next) {
		next = mpnt->vm_next;

		/* To share, we must have the same file, operations.. */
		if ((mpnt->vm_file != prev->vm_file)	||
		    (mpnt->vm_pte != prev->vm_pte)	||
		    (mpnt->vm_ops != prev->vm_ops)	||
		    (mpnt->vm_flags != prev->vm_flags)	||
		    (prev->vm_end != mpnt->vm_start))
			continue;

		/*
		 * If we have a file or it's a shared memory area
		 * the offsets must be contiguous..
		 */
		if ((mpnt->vm_file != NULL) || (mpnt->vm_flags & VM_SHM)) {
			unsigned long off = prev->vm_offset+prev->vm_end-prev->vm_start;
			if (off != mpnt->vm_offset)
				continue;
		}

		/* merge prev with mpnt and set up pointers so the new
		 * big segment can possibly merge with the next one.
		 * The old unused mpnt is freed.
		 */
		if (mm->mmap_avl)
			avl_remove(mpnt, &mm->mmap_avl);
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		if (mpnt->vm_ops && mpnt->vm_ops->close) {
			mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
			mpnt->vm_start = mpnt->vm_end;
			mpnt->vm_ops->close(mpnt);
		}
		mm->map_count--;
		remove_shared_vm_struct(mpnt);
		if (mpnt->vm_file)
			fput(mpnt->vm_file);
		kmem_cache_free(vm_area_cachep, mpnt);
		mpnt = prev;
	}
	mm->mmap_cache = NULL;		/* Kill the cache. */
}
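
/* Example: two adjacent anonymous private areas with identical
 * flags, say [0x40000000, 0x40001000) and [0x40001000, 0x40003000),
 * collapse into a single vma here.  For file or SHM mappings the
 * vm_offset check above additionally requires the offsets to run on
 * contiguously across the join, so two windows onto non-adjacent
 * parts of the same file are left alone.
 */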

void __init vma_init(void)
{
	vm_area_cachep = kmem_cache_create("vm_area_struct",
					   sizeof(struct vm_area_struct),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL, NULL);
	if(!vm_area_cachep)
		panic("vma_init: Cannot alloc vm_area_struct cache.");

	mm_cachep = kmem_cache_create("mm_struct",
				      sizeof(struct mm_struct),
				      0, SLAB_HWCACHE_ALIGN,
				      NULL, NULL);
	if(!mm_cachep)
		panic("vma_init: Cannot alloc mm_struct cache.");
}