/*
 *      linux/mm/mmap.c
 *
 * Written by obz.
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/file.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pgtable.h>

/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware.  The expected
 * behavior is in parens:
 *
 * map_type     prot
 *              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
 * MAP_SHARED   r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
 *              w: (no) no      w: (no) no      w: (yes) yes    w: (no) no
 *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
 *
 * MAP_PRIVATE  r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
 *              w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
 *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
 */
pgprot_t protection_map[16] = {
        __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
        __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
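
/* Descriptive note (not in the original source): protection_map is indexed
 * by the low four bits of vm_flags -- VM_READ, VM_WRITE, VM_EXEC and
 * VM_SHARED -- so the first eight entries (__Pxxx) cover private mappings
 * and the last eight (__Sxxx) cover shared ones.  do_mmap() below performs
 * the lookup as protection_map[vma->vm_flags & 0x0f].
 */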

/* SLAB cache for vm_area_struct's. */
kmem_cache_t *vm_area_cachep;

int sysctl_overcommit_memory;

/* Check that a process has enough memory to allocate a
 * new virtual mapping.
 */
int vm_enough_memory(long pages)
{
        /* Stupid algorithm to decide if we have enough memory: while
         * simple, it hopefully works in most obvious cases.. Easy to
         * fool it, but this should catch most mistakes.
         */
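        /* Descriptive note (not in the original source): the estimate below
         * counts half of the buffer-cache and page-cache pages as
         * reclaimable, adds the free and swap pages, and keeps roughly
         * 1/16 of physical memory in reserve.
         */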
        long free;

        /* Sometimes we want to use more memory than we have. */
        if (sysctl_overcommit_memory)
                return 1;

        free = buffermem >> PAGE_SHIFT;
        free += page_cache_size;
        free >>= 1;
        free += nr_free_pages;
        free += nr_swap_pages;
        free -= num_physpages >> 4;
        return free > pages;
}

/* Remove one vm structure from the inode's i_mmap ring. */
static inline void remove_shared_vm_struct(struct vm_area_struct *vma)
{
        struct file * file = vma->vm_file;

        if (file) {
                if (vma->vm_flags & VM_DENYWRITE)
                        file->f_dentry->d_inode->i_writecount++;
                if(vma->vm_next_share)
                        vma->vm_next_share->vm_pprev_share = vma->vm_pprev_share;
                *vma->vm_pprev_share = vma->vm_next_share;
        }
}

asmlinkage unsigned long sys_brk(unsigned long brk)
{
        unsigned long rlim, retval;
        unsigned long newbrk, oldbrk;
        struct mm_struct *mm = current->mm;

        down(&mm->mmap_sem);
        lock_kernel();
        if (brk < mm->end_code)
                goto out;
        newbrk = PAGE_ALIGN(brk);
        oldbrk = PAGE_ALIGN(mm->brk);
        if (oldbrk == newbrk)
                goto set_brk;

        /* Always allow shrinking brk. */
        if (brk <= mm->brk) {
                if (!do_munmap(newbrk, oldbrk-newbrk))
                        goto set_brk;
                goto out;
        }

        /* Check against rlimit and stack.. */
        rlim = current->rlim[RLIMIT_DATA].rlim_cur;
        if (rlim < RLIM_INFINITY && brk - mm->end_code > rlim)
                goto out;

        /* Check against existing mmap mappings. */
        if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
                goto out;

        /* Check if we have enough memory.. */
        if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT))
                goto out;

        /* Ok, looks good - let it rip. */
        if (do_mmap(NULL, oldbrk, newbrk-oldbrk,
                    PROT_READ|PROT_WRITE|PROT_EXEC,
                    MAP_FIXED|MAP_PRIVATE, 0) != oldbrk)
                goto out;
set_brk:
        mm->brk = brk;
out:
        retval = mm->brk;
        unlock_kernel();
        up(&mm->mmap_sem);
        return retval;
}

/* Combine the mmap "prot" and "flags" argument into one "vm_flags" used
 * internally. Essentially, translate the "PROT_xxx" and "MAP_xxx" bits
 * into "VM_xxx".
 */
static inline unsigned long vm_flags(unsigned long prot, unsigned long flags)
{
#define _trans(x,bit1,bit2) \
((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)

        unsigned long prot_bits, flag_bits;
        prot_bits =
                _trans(prot, PROT_READ, VM_READ) |
                _trans(prot, PROT_WRITE, VM_WRITE) |
                _trans(prot, PROT_EXEC, VM_EXEC);
        flag_bits =
                _trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) |
                _trans(flags, MAP_DENYWRITE, VM_DENYWRITE) |
                _trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE);
        return prot_bits | flag_bits;
#undef _trans
}
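
/* Illustrative expansion (not in the original source): with the _trans()
 * macro above, _trans(prot, PROT_READ, VM_READ) becomes (prot & PROT_READ)
 * when the two constants happen to share the same bit value, and
 * ((prot & PROT_READ) ? VM_READ : 0) when they do not, so the result is
 * always expressed in VM_xxx terms.
 */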

unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags, unsigned long off)
{
        struct mm_struct * mm = current->mm;
        struct vm_area_struct * vma;
        int correct_wcount = 0, error;

        if ((len = PAGE_ALIGN(len)) == 0)
                return addr;

        if (len > TASK_SIZE || addr > TASK_SIZE-len)
                return -EINVAL;

        /* offset overflow? */
        if (off + len < off)
                return -EINVAL;

        /* Too many mappings? */
        if (mm->map_count > MAX_MAP_COUNT)
                return -ENOMEM;

        /* mlock MCL_FUTURE? */
        if (mm->def_flags & VM_LOCKED) {
                unsigned long locked = mm->locked_vm << PAGE_SHIFT;
                locked += len;
                if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
                        return -EAGAIN;
        }

        /* Do simple checking here so the lower-level routines won't have
         * to. we assume access permissions have been handled by the open
         * of the memory object, so we don't do any here.
         */
        if (file != NULL) {
                switch (flags & MAP_TYPE) {
                case MAP_SHARED:
                        if ((prot & PROT_WRITE) && !(file->f_mode & 2))
                                return -EACCES;

                        /* Make sure we don't allow writing to an append-only file.. */
                        if (IS_APPEND(file->f_dentry->d_inode) && (file->f_mode & 2))
                                return -EACCES;

                        /* make sure there are no mandatory locks on the file. */
                        if (locks_verify_locked(file->f_dentry->d_inode))
                                return -EAGAIN;

                        /* fall through */
                case MAP_PRIVATE:
                        if (!(file->f_mode & 1))
                                return -EACCES;
                        break;

                default:
                        return -EINVAL;
                }
        } else if ((flags & MAP_TYPE) != MAP_PRIVATE)
                return -EINVAL;
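
        /* Descriptive note (not in the original source): the f_mode tests
         * above use the raw mode bits -- (f_mode & 1) means the file is open
         * for reading and (f_mode & 2) means it is open for writing.
         */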

        /* Obtain the address to map to. we verify (or select) it and ensure
         * that it represents a valid section of the address space.
         */
        if (flags & MAP_FIXED) {
                if (addr & ~PAGE_MASK)
                        return -EINVAL;
        } else {
                addr = get_unmapped_area(addr, len);
                if (!addr)
                        return -ENOMEM;
        }

        /* Determine the object being mapped and call the appropriate
         * specific mapper. the address has already been validated, but
         * not unmapped, but the maps are removed from the list.
         */
        if (file && (!file->f_op || !file->f_op->mmap))
                return -ENODEV;

        vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!vma)
                return -ENOMEM;

        vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_flags = vm_flags(prot,flags) | mm->def_flags;

        if (file) {
                if (file->f_mode & 1)
                        vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
                if (flags & MAP_SHARED) {
                        vma->vm_flags |= VM_SHARED | VM_MAYSHARE;

                        /* This looks strange, but when we don't have the file open
                         * for writing, we can demote the shared mapping to a simpler
                         * private mapping. That also takes care of a security hole
                         * with ptrace() writing to a shared mapping without write
                         * permissions.
                         *
                         * We leave the VM_MAYSHARE bit on, just to get correct output
                         * from /proc/xxx/maps..
                         */
                        if (!(file->f_mode & 2))
                                vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
                }
        } else
                vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
        vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
        vma->vm_ops = NULL;
        vma->vm_offset = off;
        vma->vm_file = NULL;
        vma->vm_pte = 0;

        /* Clear old maps */
        error = -ENOMEM;
        if (do_munmap(addr, len))
                goto free_vma;

        /* Check against address space limit. */
        if ((mm->total_vm << PAGE_SHIFT) + len
            > current->rlim[RLIMIT_AS].rlim_cur)
                goto free_vma;

        /* Private writable mapping? Check memory availability.. */
        if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
            !(flags & MAP_NORESERVE) &&
            !vm_enough_memory(len >> PAGE_SHIFT))
                goto free_vma;

        error = 0;
        if (file) {
                if (vma->vm_flags & VM_DENYWRITE) {
                        if (file->f_dentry->d_inode->i_writecount > 0)
                                error = -ETXTBSY;
                        else {
                                /* f_op->mmap might possibly sleep
                                 * (generic_file_mmap doesn't, but other code
                                 * might). In any case, this takes care of any
                                 * race that this might cause.
                                 */
                                file->f_dentry->d_inode->i_writecount--;
                                correct_wcount = 1;
                        }
                }
                if (!error)
                        error = file->f_op->mmap(file, vma);
        }

        /* Fix up the count if necessary, then check for an error */
        if (correct_wcount)
                file->f_dentry->d_inode->i_writecount++;
        if (error)
                goto free_vma;

        /*
         * merge_segments may merge our vma, so we can't refer to it
         * after the call.  Save the values we need now ...
         */
        flags = vma->vm_flags;
        addr = vma->vm_start;           /* can addr have changed?? */
        insert_vm_struct(mm, vma);
        merge_segments(mm, vma->vm_start, vma->vm_end);

        mm->total_vm += len >> PAGE_SHIFT;
        if (flags & VM_LOCKED) {
                mm->locked_vm += len >> PAGE_SHIFT;
                make_pages_present(addr, addr + len);
        }
        return addr;

free_vma:
        kmem_cache_free(vm_area_cachep, vma);
        return error;
}

/* Get an address range which is currently unmapped.
 * For mmap() without MAP_FIXED and shmat() with addr=0.
 * Return value 0 means ENOMEM.
 */
unsigned long get_unmapped_area(unsigned long addr, unsigned long len)
{
        struct vm_area_struct * vmm;

        if (len > TASK_SIZE)
                return 0;
        if (!addr)
                addr = TASK_UNMAPPED_BASE;
        addr = PAGE_ALIGN(addr);

        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
                /* At this point:  (!vmm || addr < vmm->vm_end). */
                if (TASK_SIZE - len < addr)
                        return 0;
                if (!vmm || addr + len <= vmm->vm_start)
                        return addr;
                addr = vmm->vm_end;
        }
}
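
/* Descriptive note (not in the original source): the loop above walks the
 * address-sorted vma list, starting from the first vma whose vm_end lies
 * above addr, and returns the first gap of at least len bytes -- either the
 * hole before a vma's vm_start or the space beyond the last vma -- provided
 * it still fits below TASK_SIZE.
 */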

/* Normal function to fix up a mapping
 * This function is the default for when an area has no specific
 * function.  This may be used as part of a more specific routine.
 * This function works out what part of an area is affected and
 * adjusts the mapping information.  Since the actual page
 * manipulation is done in do_mmap(), none need be done here,
 * though it would probably be more appropriate.
 *
 * By the time this function is called, the area struct has been
 * removed from the process mapping list, so it needs to be
 * reinserted if necessary.
 *
 * The 4 main cases are:
 *    Unmapping the whole area
 *    Unmapping from the start of the segment to a point in it
 *    Unmapping from an intermediate point to the end
 *    Unmapping between two intermediate points, making a hole.
 *
 * Case 4 involves the creation of 2 new areas, for each side of
 * the hole.  If possible, we reuse the existing area rather than
 * allocate a new one, and the return indicates whether the old
 * area was reused.
 */
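/* Descriptive note (not in the original source): in the code below, case 1
 * is the early "whole area" return, cases 2 and 3 simply move vm_end or
 * vm_start (and vm_offset), and case 4 uses the caller-supplied *extra vma
 * for the piece beyond the hole while the original area keeps the piece
 * before it.
 */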
static int unmap_fixup(struct vm_area_struct *area, unsigned long addr,
                       size_t len, struct vm_area_struct **extra)
{
        struct vm_area_struct *mpnt;
        unsigned long end = addr + len;

        area->vm_mm->total_vm -= len >> PAGE_SHIFT;
        if (area->vm_flags & VM_LOCKED)
                area->vm_mm->locked_vm -= len >> PAGE_SHIFT;

        /* Unmapping the whole area. */
        if (addr == area->vm_start && end == area->vm_end) {
                if (area->vm_ops && area->vm_ops->close)
                        area->vm_ops->close(area);
                if (area->vm_file)
                        fput(area->vm_file);
                return 0;
        }

        /* Work out to one of the ends. */
        if (end == area->vm_end)
                area->vm_end = addr;
        else if (addr == area->vm_start) {
                area->vm_offset += (end - area->vm_start);
                area->vm_start = end;
        } else {
                /* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */
                /* Add end mapping -- leave beginning for below */
                mpnt = *extra;
                *extra = NULL;

                mpnt->vm_mm = area->vm_mm;
                mpnt->vm_start = end;
                mpnt->vm_end = area->vm_end;
                mpnt->vm_page_prot = area->vm_page_prot;
                mpnt->vm_flags = area->vm_flags;
                mpnt->vm_ops = area->vm_ops;
                mpnt->vm_offset = area->vm_offset + (end - area->vm_start);
                mpnt->vm_file = area->vm_file;
                if (mpnt->vm_file)
                        mpnt->vm_file->f_count++;
                if (mpnt->vm_ops && mpnt->vm_ops->open)
                        mpnt->vm_ops->open(mpnt);
                area->vm_end = addr;    /* Truncate area */
                insert_vm_struct(current->mm, mpnt);
        }

        insert_vm_struct(current->mm, area);
        return 1;
}

/* Munmap is split into 2 main parts -- this part which finds
 * what needs doing, and the areas themselves, which do the
 * work.  This now handles partial unmappings.
 * Jeremy Fitzhardine <jeremy@sw.oz.au>
 */
int do_munmap(unsigned long addr, size_t len)
{
        struct mm_struct * mm;
        struct vm_area_struct *mpnt, *free, *extra;
        int freed;

        if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
                return -EINVAL;

        if ((len = PAGE_ALIGN(len)) == 0)
                return 0;

        /* Check if this memory area is ok - put it on the temporary
         * list if so..  The checks here are pretty simple --
         * every area affected in some way (by any overlap) is put
         * on the list.  If nothing is put on, nothing is affected.
         */
        mm = current->mm;
        mpnt = mm->mmap;
        while(mpnt && mpnt->vm_end <= addr)
                mpnt = mpnt->vm_next;
        if (!mpnt)
                return 0;

        /* If we'll make a "hole", check the vm areas limit */
        if ((mpnt->vm_start < addr && mpnt->vm_end > addr+len) &&
            mm->map_count > MAX_MAP_COUNT)
                return -ENOMEM;

        /*
         * We may need one additional vma to fix up the mappings ...
         * and this is the last chance for an easy error exit.
         */
        extra = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!extra)
                return -ENOMEM;

        /* we have addr < mpnt->vm_end */
        free = NULL;
        for ( ; mpnt && mpnt->vm_start < addr+len; ) {
                struct vm_area_struct *next = mpnt->vm_next;

                if(mpnt->vm_next)
                        mpnt->vm_next->vm_pprev = mpnt->vm_pprev;
                *mpnt->vm_pprev = mpnt->vm_next;

                mpnt->vm_next = free;
                free = mpnt;
                mpnt = next;
        }

        /* Ok - we have the memory areas we should free on the 'free' list,
         * so release them, and unmap the page range..
         * If one of the segments is only being partially unmapped,
         * it will put new vm_area_struct(s) into the address space.
         */
        freed = 0;
        while ((mpnt = free) != NULL) {
                unsigned long st, end, size;

                free = free->vm_next;
                freed = 1;

                mm->map_count--;
                remove_shared_vm_struct(mpnt);

                st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
                end = addr+len;
                end = end > mpnt->vm_end ? mpnt->vm_end : end;
                size = end - st;

                if (mpnt->vm_ops && mpnt->vm_ops->unmap)
                        mpnt->vm_ops->unmap(mpnt, st, size);

                flush_cache_range(mm, st, end);
                zap_page_range(mm, st, size);
                flush_tlb_range(mm, st, end);

                /*
                 * Fix the mapping, and free the old area if it wasn't reused.
                 */
                if (!unmap_fixup(mpnt, st, size, &extra))
                        kmem_cache_free(vm_area_cachep, mpnt);
        }

        /* Release the extra vma struct if it wasn't used */
        if (extra)
                kmem_cache_free(vm_area_cachep, extra);

        if (freed)
                mm->mmap_cache = NULL;  /* Kill the cache. */
        return 0;
}

asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
        int ret;

        down(&current->mm->mmap_sem);
        lock_kernel();
        ret = do_munmap(addr, len);
        unlock_kernel();
        up(&current->mm->mmap_sem);
        return ret;
}

/* Release all mmaps. */
void exit_mmap(struct mm_struct * mm)
{
        struct vm_area_struct * mpnt;

        mpnt = mm->mmap;
        mm->mmap = mm->mmap_cache = NULL;
        mm->rss = 0;
        mm->total_vm = 0;
        mm->locked_vm = 0;
        while (mpnt) {
                struct vm_area_struct * next = mpnt->vm_next;
                unsigned long start = mpnt->vm_start;
                unsigned long end = mpnt->vm_end;
                unsigned long size = end - start;

                if (mpnt->vm_ops) {
                        if (mpnt->vm_ops->unmap)
                                mpnt->vm_ops->unmap(mpnt, start, size);
                        if (mpnt->vm_ops->close)
                                mpnt->vm_ops->close(mpnt);
                }
                mm->map_count--;
                remove_shared_vm_struct(mpnt);
                zap_page_range(mm, start, size);
                if (mpnt->vm_file)
                        fput(mpnt->vm_file);
                kmem_cache_free(vm_area_cachep, mpnt);
                mpnt = next;
        }

        /* This is just debugging */
        if (mm->map_count)
                printk("exit_mmap: map count is %d\n", mm->map_count);
}

/* Insert vm structure into process list sorted by address
 * and into the inode's i_mmap ring.
 */
void insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vmp)
{
        struct vm_area_struct **pprev = &mm->mmap;
        struct file * file;

        mm->map_count++;

        /* Find where to link it in. */
        while(*pprev && (*pprev)->vm_start <= vmp->vm_start)
                pprev = &(*pprev)->vm_next;

        /* Insert it. */
        if((vmp->vm_next = *pprev) != NULL)
                (*pprev)->vm_pprev = &vmp->vm_next;
        *pprev = vmp;
        vmp->vm_pprev = pprev;

        file = vmp->vm_file;
        if (file) {
                struct inode * inode = file->f_dentry->d_inode;
                if (vmp->vm_flags & VM_DENYWRITE)
                        inode->i_writecount--;

                /* insert vmp into inode's share list */
                if((vmp->vm_next_share = inode->i_mmap) != NULL)
                        inode->i_mmap->vm_pprev_share = &vmp->vm_next_share;
                inode->i_mmap = vmp;
                vmp->vm_pprev_share = &inode->i_mmap;
        }
}
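
/* Descriptive note (not in the original source): both lists above use the
 * "pointer to the previous element's next pointer" idiom -- vm_pprev for the
 * per-mm address-ordered list and vm_pprev_share for the inode's i_mmap
 * ring -- which is what lets remove_shared_vm_struct() and do_munmap()
 * unlink an entry without walking the list again.
 */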

/* Merge the list of memory segments if possible.
 * Redundant vm_area_structs are freed.
 * This assumes that the list is ordered by address.
 * We don't need to traverse the entire list, only those segments
 * which intersect or are adjacent to a given interval.
 *
 * We must already hold the mm semaphore when we get here..
 */
void merge_segments (struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
        struct vm_area_struct *prev, *mpnt, *next;

        prev = NULL;
        mpnt = mm->mmap;
        while(mpnt && mpnt->vm_end <= start_addr) {
                prev = mpnt;
                mpnt = mpnt->vm_next;
        }
        if (!mpnt)
                return;

        next = mpnt->vm_next;

        /* we have prev->vm_next == mpnt && mpnt->vm_next = next */
        if (!prev) {
                prev = mpnt;
                mpnt = next;
        }

        /* prev and mpnt cycle through the list, as long as
         * start_addr < mpnt->vm_end && prev->vm_start < end_addr
         */
        for ( ; mpnt && prev->vm_start < end_addr ; prev = mpnt, mpnt = next) {
                next = mpnt->vm_next;

                /* To share, we must have the same file, operations.. */
                if ((mpnt->vm_file != prev->vm_file)||
                    (mpnt->vm_pte != prev->vm_pte) ||
                    (mpnt->vm_ops != prev->vm_ops) ||
                    (mpnt->vm_flags != prev->vm_flags) ||
                    (prev->vm_end != mpnt->vm_start))
                        continue;

                /*
                 * If we have a file or it's a shared memory area
                 * the offsets must be contiguous..
                 */
                if ((mpnt->vm_file != NULL) || (mpnt->vm_flags & VM_SHM)) {
                        unsigned long off = prev->vm_offset+prev->vm_end-prev->vm_start;
                        if (off != mpnt->vm_offset)
                                continue;
                }

                /* merge prev with mpnt and set up pointers so the new
                 * big segment can possibly merge with the next one.
                 * The old unused mpnt is freed.
                 */
                if(mpnt->vm_next)
                        mpnt->vm_next->vm_pprev = mpnt->vm_pprev;
                *mpnt->vm_pprev = mpnt->vm_next;

                prev->vm_end = mpnt->vm_end;
                if (mpnt->vm_ops && mpnt->vm_ops->close) {
                        mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
                        mpnt->vm_start = mpnt->vm_end;
                        mpnt->vm_ops->close(mpnt);
                }
                mm->map_count--;
                remove_shared_vm_struct(mpnt);
                if (mpnt->vm_file)
                        fput(mpnt->vm_file);
                kmem_cache_free(vm_area_cachep, mpnt);
                mpnt = prev;
        }
        mm->mmap_cache = NULL;          /* Kill the cache. */
}

__initfunc(void vma_init(void))
{
        vm_area_cachep = kmem_cache_create("vm_area_struct",
                                           sizeof(struct vm_area_struct),
                                           0, SLAB_HWCACHE_ALIGN,
                                           NULL, NULL);
        if(!vm_area_cachep)
                panic("vma_init: Cannot alloc vm_area_struct cache.");

        mm_cachep = kmem_cache_create("mm_struct",
                                      sizeof(struct mm_struct),
                                      0, SLAB_HWCACHE_ALIGN,
                                      NULL, NULL);
        if(!mm_cachep)
                panic("vma_init: Cannot alloc mm_struct cache.");
}