6 #include <linux/stat.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
10 #include <linux/slab.h>
11 #include <linux/shm.h>
12 #include <linux/errno.h>
13 #include <linux/mman.h>
14 #include <linux/string.h>
15 #include <linux/pagemap.h>
16 #include <linux/swap.h>
17 #include <linux/smp.h>
18 #include <linux/smp_lock.h>
19 #include <linux/init.h>
20 #include <linux/file.h>
22 #include <asm/uaccess.h>
23 #include <asm/system.h>
24 #include <asm/pgtable.h>
26 /* description of effects of mapping type and prot in current implementation.
27  * this is due to the limited x86 page protection hardware. The expected
28  * behavior is in parens:
31  *               PROT_NONE      PROT_READ       PROT_WRITE      PROT_EXEC
32  * MAP_SHARED    r: (no) no     r: (yes) yes    r: (no) yes     r: (no) yes
33  *               w: (no) no     w: (no) no      w: (yes) yes    w: (no) no
34  *               x: (no) no     x: (no) yes     x: (no) yes     x: (yes) yes
36  * MAP_PRIVATE   r: (no) no     r: (yes) yes    r: (no) yes     r: (no) yes
37  *               w: (no) no     w: (no) no      w: (copy) copy  w: (no) no
38  *               x: (no) no     x: (no) yes     x: (no) yes     x: (yes) yes
41 pgprot_t protection_map[16] = {
42         __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
43         __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
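
The table above is what protection_map[] encodes: the low four bits of a vma's vm_flags (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED) select one of the sixteen entries, as do_mmap() does at line 267. A minimal userspace sketch of that indexing, using the kernel's conventional bit values rather than anything taken from a header here:

/* Illustrative only - a userspace sketch of how the low four bits of
 * vm_flags select an entry in protection_map[].  The VM_* values below
 * mirror the kernel's usual bit assignments; the real table lives in the
 * kernel and is filled with __P000.../__S111 as shown above.
 */
#include <stdio.h>

#define VM_READ     0x0001
#define VM_WRITE    0x0002
#define VM_EXEC     0x0004
#define VM_SHARED   0x0008

int main(void)
{
        unsigned long vm_flags = VM_READ | VM_WRITE;     /* private rw mapping */
        unsigned int index = vm_flags & 0x0f;            /* low 4 bits only */

        /* index 3 -> __P011, i.e. the MAP_PRIVATE / PROT_READ|PROT_WRITE
         * column of the table above (write means copy-on-write). */
        printf("protection_map index = %u\n", index);
        return 0;
}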
46 /* SLAB cache for vm_area_struct's. */
47 kmem_cache_t *vm_area_cachep;
49 int sysctl_overcommit_memory;
51 /* Check that a process has enough memory to allocate a
52  * new virtual mapping.
54 int vm_enough_memory(long pages)
56         /* Stupid algorithm to decide if we have enough memory: while
57          * simple, it hopefully works in most obvious cases.. Easy to
58          * fool it, but this should catch most mistakes.
62         /* Sometimes we want to use more memory than we have. */
63         if (sysctl_overcommit_memory)
66         free = buffermem >> PAGE_SHIFT;
67         free += page_cache_size;
69         free += nr_free_pages;
70         free += nr_swap_pages;
71         free -= num_physpages >> 4;
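
In other words, the estimate adds up buffer-cache pages, page-cache pages, free pages and free swap, keeps roughly 1/16 of physical memory as slack, and presumably compares the result against the requested page count. A standalone sketch of the same arithmetic with made-up numbers:

/* A standalone sketch of the heuristic with hypothetical page counts,
 * just to show the arithmetic; the real function reads kernel globals.
 */
#include <stdio.h>

static int enough_memory(long pages)
{
        long buffer_pages = 1000;       /* hypothetical buffer cache pages */
        long cache_pages  = 4000;       /* hypothetical page cache pages   */
        long free_pages   = 2000;       /* hypothetical free pages         */
        long swap_pages   = 8000;       /* hypothetical free swap pages    */
        long phys_pages   = 16384;      /* hypothetical total RAM pages    */

        long free = buffer_pages + cache_pages + free_pages + swap_pages;
        free -= phys_pages >> 4;        /* keep ~1/16 of RAM as slack */
        return free > pages;            /* assumed final comparison */
}

int main(void)
{
        printf("%d\n", enough_memory(512));     /* prints 1: request fits */
        return 0;
}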
75 /* Remove one vm structure from the inode's i_mmap ring. */
76 static inline void remove_shared_vm_struct(struct vm_area_struct *vma)
78         struct file * file = vma->vm_file;
81                 if (vma->vm_flags & VM_DENYWRITE)
82                         file->f_dentry->d_inode->i_writecount++;
83                 if(vma->vm_next_share)
84                         vma->vm_next_share->vm_pprev_share = vma->vm_pprev_share;
85                 *vma->vm_pprev_share = vma->vm_next_share;
89 asmlinkage unsigned long sys_brk(unsigned long brk)
91         unsigned long rlim, retval;
92         unsigned long newbrk, oldbrk;
93         struct mm_struct *mm = current->mm;
97         if (brk < mm->end_code)
99         newbrk = PAGE_ALIGN(brk);
100         oldbrk = PAGE_ALIGN(mm->brk);
101         if (oldbrk == newbrk)
104         /* Always allow shrinking brk. */
105         if (brk <= mm->brk) {
106                 if (!do_munmap(newbrk, oldbrk-newbrk))
111         /* Check against rlimit and stack.. */
112         rlim = current->rlim[RLIMIT_DATA].rlim_cur;
113         if (rlim < RLIM_INFINITY && brk - mm->end_code > rlim)
116         /* Check against existing mmap mappings. */
117         if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
120         /* Check if we have enough memory.. */
121         if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT))
124         /* Ok, looks good - let it rip. */
125         if (do_mmap(NULL, oldbrk, newbrk-oldbrk,
126                     PROT_READ|PROT_WRITE|PROT_EXEC,
127                     MAP_FIXED|MAP_PRIVATE, 0) != oldbrk)
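
sys_brk() shrinks the heap through do_munmap() and grows it with an anonymous MAP_FIXED|MAP_PRIVATE do_mmap(). From userspace this path is normally reached through brk()/sbrk(); a minimal sketch using glibc's sbrk():

/* Userspace sketch: growing and shrinking the heap with sbrk(), which
 * ends up in sys_brk() above.  Error handling kept minimal.
 */
#define _DEFAULT_SOURCE         /* for sbrk() on glibc */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        void *old_brk = sbrk(0);                /* current program break */

        if (sbrk(16 * 4096) == (void *) -1) {   /* grow by 16 pages */
                perror("sbrk");
                return 1;
        }
        printf("break moved from %p to %p\n", old_brk, sbrk(0));

        sbrk(-16 * 4096);                       /* shrink back (do_munmap path) */
        return 0;
}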
138 /* Combine the mmap "prot" and "flags" argument into one "vm_flags" used
139  * internally. Essentially, translate the "PROT_xxx" and "MAP_xxx" bits
142 static inline unsigned long vm_flags(unsigned long prot, unsigned long flags)
144 #define _trans(x,bit1,bit2) \
145 ((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)
147         unsigned long prot_bits, flag_bits;
148         prot_bits =
149                 _trans(prot, PROT_READ, VM_READ) |
150                 _trans(prot, PROT_WRITE, VM_WRITE) |
151                 _trans(prot, PROT_EXEC, VM_EXEC);
152         flag_bits =
153                 _trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) |
154                 _trans(flags, MAP_DENYWRITE, VM_DENYWRITE) |
155                 _trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE);
156         return prot_bits | flag_bits;
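
_trans() copies a single flag bit from one encoding to another: if the source and destination bits coincide it just masks, otherwise it tests the source bit and returns the destination bit. A small standalone check of that behaviour, using illustrative constants rather than the kernel's PROT_*/VM_* values:

/* Standalone check of the _trans() idea: copy one flag bit into another
 * bit position.  The MY_* values are made up for the test.
 */
#include <assert.h>

#define _trans(x,bit1,bit2) \
((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)

#define MY_PROT_READ   0x1
#define MY_VM_READ     0x1      /* same bit: first branch of _trans */
#define MY_PROT_WRITE  0x2
#define MY_VM_WRITE    0x4      /* different bit: gets translated */

int main(void)
{
        assert(_trans(MY_PROT_READ,  MY_PROT_READ,  MY_VM_READ)  == MY_VM_READ);
        assert(_trans(MY_PROT_WRITE, MY_PROT_WRITE, MY_VM_WRITE) == MY_VM_WRITE);
        assert(_trans(0,             MY_PROT_WRITE, MY_VM_WRITE) == 0);
        return 0;
}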
160 unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
161         unsigned long prot, unsigned long flags, unsigned long off)
163         struct mm_struct * mm = current->mm;
164         struct vm_area_struct * vma;
165         int correct_wcount = 0, error;
167         if ((len = PAGE_ALIGN(len)) == 0)
170         if (len > TASK_SIZE || addr > TASK_SIZE-len)
173         /* offset overflow? */
177         /* Too many mappings? */
178         if (mm->map_count > MAX_MAP_COUNT)
181         /* mlock MCL_FUTURE? */
182         if (mm->def_flags & VM_LOCKED) {
183                 unsigned long locked = mm->locked_vm << PAGE_SHIFT;
185                 if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
189         /* Do simple checking here so the lower-level routines won't have
190          * to. we assume access permissions have been handled by the open
191          * of the memory object, so we don't do any here.
194                 switch (flags & MAP_TYPE) {
196                         if ((prot & PROT_WRITE) && !(file->f_mode & 2))
199                         /* Make sure we don't allow writing to an append-only file.. */
200                         if (IS_APPEND(file->f_dentry->d_inode) && (file->f_mode & 2))
203                         /* make sure there are no mandatory locks on the file. */
204                         if (locks_verify_locked(file->f_dentry->d_inode))
209                         if (!(file->f_mode & 1))
216         } else if ((flags & MAP_TYPE) != MAP_PRIVATE)
219         /* Obtain the address to map to. we verify (or select) it and ensure
220          * that it represents a valid section of the address space.
222         if (flags & MAP_FIXED) {
223                 if (addr & ~PAGE_MASK)
226                 addr = get_unmapped_area(addr, len);
231         /* Determine the object being mapped and call the appropriate
232          * specific mapper. the address has already been validated, but
233          * not unmapped, but the maps are removed from the list.
235         if (file && (!file->f_op || !file->f_op->mmap))
238         vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
243         vma->vm_start = addr;
244         vma->vm_end = addr + len;
245         vma->vm_flags = vm_flags(prot,flags) | mm->def_flags;
248                 if (file->f_mode & 1)
249                         vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
250                 if (flags & MAP_SHARED) {
251                         vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
253                         /* This looks strange, but when we don't have the file open
254                          * for writing, we can demote the shared mapping to a simpler
255                          * private mapping. That also takes care of a security hole
256                          * with ptrace() writing to a shared mapping without write
259                          * We leave the VM_MAYSHARE bit on, just to get correct output
260                          * from /proc/xxx/maps..
262                         if (!(file->f_mode & 2))
263                                 vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
266                 vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
267         vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
269         vma->vm_offset = off;
275         if (do_munmap(addr, len))
278         /* Check against address space limit. */
279         if ((mm->total_vm << PAGE_SHIFT) + len
280             > current->rlim[RLIMIT_AS].rlim_cur)
283         /* Private writable mapping? Check memory availability.. */
284         if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
285             !(flags & MAP_NORESERVE) &&
286             !vm_enough_memory(len >> PAGE_SHIFT))
291                 if (vma->vm_flags & VM_DENYWRITE) {
292                         if (file->f_dentry->d_inode->i_writecount > 0)
295                                 /* f_op->mmap might possibly sleep
296                                  * (generic_file_mmap doesn't, but other code
297                                  * might). In any case, this takes care of any
298                                  * race that this might cause.
300                                 file->f_dentry->d_inode->i_writecount--;
305                 error = file->f_op->mmap(file, vma);
308                 /* Fix up the count if necessary, then check for an error */
310                         file->f_dentry->d_inode->i_writecount++;
315          * merge_segments may merge our vma, so we can't refer to it
316          * after the call. Save the values we need now ...
318         flags = vma->vm_flags;
319         addr = vma->vm_start;           /* can addr have changed?? */
320         insert_vm_struct(mm, vma);
321         merge_segments(mm, vma->vm_start, vma->vm_end);
323         mm->total_vm += len >> PAGE_SHIFT;
324         if (flags & VM_LOCKED) {
325                 mm->locked_vm += len >> PAGE_SHIFT;
326                 make_pages_present(addr, addr + len);
331         kmem_cache_free(vm_area_cachep, vma);
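
Seen from userspace, do_mmap() is what mmap(2) ends up in: validate the request, allocate and fill a vm_area_struct, let the file's mmap operation map the pages, then insert and merge the vma. A hedged example of the read-only MAP_SHARED case discussed around line 262 (any readable, non-empty file will do):

/* Userspace sketch: a MAP_SHARED mapping of a file opened read-only.
 * Per the comment near line 262, the kernel clears VM_MAYWRITE/VM_SHARED
 * internally in that case, while keeping VM_MAYSHARE for /proc output.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/etc/hostname", O_RDONLY);   /* any readable, non-empty file */
        if (fd < 0) {
                perror("open");
                return 1;
        }
        char *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                close(fd);
                return 1;
        }
        printf("first byte: %c\n", p[0]);
        munmap(p, 4096);
        close(fd);
        return 0;
}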
335 /* Get an address range which is currently unmapped.
336  * For mmap() without MAP_FIXED and shmat() with addr=0.
337  * Return value 0 means ENOMEM.
339 unsigned long get_unmapped_area(unsigned long addr, unsigned long len)
341         struct vm_area_struct * vmm;
346                 addr = TASK_UNMAPPED_BASE;
347         addr = PAGE_ALIGN(addr);
349         for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
350                 /* At this point: (!vmm || addr < vmm->vm_end). */
351                 if (TASK_SIZE - len < addr)
353                 if (!vmm || addr + len <= vmm->vm_start)
359 /* Normal function to fix up a mapping
360  * This function is the default for when an area has no specific
361  * function. This may be used as part of a more specific routine.
362  * This function works out what part of an area is affected and
363  * adjusts the mapping information. Since the actual page
364  * manipulation is done in do_mmap(), none need be done here,
365  * though it would probably be more appropriate.
367  * By the time this function is called, the area struct has been
368  * removed from the process mapping list, so it needs to be
369  * reinserted if necessary.
371  * The 4 main cases are:
372  *    Unmapping the whole area
373  *    Unmapping from the start of the segment to a point in it
374  *    Unmapping from an intermediate point to the end
375  *    Unmapping between two intermediate points, making a hole.
377  * Case 4 involves the creation of 2 new areas, for each side of
378  * the hole. If possible, we reuse the existing area rather than
379  * allocate a new one, and the return indicates whether the old
382 static int unmap_fixup(struct vm_area_struct *area, unsigned long addr,
383         size_t len, struct vm_area_struct **extra)
385         struct vm_area_struct *mpnt;
386         unsigned long end = addr + len;
388         area->vm_mm->total_vm -= len >> PAGE_SHIFT;
389         if (area->vm_flags & VM_LOCKED)
390                 area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
392         /* Unmapping the whole area. */
393         if (addr == area->vm_start && end == area->vm_end) {
394                 if (area->vm_ops && area->vm_ops->close)
395                         area->vm_ops->close(area);
401         /* Work out to one of the ends. */
402         if (end == area->vm_end)
404         else if (addr == area->vm_start) {
405                 area->vm_offset += (end - area->vm_start);
406                 area->vm_start = end;
408         /* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */
409                 /* Add end mapping -- leave beginning for below */
413                 mpnt->vm_mm = area->vm_mm;
414                 mpnt->vm_start = end;
415                 mpnt->vm_end = area->vm_end;
416                 mpnt->vm_page_prot = area->vm_page_prot;
417                 mpnt->vm_flags = area->vm_flags;
418                 mpnt->vm_ops = area->vm_ops;
419                 mpnt->vm_offset = area->vm_offset + (end - area->vm_start);
420                 mpnt->vm_file = area->vm_file;
422                         mpnt->vm_file->f_count++;
423                 if (mpnt->vm_ops && mpnt->vm_ops->open)
424                         mpnt->vm_ops->open(mpnt);
425                 area->vm_end = addr;            /* Truncate area */
426                 insert_vm_struct(current->mm, mpnt);
429         insert_vm_struct(current->mm, area);
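
unmap_fixup() thus distinguishes the four cases listed in its comment: remove the whole area, trim its tail, trim its head (advancing vm_start and vm_offset), or punch a hole and use the spare vma for the tail piece. A plain-numbers sketch of that case analysis:

/* Sketch of the case analysis unmap_fixup() performs, with plain numbers
 * instead of vm_area_structs.
 */
#include <stdio.h>

static const char *unmap_case(unsigned long vm_start, unsigned long vm_end,
                              unsigned long addr, unsigned long end)
{
        if (addr == vm_start && end == vm_end)
                return "whole area unmapped";
        if (end == vm_end)
                return "tail unmapped: area is truncated";
        if (addr == vm_start)
                return "head unmapped: vm_start and vm_offset move up";
        return "hole punched: a second vma covers the tail";
}

int main(void)
{
        /* area [0x1000, 0x5000), unmap [0x2000, 0x3000) -> hole */
        printf("%s\n", unmap_case(0x1000, 0x5000, 0x2000, 0x3000));
        return 0;
}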
433 /* Munmap is split into 2 main parts -- this part which finds
434  * what needs doing, and the areas themselves, which do the
435  * work. This now handles partial unmappings.
436  * Jeremy Fitzhardinge <jeremy@sw.oz.au>
438 int do_munmap(unsigned long addr, size_t len)
440         struct mm_struct * mm;
441         struct vm_area_struct *mpnt, *free, *extra;
444         if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
447         if ((len = PAGE_ALIGN(len)) == 0)
450         /* Check if this memory area is ok - put it on the temporary
451          * list if so.. The checks here are pretty simple --
452          * every area affected in some way (by any overlap) is put
453          * on the list. If nothing is put on, nothing is affected.
457         while(mpnt && mpnt->vm_end <= addr)
458                 mpnt = mpnt->vm_next;
462         /* If we'll make "hole", check the vm areas limit */
463         if ((mpnt->vm_start < addr && mpnt->vm_end > addr+len) &&
464             mm->map_count > MAX_MAP_COUNT)
468          * We may need one additional vma to fix up the mappings ...
469          * and this is the last chance for an easy error exit.
471         extra = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
475         /* we have addr < mpnt->vm_end */
477         for ( ; mpnt && mpnt->vm_start < addr+len; ) {
478                 struct vm_area_struct *next = mpnt->vm_next;
481                         mpnt->vm_next->vm_pprev = mpnt->vm_pprev;
482                 *mpnt->vm_pprev = mpnt->vm_next;
484                 mpnt->vm_next = free;
489         /* Ok - we have the memory areas we should free on the 'free' list,
490          * so release them, and unmap the page range..
491          * If one of the segments is only being partially unmapped,
492          * it will put new vm_area_struct(s) into the address space.
495         while ((mpnt = free) != NULL) {
496                 unsigned long st, end, size;
498                 free = free->vm_next;
502                 remove_shared_vm_struct(mpnt);
504                 st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
506                 end = end > mpnt->vm_end ? mpnt->vm_end : end;
509                 if (mpnt->vm_ops && mpnt->vm_ops->unmap)
510                         mpnt->vm_ops->unmap(mpnt, st, size);
512                 flush_cache_range(mm, st, end);
513                 zap_page_range(mm, st, size);
514                 flush_tlb_range(mm, st, end);
517                  * Fix the mapping, and free the old area if it wasn't reused.
519                 if (!unmap_fixup(mpnt, st, size, &extra))
520                         kmem_cache_free(vm_area_cachep, mpnt);
523         /* Release the extra vma struct if it wasn't used */
525                 kmem_cache_free(vm_area_cachep, extra);
528         mm->mmap_cache = NULL;          /* Kill the cache. */
532 asmlinkage int sys_munmap(unsigned long addr, size_t len)
536         down(&current->mm->mmap_sem);
538         ret = do_munmap(addr, len);
540         up(&current->mm->mmap_sem);
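
A userspace view of the partial-unmap path: mapping three pages and unmapping only the middle one drives do_munmap() into the hole-punching case of unmap_fixup(), leaving two vmas behind. A minimal sketch (assumes MAP_ANONYMOUS is available; some systems spell it MAP_ANON):

/* Userspace sketch: punching a hole in a mapping, which exercises the
 * "two intermediate points" case of unmap_fixup().
 */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t page = 4096;
        char *p = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        /* Unmap only the middle page: the original vma is split in two. */
        if (munmap(p + page, page) < 0) {
                perror("munmap");
                return 1;
        }
        p[0] = 'a';                     /* first page still mapped */
        p[2 * page] = 'b';              /* last page still mapped  */
        printf("hole punched at %p\n", (void *)(p + page));
        return 0;
}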
544 /* Release all mmaps. */
545 void exit_mmap(struct mm_struct * mm)
547         struct vm_area_struct * mpnt;
550         mm->mmap = mm->mmap_cache = NULL;
555                 struct vm_area_struct * next = mpnt->vm_next;
556                 unsigned long start = mpnt->vm_start;
557                 unsigned long end = mpnt->vm_end;
558                 unsigned long size = end - start;
561                         if (mpnt->vm_ops->unmap)
562                                 mpnt->vm_ops->unmap(mpnt, start, size);
563                         if (mpnt->vm_ops->close)
564                                 mpnt->vm_ops->close(mpnt);
567                 remove_shared_vm_struct(mpnt);
568                 zap_page_range(mm, start, size);
571                 kmem_cache_free(vm_area_cachep, mpnt);
575         /* This is just debugging */
577                 printk("exit_mmap: map count is %d\n", mm->map_count);
580 /* Insert vm structure into process list sorted by address
581  * and into the inode's i_mmap ring.
583 void insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vmp)
585         struct vm_area_struct **pprev = &mm->mmap;
590         /* Find where to link it in. */
591         while(*pprev && (*pprev)->vm_start <= vmp->vm_start)
592                 pprev = &(*pprev)->vm_next;
595         if((vmp->vm_next = *pprev) != NULL)
596                 (*pprev)->vm_pprev = &vmp->vm_next;
598         vmp->vm_pprev = pprev;
602                 struct inode * inode = file->f_dentry->d_inode;
603                 if (vmp->vm_flags & VM_DENYWRITE)
604                         inode->i_writecount--;
606                 /* insert vmp into inode's share list */
607                 if((vmp->vm_next_share = inode->i_mmap) != NULL)
608                         inode->i_mmap->vm_pprev_share = &vmp->vm_next_share;
610                 vmp->vm_pprev_share = &inode->i_mmap;
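
The insertion walks a pointer-to-pointer (pprev) down the sorted list, so inserting at the head needs no special case. A generic sketch of that technique on a plain singly linked list (the kernel version additionally maintains the vm_pprev back pointers):

/* Generic sketch of the pointer-to-pointer insertion used above: walk
 * "pprev" until the sort position is found, then splice the new node in.
 */
#include <stdio.h>
#include <stddef.h>

struct node {
        unsigned long start;
        struct node *next;
};

static void insert_sorted(struct node **head, struct node *n)
{
        struct node **pprev = head;

        while (*pprev && (*pprev)->start <= n->start)
                pprev = &(*pprev)->next;
        n->next = *pprev;
        *pprev = n;
}

int main(void)
{
        struct node a = { 0x1000, NULL }, b = { 0x3000, NULL }, c = { 0x2000, NULL };
        struct node *head = NULL;

        insert_sorted(&head, &a);
        insert_sorted(&head, &b);
        insert_sorted(&head, &c);
        for (struct node *p = head; p; p = p->next)
                printf("0x%lx\n", p->start);    /* 0x1000 0x2000 0x3000 */
        return 0;
}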
614 /* Merge the list of memory segments if possible.
615  * Redundant vm_area_structs are freed.
616  * This assumes that the list is ordered by address.
617  * We don't need to traverse the entire list, only those segments
618  * which intersect or are adjacent to a given interval.
620  * We must already hold the mm semaphore when we get here..
622 void merge_segments (struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
624         struct vm_area_struct *prev, *mpnt, *next;
628         while(mpnt && mpnt->vm_end <= start_addr) {
630                 mpnt = mpnt->vm_next;
635         next = mpnt->vm_next;
637         /* we have prev->vm_next == mpnt && mpnt->vm_next = next */
643         /* prev and mpnt cycle through the list, as long as
644          * start_addr < mpnt->vm_end && prev->vm_start < end_addr
646         for ( ; mpnt && prev->vm_start < end_addr ; prev = mpnt, mpnt = next) {
647                 next = mpnt->vm_next;
649                 /* To share, we must have the same file, operations.. */
650                 if ((mpnt->vm_file != prev->vm_file)||
651                     (mpnt->vm_pte != prev->vm_pte) ||
652                     (mpnt->vm_ops != prev->vm_ops) ||
653                     (mpnt->vm_flags != prev->vm_flags) ||
654                     (prev->vm_end != mpnt->vm_start))
658                  * If we have a file or it's a shared memory area
659                  * the offsets must be contiguous..
661                 if ((mpnt->vm_file != NULL) || (mpnt->vm_flags & VM_SHM)) {
662                         unsigned long off = prev->vm_offset+prev->vm_end-prev->vm_start;
663                         if (off != mpnt->vm_offset)
667                 /* merge prev with mpnt and set up pointers so the new
668                  * big segment can possibly merge with the next one.
669                  * The old unused mpnt is freed.
672                         mpnt->vm_next->vm_pprev = mpnt->vm_pprev;
673                 *mpnt->vm_pprev = mpnt->vm_next;
675                 prev->vm_end = mpnt->vm_end;
676                 if (mpnt->vm_ops && mpnt->vm_ops->close) {
677                         mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
678                         mpnt->vm_start = mpnt->vm_end;
679                         mpnt->vm_ops->close(mpnt);
682                 remove_shared_vm_struct(mpnt);
685                 kmem_cache_free(vm_area_cachep, mpnt);
688         mm->mmap_cache = NULL;          /* Kill the cache. */
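
Two neighbouring areas merge only when they have the same file, vm_pte, operations and flags, their ranges touch, and, for file-backed or SHM areas, their offsets are contiguous. A condensed sketch of that test with plain structs:

/* Sketch of the "can these two adjacent areas merge?" test; the fields
 * stand in for the checks made in the loop above.
 */
#include <stdio.h>

struct area {
        unsigned long start, end, offset, flags;
        const void *file;               /* stands in for vm_file/vm_ops/vm_pte */
};

static int can_merge(const struct area *prev, const struct area *next)
{
        if (prev->file != next->file || prev->flags != next->flags ||
            prev->end != next->start)
                return 0;
        /* file-backed (or SHM) areas must also have contiguous offsets */
        if (next->file &&
            prev->offset + (prev->end - prev->start) != next->offset)
                return 0;
        return 1;
}

int main(void)
{
        struct area a = { 0x1000, 0x3000, 0x0000, 0, "f" };
        struct area b = { 0x3000, 0x4000, 0x2000, 0, "f" };

        printf("%d\n", can_merge(&a, &b));      /* 1: ends meet, offsets line up */
        return 0;
}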
691 void __init vma_init(void)
693         vm_area_cachep = kmem_cache_create("vm_area_struct",
694                                            sizeof(struct vm_area_struct),
695                                            0, SLAB_HWCACHE_ALIGN,
698                 panic("vma_init: Cannot alloc vm_area_struct cache.");
700         mm_cachep = kmem_cache_create("mm_struct",
701                                       sizeof(struct mm_struct),
702                                       0, SLAB_HWCACHE_ALIGN,
705                 panic("vma_init: Cannot alloc mm_struct cache.");