/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"
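
/*
 * Walk the existing page tables of the source mm down to the pmd covering
 * @addr; returns NULL if any level is absent or bad, in which case there
 * is nothing to move for this range.
 */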
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;

	return pmd;
}
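
/*
 * Allocate the pud, pmd and pte levels needed at the destination address,
 * returning the new pmd, or NULL if any of the allocations fails.
 */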
static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
		return NULL;

	return pmd;
}
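
/*
 * Move the ptes covering [old_addr, old_end) from old_pmd to new_pmd:
 * clear and flush each source pte and install it at the destination,
 * with both pte locks (and, for file mappings, the mapping's
 * i_mmap_lock) held across the copy.
 */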
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr)
{
	struct address_space *mapping = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	unsigned long old_start;

	old_start = old_addr;
	mmu_notifier_invalidate_range_start(vma->vm_mm,
					    old_start, old_end);
	if (vma->vm_file) {
		/*
		 * Subtle point from Rajesh Venkatasubramanian: before
		 * moving file-based ptes, we must lock truncate_pagecache
		 * out, since it might clean the dst vma before the src vma,
		 * and we propagate stale pages into the dst afterward.
		 */
		mapping = vma->vm_file->f_mapping;
		spin_lock(&mapping->i_mmap_lock);
		new_vma->vm_truncate_count = 0;
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_clear_flush(vma, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (mapping)
		spin_unlock(&mapping->i_mmap_lock);
	mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
}
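
/*
 * Cap the amount of work done per move_ptes() call, so the pte locks (and
 * the i_mmap_lock for file mappings) are never held across more than 64
 * pte copies at a time.
 */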
#define LATENCY_LIMIT	(64 * PAGE_SIZE)
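
/*
 * Move the page tables for [old_addr, old_addr + len) across to new_addr,
 * one pmd-sized (and LATENCY_LIMIT-capped) chunk at a time.  Returns how
 * many bytes were actually moved, which can be less than len if a
 * destination pmd could not be allocated.
 */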
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		if (next - 1 > old_end)
			next = old_end;
		extent = next - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, new_addr);
		if (!new_pmd)
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
				new_vma, new_pmd, new_addr);
	}

	return len + old_addr - old_end;	/* how much done */
}
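
/*
 * Copy the vma to new_addr, move its page tables across, then unmap the
 * old range, taking care to keep the VM_ACCOUNT charge for the original
 * reservation and the locked_vm/total_vm statistics consistent.
 */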
static unsigned long move_vma(struct vm_area_struct *vma,
	unsigned long old_addr, unsigned long old_len,
	unsigned long new_len, unsigned long new_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	mm->total_vm += new_len >> PAGE_SHIFT;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len >> PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		if (new_len > old_len)
			mlock_vma_pages_range(new_vma, new_addr + old_len,
						       new_addr + new_len);
	}

	return new_addr;
}
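
/*
 * Look up and validate the vma to be resized: the range must lie within a
 * single, non-hugetlb vma, growing must respect VM_DONTEXPAND/VM_PFNMAP,
 * RLIMIT_MEMLOCK and the address-space limit, and for accountable mappings
 * the extra pages are charged and returned through *p.  Errors come back
 * as an ERR_PTR().
 */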
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (!vma || vma->vm_start > addr)
		goto Efault;

	if (is_vm_hugetlb_page(vma))
		goto Einval;

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto Efault;

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
		if (new_len > old_len)
			goto Efault;
	}

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			goto Eagain;
	}

	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
		goto Enomem;

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory(charged))
			goto Efault;
		*p = charged;
	}

	return vma;

Efault:	/* very odd choice for most of the cases, but... */
	return ERR_PTR(-EFAULT);
Einval:
	return ERR_PTR(-EINVAL);
Enomem:
	return ERR_PTR(-ENOMEM);
Eagain:
	return ERR_PTR(-EAGAIN);
}
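
/*
 * MREMAP_FIXED path: move the mapping to a caller-supplied new_addr after
 * sanity-checking the destination, refusing any overlap with the source
 * range, and unmapping whatever currently occupies the target range.
 */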
static unsigned long mremap_to(unsigned long addr,
	unsigned long old_len, unsigned long new_addr,
	unsigned long new_len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (new_addr & ~PAGE_MASK)
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Check if the location we're moving into overlaps the
	 * old location at all, and fail if it does.
	 */
	if ((new_addr <= addr) && (new_addr+new_len) > addr)
		goto out;

	if ((addr <= new_addr) && (addr+old_len) > new_addr)
		goto out;

	ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
	if (ret)
		goto out;

	ret = do_munmap(mm, new_addr, new_len);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (ret & ~PAGE_MASK)
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr);
	if (!(ret & ~PAGE_MASK))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}
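
/*
 * Can this vma be grown in place by delta bytes: no overflow, no collision
 * with the next vma, and the enlarged range still acceptable to
 * get_unmapped_area()?
 */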
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		goto out;

	if (addr & ~PAGE_MASK)
		goto out;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		goto out;

	if (flags & MREMAP_FIXED) {
		if (flags & MREMAP_MAYMOVE)
			ret = mremap_to(addr, old_len, new_addr, new_len);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			mm->total_vm += pages;
			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				mlock_vma_pages_range(vma, addr + old_len,
						   addr + new_len);
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (new_addr & ~PAGE_MASK) {
			ret = new_addr;
			goto out;
		}

		ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
		if (ret)
			goto out;
		ret = move_vma(vma, addr, old_len, new_len, new_addr);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
	return ret;
}
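
/*
 * mremap(2) entry point: all the real work is done by do_mremap() with
 * mmap_sem held for writing.
 */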
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}