/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@redhat.com>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
static pte_t *get_one_pte_map_nested(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		goto end;
	if (pgd_bad(*pgd)) {
		pgd_ERROR(*pgd);
		pgd_clear(pgd);
		goto end;
	}

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd))
		goto end;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto end;
	}

	pte = pte_offset_map_nested(pmd, addr);
	if (pte_none(*pte)) {
		pte_unmap_nested(pte);
		pte = NULL;
	}
end:
	return pte;
}
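
/*
 * With CONFIG_HIGHPTE, page tables may live in highmem: pte_offset_map()
 * pins a pte page via the kmap_atomic() slot KM_PTE0, while the _nested
 * variant uses the second slot, KM_PTE1, so move_one_page() below can
 * keep the source and destination ptes mapped at the same time.
 */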
static pte_t *get_one_pte_map(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;
	pmd = pmd_offset(pgd, addr);
	if (!pmd_present(*pmd))
		return NULL;
	return pte_offset_map(pmd, addr);
}
static inline pte_t *alloc_one_pte_map(struct mm_struct *mm, unsigned long addr)
{
	pmd_t *pmd;
	pte_t *pte = NULL;

	pmd = pmd_alloc(mm, pgd_offset(mm, addr), addr);
	if (pmd)
		pte = pte_alloc_map(mm, pmd, addr);
	return pte;
}
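
/*
 * Note that pte_alloc_map() may drop and re-acquire mm->page_table_lock
 * while allocating a new page-table page, which is why move_one_page()
 * below must revalidate its lookups after calling alloc_one_pte_map().
 */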
static int
move_one_page(struct vm_area_struct *vma, unsigned long old_addr,
		unsigned long new_addr)
{
	struct address_space *mapping = NULL;
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pte_t *src, *dst;

	if (vma->vm_file) {
		/*
		 * Subtle point from Rajesh Venkatasubramanian: before
		 * moving file-based ptes, we must lock vmtruncate out,
		 * since it might clean the dst vma before the src vma,
		 * and we propagate stale pages into the dst afterward.
		 */
		mapping = vma->vm_file->f_mapping;
		spin_lock(&mapping->i_mmap_lock);
	}
	spin_lock(&mm->page_table_lock);

	src = get_one_pte_map_nested(mm, old_addr);
	if (src) {
		/*
		 * Look to see whether alloc_one_pte_map needs to perform a
		 * memory allocation.  If it does then we need to drop the
		 * atomic kmap
		 */
		dst = get_one_pte_map(mm, new_addr);
		if (unlikely(!dst)) {
			pte_unmap_nested(src);
			if (mapping)
				spin_unlock(&mapping->i_mmap_lock);
			dst = alloc_one_pte_map(mm, new_addr);
			if (mapping && !spin_trylock(&mapping->i_mmap_lock)) {
				spin_unlock(&mm->page_table_lock);
				spin_lock(&mapping->i_mmap_lock);
				spin_lock(&mm->page_table_lock);
			}
			src = get_one_pte_map_nested(mm, old_addr);
		}
		/*
		 * Since alloc_one_pte_map can drop and re-acquire
		 * page_table_lock, we should re-check the src entry...
		 */
		if (src) {
			if (dst) {
				pte_t pte;
				pte = ptep_clear_flush(vma, old_addr, src);
				set_pte(dst, pte);
			} else
				error = -ENOMEM;
			pte_unmap_nested(src);
		}
		if (dst)
			pte_unmap(dst);
	}
	spin_unlock(&mm->page_table_lock);
	if (mapping)
		spin_unlock(&mapping->i_mmap_lock);
	return error;
}
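
/*
 * The lock ordering in move_one_page() is i_mmap_lock, then
 * mm->page_table_lock.  When i_mmap_lock must be dropped around
 * alloc_one_pte_map(), the spin_trylock() retakes it opportunistically;
 * if that fails, both locks are cycled in the correct order rather than
 * risking an inversion against vmtruncate.
 */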
static unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long new_addr, unsigned long old_addr,
		unsigned long len)
{
	unsigned long offset;

	flush_cache_range(vma, old_addr, old_addr + len);

	/*
	 * This is not the clever way to do this, but we're taking the
	 * easy way out on the assumption that most remappings will be
	 * only a few pages.. This also makes error recovery easier.
	 */
	for (offset = 0; offset < len; offset += PAGE_SIZE) {
		if (move_one_page(vma, old_addr+offset, new_addr+offset) < 0)
			break;
		cond_resched();
	}
	return offset;
}
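
/*
 * move_page_tables() returns the number of bytes actually moved, which
 * may fall short of len when a move_one_page() call fails; move_vma()
 * below compares it against old_len to detect partial failure and move
 * the entries back.
 */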
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	int split = 0;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, new_addr, old_addr, old_len);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, old_addr, new_addr, moved_len);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	mm->total_vm += new_len >> PAGE_SHIFT;
	__vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		if (new_len > old_len)
			make_pages_present(new_addr + old_len,
					   new_addr + new_len);
	}

	return new_addr;
}
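
/*
 * Accounting example for the VM_ACCOUNT dance in move_vma(): moving 4
 * pages out of a 10-page accounted vma leaves excess = 6 pages' worth.
 * Concealing VM_ACCOUNT keeps do_munmap() from undoing that reservation
 * (the moved pages' charge now backs new_vma); the flag is then restored
 * on the leftover piece(s) so the 6 pages stay accounted, and only if
 * do_munmap() cannot split the vma is the excess unaccounted by hand.
 */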
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		goto out;

	if (addr & ~PAGE_MASK)
		goto out;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		goto out;
	/* new_addr is only valid if MREMAP_FIXED is specified */
	if (flags & MREMAP_FIXED) {
		if (new_addr & ~PAGE_MASK)
			goto out;
		if (!(flags & MREMAP_MAYMOVE))
			goto out;

		if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
			goto out;

		/* Check if the location we're moving into overlaps the
		 * old location at all, and fail if it does.
		 */
		if ((new_addr <= addr) && (new_addr+new_len) > addr)
			goto out;

		if ((addr <= new_addr) && (addr+old_len) > new_addr)
			goto out;

		ret = do_munmap(current->mm, new_addr, new_len);
		if (ret)
			goto out;
	}
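
	/*
	 * Together the two overlap tests above reject any intersection of
	 * [new_addr, new_addr+new_len) with [addr, addr+old_len).  For
	 * example addr = 0x2000, old_len = 0x2000, new_addr = 0x3000 fails
	 * the second test: 0x2000 <= 0x3000 and 0x2000 + 0x2000 > 0x3000.
	 */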
	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(current->mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		if (!(flags & MREMAP_FIXED) || (new_addr == addr))
			goto out;
		old_len = new_len;
	}
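
	/*
	 * A pure shrink, e.g. old_len of four pages down to a new_len of
	 * one, thus reduces to do_munmap() on the trailing three pages and
	 * returns addr unchanged; only a shrinking MREMAP_FIXED move falls
	 * through, with old_len clamped to new_len.
	 */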
	/*
	 * Ok, we need to grow..  or relocate.
	 */
	ret = -EFAULT;
	vma = find_vma(current->mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	if (is_vm_hugetlb_page(vma)) {
		ret = -EINVAL;
		goto out;
	}
	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto out;
	if (vma->vm_flags & VM_DONTEXPAND) {
		if (new_len > old_len)
			goto out;
	}
	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = current->mm->locked_vm << PAGE_SHIFT;
		lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
		locked += new_len - old_len;
		ret = -EAGAIN;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			goto out;
	}
	ret = -ENOMEM;
	if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len)
	    > current->rlim[RLIMIT_AS].rlim_cur)
		goto out;

	if (vma->vm_flags & VM_ACCOUNT) {
		charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory(charged))
			goto out_nc;
	}
	/* old_len exactly to the end of the area..
	 * And we're not relocating the area.
	 */
	if (old_len == vma->vm_end - addr &&
	    !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
	    (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
		unsigned long max_addr = TASK_SIZE;
		if (vma->vm_next)
			max_addr = vma->vm_next->vm_start;
		/* can we just expand the current mapping? */
		if (max_addr - addr >= new_len) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			vma_adjust(vma, vma->vm_start,
				addr + new_len, vma->vm_pgoff, NULL);

			current->mm->total_vm += pages;
			__vm_stat_account(vma->vm_mm, vma->vm_flags,
							vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				current->mm->locked_vm += pages;
				make_pages_present(addr + old_len,
						   addr + new_len);
			}
			ret = addr;
			goto out;
		}
	}
	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		if (!(flags & MREMAP_FIXED)) {
			unsigned long map_flags = 0;
			if (vma->vm_flags & VM_MAYSHARE)
				map_flags |= MAP_SHARED;

			new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
						vma->vm_pgoff, map_flags);
			ret = new_addr;
			if (new_addr & ~PAGE_MASK)
				goto out;
		}
		ret = move_vma(vma, addr, old_len, new_len, new_addr);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
out_nc:
	return ret;
}
asmlinkage unsigned long sys_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}
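
/*
 * The userspace view of this syscall, for reference (illustrative only,
 * not part of this file): grow an anonymous mapping, letting the kernel
 * relocate it if it cannot expand in place.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		size_t old_len = 4096, new_len = 8192;
 *		void *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		void *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
 *		if (q == MAP_FAILED)
 *			return 1;
 *		printf("moved: %s\n", q == p ? "no" : "yes");
 *		return munmap(q, new_len) ? 1 : 0;
 *	}
 */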