/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@redhat.com>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
static pte_t *get_one_pte_map_nested(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		goto end;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		goto end;

	pmd = pmd_offset(pud, addr);
	if (pmd_none_or_clear_bad(pmd))
		goto end;

	pte = pte_offset_map_nested(pmd, addr);
	if (pte_none(*pte)) {
		pte_unmap_nested(pte);
		pte = NULL;
	}
end:
	return pte;
}
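/*
 * Editorial note (not from the original source): the *_nested variant is
 * used because move_one_page() below keeps two ptes mapped at once on
 * highmem configurations -- the source pte takes the nested atomic-kmap
 * slot, while the destination uses the ordinary pte_offset_map /
 * pte_alloc_map slot.
 */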
static pte_t *get_one_pte_map(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;

	return pte_offset_map(pmd, addr);
}
static inline pte_t *alloc_one_pte_map(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;
	pmd = pmd_alloc(mm, pud, addr);
	if (pmd)
		pte = pte_alloc_map(mm, pmd, addr);
	return pte;
}
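/*
 * Editorial note (not from the original source): pud_alloc(), pmd_alloc()
 * and pte_alloc_map() may have to allocate a new page-table page, and can
 * drop and re-acquire mm->page_table_lock while doing so; this is why
 * move_one_page() re-validates the source pte after calling
 * alloc_one_pte_map().
 */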
static int
move_one_page(struct vm_area_struct *vma, unsigned long old_addr,
		struct vm_area_struct *new_vma, unsigned long new_addr)
{
	struct address_space *mapping = NULL;
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pte_t *src, *dst;

	if (vma->vm_file) {
		/*
		 * Subtle point from Rajesh Venkatasubramanian: before
		 * moving file-based ptes, we must lock vmtruncate out,
		 * since it might clean the dst vma before the src vma,
		 * and we propagate stale pages into the dst afterward.
		 */
		mapping = vma->vm_file->f_mapping;
		spin_lock(&mapping->i_mmap_lock);
		if (new_vma->vm_truncate_count &&
		    new_vma->vm_truncate_count != vma->vm_truncate_count)
			new_vma->vm_truncate_count = 0;
	}
	spin_lock(&mm->page_table_lock);

	src = get_one_pte_map_nested(mm, old_addr);
	if (src) {
		/*
		 * Look to see whether alloc_one_pte_map needs to perform a
		 * memory allocation.  If it does then we need to drop the
		 * atomic kmap.
		 */
		dst = get_one_pte_map(mm, new_addr);
		if (unlikely(!dst)) {
			pte_unmap_nested(src);
			if (mapping)
				spin_unlock(&mapping->i_mmap_lock);
			dst = alloc_one_pte_map(mm, new_addr);
			if (mapping && !spin_trylock(&mapping->i_mmap_lock)) {
				spin_unlock(&mm->page_table_lock);
				spin_lock(&mapping->i_mmap_lock);
				spin_lock(&mm->page_table_lock);
			}
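			/*
			 * Editorial note (not from the original source): the
			 * trylock above preserves the usual lock ordering,
			 * i_mmap_lock before page_table_lock; if it fails we
			 * must drop page_table_lock and retake both locks in
			 * that order.
			 */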
			src = get_one_pte_map_nested(mm, old_addr);
		}
		/*
		 * Since alloc_one_pte_map can drop and re-acquire
		 * page_table_lock, we should re-check the src entry...
		 */
		if (src) {
			if (dst) {
				pte_t pte;
				pte = ptep_clear_flush(vma, old_addr, src);

				/* ZERO_PAGE can be dependent on virtual addr */
				if (pfn_valid(pte_pfn(pte)) &&
				    pte_page(pte) == ZERO_PAGE(old_addr))
					pte = pte_wrprotect(mk_pte(ZERO_PAGE(new_addr),
							new_vma->vm_page_prot));

				set_pte_at(mm, new_addr, dst, pte);
			} else
				error = -ENOMEM;
			pte_unmap_nested(src);
		}
		if (dst)
			pte_unmap(dst);
	}
	spin_unlock(&mm->page_table_lock);
	if (mapping)
		spin_unlock(&mapping->i_mmap_lock);
	return error;
}
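/*
 * Editorial note (not from the original source): move_page_tables() returns
 * the number of bytes whose ptes were successfully moved.  A result smaller
 * than len tells move_vma() that the copy failed part-way, so it moves the
 * entries back and unmaps the new area instead of the old one.
 */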
static unsigned long move_page_tables(struct vm_area_struct *vma,
	unsigned long old_addr, struct vm_area_struct *new_vma,
	unsigned long new_addr, unsigned long len)
{
	unsigned long offset;

	flush_cache_range(vma, old_addr, old_addr + len);

	/*
	 * This is not the clever way to do this, but we're taking the
	 * easy way out on the assumption that most remappings will be
	 * only a few pages.. This also makes error recovery easier.
	 */
	for (offset = 0; offset < len; offset += PAGE_SIZE) {
		if (move_one_page(vma, old_addr + offset,
				new_vma, new_addr + offset) < 0)
			break;
	}
	return offset;
}
static unsigned long move_vma(struct vm_area_struct *vma,
	unsigned long old_addr, unsigned long old_len,
	unsigned long new_len, unsigned long new_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long split = 0;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}
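	/*
	 * Editorial note (not from the original source): "excess" is the part
	 * of the old vma lying outside [old_addr, old_addr + old_len), i.e.
	 * what remains mapped after the do_munmap() below; "split" records
	 * that the unmap cuts the vma in two, so both leftover pieces need
	 * VM_ACCOUNT restored.
	 */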
	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 */
	mm->total_vm += new_len >> PAGE_SHIFT;
	__vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len >> PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		if (new_len > old_len)
			make_pages_present(new_addr + old_len,
					   new_addr + new_len);
	}

	return new_addr;
}
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space).
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
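/*
 * Editorial note, illustrative only (not part of the original source): a
 * typical user-space caller reaches this code through the mremap(2) wrapper,
 * e.g. growing a mapping and letting the kernel relocate it if it cannot be
 * expanded in place:
 *
 *	void *p = mremap(old, old_size, new_size, MREMAP_MAYMOVE);
 *	if (p == MAP_FAILED)
 *		perror("mremap");
 */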
unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		goto out;

	if (addr & ~PAGE_MASK)
		goto out;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		goto out;

	/* new_addr is only valid if MREMAP_FIXED is specified */
	if (flags & MREMAP_FIXED) {
		if (new_addr & ~PAGE_MASK)
			goto out;
		if (!(flags & MREMAP_MAYMOVE))
			goto out;

		if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
			goto out;

		/* Check if the location we're moving into overlaps the
		 * old location at all, and fail if it does.
		 */
		if ((new_addr <= addr) && (new_addr + new_len) > addr)
			goto out;

		if ((addr <= new_addr) && (addr + old_len) > new_addr)
			goto out;

		ret = do_munmap(current->mm, new_addr, new_len);
		if (ret)
			goto out;
	}
	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting.
	 */
	if (old_len >= new_len) {
		ret = do_munmap(current->mm, addr + new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		if (!(flags & MREMAP_FIXED) || (new_addr == addr))
			goto out;
		old_len = new_len;
	}
	/*
	 * Ok, we need to grow..  or relocate.
	 */
	ret = -EFAULT;
	vma = find_vma(current->mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	if (is_vm_hugetlb_page(vma)) {
		ret = -EINVAL;
		goto out;
	}
	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto out;
	if (vma->vm_flags & VM_DONTEXPAND) {
		if (new_len > old_len)
			goto out;
	}
	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = current->mm->locked_vm << PAGE_SHIFT;
		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
		locked += new_len - old_len;
		ret = -EAGAIN;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			goto out;
	}
	if (!may_expand_vm(current->mm, (new_len - old_len) >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (vma->vm_flags & VM_ACCOUNT) {
		charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory(charged))
			goto out_nc;
	}
	/* old_len exactly to the end of the area..
	 * And we're not relocating the area.
	 */
	if (old_len == vma->vm_end - addr &&
	    !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
	    (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
		unsigned long max_addr = TASK_SIZE;
		if (vma->vm_next)
			max_addr = vma->vm_next->vm_start;
		/* can we just expand the current mapping? */
		if (max_addr - addr >= new_len) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			vma_adjust(vma, vma->vm_start,
				addr + new_len, vma->vm_pgoff, NULL);

			current->mm->total_vm += pages;
			__vm_stat_account(vma->vm_mm, vma->vm_flags,
							vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				current->mm->locked_vm += pages;
				make_pages_present(addr + old_len,
						   addr + new_len);
			}
			ret = addr;
			goto out;
		}
	}
	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		if (!(flags & MREMAP_FIXED)) {
			unsigned long map_flags = 0;
			if (vma->vm_flags & VM_MAYSHARE)
				map_flags |= MAP_SHARED;

			new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
						vma->vm_pgoff, map_flags);
			ret = new_addr;
			if (new_addr & ~PAGE_MASK)
				goto out;
		}
		ret = move_vma(vma, addr, old_len, new_len, new_addr);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
out_nc:
	return ret;
}
asmlinkage unsigned long sys_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}