/*
 * linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
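/*
 * Tear down whatever currently occupies a pte slot: a present page, whose
 * rmap and rss accounting must be dropped, or a non-present entry such as
 * a swap entry, whose swap reference must be released.
 */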
static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_present(pte)) {
		struct page *page;

		flush_cache_page(vma, addr, pte_pfn(pte));
		pte = ptep_clear_flush(vma, addr, ptep);
		page = vm_normal_page(vma, addr, pte);
		if (page) {
			if (pte_dirty(pte))
				set_page_dirty(page);
			page_remove_rmap(page, vma);
			page_cache_release(page);
			update_hiwater_rss(mm);
			dec_mm_counter(mm, file_rss);
		}
	} else {
		/* Non-present: release a swap reference unless it's a file pte. */
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear_not_present_full(mm, addr, ptep, 0);
	}
}
/*
 * Install a file pte to a given virtual memory address, release any
 * previously existing mapping.
 */
static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;

	if (!pte_none(*pte))
		zap_pte(mm, vma, addr, pte);

	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
	/*
	 * We don't need to run update_mmu_cache() here because the "file pte"
	 * being installed by install_file_pte() is not a real pte - it's a
	 * non-present entry (like a swap entry), noting what file offset should
	 * be mapped there when there's a fault (in a non-linear vma where
	 * that's not obvious).
	 */
	pte_unmap_unlock(pte, ptl);
	err = 0;
out:
	return err;
}
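/*
 * Install a file pte for every page in [addr, addr + size); the first
 * failure from install_file_pte() aborts the walk.
 */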
static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long size, pgoff_t pgoff)
{
	int err;

	do {
		err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
		if (err)
			return err;

		size -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	} while (size);

	return 0;
}
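/*
 * Illustrative userspace usage (a sketch, not part of this file): fd is
 * assumed to be an open descriptor on a shared-mappable file and psize
 * the system page size. After the remap_file_pages() call, the second
 * page of the window maps file page 0 instead of file page 1, with no
 * new vma created:
 *
 *	char *win = mmap(NULL, 4 * psize, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	remap_file_pages(win + psize, psize, 0, 0, 0);
 */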
/**
 * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range (see NOTE)
 * @pgoff: to-be-mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter will cause no IO.
 *
 * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
 * (shared backing store file).
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the @prot parameter right now is ignored (but must be zero),
 * and the vma's default protection is used. Arbitrary protections
 * might be implemented in the future.
 */
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
	unsigned long prot, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	unsigned long end = start + size;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;

	if (prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;
	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif
	/* We need down_write() to change vma->vm_flags. */
	down_read(&mm->mmap_sem);
 retry:
	vma = find_vma(mm, start);
	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma. vm_private_data is used as a
	 * swapout cursor in a VM_NONLINEAR vma.
	 */
	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
		goto out;

	if (!(vma->vm_flags & VM_CAN_NONLINEAR))
		goto out;

	if (end <= start || start < vma->vm_start || end > vma->vm_end)
		goto out;
	/* Must set VM_NONLINEAR before any pages are populated. */
	if (!(vma->vm_flags & VM_NONLINEAR)) {
		/* Don't need a nonlinear mapping, exit success */
		if (pgoff == linear_page_index(vma, start)) {
			err = 0;
			goto out;
		}

		if (!has_write_lock) {
			up_read(&mm->mmap_sem);
			down_write(&mm->mmap_sem);
			has_write_lock = 1;
			goto retry;
		}
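		/*
		 * mmap_sem is now held for write; the retry path above
		 * re-validated the vma after the read lock was dropped.
		 */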
		mapping = vma->vm_file->f_mapping;
		/*
		 * page_mkclean doesn't work on nonlinear vmas, so if
		 * dirty pages need to be accounted, emulate with linear
		 * vmas.
		 */
		if (mapping_cap_account_dirty(mapping)) {
			unsigned long addr;
			struct file *file = vma->vm_file;

			flags &= MAP_NONBLOCK;
			get_file(file);
			addr = mmap_region(file, start, size,
					flags, vma->vm_flags, pgoff, 1);
			fput(file);
			if (IS_ERR_VALUE(addr)) {
				err = addr;
			} else {
				BUG_ON(addr != start);
				err = 0;
			}
			goto out;
		}
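		/*
		 * Linear file offsets no longer describe this vma: move it
		 * from the mapping's prio tree to the nonlinear list that
		 * rmap scans exhaustively.
		 */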
		spin_lock(&mapping->i_mmap_lock);
		flush_dcache_mmap_lock(mapping);
		vma->vm_flags |= VM_NONLINEAR;
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		flush_dcache_mmap_unlock(mapping);
		spin_unlock(&mapping->i_mmap_lock);
	}
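	/*
	 * Tell secondary MMUs (mmu notifier users such as KVM) that the
	 * ptes in this range are about to be replaced with file ptes.
	 */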
	mmu_notifier_invalidate_range_start(mm, start, start + size);
	err = populate_range(mm, vma, start, size, pgoff);
	mmu_notifier_invalidate_range_end(mm, start, start + size);
	if (!err && !(flags & MAP_NONBLOCK)) {
		if (unlikely(has_write_lock)) {
			downgrade_write(&mm->mmap_sem);
			has_write_lock = 0;
		}
		make_pages_present(start, start + size);
	}
	/*
	 * We can't clear VM_NONLINEAR because we'd have to do
	 * it after ->populate completes, and that would prevent
	 * downgrading the lock. (Locks can't be upgraded).
	 */

out:
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);

	return err;
}