/*
 * linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/syscalls.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
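
/*
 * zap_pte - unhook whatever is currently mapped at @addr so that a new
 * entry can be installed.  A present page is flushed and its rmap and
 * page cache references are dropped; a swap entry is released; a file
 * pte is simply cleared.  Returns nonzero when a valid present page was
 * removed, so the caller knows whether to fix up the rss accounting.
 */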
static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	struct page *page = NULL;

	if (pte_present(pte)) {
		unsigned long pfn = pte_pfn(pte);
		flush_cache_page(vma, addr, pfn);
		pte = ptep_clear_flush(vma, addr, ptep);
		if (unlikely(!pfn_valid(pfn))) {
			print_bad_pte(vma, pte, addr);
			goto out;
		}
		page = pfn_to_page(pfn);
		if (pte_dirty(pte))
			set_page_dirty(page);
		page_remove_rmap(page);
		page_cache_release(page);
	} else {
		/* Nonpresent: drop any swap entry; a file pte needs no cleanup. */
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
out:
	return !!page;
}

/*
 * Install a file page to a given virtual memory address, release any
 * previously existing mapping.
 */
int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, struct page *page, pgprot_t prot)
{
	struct inode *inode;
	pgoff_t size;
	int err = -ENOMEM;
	pte_t *pte;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;
	pte_t pte_val;
	spinlock_t *ptl;

	BUG_ON(vma->vm_flags & VM_RESERVED);

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		goto out;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		goto out;
	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		goto out;

	/*
	 * This page may have been truncated. Tell the
	 * caller about it.
	 */
	err = -EINVAL;
	inode = vma->vm_file->f_mapping->host;
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (!page->mapping || page->index >= size)
		goto unlock;
	err = -ENOMEM;
	if (page_mapcount(page) > INT_MAX/2)
		goto unlock;

	/* The slot was empty or held a nonpresent entry: this adds a page. */
	if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
		inc_mm_counter(mm, file_rss);

	flush_icache_page(vma, page);
	set_pte_at(mm, addr, pte, mk_pte(page, prot));
	page_add_file_rmap(page);
	pte_val = *pte;
	update_mmu_cache(vma, addr, pte_val);
	err = 0;
unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return err;
}
EXPORT_SYMBOL(install_page);
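
/*
 * Usage sketch (illustrative only, not part of the original file; loosely
 * modeled on filemap_populate() in mm/filemap.c): a filesystem's
 * ->populate method walks the remapped range a page at a time, wiring
 * resident pages straight into the pagetable and falling back to a file
 * pte when MAP_NONBLOCK forbids starting IO:
 *
 *	struct page *page = find_get_page(mapping, pgoff);
 *	if (page) {
 *		err = install_page(mm, vma, addr, page, prot);
 *		if (err)
 *			page_cache_release(page);
 *	} else if (nonblock) {
 *		err = install_file_pte(mm, vma, addr, pgoff, prot);
 *	}
 */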

/*
 * Install a file pte to a given virtual memory address, release any
 * previously existing mapping.
 */
int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;
	pte_t pte_val;
	spinlock_t *ptl;

	BUG_ON(vma->vm_flags & VM_RESERVED);

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		goto out;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		goto out;
	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		goto out;

	if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
		/* We replaced a present page: account for its removal. */
		update_hiwater_rss(mm);
		dec_mm_counter(mm, file_rss);
	}

	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
	pte_val = *pte;
	update_mmu_cache(vma, addr, pte_val);
	pte_unmap_unlock(pte, ptl);
	err = 0;
out:
	return err;
}
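
/*
 * Note on the pte written above: pgoff_to_pte() encodes the file offset
 * in a nonpresent pte (one for which pte_file() is true).  On the next
 * fault the fault path decodes the offset with pte_to_pgoff() and calls
 * ->populate to bring in the real page, which is what lets MAP_NONBLOCK
 * remapping succeed without doing any IO here.
 */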

/***
 * sys_remap_file_pages - remap arbitrary pages of a shared backing store
 *                        file within an existing vma.
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range
 * @pgoff: to be mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter will cause no IO.
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the 'prot' parameter right now is ignored, and the vma's default
 * protection is used. Arbitrary protections might be implemented in the
 * future.
 */
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
	unsigned long __prot, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	unsigned long end = start + size;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;

	if (__prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif

	/* We need down_write() to change vma->vm_flags. */
	down_read(&mm->mmap_sem);
 retry:
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma. vm_private_data is used as a
	 * swapout cursor in a VM_NONLINEAR vma (unless VM_RESERVED
	 * or VM_LOCKED, but VM_LOCKED could be revoked later on).
	 */
	if (vma && (vma->vm_flags & VM_SHARED) &&
		(!vma->vm_private_data ||
			(vma->vm_flags & (VM_NONLINEAR|VM_RESERVED))) &&
		vma->vm_ops && vma->vm_ops->populate &&
			end > start && start >= vma->vm_start &&
				end <= vma->vm_end) {

		/* Must set VM_NONLINEAR before any pages are populated. */
		if (pgoff != linear_page_index(vma, start) &&
			!(vma->vm_flags & VM_NONLINEAR)) {
			if (!has_write_lock) {
				/*
				 * Drop the read lock, take the write lock
				 * and retry: the vma may have changed while
				 * the semaphore was not held.
				 */
				up_read(&mm->mmap_sem);
				down_write(&mm->mmap_sem);
				has_write_lock = 1;
				goto retry;
			}
			mapping = vma->vm_file->f_mapping;
			spin_lock(&mapping->i_mmap_lock);
			flush_dcache_mmap_lock(mapping);
			vma->vm_flags |= VM_NONLINEAR;
			vma_prio_tree_remove(vma, &mapping->i_mmap);
			vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
			flush_dcache_mmap_unlock(mapping);
			spin_unlock(&mapping->i_mmap_lock);
		}

		err = vma->vm_ops->populate(vma, start, size,
					    vma->vm_page_prot,
					    pgoff, flags & MAP_NONBLOCK);

		/*
		 * We can't clear VM_NONLINEAR because we'd have to do
		 * it after ->populate completes, and that would prevent
		 * downgrading the lock.  (Locks can't be upgraded).
		 */
	}
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);

	return err;
}
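
/*
 * Userspace view (illustrative sketch, not part of the original file):
 * the syscall is reached through the C library wrapper.  The range must
 * be page-aligned, prot must be 0, and pgoff counts in pages:
 *
 *	long pg = sysconf(_SC_PAGESIZE);
 *	int fd = open("big.dat", O_RDWR);
 *	char *win = mmap(NULL, 4 * pg, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *
 *	remap_file_pages(win + 2 * pg, pg, 0, 0, 0);
 *
 * After the call the third page of the window shows file page 0 instead
 * of file page 2, without creating a second vma.
 */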