/*
 *	linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

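/*
 * Clear a single pte under the page table lock: for a present pte,
 * flush it, propagate the pte dirty bit to the backing page, and drop
 * the page's rmap and reference; for a non-present, non-file pte,
 * release the swap entry it holds.
 */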
static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_present(pte)) {
		struct page *page;

		flush_cache_page(vma, addr, pte_pfn(pte));
		pte = ptep_clear_flush(vma, addr, ptep);
		page = vm_normal_page(vma, addr, pte);
		if (page) {
			if (pte_dirty(pte))
				set_page_dirty(page);
			page_remove_rmap(page);
			page_cache_release(page);
			update_hiwater_rss(mm);
			dec_mm_counter(mm, MM_FILEPAGES);
		}
	} else {
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear_not_present_full(mm, addr, ptep, 0);
	}
}

/*
 * Install a file pte to a given virtual memory address, release any
 * previously existing mapping.
 */
static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;

	if (!pte_none(*pte))
		zap_pte(mm, vma, addr, pte);

	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
	/*
	 * We don't need to run update_mmu_cache() here because the "file pte"
	 * being installed by install_file_pte() is not a real pte - it's a
	 * non-present entry (like a swap entry), noting what file offset should
	 * be mapped there when there's a fault (in a non-linear vma where
	 * that's not obvious).
	 */
	pte_unmap_unlock(pte, ptl);
	err = 0;
out:
	return err;
}

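/*
 * Install file ptes covering [addr, addr + size), one page at a time,
 * advancing the backing file offset linearly from pgoff. Any pte
 * already present in the range is zapped first by install_file_pte().
 */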
static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long size, pgoff_t pgoff)
{
	int err;

	do {
		err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
		if (err)
			return err;

		size -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	} while (size);

	return 0;
}

/**
 * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range (see NOTE)
 * @pgoff: to-be-mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter causes no IO
 *
 * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
 * (shared backing store file).
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the @prot parameter right now is ignored (but must be zero),
 * and the vma's default protection is used. Arbitrary protections
 * might be implemented in the future.
 */
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;

	if (prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Does pgoff wrap? */
	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
		return err;

	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif

	/* We need down_write() to change vma->vm_flags. */
	down_read(&mm->mmap_sem);
retry:
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma. vm_private_data is used as a
	 * swapout cursor in a VM_NONLINEAR vma.
	 */
	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
		goto out;

	if (!(vma->vm_flags & VM_CAN_NONLINEAR))
		goto out;

	if (start < vma->vm_start || start + size > vma->vm_end)
		goto out;

	/* Must set VM_NONLINEAR before any pages are populated. */
	if (!(vma->vm_flags & VM_NONLINEAR)) {
		/* Don't need a nonlinear mapping, exit success */
		if (pgoff == linear_page_index(vma, start)) {
			err = 0;
			goto out;
		}

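		/*
		 * mmap_sem can't be upgraded in place, so drop the read
		 * lock, take the write lock and revalidate the vma.
		 */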
		if (!has_write_lock) {
			up_read(&mm->mmap_sem);
			down_write(&mm->mmap_sem);
			has_write_lock = 1;
			goto retry;
		}
		mapping = vma->vm_file->f_mapping;
		/*
		 * page_mkclean doesn't work on nonlinear vmas, so if
		 * dirty pages need to be accounted, emulate with linear
		 * vmas.
		 */
		if (mapping_cap_account_dirty(mapping)) {
			unsigned long addr;
			struct file *file = vma->vm_file;

			flags &= MAP_NONBLOCK;
			get_file(file);
			addr = mmap_region(file, start, size,
					flags, vma->vm_flags, pgoff);
			fput(file);
			if (IS_ERR_VALUE(addr)) {
				err = addr;
			} else {
				BUG_ON(addr != start);
				err = 0;
			}
			goto out;
		}
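
		/*
		 * Move the vma from the mapping's prio tree to its
		 * nonlinear list so rmap walks still find these pages.
		 */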
		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma->vm_flags |= VM_NONLINEAR;
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	if (vma->vm_flags & VM_LOCKED) {
		/*
		 * drop PG_Mlocked flag for over-mapped range
		 */
		vm_flags_t saved_flags = vma->vm_flags;
		munlock_vma_pages_range(vma, start, start + size);
		vma->vm_flags = saved_flags;
	}

	mmu_notifier_invalidate_range_start(mm, start, start + size);
	err = populate_range(mm, vma, start, size, pgoff);
	mmu_notifier_invalidate_range_end(mm, start, start + size);
	if (!err && !(flags & MAP_NONBLOCK)) {
		if (vma->vm_flags & VM_LOCKED) {
			/*
			 * might be mapping previously unmapped range of file
			 */
			mlock_vma_pages_range(vma, start, start + size);
		} else {
			if (unlikely(has_write_lock)) {
				downgrade_write(&mm->mmap_sem);
				has_write_lock = 0;
			}
			make_pages_present(start, start + size);
		}
	}

	/*
	 * We can't clear VM_NONLINEAR because we'd have to do
	 * it after ->populate completes, and that would prevent
	 * downgrading the lock. (Locks can't be upgraded).
	 */

out:
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);

	return err;
}
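
/*
 * Illustrative userspace usage (a sketch, not part of this file):
 * remap file page 2 over the first page of an existing 4-page
 * MAP_SHARED mapping. The file name and mapping size are hypothetical;
 * page_size would come from sysconf(_SC_PAGESIZE).
 *
 *	int fd = open("/tmp/data", O_RDWR);
 *	char *p = mmap(NULL, 4 * page_size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	if (remap_file_pages(p, page_size, 0, 2, 0))
 *		perror("remap_file_pages");
 *
 * prot must be 0 (see the NOTE above) and flags is 0 or MAP_NONBLOCK.
 */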