/*
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002
 */

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap-locking.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
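
/*
 * Clear the pte at @addr.  A present pte loses its rmap entry and its
 * page cache reference; a swap pte releases its swap slot.  Returns 1
 * if the caller needs to flush the TLB for @addr, 0 otherwise.
 */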
static inline int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_none(pte))
		return 0;
	if (pte_present(pte)) {
		unsigned long pfn = pte_pfn(pte);

		flush_cache_page(vma, addr);
		pte = ptep_get_and_clear(ptep);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			if (!PageReserved(page)) {
				if (pte_dirty(pte))
					set_page_dirty(page);
				page_remove_rmap(page, ptep);
				page_cache_release(page);
				mm->rss--;
			}
		}
		return 1;
	} else {
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear(ptep);
		return 0;
	}
}
/*
 * Install a page to a given virtual memory address, release any
 * previously existing mapping.
 */
int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, struct page *page, pgprot_t prot)
{
	int err = -ENOMEM, flush;
	pte_t *pte;
	pgd_t *pgd;
	pmd_t *pmd;
	struct pte_chain *pte_chain;

	pte_chain = pte_chain_alloc(GFP_KERNEL);
	if (!pte_chain)
		goto err;
	pgd = pgd_offset(mm, addr);
	spin_lock(&mm->page_table_lock);

	pmd = pmd_alloc(mm, pgd, addr);
	if (!pmd)
		goto err_unlock;

	pte = pte_alloc_map(mm, pmd, addr);
	if (!pte)
		goto err_unlock;

	flush = zap_pte(mm, vma, addr, pte);

	mm->rss++;
	flush_icache_page(vma, page);
	set_pte(pte, mk_pte(page, prot));
	pte_chain = page_add_rmap(page, pte, pte_chain);
	if (flush)
		flush_tlb_page(vma, addr);
	update_mmu_cache(vma, addr, *pte);
	pte_unmap(pte);
	spin_unlock(&mm->page_table_lock);
	pte_chain_free(pte_chain);
	return 0;

err_unlock:
	spin_unlock(&mm->page_table_lock);
	pte_chain_free(pte_chain);
err:
	return err;
}
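
/*
 * For illustration only: a minimal sketch of how a vma's ->populate
 * method might use install_page().  The handler and its page-lookup
 * helper below are hypothetical, not part of this file; a real
 * implementation, such as the generic filemap_populate(), must also
 * honour the nonblock flag by not touching pages that would need IO.
 *
 *	static int example_populate(struct vm_area_struct *vma,
 *			unsigned long addr, unsigned long len,
 *			pgprot_t prot, unsigned long pgoff, int nonblock)
 *	{
 *		struct mm_struct *mm = vma->vm_mm;
 *		int err;
 *
 *		while (len > 0) {
 *			// hypothetical helper: returns the file page at
 *			// @pgoff with a reference held, or NULL
 *			struct page *page = example_lookup_page(vma, pgoff);
 *
 *			if (!page)
 *				return -ENOMEM;
 *			err = install_page(mm, vma, addr, page, prot);
 *			if (err)
 *				return err;
 *			addr += PAGE_SIZE;
 *			pgoff++;
 *			len -= PAGE_SIZE;
 *		}
 *		return 0;
 *	}
 */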
/***
 * sys_remap_file_pages - remap arbitrary pages of a shared backing store
 *                        file within an existing vma.
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range
 * @pgoff: to be mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter causes no IO.
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the 'prot' parameter is currently ignored, and the vma's default
 * protection is used. Arbitrary protections might be implemented in the
 * future.
 */
long sys_remap_file_pages(unsigned long start, unsigned long size,
	unsigned long __prot, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long end = start + size;
	struct vm_area_struct *vma;
	int err = -EINVAL;

	if (__prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, start);
	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma:
	 */
	if (vma && (vma->vm_flags & VM_SHARED) &&
		vma->vm_ops && vma->vm_ops->populate &&
			end > start && start >= vma->vm_start &&
				end <= vma->vm_end)
		err = vma->vm_ops->populate(vma, start, size, vma->vm_page_prot,
						pgoff, flags & MAP_NONBLOCK);

	up_read(&mm->mmap_sem);

	return err;
}