/*
 * linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002
 */

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap-locking.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
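
/*
 * Tear down one pte: for a present page, clear the mapping, drop the
 * rmap entry and release the page cache reference; for a swapped-out
 * page, release the swap entry instead. Returns 1 if a present pte was
 * cleared, i.e. the caller must flush the TLB for this address once the
 * new pte is in place.
 */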
static inline int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_none(pte))
		return 0;
	if (pte_present(pte)) {
		unsigned long pfn = pte_pfn(pte);

		flush_cache_page(vma, addr);
		pte = ptep_get_and_clear(ptep);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			if (!PageReserved(page)) {
				if (pte_dirty(pte))
					set_page_dirty(page);
				page_remove_rmap(page, ptep);
				page_cache_release(page);
				mm->rss--;
			}
		}
		return 1;
	} else {
		/* a file pte encodes only a file offset, not a swap entry */
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear(ptep);
		return 0;
	}
}

/*
 * Install a page to a given virtual memory address, release any
 * previously existing mapping.
 */
int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, struct page *page, pgprot_t prot)
{
	int err = -ENOMEM, flush;
	pte_t *pte;
	pgd_t *pgd;
	pmd_t *pmd;
	struct pte_chain *pte_chain;

	pte_chain = pte_chain_alloc(GFP_KERNEL);
	if (!pte_chain)
		goto err;
	pgd = pgd_offset(mm, addr);
	spin_lock(&mm->page_table_lock);

	pmd = pmd_alloc(mm, pgd, addr);
	if (!pmd)
		goto err_unlock;

	pte = pte_alloc_map(mm, pmd, addr);
	if (!pte)
		goto err_unlock;

	flush = zap_pte(mm, vma, addr, pte);

	mm->rss++;
	flush_icache_page(vma, page);
	set_pte(pte, mk_pte(page, prot));
	pte_chain = page_add_rmap(page, pte, pte_chain);
	/* read *pte for update_mmu_cache() while the pte is still mapped */
	update_mmu_cache(vma, addr, *pte);
	pte_unmap(pte);
	if (flush)
		flush_tlb_page(vma, addr);
	spin_unlock(&mm->page_table_lock);
	pte_chain_free(pte_chain);
	return 0;

err_unlock:
	spin_unlock(&mm->page_table_lock);
	pte_chain_free(pte_chain);
err:
	return err;
}
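
/*
 * install_page() is the building block for a vma's ->populate method
 * (e.g. filemap_populate() for ordinary files): ->populate finds or
 * reads in the backing page for each file offset and installs it here,
 * one pte at a time, while the caller holds mm->mmap_sem for reading.
 */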

/***
 * sys_remap_file_pages - remap arbitrary pages of a shared backing store
 *                        file within an existing vma.
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range
 * @pgoff: to be mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter causes no IO.
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the 'prot' parameter right now is ignored, and the vma's default
 * protection is used. Arbitrary protections might be implemented in the
 * future.
 */
long sys_remap_file_pages(unsigned long start, unsigned long size,
	unsigned long __prot, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long end = start + size;
	struct vm_area_struct *vma;
	int err = -EINVAL;

	if (__prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, start);
	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma:
	 */
	if (vma && (vma->vm_flags & VM_SHARED) &&
		vma->vm_ops && vma->vm_ops->populate &&
			end > start && start >= vma->vm_start &&
				end <= vma->vm_end)
		err = vma->vm_ops->populate(vma, start, size, vma->vm_page_prot,
						pgoff, flags & MAP_NONBLOCK);

	up_read(&mm->mmap_sem);

	return err;
}
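
/*
 * Illustrative userspace usage, as a sketch only (assumes a libc that
 * exposes remap_file_pages(), an fd referring to a shared-mappable file
 * at least three pages long, and psz = sysconf(_SC_PAGESIZE)):
 *
 *	char *win = mmap(NULL, 2 * psz, PROT_READ, MAP_SHARED, fd, 0);
 *	remap_file_pages(win + psz, psz, 0, 2, 0);
 *
 * The second call rebinds the second page of the window to page 2 of
 * the file without creating a new vma; pgoff counts pages, not bytes,
 * and prot must be 0 as documented above.
 */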