[linux-2.6/openmoko-kernel.git] / mm / msync.c
blob 0e040e9c39d835a7cf938468be6a9b7bccb95c2d
/*
 *	linux/mm/msync.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * The msync() system call.
 */
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
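
/*
 * Walk the ptes of one pmd in [addr, end), moving the dirty bit from
 * each pte to its struct page via set_page_dirty(), so the writeback
 * code sees those pages as dirty.
 */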
static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;
	int progress = 0;

again:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		unsigned long pfn;
		struct page *page;

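		/*
		 * Periodically give up the page table lock: break out
		 * if it is contended or a reschedule is pending; the
		 * walk resumes from "again" after unlocking and a
		 * cond_resched().
		 */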
		if (progress >= 64) {
			progress = 0;
			if (need_resched() || need_lockbreak(ptl))
				break;
		}
		progress++;
		if (!pte_present(*pte))
			continue;
		if (!pte_maybe_dirty(*pte))
			continue;
		pfn = pte_pfn(*pte);
		if (unlikely(!pfn_valid(pfn))) {
			print_bad_pte(vma, *pte, addr);
			continue;
		}
		page = pfn_to_page(pfn);

		if (ptep_clear_flush_dirty(vma, addr, pte) ||
		    page_test_and_clear_dirty(page))
			set_page_dirty(page);
		progress += 3;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	if (addr != end)
		goto again;
}
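
/*
 * The pmd and pud walkers below descend one page table level each,
 * skipping entries that are empty or bad.
 */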
static inline void msync_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		msync_pte_range(vma, pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void msync_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		msync_pmd_range(vma, pud, addr, next);
	} while (pud++, addr = next, addr != end);
}
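
/*
 * Top level of the dirty-bit walk for one vma: start at the pgd and
 * descend through the pud, pmd and pte levels for [addr, end).
 */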
static void msync_page_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	/* For hugepages we can't go walking the page table normally,
	 * but that's ok, hugetlbfs is memory based, so we don't need
	 * to do anything more on an msync().
	 * Can't do anything with VM_RESERVED regions either.
	 */
	if (vma->vm_flags & (VM_HUGETLB|VM_RESERVED))
		return;

	BUG_ON(addr >= end);
	pgd = pgd_offset(vma->vm_mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		msync_pud_range(vma, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

/*
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC does not start I/O (it used to, up to 2.5.67).  Instead, it just
 * marks the relevant pages dirty.  The application may now run fsync() to
 * write out the dirty pages and wait on the writeout and check the result.
 * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
 * async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
 * applications.
 */
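/*
 * A minimal illustrative userspace sequence under that model (assumed
 * usage, not part of this file):
 *
 *	msync(addr, len, MS_ASYNC);	marks the pages dirty, starts no I/O
 *	fsync(fd);			writes them out and waits on the result
 */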
static int msync_interval(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end, int flags)
{
	int ret = 0;
	struct file *file = vma->vm_file;

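	/*
	 * Invalidating a mlock()ed mapping could throw away its locked
	 * pages, so refuse MS_INVALIDATE on it.
	 */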
	if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
		return -EBUSY;

	if (file && (vma->vm_flags & VM_SHARED)) {
		msync_page_range(vma, addr, end);

		if (flags & MS_SYNC) {
			struct address_space *mapping = file->f_mapping;
			int err;

			ret = filemap_fdatawrite(mapping);
			if (file->f_op && file->f_op->fsync) {
				/*
				 * We don't take i_sem here because mmap_sem
				 * is already held.
				 */
				err = file->f_op->fsync(file, file->f_dentry, 1);
				if (err && !ret)
					ret = err;
			}
			err = filemap_fdatawait(mapping);
			if (!ret)
				ret = err;
		}
	}
	return ret;
}
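
/*
 * The system call entry point: validate the flags and the range, then
 * apply msync_interval() to every vma overlapping [start, start+len),
 * remembering -ENOMEM for any unmapped gaps along the way.
 */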
asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
{
	unsigned long end;
	struct vm_area_struct *vma;
	int unmapped_error, error = -EINVAL;

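	/* Hint to the I/O schedulers that this task is doing synchronous writes. */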
	if (flags & MS_SYNC)
		current->flags |= PF_SYNCWRITE;

	down_read(&current->mm->mmap_sem);
	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
		goto out;
	if (start & ~PAGE_MASK)
		goto out;
	if ((flags & MS_ASYNC) && (flags & MS_SYNC))
		goto out;
	error = -ENOMEM;
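	/* Round len up to a whole number of pages. */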
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)
		goto out;
	error = 0;
	if (end == start)
		goto out;
	/*
	 * If the interval [start,end) covers some unmapped address ranges,
	 * just ignore them, but return -ENOMEM at the end.
	 */
	vma = find_vma(current->mm, start);
	unmapped_error = 0;
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;
		/* Here start < vma->vm_end. */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
		}
		/* Here vma->vm_start <= start < vma->vm_end. */
		if (end <= vma->vm_end) {
			if (start < end) {
				error = msync_interval(vma, start, end, flags);
				if (error)
					goto out;
			}
			error = unmapped_error;
			goto out;
		}
		/* Here vma->vm_start <= start < vma->vm_end < end. */
		error = msync_interval(vma, start, vma->vm_end, flags);
		if (error)
			goto out;
		start = vma->vm_end;
		vma = vma->vm_next;
	}
out:
	up_read(&current->mm->mmap_sem);
	current->flags &= ~PF_SYNCWRITE;
	return error;
}