/*
 *	linux/mm/msync.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * The msync() system call.
 */
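/*
 * For reference, the userspace-visible form of this call (declared in
 * <sys/mman.h>, not in this file) is:
 *
 *	int msync(void *addr, size_t length, int flags);
 *
 * where flags is MS_ASYNC or MS_SYNC (not both), optionally OR'ed with
 * MS_INVALIDATE; sys_msync() below rejects other combinations.
 */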
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/writeback.h>
#include <linux/file.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
/*
 * Walk the ptes in [addr, end), transferring each hardware dirty bit to
 * the corresponding struct page.  Returns the number of pages newly
 * marked dirty.
 */
static unsigned long msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;
	int progress = 0;
	unsigned long ret = 0;

again:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;

		/* Periodically drop the lock to stay preemptible. */
		if (progress >= 64) {
			progress = 0;
			if (need_resched() || need_lockbreak(ptl))
				break;
		}
		progress++;
		if (!pte_present(*pte))
			continue;
		if (!pte_maybe_dirty(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		if (ptep_clear_flush_dirty(vma, addr, pte) ||
				page_test_and_clear_dirty(page))
			ret += set_page_dirty(page);
		progress += 3;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	if (addr != end)
		goto again;
	return ret;
}
static inline unsigned long msync_pmd_range(struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		ret += msync_pte_range(vma, pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
	return ret;
}
static inline unsigned long msync_pud_range(struct vm_area_struct *vma,
			pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;
	unsigned long ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		ret += msync_pmd_range(vma, pud, addr, next);
	} while (pud++, addr = next, addr != end);
	return ret;
}
static unsigned long msync_page_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long ret = 0;

	/* For hugepages we can't go walking the page table normally,
	 * but that's ok, hugetlbfs is memory based, so we don't need
	 * to do anything more on an msync().
	 */
	if (vma->vm_flags & VM_HUGETLB)
		return 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(vma->vm_mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		ret += msync_pud_range(vma, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	return ret;
}
/*
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC does not start I/O (it used to, up to 2.5.67).  Instead, it
 * just marks the relevant pages dirty.  The application may now run
 * fsync() to write out the dirty pages and wait on the writeout and
 * check the result.  Or the application may run fadvise(FADV_DONTNEED)
 * against the fd to start async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility
 * to applications.
 */
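/*
 * Illustrative userspace sketch of the MS_ASYNC protocol described
 * above.  This is an assumption for documentation purposes, not code
 * from this file: the hypothetical caller holds a MAP_SHARED mapping
 * "addr" of "length" bytes backed by descriptor "fd".
 *
 *	int flush_mapping(void *addr, size_t length, int fd)
 *	{
 *		if (msync(addr, length, MS_ASYNC) != 0)
 *			return -1;	// just marks pages dirty, no I/O
 *		return fsync(fd);	// writes out, waits, reports result
 *	}
 *
 * Alternatively, posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED) after the
 * msync() starts the writeout without waiting for it.
 */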
static int msync_interval(struct vm_area_struct *vma, unsigned long addr,
			unsigned long end, int flags,
			unsigned long *nr_pages_dirtied)
{
	struct file *file = vma->vm_file;

	if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
		return -EBUSY;

	if (file && (vma->vm_flags & VM_SHARED))
		*nr_pages_dirtied = msync_page_range(vma, addr, end);
	return 0;
}
asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
{
	unsigned long end;
	struct vm_area_struct *vma;
	int unmapped_error = 0;
	int error = -EINVAL;
	int done = 0;

	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
		goto out;
	if (start & ~PAGE_MASK)
		goto out;
	if ((flags & MS_ASYNC) && (flags & MS_SYNC))
		goto out;
	error = -ENOMEM;
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)
		goto out;
	error = 0;
	if (end == start)
		goto out;
	/*
	 * If the interval [start,end) covers some unmapped address ranges,
	 * just ignore them, but return -ENOMEM at the end.
	 */
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, start);
	do {
		unsigned long nr_pages_dirtied = 0;
		struct file *file;

		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out_unlock;
		/* Here start < vma->vm_end. */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
		}
		/* Here vma->vm_start <= start < vma->vm_end. */
		if (end <= vma->vm_end) {
			if (start < end) {
				error = msync_interval(vma, start, end, flags,
							&nr_pages_dirtied);
				if (error)
					goto out_unlock;
			}
			error = unmapped_error;
			done = 1;
		} else {
			/* Here vma->vm_start <= start < vma->vm_end < end. */
			error = msync_interval(vma, start, vma->vm_end, flags,
						&nr_pages_dirtied);
			if (error)
				goto out_unlock;
		}
		file = vma->vm_file;
		start = vma->vm_end;
		if ((flags & MS_ASYNC) && file && nr_pages_dirtied) {
			get_file(file);
			up_read(&current->mm->mmap_sem);
			balance_dirty_pages_ratelimited_nr(file->f_mapping,
							nr_pages_dirtied);
			fput(file);
			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm, start);
		} else if ((flags & MS_SYNC) && file &&
				(vma->vm_flags & VM_SHARED)) {
			get_file(file);
			up_read(&current->mm->mmap_sem);
			error = do_fsync(file, 0);
			fput(file);
			down_read(&current->mm->mmap_sem);
			if (error)
				goto out_unlock;
			vma = find_vma(current->mm, start);
		} else {
			vma = vma->vm_next;
		}
	} while (vma && !done);
out_unlock:
	up_read(&current->mm->mmap_sem);
out:
	return error ? : unmapped_error;
}