/*
 *	linux/mm/msync.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * The msync() system call.
 */
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * Called with mm->page_table_lock held to protect against other
 * threads/the swapper from ripping pte's out from under us.
 */
static void sync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_map(pmd, addr);
	do {
		unsigned long pfn;
		struct page *page;

		if (!pte_present(*pte))
			continue;
		if (!pte_maybe_dirty(*pte))
			continue;
		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (PageReserved(page))
			continue;
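
		/*
		 * Transfer any dirtiness from the pte (and, on s390, the
		 * per-page storage key) to the struct page, so writeback
		 * sees the page as dirty.
		 */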
		if (ptep_clear_flush_dirty(vma, addr, pte) ||
		    page_test_and_clear_dirty(page))
			set_page_dirty(page);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);
}

static inline void sync_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		sync_pte_range(vma, pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void sync_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		sync_pmd_range(vma, pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

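/*
 * Walk the pgd -> pud -> pmd -> pte hierarchy for [addr, end), moving
 * hardware dirty bits over to the struct pages.
 */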
static void sync_page_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;

	/* For hugepages we can't go walking the page table normally,
	 * but that's ok, hugetlbfs is memory based, so we don't need
	 * to do anything more on an msync() */
	if (is_vm_hugetlb_page(vma))
		return;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	spin_lock(&mm->page_table_lock);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		sync_pud_range(vma, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	spin_unlock(&mm->page_table_lock);
}

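/*
 * On preemptible kernels, sync the range in 64KB chunks:
 * sync_page_range() holds mm->page_table_lock across its walk, so the
 * cond_resched() between chunks keeps scheduling latency bounded.
 */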
#ifdef CONFIG_PREEMPT
static inline void filemap_sync(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end)
{
	const size_t chunk = 64 * 1024;	/* bytes */
	unsigned long next;

	do {
		next = addr + chunk;
		if (next > end || next < addr)
			next = end;
		sync_page_range(vma, addr, next);
		cond_resched();
	} while (addr = next, addr != end);
}
#else
static inline void filemap_sync(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end)
{
	sync_page_range(vma, addr, end);
}
#endif

/*
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC does not start I/O (it used to, up to 2.5.67).  Instead, it just
 * marks the relevant pages dirty.  The application may now run fsync() to
 * write out the dirty pages and wait on the writeout and check the result.
 * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
 * async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
 * applications.
 */
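/*
 * For illustration only (userspace pseudo-usage; fd, buf and len are
 * hypothetical, not part of this file):
 *
 *	msync(buf, len, MS_ASYNC);	- just marks the pages dirty
 *	...
 *	fsync(fd);			- writes out, waits, returns result
 *
 * or, to start async writeout without waiting:
 *
 *	posix_fadvise(fd, 0, len, POSIX_FADV_DONTNEED);
 */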
static int msync_interval(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end, int flags)
{
	int ret = 0;
	struct file *file = vma->vm_file;

	if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
		return -EBUSY;

	if (file && (vma->vm_flags & VM_SHARED)) {
		filemap_sync(vma, addr, end);

		if (flags & MS_SYNC) {
			struct address_space *mapping = file->f_mapping;
			int err;

			ret = filemap_fdatawrite(mapping);
			if (file->f_op && file->f_op->fsync) {
				/*
				 * We don't take i_sem here because mmap_sem
				 * is already held.
				 */
				err = file->f_op->fsync(file, file->f_dentry, 1);
				if (err && !ret)
					ret = err;
			}
			err = filemap_fdatawait(mapping);
			if (!ret)
				ret = err;
		}
	}
	return ret;
}

asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
{
	unsigned long end;
	struct vm_area_struct *vma;
	int unmapped_error, error = -EINVAL;

	if (flags & MS_SYNC)
		current->flags |= PF_SYNCWRITE;

	down_read(&current->mm->mmap_sem);
	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
		goto out;
	if (start & ~PAGE_MASK)
		goto out;
	if ((flags & MS_ASYNC) && (flags & MS_SYNC))
		goto out;
	error = -ENOMEM;
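	/* round len up to a whole number of pages */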
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)
		goto out;
	error = 0;
	if (end == start)
		goto out;
	/*
	 * If the interval [start,end) covers some unmapped address ranges,
	 * just ignore them, but return -ENOMEM at the end.
	 */
	vma = find_vma(current->mm, start);
	unmapped_error = 0;
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;
		/* Here start < vma->vm_end. */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
		}
		/* Here vma->vm_start <= start < vma->vm_end. */
		if (end <= vma->vm_end) {
			if (start < end) {
				error = msync_interval(vma, start, end, flags);
				if (error)
					goto out;
			}
			error = unmapped_error;
			goto out;
		}
		/* Here vma->vm_start <= start < vma->vm_end < end. */
		error = msync_interval(vma, start, vma->vm_end, flags);
		if (error)
			goto out;
		start = vma->vm_end;
		vma = vma->vm_next;
	}
out:
	up_read(&current->mm->mmap_sem);
	current->flags &= ~PF_SYNCWRITE;
	return error;
}