 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/hugetlb.h>
#include <linux/sched.h>
#include <linux/ksm.h>
/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	/* be safe, default to 1. list exceptions explicitly */
/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;
	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
				vma->vm_file, pgoff, vma_policy(vma));
	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;
out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}
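/*
 * Usage sketch (illustrative only, not part of this file; names are
 * hypothetical): advising a sub-range of an existing mapping exercises
 * the split_vma() paths above.  Advising only the middle page of a
 * three-page anonymous mapping leaves three VMAs where there was one;
 * MADV_DONTFORK is just one example of a flag-changing behavior.
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int split_example(void)
 *	{
 *		long psz = sysconf(_SC_PAGESIZE);
 *		char *p = mmap(NULL, 3 * psz, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (p == MAP_FAILED)
 *			return -1;
 *		// Only the middle page changes vm_flags, so the VMA splits.
 *		return madvise(p + psz, psz, MADV_DONTFORK);
 *	}
 */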
/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;
	if (!file)
		return -EBADF;

	if (file->f_mapping->a_ops->get_xip_mem) {
		/* no bad return value, but ignore advice */
		return 0;
	}
	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}
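/*
 * Usage sketch (illustrative only; names are hypothetical): requesting
 * asynchronous readahead on a file mapping before a sequential scan.
 * madvise() queues the I/O via the readahead path above and returns
 * without waiting for it to complete.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	void *map_and_prefetch(const char *path, size_t len)
 *	{
 *		int fd = open(path, O_RDONLY);
 *		void *p = (fd < 0) ? MAP_FAILED :
 *			mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *		if (p != MAP_FAILED)
 *			madvise(p, len, MADV_WILLNEED);	// schedule, don't wait
 *		return p;
 *	}
 */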
/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
		struct zap_details details = {
			.nonlinear_vma = vma,
			.last_index = ULONG_MAX,
		};
		zap_page_range(vma, start, end - start, &details);
	} else
		zap_page_range(vma, start, end - start, NULL);
	return 0;
}
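/*
 * Usage sketch (illustrative only; names are hypothetical): discarding a
 * large scratch buffer once its contents are dead.  The mapping itself
 * stays valid; the zapped pages fault back in as zero-fill for anonymous
 * memory, or are re-read from the file for file-backed mappings.
 *
 *	#include <string.h>
 *	#include <sys/mman.h>
 *
 *	void scratch_reset(void *buf, size_t len)
 *	{
 *		// For big anonymous buffers this is cheaper than memset():
 *		// the kernel just drops the pages.
 *		if (madvise(buf, len, MADV_DONTNEED) != 0)
 *			memset(buf, 0, len);	// fallback: clear in place
 *	}
 */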
/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 *
 * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
 * Other filesystems return -ENOSYS.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	struct address_space *mapping;
	loff_t offset, endoff;
	int error;
	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
		return -EINVAL;

	if (!vma->vm_file || !vma->vm_file->f_mapping
		|| !vma->vm_file->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	mapping = vma->vm_file->f_mapping;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	endoff = (loff_t)(end - vma->vm_start - 1)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/* vmtruncate_range needs to take i_mutex and i_alloc_sem */
	up_read(&current->mm->mmap_sem);
	error = vmtruncate_range(mapping->host, offset, endoff);
	down_read(&current->mm->mmap_sem);
	return error;
}
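/*
 * Usage sketch (illustrative only; names are hypothetical): punching a
 * hole in a shared, writable tmpfs mapping.  Per the NOTE above, other
 * filesystems fail with -ENOSYS, and a mapping that is not both shared
 * and writable is rejected before vmtruncate_range() is reached.
 *
 *	#include <sys/mman.h>
 *
 *	// base must come from mmap(..., PROT_READ|PROT_WRITE, MAP_SHARED,
 *	// tmpfs_fd, 0); offsets and lengths are in whole pages.
 *	int punch_hole(char *base, size_t pgsz, size_t first, size_t count)
 *	{
 *		return madvise(base + first * pgsz, count * pgsz, MADV_REMOVE);
 *	}
 */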
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(unsigned long start, unsigned long end)
{
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	for (; start < end; start += PAGE_SIZE) {
		struct page *p;
		int ret = get_user_pages(current, current->mm, start, 1,
						0, 0, &p, NULL);
		if (ret != 1)
			return ret;
		printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n",
		       page_to_pfn(p), start);
		/* Ignore return value for now */
		__memory_failure(page_to_pfn(p), 0, 1);
		put_page(p);
	}
	return ret;
}
#endif
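/*
 * Usage sketch (illustrative only; names are hypothetical): injecting a
 * memory failure on one page of a test buffer.  This needs CAP_SYS_ADMIN
 * and a kernel built with CONFIG_MEMORY_FAILURE; the page is then handled
 * as if the hardware had reported an uncorrected error there.  Older
 * userspace headers may not define MADV_HWPOISON.
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int poison_first_page(void *buf)
 *	{
 *		return madvise(buf, sysconf(_SC_PAGESIZE), MADV_HWPOISON);
 *	}
 */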
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}
static int
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_SEQUENTIAL:
	case MADV_UNMERGEABLE:
/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the application
 *		will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON)
		return madvise_hwpoison(start, start+len_in);
#endif
	if (!madvise_behavior_valid(behavior))
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write)
		down_write(&current->mm->mmap_sem);
	else
		down_read(&current->mm->mmap_sem);
	if (start & ~PAGE_MASK)
		goto out;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		goto out;

	end = start + len;
	if (end < start)
		goto out;
	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;
		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}
		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}
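/*
 * Usage sketch (illustrative only; names are hypothetical): a checked
 * wrapper matching the return values documented above.  On failure errno
 * carries EINVAL/ENOMEM/EIO/EBADF; note that ENOMEM may mean the range
 * merely covered an unmapped gap, with the mapped parts still advised.
 * EAGAIN only signals a transient lack of kernel resources, so it is
 * worth one simple retry.
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *
 *	int madvise_checked(void *addr, size_t len, int advice)
 *	{
 *		if (madvise(addr, len, advice) == 0)
 *			return 0;
 *		if (errno == EAGAIN && madvise(addr, len, advice) == 0)
 *			return 0;	// one retry on transient failure
 *		fprintf(stderr, "madvise: %s\n", strerror(errno));
 *		return -1;
 *	}
 */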