/*
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
#include <linux/sched.h>
#include <linux/ksm.h>
/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
        switch (behavior) {
        case MADV_REMOVE:
        case MADV_WILLNEED:
        case MADV_DONTNEED:
                return 0;
        default:
                /* be safe, default to 1. list exceptions explicitly */
                return 1;
        }
}
/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
                     struct vm_area_struct **prev,
                     unsigned long start, unsigned long end, int behavior)
{
        struct mm_struct *mm = vma->vm_mm;
        int error = 0;
        pgoff_t pgoff;
        unsigned long new_flags = vma->vm_flags;

        switch (behavior) {
        case MADV_NORMAL:
                new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
                break;
        case MADV_SEQUENTIAL:
                new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
                break;
        case MADV_RANDOM:
                new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
                break;
        case MADV_DONTFORK:
                new_flags |= VM_DONTCOPY;
                break;
        case MADV_DOFORK:
                if (vma->vm_flags & VM_IO) {
                        error = -EINVAL;
                        goto out;
                }
                new_flags &= ~VM_DONTCOPY;
                break;
        case MADV_MERGEABLE:
        case MADV_UNMERGEABLE:
                error = ksm_madvise(vma, start, end, behavior, &new_flags);
                if (error)
                        goto out;
                break;
        case MADV_HUGEPAGE:
                error = hugepage_madvise(&new_flags);
                if (error)
                        goto out;
                break;
        }
        if (new_flags == vma->vm_flags) {
                *prev = vma;
                goto out;
        }

        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
                          vma->vm_file, pgoff, vma_policy(vma));
        if (*prev) {
                vma = *prev;
                goto success;
        }

        *prev = vma;
        if (start != vma->vm_start) {
                error = split_vma(mm, vma, start, 1);
                if (error)
                        goto out;
        }

        if (end != vma->vm_end) {
                error = split_vma(mm, vma, end, 0);
                if (error)
                        goto out;
        }
success:
        /*
         * vm_flags is protected by the mmap_sem held in write mode.
         */
        vma->vm_flags = new_flags;

out:
        if (error == -ENOMEM)
                error = -EAGAIN;
        return error;
}
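/*
 * Worked example of the splitting behaviour above (illustrative only;
 * the addresses are hypothetical). Because vm_flags apply to a whole
 * vma, advising an interior sub-range forces two split_vma() calls:
 *
 *      before:  [vm_start ........................... vm_end)
 *      madvise(start, end - start, MADV_SEQUENTIAL);
 *      after:   [vm_start .. start)[start .... end)[end .. vm_end)
 *                                  '-- only this vma gets VM_SEQ_READ
 */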
/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
                             struct vm_area_struct **prev,
                             unsigned long start, unsigned long end)
{
        struct file *file = vma->vm_file;

        if (!file)
                return -EBADF;

        if (file->f_mapping->a_ops->get_xip_mem) {
                /* no bad return value, but ignore advice */
                return 0;
        }
        *prev = vma;
        start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        if (end > vma->vm_end)
                end = vma->vm_end;
        end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

        force_page_cache_readahead(file->f_mapping, file, start, end - start);
        return 0;
}
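/*
 * Illustrative userspace sketch (the fd and length are hypothetical):
 * MADV_WILLNEED schedules readahead for a file-backed mapping and
 * returns without waiting, so the caller can overlap the I/O with
 * other work.
 *
 *      void *buf = mmap(NULL, 16 << 20, PROT_READ, MAP_PRIVATE, fd, 0);
 *      if (buf != MAP_FAILED)
 *              madvise(buf, 16 << 20, MADV_WILLNEED);
 *      // ... do other work while the page cache fills ...
 */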
/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
                             struct vm_area_struct **prev,
                             unsigned long start, unsigned long end)
{
        *prev = vma;
        if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
                return -EINVAL;

        if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
                struct zap_details details = {
                        .nonlinear_vma = vma,
                        .last_index = ULONG_MAX,
                };
                zap_page_range(vma, start, end - start, &details);
        } else
                zap_page_range(vma, start, end - start, NULL);
        return 0;
}
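/*
 * Typical use, as a hedged sketch (the allocator names are hypothetical):
 * a userspace allocator returning a free arena's memory to the kernel
 * while keeping the virtual address range reserved.
 *
 *      // contents may be discarded; dirty anonymous pages are not swapped
 *      madvise(arena, arena_size, MADV_DONTNEED);
 *      // later faults map fresh zero pages (anon) or re-read the file
 */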
/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 *
 * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
 * Other filesystems return -ENOSYS.
 */
static long madvise_remove(struct vm_area_struct *vma,
                           struct vm_area_struct **prev,
                           unsigned long start, unsigned long end)
{
        struct address_space *mapping;
        loff_t offset, endoff;
        int error;
        *prev = NULL;   /* tell sys_madvise we drop mmap_sem */

        if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
                return -EINVAL;
        if (!vma->vm_file || !vma->vm_file->f_mapping
                || !vma->vm_file->f_mapping->host) {
                return -EINVAL;
        }

        if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
                return -EACCES;
        mapping = vma->vm_file->f_mapping;

        offset = (loff_t)(start - vma->vm_start)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
        endoff = (loff_t)(end - vma->vm_start - 1)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
        /* vmtruncate_range needs to take i_mutex and i_alloc_sem */
        up_read(&current->mm->mmap_sem);
        error = vmtruncate_range(mapping->host, offset, endoff);
        down_read(&current->mm->mmap_sem);
        return error;
}
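/*
 * Hole-punching sketch for a tmpfs object (the names and sizes are
 * hypothetical; per the checks above, the mapping must be shared and
 * writable, and offsets should be page-aligned):
 *
 *      int fd = shm_open("/scratch", O_RDWR | O_CREAT, 0600);
 *      ftruncate(fd, 8 << 20);
 *      char *p = mmap(NULL, 8 << 20, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, 0);
 *      // punch out the second megabyte: pages and backing store are freed
 *      madvise(p + (1 << 20), 1 << 20, MADV_REMOVE);
 */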
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
        int ret = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        for (; start < end; start += PAGE_SIZE) {
                struct page *p;
                int ret = get_user_pages_fast(start, 1, 0, &p);
                if (ret != 1)
                        return ret;
                if (bhv == MADV_SOFT_OFFLINE) {
                        printk(KERN_INFO "Soft offlining page %lx at %lx\n",
                               page_to_pfn(p), start);
                        ret = soft_offline_page(p, MF_COUNT_INCREASED);
                        if (ret)
                                break;
                        continue;
                }
                printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n",
                       page_to_pfn(p), start);
                /* Ignore return value for now */
                __memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
        }
        return ret;
}
#endif
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
                unsigned long start, unsigned long end, int behavior)
{
        switch (behavior) {
        case MADV_REMOVE:
                return madvise_remove(vma, prev, start, end);
        case MADV_WILLNEED:
                return madvise_willneed(vma, prev, start, end);
        case MADV_DONTNEED:
                return madvise_dontneed(vma, prev, start, end);
        default:
                return madvise_behavior(vma, prev, start, end, behavior);
        }
}
static int
madvise_behavior_valid(int behavior)
{
        switch (behavior) {
        case MADV_DOFORK:
        case MADV_DONTFORK:
        case MADV_NORMAL:
        case MADV_SEQUENTIAL:
        case MADV_RANDOM:
        case MADV_REMOVE:
        case MADV_WILLNEED:
        case MADV_DONTNEED:
#ifdef CONFIG_KSM
        case MADV_MERGEABLE:
        case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        case MADV_HUGEPAGE:
#endif
                return 1;

        default:
                return 0;
        }
}
/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *              results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *              on any access, since it is unlikely that the appli-
 *              cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *              once, so they can be aggressively read ahead, and
 *              can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *              some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *              so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *              pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *              typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *              this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *              "behavior" is not a valid value, or application
 *              is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *              mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
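/*
 * Minimal usage sketch (the file name and length are hypothetical):
 * start must be page-aligned and len_in is rounded up to a page
 * boundary, as enforced below; the advice itself may be ignored.
 *
 *      int fd = open("/var/data/big.db", O_RDONLY);
 *      size_t len = 1 << 20;   // hypothetical mapping length
 *      void *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *      if (p != MAP_FAILED && madvise(p, len, MADV_RANDOM) != 0)
 *              perror("madvise");      // advisory: failure is non-fatal here
 */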
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
        unsigned long end, tmp;
        struct vm_area_struct *vma, *prev;
        int unmapped_error = 0;
        int error = -EINVAL;
        int write;
        size_t len;
#ifdef CONFIG_MEMORY_FAILURE
        if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
                return madvise_hwpoison(behavior, start, start+len_in);
#endif
        if (!madvise_behavior_valid(behavior))
                return error;
        write = madvise_need_mmap_write(behavior);
        if (write)
                down_write(&current->mm->mmap_sem);
        else
                down_read(&current->mm->mmap_sem);
        if (start & ~PAGE_MASK)
                goto out;
        len = (len_in + ~PAGE_MASK) & PAGE_MASK;

        /* Check to see whether len was rounded up from small -ve to zero */
        if (len_in && !len)
                goto out;

        end = start + len;
        if (end < start)
                goto out;

        error = 0;
        if (end == start)
                goto out;
        /*
         * If the interval [start,end) covers some unmapped address
         * ranges, just ignore them, but return -ENOMEM at the end.
         * - different from the way of handling in mlock etc.
         */
        vma = find_vma_prev(current->mm, start, &prev);
        if (vma && start > vma->vm_start)
                prev = vma;
        for (;;) {
                /* Still start < end. */
                error = -ENOMEM;
                if (!vma)
                        goto out;

                /* Here start < (end|vma->vm_end). */
                if (start < vma->vm_start) {
                        unmapped_error = -ENOMEM;
                        start = vma->vm_start;
                        if (start >= end)
                                goto out;
                }

                /* Here vma->vm_start <= start < (end|vma->vm_end) */
                tmp = vma->vm_end;
                if (end < tmp)
                        tmp = end;

                /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
                error = madvise_vma(vma, &prev, start, tmp, behavior);
                if (error)
                        goto out;
                start = tmp;
                if (prev && start < prev->vm_end)
                        start = prev->vm_end;
                error = unmapped_error;
                if (start >= end)
                        goto out;
                if (prev)
                        vma = prev->vm_next;
                else    /* madvise_remove dropped mmap_sem */
                        vma = find_vma(current->mm, start);
        }
out:
        if (write)
                up_write(&current->mm->mmap_sem);
        else
                up_read(&current->mm->mmap_sem);

        return error;
}