/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	akpm@zip.com.au
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);
/*
 * Convenient macros for min/max read-ahead pages.
 * Note that MAX_RA_PAGES is rounded down, while MIN_RA_PAGES is rounded up.
 * The latter is necessary for systems with large page size (i.e. 64k).
 */
#define MAX_RA_PAGES	(VM_MAX_READAHEAD*1024 / PAGE_CACHE_SIZE)
#define MIN_RA_PAGES	DIV_ROUND_UP(VM_MIN_READAHEAD*1024, PAGE_CACHE_SIZE)
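
/*
 * Worked example, assuming the usual VM_MAX_READAHEAD of 128 and
 * VM_MIN_READAHEAD of 16 (both in kbytes, from <linux/mm.h>):
 *
 *	4k pages:	MAX_RA_PAGES = 128*1024/4096  = 32 pages
 *			MIN_RA_PAGES = ceil(16*1024/4096)  = 4 pages
 *	64k pages:	MAX_RA_PAGES = 128*1024/65536 = 2 pages
 *			MIN_RA_PAGES = ceil(16*1024/65536) = 1 page
 *
 * Rounding MIN_RA_PAGES down would give 0 on the 64k configuration and
 * disable the minimum entirely, hence DIV_ROUND_UP.
 */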
struct backing_dev_info default_backing_dev_info = {
	.ra_pages	= MAX_RA_PAGES,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);
/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_page = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
/*
 * Return max readahead size for this inode in number-of-pages.
 */
static inline unsigned long get_max_readahead(struct file_ra_state *ra)
{
	return ra->ra_pages;
}

static inline unsigned long get_min_readahead(struct file_ra_state *ra)
{
	return MIN_RA_PAGES;
}
static inline void reset_ahead_window(struct file_ra_state *ra)
{
	/*
	 * ... but preserve ahead_start + ahead_size value,
	 * see 'recheck:' label in page_cache_readahead().
	 * Note: We never use ->ahead_size as rvalue without
	 * checking ->ahead_start != 0 first.
	 */
	ra->ahead_size += ra->ahead_start;
	ra->ahead_start = 0;
}
static inline void ra_off(struct file_ra_state *ra)
{
	ra->start = 0;
	ra->flags = 0;
	ra->size = 0;
	reset_ahead_window(ra);
}
/*
 * Set the initial window size: round the request up to the next power of 2,
 * then scale it up, x4 for small requests and x2 for medium ones, clamping
 * large requests to the max readahead.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;
	return newsize;
}
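
/*
 * For example, with a 32-page max (128k with 4k pages):
 *
 *	requested size 1:  rounds to 1  <= 32/32, initial window 1*4 = 4 pages
 *	requested size 5:  rounds to 8  <= 32/4,  initial window 8*2 = 16 pages
 *	requested size 12: rounds to 16 >  32/4,  initial window     = 32 pages
 */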
/*
 * Set the new window size.  This is called only when I/O is to be submitted,
 * not for each call to readahead.  If a cache miss occurred, reduce the next
 * I/O size, else increase it depending on how close to max we are.
 */
static inline unsigned long get_next_ra_size(struct file_ra_state *ra)
{
	unsigned long max = get_max_readahead(ra);
	unsigned long min = get_min_readahead(ra);
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (ra->flags & RA_FLAG_MISS) {
		ra->flags &= ~RA_FLAG_MISS;
		newsize = max((cur - 2), min);
	} else if (cur < max / 16) {
		newsize = 4 * cur;
	} else {
		newsize = 2 * cur;
	}
	return min(newsize, max);
}
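
/*
 * Example, with max = 32 and min = 4: a 1-page window is below
 * max / 16 = 2 pages, so it quadruples to 4; after that each submission
 * doubles it, 4 -> 8 -> 16 -> 32, capped at max.  A cache miss at
 * cur = 8 shrinks the next window to max(8 - 2, 4) = 6 pages.
 */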
#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	struct pagevec lru_pvec;
	int ret = 0;

	pagevec_init(&lru_pvec, 0);

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		/* Page may already be present: drop our copy and move on */
		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
			page_cache_release(page);
			continue;
		}
		ret = filler(data, page);
		if (!pagevec_add(&lru_pvec, page))
			__pagevec_lru_add(&lru_pvec);
		if (ret) {
			put_pages_list(pages);
			break;
		}
		task_io_account_read(PAGE_CACHE_SIZE);
	}
	pagevec_lru_add(&lru_pvec);
	return ret;
}
EXPORT_SYMBOL(read_cache_pages);
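
/*
 * A minimal usage sketch (the filler and its private cookie below are
 * hypothetical, not part of this file): a filesystem hands its page list
 * plus a private read descriptor to this helper and only has to fill
 * each page.
 *
 *	static int my_filler(void *data, struct page *page)
 *	{
 *		struct my_read_desc *desc = data;	(hypothetical type)
 *		return my_fs_read_page(desc, page);	(hypothetical helper)
 *	}
 *
 *	err = read_cache_pages(mapping, &page_list, my_filler, &desc);
 *
 * read_cache_pages() handles pagecache insertion and the LRU; duplicate
 * pages are simply dropped.
 */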
static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	unsigned page_idx;
	struct pagevec lru_pvec;
	int ret;

	/* Fast path: let the filesystem read the whole batch at once */
	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	/* Fall back to one ->readpage() call per page */
	pagevec_init(&lru_pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
			if (!pagevec_add(&lru_pvec, page))
				__pagevec_lru_add(&lru_pvec);
		} else
			page_cache_release(page);
	}
	pagevec_lru_add(&lru_pvec);
	ret = 0;
out:
	return ret;
}
/*
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 * start:	Page index at which we started the readahead
 * size:	Number of pages in that read
 *		Together, start and size form the "current window".
 * prev_page:	The page which the readahead algorithm most-recently inspected.
 *		It is mainly used to detect sequential file reading.
 *		If page_cache_readahead sees that it is again being called for
 *		a page which it just looked at, it can return immediately
 *		without making any state changes.
 * ahead_start,
 * ahead_size:	Together, these form the "ahead window".
 * ra_pages:	The externally controlled max readahead for this fd.
 *
 * When readahead is in the off state (size == 0), readahead is disabled.
 * In this state, prev_page is used to detect the resumption of sequential I/O.
 *
 * The readahead code manages two windows - the "current" and the "ahead"
 * windows.  The intent is that while the application is walking the pages
 * in the current window, I/O is underway on the ahead window.  When the
 * current window is fully traversed, it is replaced by the ahead window
 * and the ahead window is invalidated.  When this copying happens, the
 * new current window's pages are probably still locked.  So
 * we submit a new batch of I/O immediately, creating a new ahead window.
 *
 * So:
 *
 *   ----|----------------|----------------|-----
 *       ^start           ^start+size
 *                        ^ahead_start     ^ahead_start+ahead_size
 *
 *         ^ When this page is read, we submit I/O for the
 *           ahead window.
 *
 * A `readahead hit' occurs when a read request is made against a page which is
 * the next sequential page.  Ahead window calculations are done only when it
 * is time to submit a new I/O.  The code ramps up the size aggressively at
 * first, but slows down as it approaches max_readahead.
 *
 * Any seek/random I/O will result in readahead being turned off.  It will
 * resume at the first sequential access.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max readahead.
 *
 * This function is to be called for every read request, rather than when
 * it is time to perform readahead.  It is called only once for the entire I/O
 * regardless of size unless readahead is unable to start enough I/O to satisfy
 * the request (I/O request > max_readahead).
 */
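
/*
 * Concretely: with start = 0, size = 16, ahead_start = 16 and
 * ahead_size = 32, the application walks pages 0-15 while I/O on pages
 * 16-47 is already in flight.  Once the read pointer crosses page 16,
 * the current window becomes {16, 32} and a new ahead window starting
 * at page 48 is submitted, keeping the disk continuously busy.
 */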
/*
 * do_page_cache_readahead actually reads a chunk of disk.  It allocates all
 * the pages first, then submits them all for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 *
 * do_page_cache_readahead() returns -1 if it encountered request queue
 * congestion.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	read_lock_irq(&mapping->tree_lock);
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		/* Skip pages which are already present */
		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		if (page)
			continue;

		read_unlock_irq(&mapping->tree_lock);
		page = page_cache_alloc_cold(mapping);
		read_lock_irq(&mapping->tree_lock);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		ret++;
	}
	read_unlock_irq(&mapping->tree_lock);

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}
/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}
/*
 * Check how effective readahead is being.  If the amount of started IO is
 * less than expected then the file is partly or fully in pagecache and
 * readahead isn't helping.
 */
static inline int check_ra_success(struct file_ra_state *ra,
			unsigned long nr_to_read, unsigned long actual)
{
	if (actual == 0) {
		ra->cache_hit += nr_to_read;
		if (ra->cache_hit >= VM_MAX_CACHE_HIT) {
			ra_off(ra);
			ra->flags |= RA_FLAG_INCACHE;
			return 0;
		}
	} else {
		ra->cache_hit = 0;
	}
	return 1;
}
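
/*
 * Example: a sequential reader whose file is entirely cached starts no
 * new I/O, so 'actual' is 0 and cache_hit keeps accumulating.  Assuming
 * the usual VM_MAX_CACHE_HIT of 256, after 256 pages worth of such
 * no-op requests readahead is switched off and RA_FLAG_INCACHE is set;
 * a single request that does start I/O resets the counter.
 */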
/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	if (bdi_read_congested(mapping->backing_dev_info))
		return -1;

	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read);
}
/*
 * Read 'nr_to_read' pages starting at page 'offset'.  If the flag 'block'
 * is set, wait till the read completes; otherwise attempt to read without
 * blocking.
 * Returns 1 meaning 'success' if the read is successful without switching
 * off readahead mode.  Otherwise return failure.
 */
static int
blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			struct file_ra_state *ra, int block)
{
	int actual;

	if (!block && bdi_read_congested(mapping->backing_dev_info))
		return 0;

	actual = __do_page_cache_readahead(mapping, filp, offset, nr_to_read);

	return check_ra_success(ra, nr_to_read, actual);
}
static int make_ahead_window(struct address_space *mapping, struct file *filp,
				struct file_ra_state *ra, int force)
{
	int block, ret;

	ra->ahead_size = get_next_ra_size(ra);
	ra->ahead_start = ra->start + ra->size;

	block = force || (ra->prev_page >= ra->ahead_start);
	ret = blockable_page_cache_readahead(mapping, filp,
			ra->ahead_start, ra->ahead_size, ra, block);

	if (!ret && !force) {
		/* A read failure in blocking mode implies that the pages
		 * are all cached, so we can safely assume we have taken
		 * care of all the pages requested in this call.
		 * A read failure in non-blocking mode implies that we are
		 * reading more pages than requested in this call, so
		 * we safely assume we have taken care of all the pages
		 * requested in this call.
		 *
		 * Just reset the ahead window in case we failed due to
		 * congestion.  The ahead window will anyway be closed
		 * in case we failed due to excessive page cache hits.
		 */
		reset_ahead_window(ra);
	}

	return ret;
}
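
/*
 * Example: with a current window {8, 16}, the new ahead window starts at
 * page 24.  If the reader has already consumed past page 24
 * (prev_page >= ahead_start) we are late and must block to catch up;
 * otherwise the ahead I/O can be submitted without blocking.
 */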
/**
 * page_cache_readahead - generic adaptive readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in PAGE_CACHE_SIZE units
 * @req_size: hint: total size of the read which the caller is performing in
 *            PAGE_CACHE_SIZE units
 *
 * page_cache_readahead() is the main function.  It performs the adaptive
 * readahead window size management and submits the readahead I/O.
 *
 * Note that @filp is purely used for passing on to the ->readpage[s]()
 * handler: it may refer to a different file from @mapping (so we may not use
 * @filp->f_mapping or @filp->f_path.dentry->d_inode here).
 * Also, @ra may not be equal to &@filp->f_ra.
 */
unsigned long
page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
		     struct file *filp, pgoff_t offset, unsigned long req_size)
{
	unsigned long max, newsize;
	int sequential;

	/*
	 * We avoid doing extra work and bogusly perturbing the readahead
	 * window expansion logic.
	 */
	if (offset == ra->prev_page && --req_size)
		++offset;

	/* Note that prev_page == -1 if it is a first read */
	sequential = (offset == ra->prev_page + 1);
	ra->prev_page = offset;

	max = get_max_readahead(ra);
	newsize = min(req_size, max);

	/* No readahead or sub-page sized read or file already in cache */
	if (newsize == 0 || (ra->flags & RA_FLAG_INCACHE))
		goto out;

	ra->prev_page += newsize - 1;

	/*
	 * Special case - first read at start of file.  We'll assume it's
	 * a whole-file read and grow the window fast.  Or detect first
	 * sequential access.
	 */
	if (sequential && ra->size == 0) {
		ra->size = get_init_ra_size(newsize, max);
		ra->start = offset;
		if (!blockable_page_cache_readahead(mapping, filp, offset,
							ra->size, ra, 1))
			goto out;

		/*
		 * If the request size is larger than our max readahead, we
		 * at least want to be sure that we get 2 IOs in flight and
		 * we know that we will definitely need the new I/O.
		 * Once we do this, subsequent calls should be able to overlap
		 * IOs, thus preventing stalls.  So issue the ahead window
		 * immediately.
		 */
		if (req_size >= max)
			make_ahead_window(mapping, filp, ra, 1);

		goto out;
	}

	/*
	 * Now handle the random case:
	 * partial page reads and first access were handled above,
	 * so this must be the next page otherwise it is random
	 */
	if (!sequential) {
		ra_off(ra);
		blockable_page_cache_readahead(mapping, filp, offset,
						newsize, ra, 1);
		goto out;
	}

	/*
	 * If we get here we are doing sequential IO and this was not the first
	 * occurrence (ie we have an existing window)
	 */
	if (ra->ahead_start == 0) {		/* no ahead window yet */
		if (!make_ahead_window(mapping, filp, ra, 0))
			goto recheck;
	}

	/*
	 * Already have an ahead window, check if we crossed into it.
	 * If so, shift windows and issue a new ahead window.
	 * Only return the #pages that are in the current window, so that
	 * we get called back on the first page of the ahead window which
	 * will allow us to submit more IO.
	 */
	if (ra->prev_page >= ra->ahead_start) {
		ra->start = ra->ahead_start;
		ra->size = ra->ahead_size;
		make_ahead_window(mapping, filp, ra, 0);
recheck:
		/* prev_page shouldn't overrun the ahead window */
		ra->prev_page = min(ra->prev_page,
			ra->ahead_start + ra->ahead_size - 1);
	}

out:
	return ra->prev_page + 1;
}
EXPORT_SYMBOL_GPL(page_cache_readahead);
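
/*
 * Illustrative trace, assuming max = 32 pages, a fresh ra (size == 0,
 * prev_page == -1) and an application reading 4 pages per call from
 * offset 0:
 *
 *	call at page 0:	first sequential read; current window = {0, 8}
 *			(get_init_ra_size(4, 32) == 8), pages 0-7 read
 *	call at page 4:	inside the current window; ahead window {8, 16}
 *			submitted without blocking
 *	call at page 8:	crossed into the ahead window; windows shift to
 *			current = {8, 16}, new ahead window {24, 32}
 */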
/*
 * handle_ra_miss() is called when it is known that a page which should have
 * been present in the pagecache (we just did some readahead there) was in
 * fact not found.  This will happen if it was evicted by the VM (readahead
 * thrashing).
 *
 * Turn on the cache miss flag in the RA struct; this will cause the RA code
 * to reduce the RA size on the next read.
 */
void handle_ra_miss(struct address_space *mapping,
		struct file_ra_state *ra, pgoff_t offset)
{
	ra->flags |= RA_FLAG_MISS;
	ra->flags &= ~RA_FLAG_INCACHE;
	ra->cache_hit = 0;
}
/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long free;

	__get_zone_counts(&active, &inactive, &free, NODE_DATA(numa_node_id()));
	return min(nr, (inactive + free) / 2);
}
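
/*
 * For example, if the local node has 300 inactive and 700 free pages,
 * a request for 4096 readahead pages is clamped to (300 + 700) / 2 = 500,
 * leaving at least half of the readily-usable memory untouched.
 */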