/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

struct backing_dev_info default_backing_dev_info = {
        .ra_pages       = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
};

/*
 * Initialise a struct file's readahead state.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
        memset(ra, 0, sizeof(*ra));
        ra->ra_pages = mapping->backing_dev_info->ra_pages;
}

/*
 * Return max readahead size for this inode in number-of-pages.
 */
static inline unsigned long get_max_readahead(struct file_ra_state *ra)
{
        return ra->ra_pages;
}

static inline unsigned long get_min_readahead(struct file_ra_state *ra)
{
        return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
}

#define list_to_page(head) (list_entry((head)->prev, struct page, list))

/**
 * read_cache_pages - populate an address space with some pages, and
 *			start reads against them.
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                        int (*filler)(void *, struct page *), void *data)
{
        struct page *page;
        struct pagevec lru_pvec;
        int ret = 0;

        pagevec_init(&lru_pvec, 0);

        while (!list_empty(pages)) {
                page = list_to_page(pages);
                list_del(&page->list);
                if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
                        page_cache_release(page);
                        continue;
                }
                ret = filler(data, page);
                if (!pagevec_add(&lru_pvec, page))
                        __pagevec_lru_add(&lru_pvec);
                if (ret) {
                        /* Filler failed: drop the remaining target pages. */
                        while (!list_empty(pages)) {
                                struct page *victim;

                                victim = list_to_page(pages);
                                list_del(&victim->list);
                                page_cache_release(victim);
                        }
                        break;
                }
        }
        pagevec_lru_add(&lru_pvec);
        return ret;
}

static int read_pages(struct address_space *mapping, struct file *filp,
                struct list_head *pages, unsigned nr_pages)
{
        unsigned page_idx;
        struct pagevec lru_pvec;
        int ret = 0;

        if (mapping->a_ops->readpages) {
                ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
                goto out;
        }

        pagevec_init(&lru_pvec, 0);
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = list_to_page(pages);
                list_del(&page->list);
                if (!add_to_page_cache(page, mapping,
                                        page->index, GFP_KERNEL)) {
                        mapping->a_ops->readpage(filp, page);
                        if (!pagevec_add(&lru_pvec, page))
                                __pagevec_lru_add(&lru_pvec);
                } else {
                        page_cache_release(page);
                }
        }
        pagevec_lru_add(&lru_pvec);
out:
        return ret;
}

/*
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 * start:       Page index at which we started the readahead
 * size:        Number of pages in that read
 *              Together, start and size form the "current window" - the
 *              readahead window which was most recently submitted.
 * next_size:   The number of pages to read on the next readahead miss.
 *              Has the magical value -1UL if readahead has been disabled.
 * prev_page:   The page which the readahead algorithm most-recently inspected.
 *              prev_page is mainly an optimisation: if page_cache_readahead
 *              sees that it is again being called for a page which it just
 *              looked at, it can return immediately without making any state
 *              changes.
 * ahead_start,
 * ahead_size:  Together, these form the "ahead window".
 * ra_pages:    The externally controlled max readahead for this fd.
 *
 * When readahead is in the "maximally shrunk" state (next_size == -1UL),
 * readahead is disabled.  In this state, prev_page and size are used, inside
 * handle_ra_miss(), to detect the resumption of sequential I/O.  Once there
 * has been a decent run of sequential I/O (defined by get_min_readahead),
 * readahead is reenabled.
 *
 * The readahead code manages two windows - the "current" and the "ahead"
 * windows.  The intent is that while the application is walking the pages
 * in the current window, I/O is underway on the ahead window.  When the
 * current window is fully traversed, it is replaced by the ahead window
 * and the ahead window is invalidated.  When this copying happens, the
 * new current window's pages are probably still locked.  When I/O has
 * completed, we submit a new batch of I/O, creating a new ahead window.
 *
 *   ----|----------------|----------------|-----
 *       ^start           ^start+size
 *                        ^ahead_start     ^ahead_start+ahead_size
 *
 *         ^ When this page is read, we submit I/O for the
 *           ahead window.
 *
 * A `readahead hit' occurs when a read request is made against a page which is
 * inside the current window.  Hits are good, and the window size (next_size)
 * is grown aggressively when hits occur.  Two pages are added to the next
 * window size on each hit, which will end up doubling the next window size by
 * the time I/O is submitted for it.
 *
 * If readahead hits are more sparse (say, the application is only reading
 * every second page) then the window will build more slowly.
 *
 * On a readahead miss (the application seeked away) the readahead window is
 * shrunk by 25%.  We don't want to drop it too aggressively, because it is a
 * good assumption that an application which has built a good readahead window
 * will continue to perform linear reads, either at the new file position or
 * at the old one after another seek.
 *
 * After enough misses, readahead is fully disabled (next_size = -1UL).
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to half of the
 * device maximum.
 *
 * A page request at (start + size) is not a miss at all - it's just a part of
 * sequential file reading.
 *
 * page_cache_readahead is to be called for every page which is read, rather
 * than when it is time to perform readahead.  This is so the readahead
 * algorithm can centrally work out the access patterns.  This could be costly
 * with many tiny read()s, so we specifically optimise for that case with
 * prev_page.
 */
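
/*
 * Illustrative sketch only: roughly how a read path drives the two entry
 * points described above.  This is a simplification, not a copy, of the
 * generic file read loop; locking, uptodate handling and the data copy are
 * elided, and example_read_one_page() is not a real kernel function.
 */
static void example_read_one_page(struct address_space *mapping,
                struct file_ra_state *ra, struct file *filp,
                unsigned long index)
{
        struct page *page;

        /* Called for every page the application reads, hit or miss. */
        page_cache_readahead(mapping, ra, filp, index);

        page = find_get_page(mapping, index);
        if (!page) {
                /*
                 * A page which readahead should have brought in is missing:
                 * let the window logic shrink itself, or detect resumed
                 * sequential I/O if readahead is currently disabled.
                 */
                handle_ra_miss(mapping, ra, index);
                /* ...fall back to allocating and reading the page here... */
                return;
        }
        /* ...wait for the page to become uptodate and copy it out... */
        page_cache_release(page);
}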

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * all the pages first, then submits them all for I/O.  This avoids the very
 * bad behaviour which would occur if page allocations are causing VM
 * writeback.  We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages which actually had IO started against them.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        unsigned long offset, unsigned long nr_to_read)
{
        struct inode *inode = mapping->host;
        struct page *page;
        unsigned long end_index;        /* The last page we want to read */
        LIST_HEAD(page_pool);
        int page_idx;
        int ret = 0;
        loff_t isize = i_size_read(inode);

        if (isize == 0)
                goto out;

        end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

        /*
         * Preallocate as many pages as we will need.
         */
        spin_lock(&mapping->page_lock);
        for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
                unsigned long page_offset = offset + page_idx;

                if (page_offset > end_index)
                        break;

                page = radix_tree_lookup(&mapping->page_tree, page_offset);
                if (page)
                        continue;

                spin_unlock(&mapping->page_lock);
                page = page_cache_alloc_cold(mapping);
                spin_lock(&mapping->page_lock);
                if (!page)
                        break;
                page->index = page_offset;
                list_add(&page->list, &page_pool);
                ret++;
        }
        spin_unlock(&mapping->page_lock);

        /*
         * Now start the IO.  We ignore I/O errors - if the page is not
         * uptodate then the caller will launch readpage again, and
         * will then handle the error.
         */
        if (ret)
                read_pages(mapping, filp, &page_pool, ret);
        BUG_ON(!list_empty(&page_pool));
out:
        return ret;
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
                unsigned long offset, unsigned long nr_to_read)
{
        int ret = 0;

        if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
                return -EINVAL;

        while (nr_to_read) {
                int err;
                unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

                if (this_chunk > nr_to_read)
                        this_chunk = nr_to_read;
                err = __do_page_cache_readahead(mapping, filp,
                                                offset, this_chunk);
                if (err < 0) {
                        ret = err;
                        break;
                }
                ret += err;
                offset += this_chunk;
                nr_to_read -= this_chunk;
        }
        return ret;
}

/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        unsigned long offset, unsigned long nr_to_read)
{
        if (!bdi_read_congested(mapping->backing_dev_info))
                return __do_page_cache_readahead(mapping, filp,
                                                offset, nr_to_read);
        return -1;
}

/*
 * Check how effective readahead is being.  If the amount of started IO is
 * less than expected then the file is partly or fully in pagecache and
 * readahead isn't helping.  Shrink the window.
 *
 * But don't shrink it too much - the application may read the same page
 * occasionally.
 */
static inline void
check_ra_success(struct file_ra_state *ra, pgoff_t attempt,
                        pgoff_t actual, pgoff_t orig_next_size)
{
        if (actual == 0) {
                if (orig_next_size > 1) {
                        ra->next_size = orig_next_size - 1;
                        if (ra->ahead_size)
                                ra->ahead_size = ra->next_size;
                } else {
                        ra->next_size = -1UL;
                }
        }
}

/*
 * page_cache_readahead is the main function.  It performs the adaptive
 * readahead window size management and submits the readahead I/O.
 */
void
page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
                        struct file *filp, unsigned long offset)
{
        unsigned max;
        unsigned min;
        unsigned orig_next_size;
        unsigned actual;

        /*
         * Here we detect the case where the application is performing
         * sub-page sized reads.  We avoid doing extra work and bogusly
         * perturbing the readahead window expansion logic.
         * If next_size is zero, this is the very first read for this
         * file handle, or the window is maximally shrunk.
         */
        if (offset == ra->prev_page) {
                if (ra->next_size != 0)
                        goto out;
        }

        if (ra->next_size == -1UL)
                goto out;       /* Maximally shrunk */

        max = get_max_readahead(ra);
        if (max == 0)
                goto out;       /* No readahead */

        min = get_min_readahead(ra);
        orig_next_size = ra->next_size;

        if (ra->next_size == 0 && offset == 0) {
                /*
                 * Special case - first read from first page.
                 * We'll assume it's a whole-file read, and
                 * grow the window fast.
                 */
                ra->next_size = max / 2;
                goto do_io;
        }

        ra->prev_page = offset;

        if (offset >= ra->start && offset <= (ra->start + ra->size)) {
                /*
                 * A readahead hit.  Either inside the window, or one
                 * page beyond the end.  Expand the next readahead size.
                 */
                ra->next_size += 2;
        } else {
                /*
                 * A miss - lseek, pagefault, pread, etc.  Shrink the readahead
                 * window by 25%.
                 */
                ra->next_size -= ra->next_size / 4;
        }

        if ((long)ra->next_size > (long)max)
                ra->next_size = max;
        if ((long)ra->next_size <= 0L) {
                ra->next_size = -1UL;
                goto out;               /* Readahead is off */
        }

        /*
         * Is this request outside the current window?
         */
        if (offset < ra->start || offset >= (ra->start + ra->size)) {
                /*
                 * A miss against the current window.  Have we merely
                 * advanced into the ahead window?
                 */
                if (offset == ra->ahead_start) {
                        /*
                         * Yes, we have.  The ahead window now becomes
                         * the current window.
                         */
                        ra->start = ra->ahead_start;
                        ra->size = ra->ahead_size;
                        ra->prev_page = ra->start;
                        ra->ahead_start = 0;
                        ra->ahead_size = 0;

                        /*
                         * Control now returns, probably to sleep until I/O
                         * completes against the first ahead page.
                         * When the second page in the old ahead window is
                         * requested, control will return here and more I/O
                         * will be submitted to build the new ahead window.
                         */
                        goto out;
                }
do_io:
                /*
                 * This is the "unusual" path.  We come here during
                 * startup or after an lseek.  We invalidate the
                 * ahead window and get some I/O underway for the new
                 * current window.
                 */
                ra->start = offset;
                ra->size = ra->next_size;
                ra->ahead_start = 0;            /* Invalidate these */
                ra->ahead_size = 0;
                actual = do_page_cache_readahead(mapping, filp, offset,
                                                ra->size);
                check_ra_success(ra, ra->size, actual, orig_next_size);
        } else {
                /*
                 * This read request is within the current window.  It is time
                 * to submit I/O for the ahead window while the application is
                 * crunching through the current window.
                 */
                if (ra->ahead_start == 0) {
                        ra->ahead_start = ra->start + ra->size;
                        ra->ahead_size = ra->next_size;
                        actual = do_page_cache_readahead(mapping, filp,
                                        ra->ahead_start, ra->ahead_size);
                        check_ra_success(ra, ra->ahead_size,
                                        actual, orig_next_size);
                }
        }
out:
        return;
}

/*
 * handle_ra_miss() is called when it is known that a page which should have
 * been present in the pagecache (we just did some readahead there) was in fact
 * not found.  This will happen if it was evicted by the VM (readahead
 * thrashing) or if the readahead window is maximally shrunk.
 *
 * If the window has been maximally shrunk (next_size == -1UL) then look to see
 * if we are getting misses against sequential file offsets.  If so, and this
 * persists, then resume readahead.
 *
 * Otherwise we're thrashing, so shrink the readahead window by three pages.
 * This is because it is grown by two pages on a readahead hit.  The theory is
 * that the readahead window size will stabilise around the maximum level at
 * which there is no thrashing.
 */
void handle_ra_miss(struct address_space *mapping,
                struct file_ra_state *ra, pgoff_t offset)
{
        if (ra->next_size == -1UL) {
                const unsigned long max = get_max_readahead(ra);

                if (offset != ra->prev_page + 1) {
                        ra->size = 0;                   /* Not sequential */
                } else {
                        ra->size++;                     /* A sequential read */
                        if (ra->size >= max) {          /* Resume readahead */
                                ra->start = offset - max;
                                ra->next_size = max;
                                ra->size = max;
                                ra->ahead_start = 0;
                                ra->ahead_size = 0;
                        }
                }
                ra->prev_page = offset;
        } else {
                const unsigned long min = get_min_readahead(ra);

                ra->next_size -= 3;
                if (ra->next_size < min)
                        ra->next_size = min;
        }
}

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
        unsigned long active;
        unsigned long inactive;
        unsigned long free;

        get_zone_counts(&active, &inactive, &free);
        return min(nr, (inactive + free) / 2);
}
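
/*
 * Minimal usage sketch: approximately how the readahead(2) syscall path
 * combines the helpers above, clamping the user-supplied page count with
 * max_sane_readahead() before forcing the I/O.  example_do_readahead() is an
 * illustrative name, not the real kernel function, and the exact checks in
 * the real caller may differ.
 */
static int example_do_readahead(struct address_space *mapping,
                struct file *filp, unsigned long index, unsigned long nr)
{
        if (!mapping || !mapping->a_ops ||
            (!mapping->a_ops->readpage && !mapping->a_ops->readpages))
                return -EINVAL;

        force_page_cache_readahead(mapping, filp, index,
                                        max_sane_readahead(nr));
        return 0;
}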