/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

struct backing_dev_info default_backing_dev_info = {
	.ra_pages	= (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
};

/*
 * Initialise a struct file's readahead state.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	memset(ra, 0, sizeof(*ra));
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
}

/*
 * Return max readahead size for this inode in number-of-pages.
 */
static inline unsigned long get_max_readahead(struct file_ra_state *ra)
{
	return ra->ra_pages;
}

static inline unsigned long get_min_readahead(struct file_ra_state *ra)
{
	return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
}

/**
 * read_cache_pages - populate an address space with some pages, and
 *			start reads against them.
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
		int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	struct pagevec lru_pvec;
	int ret = 0;

	pagevec_init(&lru_pvec, 0);

	while (!list_empty(pages)) {
		page = list_entry(pages->prev, struct page, list);
		list_del(&page->list);
		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
			page_cache_release(page);
			continue;
		}
		ret = filler(data, page);
		if (!pagevec_add(&lru_pvec, page))
			__pagevec_lru_add(&lru_pvec);
		if (ret)
			break;
	}
	pagevec_lru_add(&lru_pvec);
	return ret;
}

static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	unsigned page_idx;
	struct pagevec lru_pvec;

	pagevec_init(&lru_pvec, 0);

	if (mapping->a_ops->readpages)
		return mapping->a_ops->readpages(filp, mapping, pages, nr_pages);

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, list);
		list_del(&page->list);
		if (!add_to_page_cache(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
			if (!pagevec_add(&lru_pvec, page))
				__pagevec_lru_add(&lru_pvec);
		} else {
			page_cache_release(page);
		}
	}
	pagevec_lru_add(&lru_pvec);
	return 0;
}

/*
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 * start:	Page index at which we started the readahead
 * size:	Number of pages in that read.
 *		Together, start and size represent the "current window".
 * next_size:	The number of pages to read on the next readahead miss.
 *		Has the magical value -1UL if readahead has been disabled.
 * prev_page:	The page which the readahead algorithm most-recently inspected.
 *		prev_page is mainly an optimisation: if page_cache_readahead
 *		sees that it is again being called for a page which it just
 *		looked at, it can return immediately without making any state
 *		changes.
 * ahead_start,
 * ahead_size:	Together, these form the "ahead window".
 * ra_pages:	The externally controlled max readahead for this fd.
 *
 * The readahead code manages two windows - the "current" and the "ahead"
 * windows.  The intent is that while the application is walking the pages
 * in the current window, I/O is underway on the ahead window.  When the
 * current window is fully traversed, it is replaced by the ahead window
 * and the ahead window is invalidated.  When this copying happens, the
 * new current window's pages are probably still locked.  When I/O has
 * completed, we submit a new batch of I/O, creating a new ahead window.
 *
 * So:
 *
 *   ----|----------------|----------------|-----
 *       ^start           ^start+size
 *                        ^ahead_start     ^ahead_start+ahead_size
 *
 *         ^ When this page is read, we submit I/O for the
 *           ahead window.
 *
 * A `readahead hit' occurs when a read request is made against a page which is
 * inside the current window.  Hits are good, and the window size (next_size)
 * is grown aggressively when hits occur.  Two pages are added to the next
 * window size on each hit, which will end up doubling the next window size by
 * the time I/O is submitted for it.
 *
 * If readahead hits are more sparse (say, the application is only reading
 * every second page) then the window will build more slowly.
 *
 * On a readahead miss (the application seeked away) the readahead window is
 * shrunk by 25%.  We don't want to drop it too aggressively, because it is a
 * good assumption that an application which has built a good readahead window
 * will continue to perform linear reads.  Either at the new file position, or
 * at the old one after another seek.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to half of the
 * device maximum.
 *
 * A page request at (start + size) is not a miss at all - it's just a part of
 * sequential file reading.
 *
 * This function is to be called for every page which is read, rather than when
 * it is time to perform readahead.  This is so the readahead algorithm can
 * centrally work out the access patterns.  This could be costly with many tiny
 * read()s, so we specifically optimise for that case with prev_page.
 */

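/*
 * Worked example (illustrative numbers, assuming a 32-page max readahead):
 * the first read at page 0 sets next_size to max/2 = 16, so the first
 * window covers pages 0-15.  Each hit inside that window adds 2 pages to
 * next_size, so by the time I/O is submitted for the ahead window the
 * window size has roughly doubled; a later seek shrinks next_size by 25%.
 */
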
/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * all the pages first, then submits them all for I/O.  This avoids the very
 * bad behaviour which would occur if page allocations are causing VM
 * writeback.  We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages which actually had IO started against them.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			unsigned long offset, unsigned long nr_to_read)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;

	if (inode->i_size == 0)
		goto out;

	end_index = ((inode->i_size - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	read_lock(&mapping->page_lock);
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		unsigned long page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		/* Don't bother allocating for pages already in pagecache */
		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		if (page)
			continue;

		read_unlock(&mapping->page_lock);
		page = page_cache_alloc_cold(mapping);
		read_lock(&mapping->page_lock);
		if (!page)
			break;

		page->index = page_offset;
		list_add(&page->list, &page_pool);
		ret++;
	}
	read_unlock(&mapping->page_lock);

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			unsigned long offset, unsigned long nr_to_read)
{
	int ret = 0;

	while (nr_to_read) {
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		ret = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk);
		if (ret < 0)
			break;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}

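/*
 * Example (assuming PAGE_CACHE_SIZE is 4096): this_chunk is 512 pages,
 * so a request to read ahead 1300 pages is issued as three calls of
 * 512, 512 and 276 pages, bounding the number of pages pinned at once.
 */
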
/*
 * Check how effective readahead is being.  If the amount of started IO is
 * less than expected then the file is partly or fully in pagecache and
 * readahead isn't helping.  Shrink the window.
 *
 * But don't shrink it too much - the application may read the same page
 * occasionally.
 */
static inline void
check_ra_success(struct file_ra_state *ra, pgoff_t attempt,
			pgoff_t actual, pgoff_t orig_next_size)
{
	if (actual == 0) {
		if (orig_next_size > 1) {
			ra->next_size = orig_next_size - 1;
			if (ra->ahead_size)
				ra->ahead_size = ra->next_size;
		} else {
			ra->next_size = -1UL;
		}
	}
}

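/*
 * For instance (illustrative): if a 32-page readahead attempt started no
 * IO at all because every page was already cached (actual == 0), next_size
 * drops to 31.  Once it has decayed to 1 and still nothing needs reading,
 * next_size becomes -1UL and readahead is switched off for this file.
 */
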
/*
 * page_cache_readahead is the main function.  It performs the adaptive
 * readahead window size management and submits the readahead I/O.
 */
void
page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
			struct file *filp, unsigned long offset)
{
	unsigned max;
	unsigned min;
	unsigned orig_next_size;
	unsigned actual;

	/*
	 * Here we detect the case where the application is performing
	 * sub-page sized reads.  We avoid doing extra work and bogusly
	 * perturbing the readahead window expansion logic.
	 * If next_size is zero, this is the very first read for this
	 * file handle, or the window is maximally shrunk.
	 */
	if (offset == ra->prev_page) {
		if (ra->next_size != 0)
			goto out;
	}

	if (ra->next_size == -1UL)
		goto out;	/* Maximally shrunk */

	max = get_max_readahead(ra);
	if (max == 0)
		goto out;	/* No readahead */
	min = get_min_readahead(ra);
	orig_next_size = ra->next_size;

	if (ra->next_size == 0 && offset == 0) {
		/*
		 * Special case - first read from first page.
		 * We'll assume it's a whole-file read, and
		 * grow the window fast.
		 */
		ra->next_size = max / 2;
		goto do_io;
	}

	ra->prev_page = offset;

	if (offset >= ra->start && offset <= (ra->start + ra->size)) {
		/*
		 * A readahead hit.  Either inside the window, or one
		 * page beyond the end.  Expand the next readahead size.
		 */
		ra->next_size += 2;
	} else {
		/*
		 * A miss - lseek, pread, etc.  Shrink the readahead
		 * window by 25%.
		 */
		ra->next_size -= ra->next_size / 4;
	}

	if (ra->next_size > max)
		ra->next_size = max;
	if (ra->next_size < min)
		ra->next_size = min;

	/*
	 * Is this request outside the current window?
	 */
	if (offset < ra->start || offset >= (ra->start + ra->size)) {
		/*
		 * A miss against the current window.  Have we merely
		 * advanced into the ahead window?
		 */
		if (offset == ra->ahead_start) {
			/*
			 * Yes, we have.  The ahead window now becomes
			 * the current window.
			 */
			ra->start = ra->ahead_start;
			ra->size = ra->ahead_size;
			ra->prev_page = ra->start;
			ra->ahead_start = 0;
			ra->ahead_size = 0;

			/*
			 * Control now returns, probably to sleep until I/O
			 * completes against the first ahead page.
			 * When the second page in the old ahead window is
			 * requested, control will return here and more I/O
			 * will be submitted to build the new ahead window.
			 */
			goto out;
		}
do_io:
		/*
		 * This is the "unusual" path.  We come here during
		 * startup or after an lseek.  We invalidate the
		 * ahead window and get some I/O underway for the new
		 * current window.
		 */
		ra->start = offset;
		ra->size = ra->next_size;
		ra->ahead_start = 0;		/* Invalidate these */
		ra->ahead_size = 0;
		actual = do_page_cache_readahead(mapping, filp, offset,
						ra->size);
		check_ra_success(ra, ra->size, actual, orig_next_size);
	} else {
		/*
		 * This read request is within the current window.  It is time
		 * to submit I/O for the ahead window while the application is
		 * crunching through the current window.
		 */
		if (ra->ahead_start == 0) {
			ra->ahead_start = ra->start + ra->size;
			ra->ahead_size = ra->next_size;
			actual = do_page_cache_readahead(mapping, filp,
					ra->ahead_start, ra->ahead_size);
			check_ra_success(ra, ra->ahead_size,
					actual, orig_next_size);
		}
	}
out:
	return;
}

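/*
 * Usage sketch (illustrative, simplified from a typical caller such as
 * the generic file read path).  page_cache_readahead() is called for
 * every page before it is looked up; a lookup failure in a region we
 * just did readahead against is then reported via handle_ra_miss():
 *
 *	page_cache_readahead(mapping, &file->f_ra, file, index);
 *	page = find_get_page(mapping, index);
 *	if (page == NULL)
 *		handle_ra_miss(mapping, &file->f_ra);
 */
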
/*
 * For mmap reads (typically executables) the access pattern is fairly random,
 * but somewhat ascending.  So readaround favours pages beyond the target one.
 * We also boost the window size, as it can easily shrink due to misses.
 */
void
page_cache_readaround(struct address_space *mapping, struct file_ra_state *ra,
			struct file *filp, unsigned long offset)
{
	if (ra->next_size != -1UL) {
		const unsigned long min = get_min_readahead(ra) * 2;
		unsigned long target;
		unsigned long backward;

		/*
		 * If next_size is zero then leave it alone, because that's a
		 * readahead startup state.
		 */
		if (ra->next_size && ra->next_size < min)
			ra->next_size = min;

		target = offset;
		backward = ra->next_size / 4;

		if (backward > target)
			target = 0;
		else
			target -= backward;
		page_cache_readahead(mapping, ra, filp, target);
	}
}

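/*
 * Worked example (illustrative): a fault at page 100 with next_size 32
 * gives backward = 8, so readahead is started at page 92 - one quarter
 * of the window behind the target page, the rest at or beyond it.
 */
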
/*
 * handle_ra_miss() is called when it is known that a page which should have
 * been present in the pagecache (we just did some readahead there) was in fact
 * not found.  This will happen if it was evicted by the VM (readahead
 * thrashing) or if the readahead window is maximally shrunk.
 *
 * If the window has been maximally shrunk (next_size == -1UL) then bump it up
 * again to resume readahead.
 *
 * Otherwise we're thrashing, so shrink the readahead window by three pages.
 * This is because it is grown by two pages on a readahead hit.  Theory being
 * that the readahead window size will stabilise around the maximum level at
 * which there is no thrashing.
 */
void handle_ra_miss(struct address_space *mapping, struct file_ra_state *ra)
{
	const unsigned long min = get_min_readahead(ra);

	if (ra->next_size == -1UL) {
		ra->next_size = min;
	} else {
		ra->next_size -= 3;
		if (ra->next_size < min)
			ra->next_size = min;
	}
}

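/*
 * Numeric intuition (illustrative): the window grows by 2 pages per hit
 * and shrinks by 3 per thrashed page, so three hits (adding 6 pages)
 * balance two misses (removing 6), pinning the window size near the
 * largest value at which eviction is not yet occurring.
 */
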
/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	unsigned long active;
	unsigned long inactive;

	get_zone_counts(&active, &inactive);
	return min(nr, inactive / 2);
}

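/*
 * Usage sketch (illustrative): a caller implementing a readahead syscall
 * might clamp the request before issuing it:
 *
 *	nr_to_read = max_sane_readahead(nr_to_read);
 *	do_page_cache_readahead(mapping, filp, index, nr_to_read);
 *
 * Capping at half the inactive list keeps one request from flooding the
 * pagecache and evicting everything else.
 */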