/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	akpm@zip.com.au
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

struct backing_dev_info default_backing_dev_info = {
	.ra_pages	= (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
};

/*
 * Initialise a struct file's readahead state.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	memset(ra, 0, sizeof(*ra));
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
}

/*
 * Return max readahead size for this inode in number-of-pages.
 */
static inline unsigned long get_max_readahead(struct file_ra_state *ra)
{
	return ra->ra_pages;
}

static inline unsigned long get_min_readahead(struct file_ra_state *ra)
{
	return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
}
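
/*
 * Worked example (illustrative; assumes the common defaults of
 * VM_MAX_READAHEAD == 128 kbytes, VM_MIN_READAHEAD == 16 kbytes and a
 * 4096-byte PAGE_CACHE_SIZE): the default maximum window is
 * (128 * 1024) / 4096 = 32 pages and the minimum is (16 * 1024) / 4096 = 4
 * pages.
 */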

#define list_to_page(head) (list_entry((head)->prev, struct page, list))

/**
 * read_cache_pages - populate an address space with some pages, and
 *			start reads against them.
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	struct pagevec lru_pvec;
	int ret = 0;

	pagevec_init(&lru_pvec, 0);

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->list);
		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
			page_cache_release(page);
			continue;
		}
		ret = filler(data, page);
		if (!pagevec_add(&lru_pvec, page))
			__pagevec_lru_add(&lru_pvec);
		if (ret) {
			/* The filler failed: release the remaining pages. */
			while (!list_empty(pages)) {
				struct page *victim;

				victim = list_to_page(pages);
				list_del(&victim->list);
				page_cache_release(victim);
			}
			break;
		}
	}
	pagevec_lru_add(&lru_pvec);
	return ret;
}
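
/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * ->readpages() method can be built on read_cache_pages() by supplying a
 * filler callback that starts read I/O against one page.  The names
 * example_fill_page() and example_readpages() are hypothetical.
 */
#if 0
static int example_fill_page(void *data, struct page *page)
{
	struct file *filp = data;

	/* The page is already in pagecache and locked; start I/O on it. */
	return page->mapping->a_ops->readpage(filp, page);
}

static int example_readpages(struct file *filp, struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return read_cache_pages(mapping, pages, example_fill_page, filp);
}
#endif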

static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	unsigned page_idx;
	struct pagevec lru_pvec;

	if (mapping->a_ops->readpages)
		return mapping->a_ops->readpages(filp, mapping, pages, nr_pages);

	pagevec_init(&lru_pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);

		list_del(&page->list);
		if (!add_to_page_cache(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
			if (!pagevec_add(&lru_pvec, page))
				__pagevec_lru_add(&lru_pvec);
		} else {
			/* Already in pagecache: drop the extra reference. */
			page_cache_release(page);
		}
	}
	pagevec_lru_add(&lru_pvec);
	return 0;
}

/*
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 * start:	Page index at which we started the readahead
 * size:	Number of pages in that read
 *		Together, start and size form the "current window" - the
 *		readahead window.
 * next_size:	The number of pages to read on the next readahead miss.
 *		Has the magical value -1UL if readahead has been disabled.
 * prev_page:	The page which the readahead algorithm most-recently inspected.
 *		prev_page is mainly an optimisation: if page_cache_readahead
 *		sees that it is again being called for a page which it just
 *		looked at, it can return immediately without making any state
 *		changes.
 * ahead_start,
 * ahead_size:	Together, these form the "ahead window".
 * ra_pages:	The externally controlled max readahead for this fd.
 *
 * When readahead is in the "maximally shrunk" state (next_size == -1UL),
 * readahead is disabled.  In this state, prev_page and size are used, inside
 * handle_ra_miss(), to detect the resumption of sequential I/O.  Once there
 * has been a decent run of sequential I/O (defined by get_min_readahead),
 * readahead is reenabled.
 *
 * The readahead code manages two windows - the "current" and the "ahead"
 * windows.  The intent is that while the application is walking the pages
 * in the current window, I/O is underway on the ahead window.  When the
 * current window is fully traversed, it is replaced by the ahead window
 * and the ahead window is invalidated.  When this copying happens, the
 * new current window's pages are probably still locked.  When I/O has
 * completed, we submit a new batch of I/O, creating a new ahead window.
 *
 * ----|----------------|----------------|-----
 *     ^start           ^start+size
 *                      ^ahead_start     ^ahead_start+ahead_size
 *
 *                    ^ When this page is read, we submit I/O for the
 *                      ahead window.
 *
 * A `readahead hit' occurs when a read request is made against a page which is
 * inside the current window.  Hits are good, and the window size (next_size)
 * is grown aggressively when hits occur.  Two pages are added to the next
 * window size on each hit, which will end up doubling the next window size by
 * the time I/O is submitted for it.
 *
 * If readahead hits are more sparse (say, the application is only reading
 * every second page) then the window will build more slowly.
 *
 * On a readahead miss (the application seeked away) the readahead window is
 * shrunk by 25%.  We don't want to drop it too aggressively, because it is a
 * good assumption that an application which has built a good readahead window
 * will continue to perform linear reads.  Either at the new file position, or
 * at the old one after another seek.
 *
 * After enough misses, readahead is fully disabled (next_size = -1UL).
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to half of the
 * maximum readahead size.
 *
 * A page request at (start + size) is not a miss at all - it's just a part of
 * sequential file reading.
 *
 * This function is to be called for every page which is read, rather than when
 * it is time to perform readahead.  This is so the readahead algorithm can
 * centrally work out the access patterns.  This could be costly with many tiny
 * read()s, so we specifically optimise for that case with prev_page.
 */
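
/*
 * Illustrative trace (not from the original file; numbers assume a 32-page
 * maximum window): a sequential reader starts at page 0, so next_size is set
 * to 16 (max / 2) and a 16-page current window is submitted.  Every page read
 * inside that window is a hit and adds 2 to next_size, so by the time the
 * reader reaches the window's end and the ahead window is submitted, the new
 * window is roughly twice as large, until it is clamped at the 32-page
 * maximum.
 */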

/*
 * do_page_cache_readahead actually reads a chunk of disk.  It allocates all
 * the pages first, then submits them all for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages which actually had IO started against them.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			unsigned long offset, unsigned long nr_to_read)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;

	if (inode->i_size == 0)
		goto out;

	end_index = ((inode->i_size - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	spin_lock(&mapping->page_lock);
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		unsigned long page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		if (page)
			continue;	/* Already in pagecache */

		spin_unlock(&mapping->page_lock);
		page = page_cache_alloc_cold(mapping);
		spin_lock(&mapping->page_lock);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->list, &page_pool);
		ret++;
	}
	spin_unlock(&mapping->page_lock);

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
		unsigned long offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}
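
/*
 * Example (illustrative): with a 4096-byte PAGE_CACHE_SIZE each chunk is
 * (2 * 1024 * 1024) / 4096 = 512 pages, so a 1300-page readahead request is
 * issued as three __do_page_cache_readahead() calls of 512, 512 and 276
 * pages.
 */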

/*
 * Check how effective readahead is being.  If the amount of started IO is
 * less than expected then the file is partly or fully in pagecache and
 * readahead isn't helping.  Shrink the window.
 *
 * But don't shrink it too much - the application may read the same page
 * occasionally.
 */
static void
check_ra_success(struct file_ra_state *ra, pgoff_t attempt,
			pgoff_t actual, pgoff_t orig_next_size)
{
	if (actual == 0) {
		if (orig_next_size > 1) {
			ra->next_size = orig_next_size - 1;
			if (ra->ahead_size)
				ra->ahead_size = ra->next_size;
		} else {
			ra->next_size = -1UL;
			ra->size = 0;
		}
	}
}

/*
 * page_cache_readahead is the main function.  It performs the adaptive
 * readahead window size management and submits the readahead I/O.
 */
void
page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
			struct file *filp, unsigned long offset)
{
	unsigned max;
	unsigned min;
	unsigned orig_next_size;
	unsigned actual;

	/*
	 * Here we detect the case where the application is performing
	 * sub-page sized reads.  We avoid doing extra work and bogusly
	 * perturbing the readahead window expansion logic.
	 * If next_size is zero, this is the very first read for this
	 * file handle, or the window is maximally shrunk.
	 */
	if (offset == ra->prev_page) {
		if (ra->next_size != 0)
			goto out;
	}

	if (ra->next_size == -1UL)
		goto out;	/* Maximally shrunk */

	max = get_max_readahead(ra);
	if (max == 0)
		goto out;	/* No readahead */

	min = get_min_readahead(ra);
	orig_next_size = ra->next_size;

	if (ra->next_size == 0 && offset == 0) {
		/*
		 * Special case - first read from first page.
		 * We'll assume it's a whole-file read, and
		 * grow the window fast.
		 */
		ra->next_size = max / 2;
		goto do_io;
	}

	ra->prev_page = offset;

	if (offset >= ra->start && offset <= (ra->start + ra->size)) {
		/*
		 * A readahead hit.  Either inside the window, or one
		 * page beyond the end.  Expand the next readahead size.
		 */
		ra->next_size += 2;
	} else {
		/*
		 * A miss - lseek, pagefault, pread, etc.  Shrink the readahead
		 * window.
		 */
		ra->next_size -= 2;
	}

	if ((long)ra->next_size > (long)max)
		ra->next_size = max;
	if ((long)ra->next_size <= 0L) {
		ra->next_size = -1UL;
		ra->size = 0;
		goto out;		/* Readahead is off */
	}

	/*
	 * Is this request outside the current window?
	 */
	if (offset < ra->start || offset >= (ra->start + ra->size)) {
		/*
		 * A miss against the current window.  Have we merely
		 * advanced into the ahead window?
		 */
		if (offset == ra->ahead_start) {
			/*
			 * Yes, we have.  The ahead window now becomes
			 * the current window.
			 */
			ra->start = ra->ahead_start;
			ra->size = ra->ahead_size;
			ra->prev_page = ra->start;
			ra->ahead_start = 0;
			ra->ahead_size = 0;

			/*
			 * Control now returns, probably to sleep until I/O
			 * completes against the first ahead page.
			 * When the second page in the old ahead window is
			 * requested, control will return here and more I/O
			 * will be submitted to build the new ahead window.
			 */
			goto out;
		}
do_io:
		/*
		 * This is the "unusual" path.  We come here during
		 * startup or after an lseek.  We invalidate the
		 * ahead window and get some I/O underway for the new
		 * current window.
		 */
		ra->start = offset;
		ra->size = ra->next_size;
		ra->ahead_start = 0;		/* Invalidate these */
		ra->ahead_size = 0;
		actual = do_page_cache_readahead(mapping, filp, offset,
							ra->size);
		check_ra_success(ra, ra->size, actual, orig_next_size);
	} else {
		/*
		 * This read request is within the current window.  It is time
		 * to submit I/O for the ahead window while the application is
		 * crunching through the current window.
		 */
		if (ra->ahead_start == 0) {
			ra->ahead_start = ra->start + ra->size;
			ra->ahead_size = ra->next_size;
			actual = do_page_cache_readahead(mapping, filp,
					ra->ahead_start, ra->ahead_size);
			check_ra_success(ra, ra->ahead_size,
					actual, orig_next_size);
		}
	}
out:
	return;
}

/*
 * handle_ra_miss() is called when it is known that a page which should have
 * been present in the pagecache (we just did some readahead there) was in fact
 * not found.  This will happen if it was evicted by the VM (readahead
 * thrashing) or if the readahead window is maximally shrunk.
 *
 * If the window has been maximally shrunk (next_size == -1UL) then look to see
 * if we are getting misses against sequential file offsets.  If so, and this
 * persists, then resume readahead.
 *
 * Otherwise we're thrashing, so shrink the readahead window by three pages.
 * This is because it is grown by two pages on a readahead hit.  The theory is
 * that the readahead window size will stabilise around the maximum level at
 * which there is no thrashing.
 */
void handle_ra_miss(struct address_space *mapping,
		struct file_ra_state *ra, pgoff_t offset)
{
	if (ra->next_size == -1UL) {
		const unsigned long max = get_max_readahead(ra);

		if (offset != ra->prev_page + 1) {
			ra->size = 0;			/* Not sequential */
		} else {
			ra->size++;			/* A sequential read */
			if (ra->size >= max) {		/* Resume readahead */
				ra->start = offset - max;
				ra->next_size = max;
				ra->size = max;
				ra->ahead_start = 0;
				ra->ahead_size = 0;
			}
		}
		ra->prev_page = offset;
	} else {
		const unsigned long min = get_min_readahead(ra);

		ra->next_size -= 3;
		if (ra->next_size < min)
			ra->next_size = min;
	}
}

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long free;

	get_zone_counts(&active, &inactive, &free);
	return min(nr, (inactive + free) / 2);
}
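
/*
 * Illustrative sketch (not part of the original file): the generic pagecache
 * read path is the expected caller of the two entry points above.  For each
 * page index it lets page_cache_readahead() see the access, and it reports a
 * pagecache miss on a page we expected readahead to have populated via
 * handle_ra_miss().  The function example_read_loop() and its body are
 * hypothetical; the in-tree caller of this era is do_generic_mapping_read()
 * in mm/filemap.c.
 */
#if 0
static void example_read_loop(struct file *filp, struct address_space *mapping,
			struct file_ra_state *ra, pgoff_t index, pgoff_t last)
{
	while (index <= last) {
		struct page *page;

		/* Let the readahead state machine see every page we touch. */
		page_cache_readahead(mapping, ra, filp, index);

		page = find_get_page(mapping, index);
		if (page == NULL) {
			/* The expected page is gone: thrashing, or RA is off. */
			handle_ra_miss(mapping, ra, index);
			/* ...fall back to a synchronous ->readpage()... */
		} else {
			/* ...wait for it to come uptodate, copy it out... */
			page_cache_release(page);
		}
		index++;
	}
}
#endif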