/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002    akpm@zip.com.au
 *              Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
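/*
 * Default unplug callback for a backing_dev_info.  It is intentionally a
 * no-op; block devices that need to be kicked to start queued I/O typically
 * install their own unplug_io_fn instead.
 */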
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

struct backing_dev_info default_backing_dev_info = {
        .ra_pages       = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
        .state          = 0,
        .unplug_io_fn   = default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);
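/*
 * Illustrative arithmetic (not from the original source): VM_MAX_READAHEAD
 * is a size in kbytes (128 in mainline 2.6 kernels of this era, if memory
 * serves), so with 4KB pages the default works out to
 * (128 * 1024) / 4096 = 32 pages, i.e. 128KB of readahead per open file.
 */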
/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
        ra->ra_pages = mapping->backing_dev_info->ra_pages;
        ra->average = ra->ra_pages / 2;
}
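/*
 * Illustrative note (not from the original source): with the 32-page
 * default computed above, a freshly opened file starts out with
 * ra_pages = 32 and average = 16.
 */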
/*
 * Return max readahead size for this inode in number-of-pages.
 */
static inline unsigned long get_max_readahead(struct file_ra_state *ra)
{
        return ra->ra_pages;
}

static inline unsigned long get_min_readahead(struct file_ra_state *ra)
{
        return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
}

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
/**
 * read_cache_pages - populate an address space with some pages, and
 *                    start reads against them.
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                        int (*filler)(void *, struct page *), void *data)
{
        struct page *page;
        struct pagevec lru_pvec;
        int ret = 0;

        pagevec_init(&lru_pvec, 0);

        while (!list_empty(pages)) {
                page = list_to_page(pages);
                list_del(&page->lru);
                if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
                        page_cache_release(page);
                        continue;
                }
                ret = filler(data, page);
                if (!pagevec_add(&lru_pvec, page))
                        __pagevec_lru_add(&lru_pvec);
                if (ret) {
                        while (!list_empty(pages)) {
                                struct page *victim;

                                victim = list_to_page(pages);
                                list_del(&victim->lru);
                                page_cache_release(victim);
                        }
                        break;
                }
        }
        pagevec_lru_add(&lru_pvec);
        return ret;
}

EXPORT_SYMBOL(read_cache_pages);
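/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * ->readpages() method can hand its page list to read_cache_pages() with a
 * filler callback.  The "myfs_" names are hypothetical; NFS is, as far as I
 * know, the in-tree user of this helper in this kernel.
 *
 *      static int myfs_filler(void *data, struct page *page)
 *      {
 *              struct file *file = data;
 *
 *              return myfs_readpage(file, page);
 *      }
 *
 *      static int myfs_readpages(struct file *file,
 *                      struct address_space *mapping,
 *                      struct list_head *pages, unsigned nr_pages)
 *      {
 *              return read_cache_pages(mapping, pages, myfs_filler, file);
 *      }
 *
 * A real filler reads the page and unlocks it once the data is in.
 * read_pages() below is the readahead-internal counterpart: it prefers the
 * address_space's ->readpages() method and falls back to calling
 * ->readpage() on each page individually.
 */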
static int read_pages(struct address_space *mapping, struct file *filp,
                struct list_head *pages, unsigned nr_pages)
{
        unsigned page_idx;
        struct pagevec lru_pvec;
        int ret = 0;

        if (mapping->a_ops->readpages) {
                ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
                goto out;
        }

        pagevec_init(&lru_pvec, 0);
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = list_to_page(pages);
                list_del(&page->lru);
                if (!add_to_page_cache(page, mapping,
                                        page->index, GFP_KERNEL)) {
                        mapping->a_ops->readpage(filp, page);
                        if (!pagevec_add(&lru_pvec, page))
                                __pagevec_lru_add(&lru_pvec);
                } else {
                        page_cache_release(page);
                }
        }
        pagevec_lru_add(&lru_pvec);
out:
        return ret;
}
/*
 * Readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 * start:       Page index at which we started the readahead
 * size:        Number of pages in that read
 *              Together, start and size form the "current window"
 *              (also called the `readahead window').
 * next_size:   The number of pages to read on the next readahead miss.
 *              Has the magical value -1UL if readahead has been disabled.
 * prev_page:   The page which the readahead algorithm most-recently inspected.
 *              prev_page is mainly an optimisation: if page_cache_readahead
 *              sees that it is again being called for a page which it just
 *              looked at, it can return immediately without making any state
 *              changes.
 * ahead_start,
 * ahead_size:  Together, these form the "ahead window".
 * ra_pages:    The externally controlled max readahead for this fd.
 *
 * When readahead is in the "maximally shrunk" state (next_size == -1UL),
 * readahead is disabled.  In this state, prev_page and size are used, inside
 * handle_ra_miss(), to detect the resumption of sequential I/O.  Once there
 * has been a decent run of sequential I/O (defined by get_min_readahead),
 * readahead is reenabled.
 *
 * The readahead code manages two windows - the "current" and the "ahead"
 * windows.  The intent is that while the application is walking the pages
 * in the current window, I/O is underway on the ahead window.  When the
 * current window is fully traversed, it is replaced by the ahead window
 * and the ahead window is invalidated.  When this copying happens, the
 * new current window's pages are probably still locked.  When I/O has
 * completed, we submit a new batch of I/O, creating a new ahead window.
 *
 * So:
 *
 *  ----|----------------|----------------|-----
 *      ^start           ^start+size
 *                       ^ahead_start     ^ahead_start+ahead_size
 *
 *          ^ When this page is read, we submit I/O for the
 *            ahead window.
 *
 * A `readahead hit' occurs when a read request is made against a page which is
 * inside the current window.  Hits are good, and the window size (next_size)
 * is grown aggressively when hits occur.  Two pages are added to the next
 * window size on each hit, which will end up doubling the next window size by
 * the time I/O is submitted for it.
 *
 * If readahead hits are more sparse (say, the application is only reading
 * every second page) then the window will build more slowly.
 *
 * On a readahead miss (the application seeked away) the readahead window is
 * shrunk by two pages.  We don't want to drop it too aggressively, because it
 * is a good assumption that an application which has built a good readahead
 * window will continue to perform linear reads, either at the new file
 * position or at the old one after another seek.
 *
 * After enough misses, readahead is fully disabled (next_size = -1UL).
 *
 * There is a special-case: the first read against a file handle is assumed
 * to be the start of a linear read, and the window is immediately set to
 * half of the device maximum.
 *
 * A page request at (start + size) is not a miss at all - it's just a part of
 * sequential file reading.
 *
 * This function is to be called for every page which is read, rather than when
 * it is time to perform readahead.  This is so the readahead algorithm can
 * centrally work out the access patterns.  This could be costly with many tiny
 * read()s, so we specifically optimise for that case with prev_page.
 */
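/*
 * Worked example (illustrative, not from the original source; assumes 4KB
 * pages and a 128KB maximum, i.e. max = 32 pages): the first read of a file
 * descriptor at offset 0 sets next_size to max/2 = 16 and reads pages 0-15
 * as the current window.  Each subsequent hit adds 2 to next_size, so the
 * ahead window that eventually gets submitted is roughly twice the size of
 * the current one, capped at 32 pages.  Each miss takes 2 off again, and
 * once next_size reaches zero readahead is switched off (next_size = -1UL).
 */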
/*
 * do_page_cache_readahead actually reads a chunk of disk.  It allocates all
 * the pages first, then submits them all for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages which actually had IO started against them.
 */
static inline int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        unsigned long offset, unsigned long nr_to_read)
{
        struct inode *inode = mapping->host;
        struct page *page;
        unsigned long end_index;        /* The last page we want to read */
        LIST_HEAD(page_pool);
        int page_idx;
        int ret = 0;
        loff_t isize = i_size_read(inode);

        if (isize == 0)
                goto out;

        end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

        /*
         * Preallocate as many pages as we will need.
         */
        spin_lock_irq(&mapping->tree_lock);
        for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
                unsigned long page_offset = offset + page_idx;

                if (page_offset > end_index)
                        break;

                page = radix_tree_lookup(&mapping->page_tree, page_offset);
                if (page)
                        continue;

                spin_unlock_irq(&mapping->tree_lock);
                page = page_cache_alloc_cold(mapping);
                spin_lock_irq(&mapping->tree_lock);
                if (!page)
                        break;
                page->index = page_offset;
                list_add(&page->lru, &page_pool);
                ret++;
        }
        spin_unlock_irq(&mapping->tree_lock);

        /*
         * Now start the IO.  We ignore I/O errors - if the page is not
         * uptodate then the caller will launch readpage again, and
         * will then handle the error.
         */
        if (ret)
                read_pages(mapping, filp, &page_pool, ret);
        BUG_ON(!list_empty(&page_pool));
out:
        return ret;
}
/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
                unsigned long offset, unsigned long nr_to_read)
{
        int ret = 0;

        if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
                return -EINVAL;

        while (nr_to_read) {
                int err;

                unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

                if (this_chunk > nr_to_read)
                        this_chunk = nr_to_read;
                err = __do_page_cache_readahead(mapping, filp,
                                                offset, this_chunk);
                if (err < 0) {
                        ret = err;
                        break;
                }
                ret += err;
                offset += this_chunk;
                nr_to_read -= this_chunk;
        }
        return ret;
}
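/*
 * Illustrative note (not from the original source): with 4KB pages the
 * chunk above works out to (2 * 1024 * 1024) / 4096 = 512 pages, so a
 * request for, say, 1200 pages becomes three __do_page_cache_readahead()
 * calls of 512, 512 and 176 pages.  As far as I know, the readahead(2)
 * syscall path is the typical caller, after clamping the request with
 * max_sane_readahead().
 */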
/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        unsigned long offset, unsigned long nr_to_read)
{
        if (!bdi_read_congested(mapping->backing_dev_info))
                return __do_page_cache_readahead(mapping, filp,
                                                offset, nr_to_read);
        return 0;
}
/*
 * Check how effective readahead is being.  If the amount of started IO is
 * less than expected then the file is partly or fully in pagecache and
 * readahead isn't helping.  Shrink the window.
 *
 * But don't shrink it too much - the application may read the same page
 * occasionally.
 */
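/*
 * Illustrative example (not from the original source): if the previous
 * window size was 16 pages and none of them needed IO (actual == 0),
 * next_size drops to 15; once it has been whittled down to 1 and IO is
 * still not needed, readahead is switched off entirely (next_size = -1UL,
 * size = 0).
 */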
static inline void
check_ra_success(struct file_ra_state *ra, pgoff_t attempt,
                        pgoff_t actual, pgoff_t orig_next_size)
{
        if (actual == 0) {
                if (orig_next_size > 1) {
                        ra->next_size = orig_next_size - 1;
                        if (ra->ahead_size)
                                ra->ahead_size = ra->next_size;
                } else {
                        ra->next_size = -1UL;
                        ra->size = 0;
                }
        }
}
/*
 * page_cache_readahead is the main function.  It performs the adaptive
 * readahead window size management and submits the readahead I/O.
 */
void
page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
                        struct file *filp, unsigned long offset)
{
        unsigned max;
        unsigned orig_next_size;
        unsigned actual;
        int first_access = 0;
        unsigned long average;

        /*
         * Here we detect the case where the application is performing
         * sub-page sized reads.  We avoid doing extra work and bogusly
         * perturbing the readahead window expansion logic.
         * If next_size is zero, this is the very first read for this
         * file handle, or the window is maximally shrunk.
         */
        if (offset == ra->prev_page) {
                if (ra->next_size != 0)
                        goto out;
        }

        if (ra->next_size == -1UL)
                goto out;       /* Maximally shrunk */

        max = get_max_readahead(ra);
        if (max == 0)
                goto out;       /* No readahead */

        orig_next_size = ra->next_size;

        if (ra->next_size == 0) {
                /*
                 * Special case - first read.
                 * We'll assume it's a whole-file read, and
                 * grow the window fast.
                 */
                first_access = 1;
                ra->next_size = max / 2;
                ra->prev_page = offset;
                ra->currnt_wnd_hit++;
                goto do_io;
        }

        ra->prev_page = offset;

        if (offset >= ra->start && offset <= (ra->start + ra->size)) {
                /*
                 * A readahead hit.  Either inside the window, or one
                 * page beyond the end.  Expand the next readahead size.
                 */
                ra->next_size += 2;

                if (ra->currnt_wnd_hit <= (max * 2))
                        ra->currnt_wnd_hit++;
        } else {
                /*
                 * A miss - lseek, pagefault, pread, etc.  Shrink the readahead
                 * window.
                 */
                ra->next_size -= 2;

                average = ra->average;
                if (average < ra->currnt_wnd_hit) {
                        average++;
                }
                ra->average = (average + ra->currnt_wnd_hit) / 2;
                ra->currnt_wnd_hit = 1;
        }

        if ((long)ra->next_size > (long)max)
                ra->next_size = max;
        if ((long)ra->next_size <= 0L) {
                ra->next_size = -1UL;
                ra->size = 0;
                goto out;               /* Readahead is off */
        }

        /*
         * Is this request outside the current window?
         */
        if (offset < ra->start || offset >= (ra->start + ra->size)) {
                /*
                 * A miss against the current window.  Have we merely
                 * advanced into the ahead window?
                 */
                if (offset == ra->ahead_start) {
                        /*
                         * Yes, we have.  The ahead window now becomes
                         * the current window.
                         */
                        ra->start = ra->ahead_start;
                        ra->size = ra->ahead_size;
                        ra->prev_page = ra->start;
                        ra->ahead_start = 0;
                        ra->ahead_size = 0;

                        /*
                         * Control now returns, probably to sleep until I/O
                         * completes against the first ahead page.
                         * When the second page in the old ahead window is
                         * requested, control will return here and more I/O
                         * will be submitted to build the new ahead window.
                         */
                        goto out;
                }
do_io:
                /*
                 * This is the "unusual" path.  We come here during
                 * startup or after an lseek.  We invalidate the
                 * ahead window and get some I/O underway for the new
                 * current window.
                 */
                if (!first_access) {
                        /*
                         * Heuristic: there is a high probability
                         * that around ra->average number of
                         * pages shall be accessed in the next
                         * current window.
                         */
                        average = ra->average;
                        if (ra->currnt_wnd_hit > average)
                                average = (ra->currnt_wnd_hit + ra->average + 1) / 2;

                        ra->next_size = min(average, (unsigned long)max);
                }
                ra->start = offset;
                ra->size = ra->next_size;
                ra->ahead_start = 0;            /* Invalidate these */
                ra->ahead_size = 0;
                actual = do_page_cache_readahead(mapping, filp, offset,
                                                 ra->size);
                if (!first_access) {
                        /*
                         * do not adjust the readahead window size the first
                         * time, the ahead window might get closed if all
                         * the pages are already in the cache.
                         */
                        check_ra_success(ra, ra->size, actual, orig_next_size);
                }
        } else {
                /*
                 * This read request is within the current window.  It may be
                 * time to submit I/O for the ahead window while the
                 * application is about to step into the ahead window.
                 */
                if (ra->ahead_start == 0) {
                        /*
                         * If the average I/O size is larger than the maximum
                         * readahead size of the file, the I/O pattern is
                         * sequential - so bring in the ahead window
                         * immediately.  If the average I/O size is smaller,
                         * the pattern is random and readahead would be
                         * wasted, so don't bother.
                         */
                        average = ra->average;
                        if (ra->currnt_wnd_hit > average)
                                average = (ra->currnt_wnd_hit + ra->average + 1) / 2;

                        if (average > max) {
                                ra->ahead_start = ra->start + ra->size;
                                ra->ahead_size = ra->next_size;
                                actual = do_page_cache_readahead(mapping, filp,
                                        ra->ahead_start, ra->ahead_size);
                                check_ra_success(ra, ra->ahead_size,
                                                actual, orig_next_size);
                        }
                }
        }
out:
        return;
}
/*
 * handle_ra_miss() is called when it is known that a page which should have
 * been present in the pagecache (we just did some readahead there) was in fact
 * not found.  This will happen if it was evicted by the VM (readahead
 * thrashing) or if the readahead window is maximally shrunk.
 *
 * If the window has been maximally shrunk (next_size == -1UL) then look to see
 * if we are getting misses against sequential file offsets.  If so, and this
 * persists then resume readahead.
 *
 * Otherwise we're thrashing, so shrink the readahead window by three pages.
 * This is because it is grown by two pages on a readahead hit.  Theory being
 * that the readahead window size will stabilise around the maximum level at
 * which there is no thrashing.
 */
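/*
 * Illustrative note (not from the original source): if a 32-page window
 * keeps getting evicted before the application reaches its pages, each miss
 * reported here takes 3 off next_size while each hit in
 * page_cache_readahead() adds 2 back, so the window drifts down towards a
 * size that survives in memory, never dropping below get_min_readahead().
 */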
void handle_ra_miss(struct address_space *mapping,
                struct file_ra_state *ra, pgoff_t offset)
{
        if (ra->next_size == -1UL) {
                const unsigned long max = get_max_readahead(ra);

                if (offset != ra->prev_page + 1) {
                        ra->size = ra->size ? ra->size - 1 : 0; /* Not sequential */
                } else {
                        ra->size++;                     /* A sequential read */
                        if (ra->size >= max) {          /* Resume readahead */
                                ra->start = offset - max;
                                ra->next_size = max;
                                ra->size = max;
                                ra->ahead_start = 0;
                                ra->ahead_size = 0;
                                ra->average = max / 2;
                        }
                }
                ra->prev_page = offset;
        } else {
                const unsigned long min = get_min_readahead(ra);

                ra->next_size -= 3;
                if (ra->next_size < min)
                        ra->next_size = min;
        }
}
/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
        unsigned long active;
        unsigned long inactive;
        unsigned long free;

        __get_zone_counts(&active, &inactive, &free, NODE_DATA(numa_node_id()));
        return min(nr, (inactive + free) / 2);
}
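/*
 * Illustrative example (not from the original source): on a node with
 * 20000 inactive pages and 10000 free pages, a request to read ahead
 * 100000 pages is clamped to (20000 + 10000) / 2 = 15000 pages, so a huge
 * user-triggered readahead cannot pin much more than half of the node's
 * free-or-reclaimable memory.
 */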