/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

struct backing_dev_info default_backing_dev_info = {
	.ra_pages	= (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_index = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

/*
 * Return max readahead size for this inode in number-of-pages.
 */
static inline unsigned long get_max_readahead(struct file_ra_state *ra)
{
	return ra->ra_pages;
}

static inline unsigned long get_min_readahead(struct file_ra_state *ra)
{
	return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
}

static inline void reset_ahead_window(struct file_ra_state *ra)
{
	/*
	 * ... but preserve ahead_start + ahead_size value,
	 * see 'recheck:' label in page_cache_readahead().
	 * Note: We never use ->ahead_size as rvalue without
	 * checking ->ahead_start != 0 first.
	 */
	ra->ahead_size += ra->ahead_start;
	ra->ahead_start = 0;
}

static inline void ra_off(struct file_ra_state *ra)
{
	ra->start = 0;
	ra->flags = 0;
	ra->size = 0;
	reset_ahead_window(ra);
	return;
}

/*
 * Set the initial window size: round the request up to the next power of 2,
 * then multiply by 4 for small sizes, by 2 for medium sizes, and clamp to
 * max for large ones.
 * For a 128k (32 page) max readahead this gives a 16k-64k initial window
 * for requests of 1-8 pages, and the full 128k for anything larger.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;
	return newsize;
}

/*
 * Set the new window size.  This is called only when I/O is to be submitted,
 * not for each call to readahead.  If a cache miss occurred, reduce the next
 * I/O size, else increase it depending on how close to max we are.
 */
static inline unsigned long get_next_ra_size(struct file_ra_state *ra)
{
	unsigned long max = get_max_readahead(ra);
	unsigned long min = get_min_readahead(ra);
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (ra->flags & RA_FLAG_MISS) {
		ra->flags &= ~RA_FLAG_MISS;
		newsize = max((cur - 2), min);
	} else if (cur < max / 16) {
		newsize = 4 * cur;
	} else {
		newsize = 2 * cur;
	}
	return min(newsize, max);
}
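
/*
 * Illustrative example (not reached by any code path here): assuming 4k
 * pages and the default 128k maximum (max = 32 pages), a sequential reader
 * issuing 1-page requests ramps up as follows:
 *
 *	get_init_ra_size(1, 32)  -> 4 pages  (1 rounds to 1, 1 <= 32/32, x 4)
 *	get_next_ra_size():  4 -> 8 -> 16 -> 32, then pinned at 32 (= max)
 *
 * A cache miss (RA_FLAG_MISS) instead shrinks the next window to
 * max(cur - 2, min) before the doubling resumes.
 */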

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	struct pagevec lru_pvec;
	int ret = 0;

	pagevec_init(&lru_pvec, 0);

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
			page_cache_release(page);
			continue;
		}
		ret = filler(data, page);
		if (!pagevec_add(&lru_pvec, page))
			__pagevec_lru_add(&lru_pvec);
		if (ret) {
			put_pages_list(pages);
			break;
		}
		task_io_account_read(PAGE_CACHE_SIZE);
	}
	pagevec_lru_add(&lru_pvec);
	return ret;
}
EXPORT_SYMBOL(read_cache_pages);
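
/*
 * Illustrative usage sketch (hypothetical names, not part of this file):
 * a filesystem whose ->readpages() cannot build one large request can simply
 * hand the page list to read_cache_pages() with a per-page filler:
 *
 *	static int examplefs_filler(void *data, struct page *page)
 *	{
 *		struct file *filp = data;
 *
 *		return examplefs_readpage(filp, page);
 *	}
 *
 *	static int examplefs_readpages(struct file *filp,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return read_cache_pages(mapping, pages, examplefs_filler, filp);
 *	}
 *
 * read_cache_pages() then takes care of page-cache insertion, the LRU and
 * I/O accounting, so the filler only has to start the read for one page.
 */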

static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	unsigned page_idx;
	struct pagevec lru_pvec;
	int ret;

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	pagevec_init(&lru_pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
			if (!pagevec_add(&lru_pvec, page))
				__pagevec_lru_add(&lru_pvec);
		} else
			page_cache_release(page);
	}
	pagevec_lru_add(&lru_pvec);
	ret = 0;
out:
	return ret;
}

/*
 * Readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *		start:		Page index at which we started the readahead
 *		size:		Number of pages in that read
 *				Together, start and size form the "current window".
 *		prev_index:	The page which the readahead algorithm most-recently
 *				inspected.  It is mainly used to detect sequential file
 *				reading.
 *				If page_cache_readahead sees that it is again being called
 *				for a page which it just looked at, it can return
 *				immediately without making any state changes.
 *		offset:		Offset in the prev_index where the last read ended - used
 *				for detection of sequential file reading.
 *		ahead_start,
 *		ahead_size:	Together, these form the "ahead window".
 *		ra_pages:	The externally controlled max readahead for this fd.
 *
 * When readahead is in the off state (size == 0), readahead is disabled.
 * In this state, prev_index is used to detect the resumption of sequential I/O.
 *
 * The readahead code manages two windows - the "current" and the "ahead"
 * windows.  The intent is that while the application is walking the pages
 * in the current window, I/O is underway on the ahead window.  When the
 * current window is fully traversed, it is replaced by the ahead window
 * and the ahead window is invalidated.  When this copying happens, the
 * new current window's pages are probably still locked.  So we submit a new
 * batch of I/O immediately, creating a new ahead window.
 *
 * So:
 *
 *   ----|----------------|----------------|-----
 *       ^start           ^start+size
 *                        ^ahead_start     ^ahead_start+ahead_size
 *
 *                        ^ When this page is read, we submit I/O for the
 *                          ahead window.
 *
 * A `readahead hit' occurs when a read request is made against a page which is
 * the next sequential page.  Ahead window calculations are done only when it
 * is time to submit a new I/O.  The code ramps up the size aggressively at
 * first, but slows down as it approaches max_readahead.
 *
 * Any seek/random I/O will result in readahead being turned off.  It will
 * resume at the first sequential access.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and max_readahead.
 *
 * This function is to be called for every read request, rather than when
 * it is time to perform readahead.  It is called only once for the entire I/O
 * regardless of size unless readahead is unable to start enough I/O to
 * satisfy the request (I/O request > max_readahead).
 */
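
/*
 * Worked example (illustrative, assuming 4k pages, the default 128k/32-page
 * maximum, and a caller issuing single-page sequential reads from offset 0):
 *
 *   read of page 0:  the current window becomes pages 0-3
 *                    (get_init_ra_size(1, 32) == 4) and that I/O is
 *                    submitted immediately.
 *   read of page 1:  no ahead window exists yet, so one is created at
 *                    pages 4-11 (get_next_ra_size() doubles 4 to 8) and
 *                    submitted non-blocking.
 *   reads of 2-3:    fall inside the current window; no window changes,
 *                    no new I/O.
 *   read of page 4:  we have crossed into the ahead window, so it becomes
 *                    the current window (start=4, size=8) and a new ahead
 *                    window of 16 pages is submitted at pages 12-27.
 *
 * and so on, doubling until the window size reaches max_readahead.
 */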

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * all the pages first, then submits them all for I/O.  This avoids the very
 * bad behaviour which would occur if page allocations are causing VM
 * writeback.  We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 *
 * do_page_cache_readahead() returns -1 if it encountered request queue
 * congestion.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	read_lock_irq(&mapping->tree_lock);
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		if (page)
			continue;

		read_unlock_irq(&mapping->tree_lock);
		page = page_cache_alloc_cold(mapping);
		read_lock_irq(&mapping->tree_lock);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		ret++;
	}
	read_unlock_irq(&mapping->tree_lock);

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}
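
/*
 * Example of the chunking (illustrative): with 4k pages each chunk is
 * 2MB / 4k = 512 pages, so a request for 1200 pages is issued as three calls
 * to __do_page_cache_readahead() of 512, 512 and 176 pages.  Callers such as
 * the fadvise(POSIX_FADV_WILLNEED) path use this entry point precisely
 * because it ignores queue congestion and may block.
 */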

/*
 * Check how effective readahead is being.  If the amount of started IO is
 * less than expected then the file is partly or fully in pagecache and
 * readahead isn't helping.
 */
static inline int check_ra_success(struct file_ra_state *ra,
			unsigned long nr_to_read, unsigned long actual)
{
	if (actual == 0) {
		ra->cache_hit += nr_to_read;
		if (ra->cache_hit >= VM_MAX_CACHE_HIT) {
			ra_off(ra);
			ra->flags |= RA_FLAG_INCACHE;
			return 0;
		}
	} else {
		ra->cache_hit = 0;
	}
	return 1;
}

/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	if (bdi_read_congested(mapping->backing_dev_info))
		return -1;

	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read);
}

/*
 * Read 'nr_to_read' pages starting at page 'offset'.  If the flag 'block' is
 * set, wait till the read completes.  Otherwise attempt to read without
 * blocking.
 * Returns 1 meaning 'success' if the read is successful without switching off
 * readahead mode.  Otherwise return failure.
 */
static int
blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			struct file_ra_state *ra, int block)
{
	int actual;

	if (!block && bdi_read_congested(mapping->backing_dev_info))
		return 0;

	actual = __do_page_cache_readahead(mapping, filp, offset, nr_to_read);

	return check_ra_success(ra, nr_to_read, actual);
}

static int make_ahead_window(struct address_space *mapping, struct file *filp,
				struct file_ra_state *ra, int force)
{
	int block, ret;

	ra->ahead_size = get_next_ra_size(ra);
	ra->ahead_start = ra->start + ra->size;

	block = force || (ra->prev_index >= ra->ahead_start);
	ret = blockable_page_cache_readahead(mapping, filp,
			ra->ahead_start, ra->ahead_size, ra, block);

	if (!ret && !force) {
		/* A read failure in blocking mode implies the pages are
		 * all cached, so we can safely assume we have taken
		 * care of all the pages requested in this call.
		 * A read failure in non-blocking mode implies we are
		 * reading more pages than requested in this call, so
		 * we safely assume we have taken care of all the pages
		 * requested in this call.
		 *
		 * Just reset the ahead window in case we failed due to
		 * congestion.  The ahead window will anyway be closed
		 * in case we failed due to excessive page cache hits.
		 */
		reset_ahead_window(ra);
	}

	return ret;
}

/**
 * page_cache_readahead - generic adaptive readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in PAGE_CACHE_SIZE units
 * @req_size: hint: total size of the read which the caller is performing in
 *            PAGE_CACHE_SIZE units
 *
 * page_cache_readahead() is the main function.  It performs the adaptive
 * readahead window size management and submits the readahead I/O.
 *
 * Note that @filp is purely used for passing on to the ->readpage[s]()
 * handler: it may refer to a different file from @mapping (so we may not use
 * @filp->f_mapping or @filp->f_path.dentry->d_inode here).
 * Also, @ra may not be equal to &@filp->f_ra.
 */
unsigned long
page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
		     struct file *filp, pgoff_t offset, unsigned long req_size)
{
	unsigned long max, newsize;
	int sequential;

	/*
	 * We avoid doing extra work and bogusly perturbing the readahead
	 * window expansion logic.
	 */
	if (offset == ra->prev_index && --req_size)
		++offset;

	/* Note that prev_index == -1 if it is a first read */
	sequential = (offset == ra->prev_index + 1);
	ra->prev_index = offset;
	ra->prev_offset = 0;

	max = get_max_readahead(ra);
	newsize = min(req_size, max);

	/* No readahead or sub-page sized read or file already in cache */
	if (newsize == 0 || (ra->flags & RA_FLAG_INCACHE))
		goto out;

	ra->prev_index += newsize - 1;

	/*
	 * Special case - first read at start of file.  We'll assume it's
	 * a whole-file read and grow the window fast.  Or detect first
	 * sequential access.
	 */
	if (sequential && ra->size == 0) {
		ra->size = get_init_ra_size(newsize, max);
		ra->start = offset;
		if (!blockable_page_cache_readahead(mapping, filp, offset,
							 ra->size, ra, 1))
			goto out;

		/*
		 * If the request size is larger than our max readahead, we
		 * at least want to be sure that we get 2 IOs in flight and
		 * we know that we will definitely need the new I/O.
		 * Once we do this, subsequent calls should be able to overlap
		 * IOs, thus preventing stalls.  So issue the ahead window
		 * immediately.
		 */
		if (req_size >= max)
			make_ahead_window(mapping, filp, ra, 1);

		goto out;
	}

	/*
	 * Now handle the random case:
	 * partial page reads and first access were handled above,
	 * so this must be the next page otherwise it is random.
	 */
	if (!sequential) {
		ra_off(ra);
		blockable_page_cache_readahead(mapping, filp, offset,
				 newsize, ra, 1);
		goto out;
	}

	/*
	 * If we get here we are doing sequential IO and this was not the first
	 * occurrence (ie we have an existing window).
	 */
	if (ra->ahead_start == 0) {	 /* no ahead window yet */
		if (!make_ahead_window(mapping, filp, ra, 0))
			goto recheck;
	}

	/*
	 * Already have an ahead window, check if we crossed into it.
	 * If so, shift windows and issue a new ahead window.
	 * Only return the #pages that are in the current window, so that
	 * we get called back on the first page of the ahead window which
	 * will allow us to submit more IO.
	 */
	if (ra->prev_index >= ra->ahead_start) {
		ra->start = ra->ahead_start;
		ra->size = ra->ahead_size;
		make_ahead_window(mapping, filp, ra, 0);
recheck:
		/* prev_index shouldn't overrun the ahead window */
		ra->prev_index = min(ra->prev_index,
			ra->ahead_start + ra->ahead_size - 1);
	}

out:
	return ra->prev_index + 1;
}
EXPORT_SYMBOL_GPL(page_cache_readahead);

/*
 * handle_ra_miss() is called when it is known that a page which should have
 * been present in the pagecache (we just did some readahead there) was in fact
 * not found.  This will happen if it was evicted by the VM (readahead
 * thrashing).
 *
 * Turn on the cache miss flag in the RA struct, this will cause the RA code
 * to reduce the RA size on the next read.
 */
void handle_ra_miss(struct address_space *mapping,
		struct file_ra_state *ra, pgoff_t offset)
{
	ra->flags |= RA_FLAG_MISS;
	ra->flags &= ~RA_FLAG_INCACHE;
	ra->cache_hit = 0;
}
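
/*
 * Roughly how a caller such as do_generic_mapping_read() in mm/filemap.c
 * drives the two hooks above (simplified sketch, not verbatim kernel code;
 * 'index', 'last_index' and 'ra' are the caller's own state):
 *
 *	page_cache_readahead(mapping, ra, filp, index, last_index - index);
 *	page = find_get_page(mapping, index);
 *	if (!page) {
 *		handle_ra_miss(mapping, ra, index);
 *		... fall back to a synchronous ->readpage() ...
 *	}
 *
 * i.e. page_cache_readahead() is called for every read request, and
 * handle_ra_miss() only when a page that readahead should have provided has
 * already been reclaimed.
 */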

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE)
		+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}