/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	akpm@zip.com.au
 *		Initial version.
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);
struct backing_dev_info default_backing_dev_info = {
	.ra_pages	= (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);
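/*
 * For scale (an illustrative calculation, assuming the common
 * VM_MAX_READAHEAD of 128 kbytes and a 4k PAGE_CACHE_SIZE):
 *
 *	ra_pages = (128 * 1024) / 4096 = 32
 *
 * i.e. the default maximum readahead window is 32 pages per file.
 */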
/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_page = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
/*
 * Return max readahead size for this inode in number-of-pages.
 */
static inline unsigned long get_max_readahead(struct file_ra_state *ra)
{
	return ra->ra_pages;
}
static inline unsigned long get_min_readahead(struct file_ra_state *ra)
{
	return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
}
static inline void reset_ahead_window(struct file_ra_state *ra)
{
	/*
	 * ... but preserve ahead_start + ahead_size value,
	 * see 'recheck:' label in page_cache_readahead().
	 * Note: We never use ->ahead_size as rvalue without
	 * checking ->ahead_start != 0 first.
	 */
	ra->ahead_size += ra->ahead_start;
	ra->ahead_start = 0;
}
static inline void ra_off(struct file_ra_state *ra)
{
	ra->start = 0;
	ra->flags = 0;
	ra->size = 0;
	reset_ahead_window(ra);
}
/*
 * Set the initial window size: round the request up to the next power of 2,
 * then multiply by 4 for small sizes (<= max/32), by 2 for medium sizes
 * (<= max/4), and clamp to max for anything larger.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;
	return newsize;
}
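/*
 * Worked example for get_init_ra_size() (illustrative only, assuming 4k
 * pages and a 32 page / 128k max):
 *
 *	get_init_ra_size(1, 32)  -> roundup to 1,  1 <= 32/32, so 1 * 4 = 4 pages  (16k)
 *	get_init_ra_size(3, 32)  -> roundup to 4,  4 <= 32/4,  so 4 * 2 = 8 pages  (32k)
 *	get_init_ra_size(20, 32) -> roundup to 32, 32 > 32/4,  so clamp to 32 pages (128k)
 */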
/*
 * Set the new window size, this is called only when I/O is to be submitted,
 * not for each call to readahead.  If a cache miss occurred, reduce the next
 * I/O size, else increase it depending on how close to max we are.
 */
static inline unsigned long get_next_ra_size(struct file_ra_state *ra)
{
	unsigned long max = get_max_readahead(ra);
	unsigned long min = get_min_readahead(ra);
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (ra->flags & RA_FLAG_MISS) {
		ra->flags &= ~RA_FLAG_MISS;
		newsize = max((cur - 2), min);
	} else if (cur < max / 16) {
		newsize = 4 * cur;
	} else {
		newsize = 2 * cur;
	}
	return min(newsize, max);
}
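/*
 * Worked example for get_next_ra_size() (illustrative, again assuming a
 * 32 page max and VM_MIN_READAHEAD of 16k, i.e. min = 4 pages):
 *
 *	cur = 1,  no miss:	1 < 32/16,  so 4 * 1  -> 4 pages
 *	cur = 8,  no miss:	8 >= 32/16, so 2 * 8  -> 16 pages
 *	cur = 16, no miss:	2 * 16 = 32 -> capped at the 32 page max
 *	cur = 16, RA_FLAG_MISS:	max(16 - 2, 4) -> 14 pages
 */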
#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	struct pagevec lru_pvec;
	int ret = 0;

	pagevec_init(&lru_pvec, 0);

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
			page_cache_release(page);
			continue;
		}
		ret = filler(data, page);
		if (!pagevec_add(&lru_pvec, page))
			__pagevec_lru_add(&lru_pvec);
		if (ret) {
			/* The filler failed: drop the remaining target pages. */
			while (!list_empty(pages)) {
				struct page *victim;

				victim = list_to_page(pages);
				list_del(&victim->lru);
				page_cache_release(victim);
			}
			break;
		}
	}
	pagevec_lru_add(&lru_pvec);
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);
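/*
 * Minimal usage sketch (a hypothetical filesystem, not code from this file):
 * a ->readpages() implementation can hand its page list to read_cache_pages()
 * with a filler callback that performs the per-page read.  example_filler()
 * and example_readpage() are illustrative names only.
 *
 *	static int example_filler(void *data, struct page *page)
 *	{
 *		struct file *filp = data;
 *
 *		return example_readpage(filp, page);
 *	}
 *
 *	static int example_readpages(struct file *filp,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return read_cache_pages(mapping, pages, example_filler, filp);
 *	}
 */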
static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	unsigned page_idx;
	struct pagevec lru_pvec;
	int ret;

	/* Prefer the batched ->readpages() when the filesystem provides it. */
	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		goto out;
	}

	/* Otherwise fall back to reading the pages one at a time. */
	pagevec_init(&lru_pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
			if (!pagevec_add(&lru_pvec, page))
				__pagevec_lru_add(&lru_pvec);
		} else
			page_cache_release(page);
	}
	pagevec_lru_add(&lru_pvec);
	ret = 0;
out:
	return ret;
}
/*
 * Readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 * start:	Page index at which we started the readahead.
 * size:	Number of pages in that read.
 *		Together, start and size form the "current window" (the
 *		readahead window).
 * prev_page:	The page which the readahead algorithm most-recently inspected.
 *		It is mainly used to detect sequential file reading.
 *		If page_cache_readahead sees that it is again being called for
 *		a page which it just looked at, it can return immediately
 *		without making any state changes.
 * ahead_start,
 * ahead_size:	Together, these form the "ahead window".
 * ra_pages:	The externally controlled max readahead for this fd.
 *
 * When readahead is in the off state (size == 0), readahead is disabled.
 * In this state, prev_page is used to detect the resumption of sequential I/O.
 *
 * The readahead code manages two windows - the "current" and the "ahead"
 * windows.  The intent is that while the application is walking the pages
 * in the current window, I/O is underway on the ahead window.  When the
 * current window is fully traversed, it is replaced by the ahead window
 * and the ahead window is invalidated.  When this copying happens, the
 * new current window's pages are probably still locked.  So
 * we submit a new batch of I/O immediately, creating a new ahead window.
 *
 * So:
 *
 *   ----|----------------|----------------|-----
 *       ^start           ^start+size
 *                        ^ahead_start     ^ahead_start+ahead_size
 *
 *               ^ When this page is read, we submit I/O for the
 *                 ahead window.
 *
 * A `readahead hit' occurs when a read request is made against a page which is
 * the next sequential page.  Ahead window calculations are done only when it
 * is time to submit a new I/O.  The code ramps up the size aggressively at
 * first, but slows down as it approaches max_readahead.
 *
 * Any seek/random I/O will result in readahead being turned off.  It will
 * resume at the first sequential access.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial size
 * based on I/O request size and the max_readahead.
 *
 * page_cache_readahead() is to be called for every read request, rather than
 * when it is time to perform readahead.  It is called only once for the entire
 * I/O regardless of size unless readahead is unable to start enough I/O to
 * satisfy the request (I/O request > max_readahead).
 */
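/*
 * Illustrative trace of the two-window scheme (the workload is an
 * assumption: single-page sequential reads with a 32 page max readahead):
 *
 *	read page 0:	readahead was off; current window = pages 0-3,
 *			I/O submitted for them
 *	read page 1:	still sequential; ahead window = pages 4-11,
 *			I/O submitted
 *	read page 4:	we crossed into the ahead window, so it becomes the
 *			current window and a new ahead window (pages 12-27)
 *			is submitted; the window keeps growing towards max
 */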
/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * all the pages first, then submits them all for I/O.  This avoids the very
 * bad behaviour which would occur if page allocations are causing VM
 * writeback.  We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 *
 * do_page_cache_readahead() returns -1 if it encountered request queue
 * congestion.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	read_lock_irq(&mapping->tree_lock);
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		/* Skip pages which are already in the page cache. */
		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		if (page)
			continue;

		read_unlock_irq(&mapping->tree_lock);
		page = page_cache_alloc_cold(mapping);
		read_lock_irq(&mapping->tree_lock);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		ret++;
	}
	read_unlock_irq(&mapping->tree_lock);

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}
/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}
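/*
 * Worked example (illustrative, assuming 4k pages): each chunk is
 * (2 * 1024 * 1024) / 4096 = 512 pages, so a request for 1200 pages is
 * issued as three calls of 512 + 512 + 176 pages, and at most 2MB worth
 * of pages is pinned by any single __do_page_cache_readahead() call.
 */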
/*
 * Check how effective readahead is being.  If the amount of started IO is
 * less than expected then the file is partly or fully in pagecache and
 * readahead isn't helping.
 */
static inline int check_ra_success(struct file_ra_state *ra,
			unsigned long nr_to_read, unsigned long actual)
{
	if (actual == 0) {
		ra->cache_hit += nr_to_read;
		if (ra->cache_hit >= VM_MAX_CACHE_HIT) {
			ra_off(ra);
			ra->flags |= RA_FLAG_INCACHE;
			return 0;
		}
	} else {
		ra->cache_hit = 0;
	}
	return 1;
}
/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	if (bdi_read_congested(mapping->backing_dev_info))
		return -1;

	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read);
}
/*
 * Read 'nr_to_read' pages starting at page 'offset'.  If the flag 'block'
 * is set, wait till the read completes.  Otherwise attempt to read without
 * blocking.
 * Returns 1 meaning 'success' if the read is successful without switching off
 * readahead mode.  Otherwise returns failure.
 */
static int
blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			struct file_ra_state *ra, int block)
{
	int actual;

	if (!block && bdi_read_congested(mapping->backing_dev_info))
		return 0;

	actual = __do_page_cache_readahead(mapping, filp, offset, nr_to_read);

	return check_ra_success(ra, nr_to_read, actual);
}
static int make_ahead_window(struct address_space *mapping, struct file *filp,
				struct file_ra_state *ra, int force)
{
	int block, ret;

	ra->ahead_size = get_next_ra_size(ra);
	ra->ahead_start = ra->start + ra->size;

	block = force || (ra->prev_page >= ra->ahead_start);
	ret = blockable_page_cache_readahead(mapping, filp,
			ra->ahead_start, ra->ahead_size, ra, block);

	if (!ret && !force) {
		/* A read failure in blocking mode implies the pages are
		 * all cached, so we can safely assume we have taken
		 * care of all the pages requested in this call.
		 * A read failure in non-blocking mode implies we are
		 * reading more pages than requested in this call, so
		 * we safely assume we have taken care of all the pages
		 * requested in this call.
		 *
		 * Just reset the ahead window in case we failed due to
		 * congestion.  The ahead window will be closed anyway
		 * in case we failed due to excessive page cache hits.
		 */
		reset_ahead_window(ra);
	}

	return ret;
}
/**
 * page_cache_readahead - generic adaptive readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in PAGE_CACHE_SIZE units
 * @req_size: hint: total size of the read which the caller is performing in
 *            PAGE_CACHE_SIZE units
 *
 * page_cache_readahead() is the main function.  It performs the adaptive
 * readahead window size management and submits the readahead I/O.
 *
 * Note that @filp is purely used for passing on to the ->readpage[s]()
 * handler: it may refer to a different file from @mapping (so we may not use
 * @filp->f_mapping or @filp->f_dentry->d_inode here).
 * Also, @ra may not be equal to &@filp->f_ra.
 */
unsigned long
page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
		     struct file *filp, pgoff_t offset, unsigned long req_size)
{
	unsigned long max, newsize;
	int sequential;

	/*
	 * We avoid doing extra work and bogusly perturbing the readahead
	 * window expansion logic.
	 */
	if (offset == ra->prev_page && --req_size)
		++offset;

	/* Note that prev_page == -1 if it is a first read */
	sequential = (offset == ra->prev_page + 1);
	ra->prev_page = offset;

	max = get_max_readahead(ra);
	newsize = min(req_size, max);

	/* No readahead or sub-page sized read or file already in cache */
	if (newsize == 0 || (ra->flags & RA_FLAG_INCACHE))
		goto out;

	ra->prev_page += newsize - 1;

	/*
	 * Special case - first read at start of file.  We'll assume it's
	 * a whole-file read and grow the window fast.  Or detect first
	 * sequential access.
	 */
	if (sequential && ra->size == 0) {
		ra->size = get_init_ra_size(newsize, max);
		ra->start = offset;
		if (!blockable_page_cache_readahead(mapping, filp, offset,
							 ra->size, ra, 1))
			goto out;

		/*
		 * If the request size is larger than our max readahead, we
		 * at least want to be sure that we get 2 IOs in flight and
		 * we know that we will definitely need the new I/O.
		 * Once we do this, subsequent calls should be able to overlap
		 * IOs, thus preventing stalls.  So issue the ahead window
		 * immediately.
		 */
		if (req_size >= max)
			make_ahead_window(mapping, filp, ra, 1);

		goto out;
	}

	/*
	 * Now handle the random case:
	 * partial page reads and first access were handled above,
	 * so this must be the next page otherwise it is random
	 */
	if (!sequential) {
		ra_off(ra);
		blockable_page_cache_readahead(mapping, filp, offset,
					       newsize, ra, 1);
		goto out;
	}

	/*
	 * If we get here we are doing sequential IO and this was not the first
	 * occurrence (ie we have an existing window)
	 */
	if (ra->ahead_start == 0) {	 /* no ahead window yet */
		if (!make_ahead_window(mapping, filp, ra, 0))
			goto recheck;
	}

	/*
	 * Already have an ahead window, check if we crossed into it.
	 * If so, shift windows and issue a new ahead window.
	 * Only return the #pages that are in the current window, so that
	 * we get called back on the first page of the ahead window which
	 * will allow us to submit more IO.
	 */
	if (ra->prev_page >= ra->ahead_start) {
		ra->start = ra->ahead_start;
		ra->size = ra->ahead_size;
		make_ahead_window(mapping, filp, ra, 0);
recheck:
		/* prev_page shouldn't overrun the ahead window */
		ra->prev_page = min(ra->prev_page,
			ra->ahead_start + ra->ahead_size - 1);
	}

out:
	return ra->prev_page + 1;
}
EXPORT_SYMBOL_GPL(page_cache_readahead);
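/*
 * Sketch of the calling convention (illustrative, loosely modelled on the
 * generic file read path; the loop and variable names are assumptions, not
 * a copy of that code):
 *
 *	page_cache_readahead(mapping, &filp->f_ra, filp, index, nr_pages);
 *	page = find_get_page(mapping, index);
 *	if (!page) {
 *		handle_ra_miss(mapping, &filp->f_ra, index);
 *		... allocate the page and call ->readpage() synchronously ...
 *	}
 *
 * handle_ra_miss() is the hook for the case where a page readahead should
 * have brought in has gone missing; see below.
 */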
/*
 * handle_ra_miss() is called when it is known that a page which should have
 * been present in the pagecache (we just did some readahead there) was in fact
 * not found.  This will happen if it was evicted by the VM (readahead
 * thrashing).
 *
 * Turn on the cache miss flag in the RA struct, this will cause the RA code
 * to reduce the RA size on the next read.
 */
void handle_ra_miss(struct address_space *mapping,
		struct file_ra_state *ra, pgoff_t offset)
{
	ra->flags |= RA_FLAG_MISS;
	ra->flags &= ~RA_FLAG_INCACHE;
	ra->cache_hit = 0;
}
/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long free;

	__get_zone_counts(&active, &inactive, &free, NODE_DATA(numa_node_id()));
	return min(nr, (inactive + free) / 2);
}
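/*
 * Worked example (illustrative numbers): if the local node has 3000
 * inactive pages and 5000 free pages, the ceiling is (3000 + 5000) / 2 =
 * 4000 pages, so max_sane_readahead(16384) returns 4000 while
 * max_sane_readahead(100) returns 100.
 */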