/*
 * mm/page-writeback.c.
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	akpm@zip.com.au
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>

/*
 * The maximum number of pages to writeout in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_LOCK against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty limits each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

static long total_pages;	/* The total number of pages in the machine. */
static int dirty_exceeded;	/* Dirty mem may be over limit */

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
	return ratelimit_pages + ratelimit_pages / 2;
}
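
/*
 * Worked example (illustrative, assuming 4KB pages): once set_ratelimit()
 * below has clamped ratelimit_pages to its 4MB ceiling of 1024 pages,
 * sync_writeback_pages() returns 1024 + 512 = 1536 pages, i.e. the
 * six-megabyte write chunks mentioned in the set_ratelimit() comment.
 */
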
/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 40;

/*
 * The interval between `kupdate'-style writebacks, in centiseconds
 * (hundredths of a second)
 */
int dirty_writeback_centisecs = 5 * 100;

/*
 * The longest number of centiseconds for which data is allowed to remain dirty
 */
int dirty_expire_centisecs = 30 * 100;

/* End of sysctl-exported parameters */

static void background_writeout(unsigned long _min_pages);

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around.  To avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping, and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */
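/*
 * Worked example (illustrative numbers): on a 1GB machine with 4KB pages,
 * total_pages = 262144.  If half of memory is mapped (nr_mapped = 131072),
 * unmapped_ratio = 50, so a vm_dirty_ratio of 40 is clamped down to
 * 50 / 2 = 25, giving a dirty threshold of 65536 pages (256MB) and, with
 * dirty_background_ratio = 10, a background threshold of 26214 pages.
 */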
static void
get_dirty_limits(struct page_state *ps, long *pbackground, long *pdirty)
{
	int background_ratio;		/* Percentages */
	int dirty_ratio;
	int unmapped_ratio;
	long background;
	long dirty;

	get_page_state(ps);

	unmapped_ratio = 100 - (ps->nr_mapped * 100) / total_pages;

	dirty_ratio = vm_dirty_ratio;
	if (dirty_ratio > unmapped_ratio / 2)
		dirty_ratio = unmapped_ratio / 2;

	if (dirty_ratio < 5)
		dirty_ratio = 5;

	background_ratio = dirty_background_ratio;
	if (background_ratio >= dirty_ratio)
		background_ratio = dirty_ratio / 2;

	background = (background_ratio * total_pages) / 100;
	dirty = (dirty_ratio * total_pages) / 100;
	if (current->flags & PF_LESS_THROTTLE) {
		/* Tasks flagged as low-throttle get 25% higher limits */
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;
}

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
void balance_dirty_pages(struct address_space *mapping)
{
	struct page_state ps;
	long nr_reclaimable;
	long background_thresh;
	long dirty_thresh;
	unsigned long pages_written = 0;
	unsigned long write_chunk = sync_writeback_pages();

	struct backing_dev_info *bdi = mapping->backing_dev_info;

	for (;;) {
		struct writeback_control wbc = {
			.bdi		= bdi,
			.sync_mode	= WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write	= write_chunk,
		};

		get_dirty_limits(&ps, &background_thresh, &dirty_thresh);
		nr_reclaimable = ps.nr_dirty + ps.nr_unstable;
		if (nr_reclaimable + ps.nr_writeback <= dirty_thresh)
			break;

		dirty_exceeded = 1;

		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (e.g. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 */
		if (nr_reclaimable) {
			writeback_inodes(&wbc);
			get_dirty_limits(&ps, &background_thresh,
					&dirty_thresh);
			nr_reclaimable = ps.nr_dirty + ps.nr_unstable;
			if (nr_reclaimable + ps.nr_writeback <= dirty_thresh)
				break;
			pages_written += write_chunk - wbc.nr_to_write;
			if (pages_written >= write_chunk)
				break;		/* We've done our duty */
		}
		blk_congestion_wait(WRITE, HZ/10);
	}

	if (nr_reclaimable + ps.nr_writeback <= dirty_thresh)
		dirty_exceeded = 0;

	if (!writeback_in_progress(bdi) && nr_reclaimable > background_thresh)
		pdflush_operation(background_writeout, 0);
}

/**
 * balance_dirty_pages_ratelimited - balance dirty memory state
 * @mapping - address_space which was dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_page_state is expensive, so try to avoid calling
 * it too often (ratelimiting).  But once we're over the dirty memory limit we
 * decrease the ratelimiting by a lot, to prevent individual processes from
 * overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
	static DEFINE_PER_CPU(int, ratelimits) = 0;
	long ratelimit;

	ratelimit = ratelimit_pages;
	if (dirty_exceeded)
		ratelimit = 8;

	if (get_cpu_var(ratelimits)++ >= ratelimit) {
		__get_cpu_var(ratelimits) = 0;
		put_cpu_var(ratelimits);
		balance_dirty_pages(mapping);
		return;
	}
	put_cpu_var(ratelimits);
}
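
/*
 * Typical usage (a sketch, not taken verbatim from this file): the generic
 * write path dirties a page through the filesystem's commit_write and then
 * calls in here once per page, roughly
 *
 *	status = a_ops->commit_write(file, page, offset, offset + bytes);
 *	unlock_page(page);
 *	balance_dirty_pages_ratelimited(mapping);
 *
 * so a heavy writer is throttled about once every ratelimit_pages pages
 * rather than on every single write.
 */
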
/*
 * Write back at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
	long min_pages = _min_pages;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = NULL,
		.nr_to_write	= 0,
		.nonblocking	= 1,
	};

	for ( ; ; ) {
		struct page_state ps;
		long background_thresh;
		long dirty_thresh;

		get_dirty_limits(&ps, &background_thresh, &dirty_thresh);
		if (ps.nr_dirty + ps.nr_unstable < background_thresh
				&& min_pages <= 0)
			break;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		writeback_inodes(&wbc);
		min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		if (wbc.nr_to_write > 0) {
			/* Wrote less than expected */
			if (wbc.encountered_congestion)
				blk_congestion_wait(WRITE, HZ/10);
			else
				break;
		}
	}
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_bdflush(long nr_pages)
{
	if (nr_pages == 0) {
		struct page_state ps;

		get_page_state(&ps);
		nr_pages = ps.nr_dirty + ps.nr_unstable;
	}
	return pdflush_operation(background_writeout, nr_pages);
}
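
/*
 * Illustrative usage (a sketch): a low-memory path that wants background
 * writeout of everything can call
 *
 *	wakeup_bdflush(0);
 *
 * where zero means "write back the whole world".  Callers should be
 * prepared for a -1 return when every pdflush thread is busy, in which
 * case no new writeback was started.
 */
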
static struct timer_list wb_timer;

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_centisecs.  But if a writeback event
 * takes longer than a dirty_writeback_centisecs interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
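/*
 * Worked example (with the defaults above): dirty_expire_centisecs is
 * 30 * 100, so oldest_jif = jiffies - (3000 * HZ) / 100 = jiffies - 30 * HZ;
 * only inodes dirtied more than 30 seconds ago are written back.  Likewise
 * dirty_writeback_centisecs = 5 * 100 re-arms the timer 5 seconds out.
 */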
static void wb_kupdate(unsigned long arg)
{
	unsigned long oldest_jif;
	unsigned long start_jif;
	unsigned long next_jif;
	long nr_to_write;
	struct page_state ps;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = &oldest_jif,
		.nr_to_write	= 0,
		.nonblocking	= 1,
		.for_kupdate	= 1,
	};

	sync_supers();

	get_page_state(&ps);
	oldest_jif = jiffies - (dirty_expire_centisecs * HZ) / 100;
	start_jif = jiffies;
	next_jif = start_jif + (dirty_writeback_centisecs * HZ) / 100;
	nr_to_write = ps.nr_dirty + ps.nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
	while (nr_to_write > 0) {
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		writeback_inodes(&wbc);
		if (wbc.nr_to_write > 0) {
			if (wbc.encountered_congestion)
				blk_congestion_wait(WRITE, HZ/10);
			else
				break;	/* All the old data is written */
		}
		nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
	}
	if (time_before(next_jif, jiffies + HZ))
		next_jif = jiffies + HZ;
	if (dirty_writeback_centisecs)
		mod_timer(&wb_timer, next_jif);
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
		struct file *file, void *buffer, size_t *length)
{
	proc_dointvec(table, write, file, buffer, length);
	if (dirty_writeback_centisecs) {
		mod_timer(&wb_timer,
			jiffies + (dirty_writeback_centisecs * HZ) / 100);
	} else {
		del_timer(&wb_timer);
	}
	return 0;
}
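
/*
 * Worked example (illustrative): writing 500 to
 * /proc/sys/vm/dirty_writeback_centisecs lands here with write != 0;
 * proc_dointvec() stores the new value and the timer is re-armed for
 * jiffies + (500 * HZ) / 100, i.e. five seconds from now.  Writing 0
 * disables periodic kupdate-style writeback entirely.
 */
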
static void wb_timer_fn(unsigned long unused)
{
	if (pdflush_operation(wb_kupdate, 0) < 0)
		mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive) get_page_state
 * too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high.  Because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */
static void set_ratelimit(void)
{
	ratelimit_pages = total_pages / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
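
/*
 * Worked example (illustrative, 4KB pages): a 4-CPU machine with
 * total_pages = 262144 computes 262144 / (4 * 32) = 2048 pages, but
 * 2048 * 4KB = 8MB exceeds the 4MB cap, so ratelimit_pages is clamped
 * to 1024.  A tiny machine that computes fewer than 16 pages is raised
 * to the floor of 16.
 */
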
static int
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	set_ratelimit();
	return 0;
}

static struct notifier_block ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};

/*
 * If the machine has a large highmem:lowmem ratio then scale back the default
 * dirty memory thresholds: allowing too much dirty highmem pins an excessive
 * number of buffer_heads.
 */
void __init page_writeback_init(void)
{
	long buffer_pages = nr_free_buffer_pages();
	long correction;

	total_pages = nr_free_pagecache_pages();

	correction = (100 * 4 * buffer_pages) / total_pages;

	if (correction < 100) {
		dirty_background_ratio *= correction;
		dirty_background_ratio /= 100;
		vm_dirty_ratio *= correction;
		vm_dirty_ratio /= 100;
	}

	init_timer(&wb_timer);
	wb_timer.expires = jiffies + (dirty_writeback_centisecs * HZ) / 100;
	wb_timer.data = 0;
	wb_timer.function = wb_timer_fn;
	add_timer(&wb_timer);
	set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);
}
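
/*
 * Worked example (illustrative): if a quarter of the pagecache-usable
 * pages live in lowmem (buffer_pages = total_pages / 4), then
 * correction = (100 * 4 * buffer_pages) / total_pages = 100 and the
 * ratios are left alone.  If lowmem holds only a tenth, correction = 40,
 * so the default dirty_background_ratio of 10 becomes 4 and the
 * vm_dirty_ratio of 40 becomes 16.
 */
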
int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	if (mapping->a_ops->writepages)
		return mapping->a_ops->writepages(mapping, wbc);
	return generic_writepages(mapping, wbc);
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 *
 * @page - the page to write
 * @wait - if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	spin_lock(&mapping->page_lock);
	list_del(&page->list);
	if (test_clear_page_dirty(page)) {
		list_add(&page->list, &mapping->locked_pages);
		page_cache_get(page);
		spin_unlock(&mapping->page_lock);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		list_add(&page->list, &mapping->clean_pages);
		spin_unlock(&mapping->page_lock);
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);
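
/*
 * Typical usage (a sketch; err is a local int and error handling is
 * elided): a caller that wants a single page synchronously on disk locks
 * it first, since write_one_page() consumes the page lock:
 *
 *	lock_page(page);
 *	err = write_one_page(page, 1);
 *
 * The page is unlocked on return.  Passing wait == 0 queues the I/O
 * without waiting for completion.
 */
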
/*
 * For address_spaces which do not use buffers.  Just set the page's dirty bit
 * and move it to the dirty_pages list.  Also perform space reservation if
 * required.
 *
 * __set_page_dirty_nobuffers() may return -ENOSPC.  But if it does, the page
 * is still safe, as long as it actually manages to find some blocks at
 * writeback time.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	int ret = 0;

	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page->mapping;

		if (mapping) {
			spin_lock(&mapping->page_lock);
			if (page->mapping) {	/* Race with truncate? */
				BUG_ON(page->mapping != mapping);
				if (!mapping->backing_dev_info->memory_backed)
					inc_page_state(nr_dirty);
				list_del(&page->list);
				list_add(&page->list, &mapping->dirty_pages);
			}
			spin_unlock(&mapping->page_lock);
			if (!PageSwapCache(page))
				__mark_inode_dirty(mapping->host,
							I_DIRTY_PAGES);
		}
	}
	return ret;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);
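
/*
 * Typical usage (a sketch, not from this file): filesystems whose pages
 * carry no buffer_heads can plug this straight into their
 * address_space_operations, e.g.
 *
 *	static struct address_space_operations example_aops = {
 *		.writepage	= example_writepage,
 *		.set_page_dirty	= __set_page_dirty_nobuffers,
 *	};
 *
 * where example_aops and example_writepage are hypothetical names.
 */
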
/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 */
int test_clear_page_dirty(struct page *page)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;

		if (mapping && !mapping->backing_dev_info->memory_backed)
			dec_page_state(nr_dirty);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(test_clear_page_dirty);