/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002    akpm@zip.com.au
 *              Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>

/*
 * The maximum number of pages to writeout in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_LOCK against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code re-evaluates
 * the dirty limits each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES     1024

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

static long total_pages;        /* The total number of pages in the machine. */
static int dirty_exceeded __cacheline_aligned_in_smp;   /* Dirty mem may be over limit */

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
        return ratelimit_pages + ratelimit_pages / 2;
}
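
/*
 * For illustration: with the boot-time default of ratelimit_pages = 32 this
 * is 48 pages per pass.  Once set_ratelimit() below has clamped
 * ratelimit_pages to 4MB worth of pages (1024 pages with a 4K page size),
 * it becomes 1536 pages - the "six megabyte chunks" mentioned in the
 * comment above set_ratelimit().
 */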

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 40;

/*
 * The interval between `kupdate'-style writebacks, in centiseconds
 * (hundredths of a second)
 */
int dirty_writeback_interval = 5 * HZ;

/*
 * The longest number of centiseconds for which data is allowed to remain dirty
 */
int dirty_expire_interval = 30 * HZ;

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */

static void background_writeout(unsigned long _min_pages);

struct writeback_state
{
        unsigned long nr_dirty;
        unsigned long nr_unstable;
        unsigned long nr_mapped;
        unsigned long nr_writeback;
};

static void get_writeback_state(struct writeback_state *wbs)
{
        wbs->nr_dirty = read_page_state(nr_dirty);
        wbs->nr_unstable = read_page_state(nr_unstable);
        wbs->nr_mapped = read_page_state(nr_mapped);
        wbs->nr_writeback = read_page_state(nr_writeback);
}

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around, to avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */
static void
get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
                struct address_space *mapping)
{
        int background_ratio;           /* Percentages */
        int dirty_ratio;
        int unmapped_ratio;
        long background;
        long dirty;
        unsigned long available_memory = total_pages;
        struct task_struct *tsk;

        get_writeback_state(wbs);

#ifdef CONFIG_HIGHMEM
        /*
         * If this mapping can only allocate from low memory,
         * we exclude high memory from our count.
         */
        if (mapping && !(mapping_gfp_mask(mapping) & __GFP_HIGHMEM))
                available_memory -= totalhigh_pages;
#endif

        unmapped_ratio = 100 - (wbs->nr_mapped * 100) / total_pages;

        dirty_ratio = vm_dirty_ratio;
        if (dirty_ratio > unmapped_ratio / 2)
                dirty_ratio = unmapped_ratio / 2;

        if (dirty_ratio < 5)
                dirty_ratio = 5;

        background_ratio = dirty_background_ratio;
        if (background_ratio >= dirty_ratio)
                background_ratio = dirty_ratio / 2;

        background = (background_ratio * available_memory) / 100;
        dirty = (dirty_ratio * available_memory) / 100;
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                background += background / 4;
                dirty += dirty / 4;
        }
        *pbackground = background;
        *pdirty = dirty;
}
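
/*
 * A worked example with made-up numbers: on a machine with
 * total_pages = 1,000,000 of which 500,000 are mapped,
 *
 *      unmapped_ratio   = 100 - 50      = 50
 *      dirty_ratio      = min(40, 50/2) = 25
 *      background_ratio = 10            (already below dirty_ratio)
 *      *pdirty          = 250,000 pages
 *      *pbackground     = 100,000 pages
 *
 * PF_LESS_THROTTLE and real-time tasks then have both limits raised by a
 * further 25%.
 */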

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
        struct writeback_state wbs;
        long nr_reclaimable;
        long background_thresh;
        long dirty_thresh;
        unsigned long pages_written = 0;
        unsigned long write_chunk = sync_writeback_pages();

        struct backing_dev_info *bdi = mapping->backing_dev_info;

        for (;;) {
                struct writeback_control wbc = {
                        .bdi            = bdi,
                        .sync_mode      = WB_SYNC_NONE,
                        .older_than_this = NULL,
                        .nr_to_write    = write_chunk,
                };

                get_dirty_limits(&wbs, &background_thresh,
                                        &dirty_thresh, mapping);
                nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
                if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
                        break;

                if (!dirty_exceeded)
                        dirty_exceeded = 1;

                /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
                 * Unstable writes are a feature of certain networked
                 * filesystems (e.g. NFS) in which data may have been
                 * written to the server's write cache, but has not yet
                 * been flushed to permanent storage.
                 */
                if (nr_reclaimable) {
                        writeback_inodes(&wbc);
                        get_dirty_limits(&wbs, &background_thresh,
                                        &dirty_thresh, mapping);
                        nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
                        if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
                                break;
                        pages_written += write_chunk - wbc.nr_to_write;
                        if (pages_written >= write_chunk)
                                break;          /* We've done our duty */
                }
                blk_congestion_wait(WRITE, HZ/10);
        }

        if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh && dirty_exceeded)
                dirty_exceeded = 0;

        if (writeback_in_progress(bdi))
                return;         /* pdflush is already working this queue */

        /*
         * In laptop mode, we wait until hitting the higher threshold before
         * starting background writeout, and then write out all the way down
         * to the lower threshold.  So slow writers cause minimal disk activity.
         *
         * In normal mode, we start background writeout at the lower
         * background_thresh, to keep the amount of dirty memory low.
         */
        if ((laptop_mode && pages_written) ||
             (!laptop_mode && (nr_reclaimable > background_thresh)))
                pdflush_operation(background_writeout, 0);
}

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
                                        unsigned long nr_pages_dirtied)
{
        static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
        unsigned long ratelimit;
        unsigned long *p;

        ratelimit = ratelimit_pages;
        if (dirty_exceeded)
                ratelimit = 8;

        /*
         * Check the rate limiting. Also, we do not want to throttle real-time
         * tasks in balance_dirty_pages(). Period.
         */
        preempt_disable();
        p = &__get_cpu_var(ratelimits);
        *p += nr_pages_dirtied;
        if (unlikely(*p >= ratelimit)) {
                *p = 0;
                preempt_enable();
                balance_dirty_pages(mapping);
                return;
        }
        preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
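
/*
 * Most callers use the one-page wrapper from include/linux/writeback.h
 * rather than the _nr variant directly:
 *
 *      static inline void
 *      balance_dirty_pages_ratelimited(struct address_space *mapping)
 *      {
 *              balance_dirty_pages_ratelimited_nr(mapping, 1);
 *      }
 *
 * which the generic buffered-write path calls once for each page it dirties.
 */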

void throttle_vm_writeout(void)
{
        struct writeback_state wbs;
        long background_thresh;
        long dirty_thresh;

        for ( ; ; ) {
                get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);

                /*
                 * Boost the allowable dirty threshold a bit for page
                 * allocators so they don't get DoS'ed by heavy writers
                 */
                dirty_thresh += dirty_thresh / 10;      /* wheeee... */

                if (wbs.nr_unstable + wbs.nr_writeback <= dirty_thresh)
                        break;
                blk_congestion_wait(WRITE, HZ/10);
        }
}

/*
 * writeback at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
        long min_pages = _min_pages;
        struct writeback_control wbc = {
                .bdi            = NULL,
                .sync_mode      = WB_SYNC_NONE,
                .older_than_this = NULL,
                .nr_to_write    = 0,
                .nonblocking    = 1,
        };

        for ( ; ; ) {
                struct writeback_state wbs;
                long background_thresh;
                long dirty_thresh;

                get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
                if (wbs.nr_dirty + wbs.nr_unstable < background_thresh
                                && min_pages <= 0)
                        break;
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                wbc.pages_skipped = 0;
                writeback_inodes(&wbc);
                min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
                if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
                        /* Wrote less than expected */
                        blk_congestion_wait(WRITE, HZ/10);
                        if (!wbc.encountered_congestion)
                                break;
                }
        }
}
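
/*
 * background_writeout() runs in pdflush context: balance_dirty_pages()
 * above and wakeup_pdflush() below hand it off via pdflush_operation().
 */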

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_pdflush(long nr_pages)
{
        if (nr_pages == 0) {
                struct writeback_state wbs;

                get_writeback_state(&wbs);
                nr_pages = wbs.nr_dirty + wbs.nr_unstable;
        }
        return pdflush_operation(background_writeout, nr_pages);
}

static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
        unsigned long oldest_jif;
        unsigned long start_jif;
        unsigned long next_jif;
        long nr_to_write;
        struct writeback_state wbs;
        struct writeback_control wbc = {
                .bdi            = NULL,
                .sync_mode      = WB_SYNC_NONE,
                .older_than_this = &oldest_jif,
                .nr_to_write    = 0,
                .nonblocking    = 1,
                .for_kupdate    = 1,
        };

        sync_supers();

        get_writeback_state(&wbs);
        oldest_jif = jiffies - dirty_expire_interval;
        start_jif = jiffies;
        next_jif = start_jif + dirty_writeback_interval;
        nr_to_write = wbs.nr_dirty + wbs.nr_unstable +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);
        while (nr_to_write > 0) {
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                writeback_inodes(&wbc);
                if (wbc.nr_to_write > 0) {
                        if (wbc.encountered_congestion)
                                blk_congestion_wait(WRITE, HZ/10);
                        else
                                break;  /* All the old data is written */
                }
                nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
        }
        if (time_before(next_jif, jiffies + HZ))
                next_jif = jiffies + HZ;
        if (dirty_writeback_interval)
                mod_timer(&wb_timer, next_jif);
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
        struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
        if (dirty_writeback_interval) {
                mod_timer(&wb_timer,
                        jiffies + dirty_writeback_interval);
        } else {
                del_timer(&wb_timer);
        }
        return 0;
}
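
/*
 * dirty_writeback_interval is stored in jiffies but exposed to userspace
 * in USER_HZ (centisecond) units; proc_dointvec_userhz_jiffies() converts
 * in both directions.  For example, writing 500 to
 * /proc/sys/vm/dirty_writeback_centisecs re-arms the kupdate timer to run
 * every five seconds.
 */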

static void wb_timer_fn(unsigned long unused)
{
        if (pdflush_operation(wb_kupdate, 0) < 0)
                mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
}

static void laptop_flush(unsigned long unused)
{
        sys_sync();
}

static void laptop_timer_fn(unsigned long unused)
{
        pdflush_operation(laptop_flush, 0);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
        mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
        del_timer(&laptop_mode_wb_timer);
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high, because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */
static void set_ratelimit(void)
{
        ratelimit_pages = total_pages / (num_online_cpus() * 32);
        if (ratelimit_pages < 16)
                ratelimit_pages = 16;
        if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
                ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
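
/*
 * A worked example with made-up numbers: on a 4-CPU machine with
 * total_pages = 1,000,000 and a 4K PAGE_CACHE_SIZE,
 *
 *      1,000,000 / (4 * 32) = 7812 pages  (about 30MB - over the clamp)
 *      4MB / 4K             = 1024 pages
 *
 * so ratelimit_pages is clamped to 1024 and each CPU dirties at most 1024
 * pages between calls into balance_dirty_pages().
 */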

static int
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
        set_ratelimit();
        return 0;
}

static struct notifier_block ratelimit_nb = {
        .notifier_call  = ratelimit_handler,
        .next           = NULL,
};

/*
 * If the machine has a large highmem:lowmem ratio then scale back the default
 * dirty memory thresholds: allowing too much dirty highmem pins an excessive
 * number of buffer_heads.
 */
void __init page_writeback_init(void)
{
        long buffer_pages = nr_free_buffer_pages();
        long correction;

        total_pages = nr_free_pagecache_pages();

        correction = (100 * 4 * buffer_pages) / total_pages;

        if (correction < 100) {
                dirty_background_ratio *= correction;
                dirty_background_ratio /= 100;
                vm_dirty_ratio *= correction;
                vm_dirty_ratio /= 100;

                if (dirty_background_ratio <= 0)
                        dirty_background_ratio = 1;
                if (vm_dirty_ratio <= 0)
                        vm_dirty_ratio = 1;
        }
        mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
        set_ratelimit();
        register_cpu_notifier(&ratelimit_nb);
}

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        int ret;

        if (wbc->nr_to_write <= 0)
                return 0;
        wbc->for_writepages = 1;
        if (mapping->a_ops->writepages)
                ret = mapping->a_ops->writepages(mapping, wbc);
        else
                ret = generic_writepages(mapping, wbc);
        wbc->for_writepages = 0;
        return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 *
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
        struct address_space *mapping = page->mapping;
        int ret = 0;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 1,
        };

        BUG_ON(!PageLocked(page));

        if (wait)
                wait_on_page_writeback(page);

        if (clear_page_dirty_for_io(page)) {
                page_cache_get(page);
                ret = mapping->a_ops->writepage(page, &wbc);
                if (ret == 0 && wait) {
                        wait_on_page_writeback(page);
                        if (PageError(page))
                                ret = -EIO;
                }
                page_cache_release(page);
        } else {
                unlock_page(page);
        }
        return ret;
}
EXPORT_SYMBOL(write_one_page);
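
/*
 * Usage sketch (not from this file): a simple filesystem would call
 * write_one_page() on a metadata page it already holds locked, e.g. to
 * commit a directory page synchronously:
 *
 *      lock_page(page);
 *      ...modify the page and mark it dirty...
 *      err = write_one_page(page, 1);  (the page is unlocked on return)
 *
 * With wait != 0 the caller gets -EIO back if the writeout failed.
 */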

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
        if (!TestSetPageDirty(page)) {
                struct address_space *mapping = page_mapping(page);
                struct address_space *mapping2;

                if (mapping) {
                        write_lock_irq(&mapping->tree_lock);
                        mapping2 = page_mapping(page);
                        if (mapping2) { /* Race with truncate? */
                                BUG_ON(mapping2 != mapping);
                                if (mapping_cap_account_dirty(mapping))
                                        inc_page_state(nr_dirty);
                                radix_tree_tag_set(&mapping->page_tree,
                                        page_index(page), PAGECACHE_TAG_DIRTY);
                        }
                        write_unlock_irq(&mapping->tree_lock);
                        if (mapping->host) {
                                /* !PageAnon && !swapper_space */
                                __mark_inode_dirty(mapping->host,
                                                        I_DIRTY_PAGES);
                        }
                }
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0.
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
        wbc->pages_skipped++;
        return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int fastcall set_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (likely(mapping)) {
                int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
                if (spd)
                        return (*spd)(page);
                return __set_page_dirty_buffers(page);
        }
        if (!PageDirty(page)) {
                if (!TestSetPageDirty(page))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
        int ret;

        lock_page(page);
        ret = set_page_dirty(page);
        unlock_page(page);
        return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 */
int test_clear_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        unsigned long flags;

        if (mapping) {
                write_lock_irqsave(&mapping->tree_lock, flags);
                if (TestClearPageDirty(page)) {
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                        write_unlock_irqrestore(&mapping->tree_lock, flags);
                        if (mapping_cap_account_dirty(mapping))
                                dec_page_state(nr_dirty);
                        return 1;
                }
                write_unlock_irqrestore(&mapping->tree_lock, flags);
                return 0;
        }
        return TestClearPageDirty(page);
}
EXPORT_SYMBOL(test_clear_page_dirty);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (mapping) {
                if (TestClearPageDirty(page)) {
                        if (mapping_cap_account_dirty(mapping))
                                dec_page_state(nr_dirty);
                        return 1;
                }
                return 0;
        }
        return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);
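
/*
 * Contrast with test_clear_page_dirty() above: that helper also clears the
 * radix-tree dirty tag, whereas clear_page_dirty_for_io() deliberately
 * leaves the tag set so that tagged writeback can still find the page while
 * it is being written out.
 */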

int test_clear_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                unsigned long flags;

                write_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestClearPageWriteback(page);
                if (ret)
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                write_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestClearPageWriteback(page);
        }
        return ret;
}

int test_set_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                unsigned long flags;

                write_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestSetPageWriteback(page);
                if (!ret)
                        radix_tree_tag_set(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                if (!PageDirty(page))
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                write_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestSetPageWriteback(page);
        }
        return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
        unsigned long flags;
        int ret;

        read_lock_irqsave(&mapping->tree_lock, flags);
        ret = radix_tree_tagged(&mapping->page_tree, tag);
        read_unlock_irqrestore(&mapping->tree_lock, flags);
        return ret;
}
EXPORT_SYMBOL(mapping_tagged);