/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	akpm@zip.com.au
 *		Initial version
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
/*
 * The maximum number of pages to writeout in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_LOCK against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code re-evaluates
 * its dirty limits each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024
/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

static long total_pages;	/* The total number of pages in the machine. */
static int dirty_exceeded __cacheline_aligned_in_smp;	/* Dirty mem may be over limit */
/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
	return ratelimit_pages + ratelimit_pages / 2;
}
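
/*
 * Added commentary (not in the original): with the boot-time default
 * ratelimit_pages = 32 this returns 48.  After set_ratelimit() runs it is
 * 1.5x whatever that computed, which with the 4MB cap below works out to
 * the "six megabyte chunks" mentioned near set_ratelimit().
 */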
/* The following parameters are exported via /proc/sys/vm */
/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 10;
/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 40;
/*
 * The interval between `kupdate'-style writebacks, in jiffies
 */
int dirty_writeback_interval = 5 * HZ;
/*
 * The longest number of jiffies for which data is allowed to remain dirty
 */
int dirty_expire_interval = 30 * HZ;
/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */
static void background_writeout(unsigned long _min_pages);

struct writeback_state
{
	unsigned long nr_dirty;
	unsigned long nr_unstable;
	unsigned long nr_mapped;
	unsigned long nr_writeback;
};
static void get_writeback_state(struct writeback_state *wbs)
{
	wbs->nr_dirty = global_page_state(NR_FILE_DIRTY);
	wbs->nr_unstable = read_page_state(nr_unstable);
	wbs->nr_mapped = global_page_state(NR_FILE_MAPPED) +
				global_page_state(NR_ANON_PAGES);
	wbs->nr_writeback = global_page_state(NR_WRITEBACK);
}
/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around.  To avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping, and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */
static void
get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
		struct address_space *mapping)
{
	int background_ratio;		/* Percentages */
	int dirty_ratio;
	int unmapped_ratio;
	long background;
	long dirty;
	unsigned long available_memory = total_pages;
	struct task_struct *tsk;

	get_writeback_state(wbs);
#ifdef CONFIG_HIGHMEM
	/*
	 * If this mapping can only allocate from low memory,
	 * we exclude high memory from our count.
	 */
	if (mapping && !(mapping_gfp_mask(mapping) & __GFP_HIGHMEM))
		available_memory -= totalhigh_pages;
#endif
	unmapped_ratio = 100 - (wbs->nr_mapped * 100) / total_pages;

	dirty_ratio = vm_dirty_ratio;
	if (dirty_ratio > unmapped_ratio / 2)
		dirty_ratio = unmapped_ratio / 2;

	if (dirty_ratio < 5)
		dirty_ratio = 5;
	background_ratio = dirty_background_ratio;
	if (background_ratio >= dirty_ratio)
		background_ratio = dirty_ratio / 2;

	background = (background_ratio * available_memory) / 100;
	dirty = (dirty_ratio * available_memory) / 100;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;
}
/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
	struct writeback_state wbs;
	long nr_reclaimable;
	long background_thresh;
	long dirty_thresh;
	unsigned long pages_written = 0;
	unsigned long write_chunk = sync_writeback_pages();

	struct backing_dev_info *bdi = mapping->backing_dev_info;

	for (;;) {
		struct writeback_control wbc = {
			.bdi		= bdi,
			.sync_mode	= WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write	= write_chunk,
			.range_cyclic	= 1,
		};
		get_dirty_limits(&wbs, &background_thresh,
					&dirty_thresh, mapping);
		nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
		if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
			break;

		if (!dirty_exceeded)
			dirty_exceeded = 1;
		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (i.e. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 */
		if (nr_reclaimable) {
			writeback_inodes(&wbc);
			get_dirty_limits(&wbs, &background_thresh,
					&dirty_thresh, mapping);
			nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
			if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
				break;
			pages_written += write_chunk - wbc.nr_to_write;
			if (pages_written >= write_chunk)
				break;		/* We've done our duty */
		}
		blk_congestion_wait(WRITE, HZ/10);
	}
	if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh && dirty_exceeded)
		dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;		/* pdflush is already working this queue */
	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if ((laptop_mode && pages_written) ||
	     (!laptop_mode && (nr_reclaimable > background_thresh)))
		pdflush_operation(background_writeout, 0);
}
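
/*
 * Added commentary (not in the original): write_chunk is 1.5x
 * ratelimit_pages, so a throttled task attempts to clean more pages than it
 * could have dirtied since its last check; under sustained overload each
 * dirtier therefore makes net forward progress on the dirty count.
 */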
/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
{
	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
	unsigned long ratelimit;
	unsigned long *p;

	ratelimit = ratelimit_pages;
	if (dirty_exceeded)
		ratelimit = 8;

	/*
	 * Check the rate limiting.  Also, we do not want to throttle real-time
	 * tasks in balance_dirty_pages().  Period.
	 */
	preempt_disable();
	p = &__get_cpu_var(ratelimits);
	*p += nr_pages_dirtied;
	if (unlikely(*p >= ratelimit)) {
		*p = 0;
		preempt_enable();
		balance_dirty_pages(mapping);
		return;
	}
	preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
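
/*
 * Added commentary (not in the original): most callers reach this through
 * the one-page wrapper balance_dirty_pages_ratelimited(mapping) declared
 * in <linux/writeback.h>, which passes nr_pages_dirtied == 1.
 */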
void throttle_vm_writeout(void)
{
	struct writeback_state wbs;
	long background_thresh;
	long dirty_thresh;

	for ( ; ; ) {
		get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		dirty_thresh += dirty_thresh / 10;	/* wheeee... */

		if (wbs.nr_unstable + wbs.nr_writeback <= dirty_thresh)
			break;
		blk_congestion_wait(WRITE, HZ/10);
	}
}
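
/*
 * Added commentary (not in the original): this is called from the page
 * reclaim path, so direct reclaimers back off in HZ/10 steps while the
 * amount of memory under writeback stays above the boosted threshold.
 */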
/*
 * writeback at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
	long min_pages = _min_pages;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = NULL,
		.nr_to_write	= 0,
		.nonblocking	= 1,
		.range_cyclic	= 1,
	};
	for ( ; ; ) {
		struct writeback_state wbs;
		long background_thresh;
		long dirty_thresh;

		get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
		if (wbs.nr_dirty + wbs.nr_unstable < background_thresh
				&& min_pages <= 0)
			break;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes(&wbc);
		min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
			/* Wrote less than expected */
			blk_congestion_wait(WRITE, HZ/10);
			if (!wbc.encountered_congestion)
				break;
		}
	}
}
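
/*
 * Added commentary (not in the original): the early break above fires when
 * a pass wrote less than a full chunk without meeting queue congestion; at
 * that point there is nothing more that can usefully be written.
 */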
/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_pdflush(long nr_pages)
{
	if (nr_pages == 0) {
		struct writeback_state wbs;

		get_writeback_state(&wbs);
		nr_pages = wbs.nr_dirty + wbs.nr_unstable;
	}
	return pdflush_operation(background_writeout, nr_pages);
}
static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);
/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
	unsigned long oldest_jif;
	unsigned long start_jif;
	unsigned long next_jif;
	long nr_to_write;
	struct writeback_state wbs;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = &oldest_jif,
		.nr_to_write	= 0,
		.nonblocking	= 1,
		.for_kupdate	= 1,
		.range_cyclic	= 1,
	};
	sync_supers();

	get_writeback_state(&wbs);
	oldest_jif = jiffies - dirty_expire_interval;
	start_jif = jiffies;
	next_jif = start_jif + dirty_writeback_interval;
	nr_to_write = wbs.nr_dirty + wbs.nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
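
	/*
	 * Added commentary (not in the original): the in-use inode count is
	 * presumably added as slack so the sweep can visit every expired
	 * inode even when its dirtiness is metadata rather than the page
	 * counts sampled above.
	 */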
	while (nr_to_write > 0) {
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		writeback_inodes(&wbc);
		if (wbc.nr_to_write > 0) {
			if (wbc.encountered_congestion)
				blk_congestion_wait(WRITE, HZ/10);
			else
				break;	/* All the old data is written */
		}
		nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
	}
	if (time_before(next_jif, jiffies + HZ))
		next_jif = jiffies + HZ;
	if (dirty_writeback_interval)
		mod_timer(&wb_timer, next_jif);
}
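
/*
 * Added commentary (not in the original): with the defaults declared above,
 * this runs every 5 seconds (dirty_writeback_interval = 5*HZ) and writes
 * back data that has been dirty for over 30 seconds (dirty_expire_interval
 * = 30*HZ).
 */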
/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
	if (dirty_writeback_interval) {
		mod_timer(&wb_timer,
			jiffies + dirty_writeback_interval);
	} else {
		del_timer(&wb_timer);
	}
	return 0;
}
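
/*
 * Added commentary (not in the original): proc_dointvec_userhz_jiffies
 * converts between the USER_HZ-based value visible in /proc (centiseconds
 * when USER_HZ is the usual 100) and the jiffies value stored in
 * dirty_writeback_interval.
 */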
static void wb_timer_fn(unsigned long unused)
{
	if (pdflush_operation(wb_kupdate, 0) < 0)
		mod_timer(&wb_timer, jiffies + HZ);	/* delay 1 second */
}
static void laptop_flush(unsigned long unused)
{
	sys_sync();
}

static void laptop_timer_fn(unsigned long unused)
{
	pdflush_operation(laptop_flush, 0);
}
/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
	mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
}
/*
 * We're in laptop mode and we've just synced.  The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	del_timer(&laptop_mode_wb_timer);
}
/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high.  Because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */
static void set_ratelimit(void)
{
	ratelimit_pages = total_pages / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
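
/*
 * Worked example (added commentary, not in the original): a 4-CPU machine
 * with 1GiB of 4KiB pages has total_pages = 262144, giving 262144 / 128 =
 * 2048 pages (8MiB).  That exceeds the 4MiB cap, so ratelimit_pages is
 * clamped to 1024, and sync_writeback_pages() then yields 1536 pages, the
 * six-megabyte chunk mentioned above.
 */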
static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	set_ratelimit();
	return 0;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};
/*
 * If the machine has a large highmem:lowmem ratio then scale back the default
 * dirty memory thresholds: allowing too much dirty highmem pins an excessive
 * number of buffer_heads.
 */
void __init page_writeback_init(void)
{
	long buffer_pages = nr_free_buffer_pages();
	long correction;
	total_pages = nr_free_pagecache_pages();

	correction = (100 * 4 * buffer_pages) / total_pages;

	if (correction < 100) {
		dirty_background_ratio *= correction;
		dirty_background_ratio /= 100;
		vm_dirty_ratio *= correction;
		vm_dirty_ratio /= 100;

		if (dirty_background_ratio <= 0)
			dirty_background_ratio = 1;
		if (vm_dirty_ratio <= 0)
			vm_dirty_ratio = 1;
	}
	mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
	set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);
}
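
/*
 * Worked example (added commentary, not in the original): if exactly a
 * quarter of memory is usable for buffer_heads, correction is 100 and the
 * ratios are untouched; below that they scale down linearly, e.g. 1/8
 * lowmem halves the defaults to 5% and 20%.
 */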
int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	int ret;

	if (wbc->nr_to_write <= 0)
		return 0;
	wbc->for_writepages = 1;
	if (mapping->a_ops->writepages)
		ret = mapping->a_ops->writepages(mapping, wbc);
	else
		ret = generic_writepages(mapping, wbc);
	wbc->for_writepages = 0;
	return ret;
}
/**
 * write_one_page - write out a single page and optionally wait on I/O
 *
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);
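
/*
 * Illustrative caller pattern (added commentary, not from this file):
 * synchronously flushing a single locked page might look like
 *
 *	lock_page(page);
 *	...modify and dirty the page...
 *	err = write_one_page(page, 1);
 *
 * and the page is unlocked on return whether or not the write succeeded.
 */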
/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (mapping) {
			write_lock_irq(&mapping->tree_lock);
			mapping2 = page_mapping(page);
			if (mapping2) { /* Race with truncate? */
				BUG_ON(mapping2 != mapping);
				if (mapping_cap_account_dirty(mapping))
					__inc_zone_page_state(page,
								NR_FILE_DIRTY);
				radix_tree_tag_set(&mapping->page_tree,
					page_index(page), PAGECACHE_TAG_DIRTY);
			}
			write_unlock_irq(&mapping->tree_lock);
			if (mapping->host) {
				/* !PageAnon && !swapper_space */
				__mark_inode_dirty(mapping->host,
							I_DIRTY_PAGES);
			}
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);
/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	wbc->pages_skipped++;
	return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);
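
/*
 * Illustrative bail-out (added commentary, not from this file): a
 * ->writepage implementation that cannot make progress would do
 *
 *	redirty_page_for_writepage(wbc, page);
 *	unlock_page(page);
 *	return 0;
 *
 * so that pages_skipped tells the writeback loops above that this page
 * made no progress.
 */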
/*
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int fastcall set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
		if (spd)
			return (*spd)(page);
		return __set_page_dirty_buffers(page);
	}
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(set_page_dirty);
/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);
/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 */
int test_clear_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	if (mapping) {
		write_lock_irqsave(&mapping->tree_lock, flags);
		if (TestClearPageDirty(page)) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
			if (mapping_cap_account_dirty(mapping))
				__dec_zone_page_state(page, NR_FILE_DIRTY);
			write_unlock_irqrestore(&mapping->tree_lock, flags);
			return 1;
		}
		write_unlock_irqrestore(&mapping->tree_lock, flags);
		return 0;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(test_clear_page_dirty);
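
/*
 * Added commentary (not in the original): unlike clear_page_dirty_for_io()
 * below, this variant also clears the radix-tree dirty tag, so sync
 * walkers will no longer find the page.
 */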
/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (mapping) {
		if (TestClearPageDirty(page)) {
			if (mapping_cap_account_dirty(mapping))
				dec_zone_page_state(page, NR_FILE_DIRTY);
			return 1;
		}
		return 0;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);
int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		unsigned long flags;

		write_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret)
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
		write_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	return ret;
}
int test_set_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		unsigned long flags;

		write_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret)
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		write_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);
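
/*
 * Added commentary (not in the original): this is where the dirty-flag /
 * dirty-tag incoherency left by clear_page_dirty_for_io() is repaired:
 * once the page is under writeback and no longer PageDirty, the stale
 * PAGECACHE_TAG_DIRTY is cleared under tree_lock.
 */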
/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&mapping->tree_lock, flags);
	ret = radix_tree_tagged(&mapping->page_tree, tag);
	read_unlock_irqrestore(&mapping->tree_lock, flags);
	return ret;
}
EXPORT_SYMBOL(mapping_tagged);
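
/*
 * Illustrative use (added commentary, not from this file): sync paths can
 * cheaply test whether anything is outstanding, e.g.
 *
 *	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
 *		(start writeback)
 */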