/*
 *  fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 * Modified by
 *      Allison Henderson <achender@linux.vnet.ibm.com>
 *      Hugh Dickins <hughd@google.com>
 *      Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */
#include <linux/rbtree.h>
#include "ext4.h"
#include "extents_status.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

/*
 * According to previous discussions at the Ext4 Developer Workshop, we
 * will introduce a new structure called an io tree to track all extent
 * status in order to solve some problems that we have met
 * (e.g. reservation space warning), and to provide extent-level locking.
 * The delay extent tree is the first step towards this goal.  It was
 * originally built by Yongqiang Yang.  At that time its only goal was
 * to track delayed extents in memory to simplify the implementation of
 * fiemap and bigalloc, and to introduce lseek SEEK_DATA/SEEK_HOLE
 * support.  That is why it is still called the delay extent tree in the
 * first commit.  But for a better understanding of what it does, it has
 * since been renamed to the extent status tree.
 *
 * Step1:
 * Currently the first step has been done.  All delayed extents are
 * tracked in the tree.  The tree maintains a delayed extent from the
 * time a delayed allocation is issued until the delayed extent is
 * written out or invalidated.  Therefore the implementations of fiemap
 * and bigalloc are simplified, and SEEK_DATA/SEEK_HOLE are introduced.
 *
 * The following comment describes the implementation of the extent
 * status tree and future work.
 *
 * Step2:
 * In this step all extent status is tracked by the extent status tree.
 * Thus, we can first try to look up a block mapping in this tree before
 * searching the extent tree.  Hence, the single extent cache can be
 * removed because the extent status tree can do a better job.  Extents
 * in the status tree are loaded on demand.  Therefore, the extent
 * status tree may not contain all of the extents in a file.  Meanwhile
 * we define a shrinker to reclaim memory from the extent status tree,
 * because a fragmented extent tree would make the status tree cost too
 * much memory.  Written/unwritten/hole extents in the tree will be
 * reclaimed by this shrinker when we are under high memory pressure.
 * Delayed extents will not be reclaimed because fiemap, bigalloc, and
 * seek_data/hole need them.
 */

/*
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * The extent status tree tracks all extent status.
 *
 * 1. Why do we need to implement an extent status tree?
 *
 * Without an extent status tree, ext4 identifies a delayed extent by
 * looking up the page cache, which has several deficiencies: the code
 * is complicated, buggy, and inefficient.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know
 * whether a block or a range of blocks belongs to a delayed extent.
 *
 * Let us have a look at how they do this without an extent status tree.
 *
 *   -- FIEMAP
 *      FIEMAP looks up the page cache to distinguish delayed
 *      allocations from holes.
 *
 *   -- SEEK_HOLE/DATA
 *      SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 *   -- bigalloc
 *      bigalloc looks up the page cache to figure out whether a block
 *      is already under delayed allocation, in order to determine
 *      whether a quota reservation is needed for the cluster.
 *
 *   -- writeout
 *      Writeout looks up the whole page cache to see if a buffer is
 *      mapped.  If there are not very many delayed buffers, this is
 *      time consuming.
 *
 * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc and writeout can figure out whether a block or a range of
 * blocks is under delayed allocation (i.e. belongs to a delayed extent)
 * by searching the extent status tree.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 *   -- extent
 *      An extent is a range of blocks which are contiguous both
 *      logically and physically.  Unlike an extent in the extent tree,
 *      this extent is an in-memory struct; there is no corresponding
 *      on-disk data.  There is no limit on the length of an extent, so
 *      an extent can contain as many blocks as are contiguous logically
 *      and physically.
 *
 *   -- extent status tree
 *      Every inode has an extent status tree and all allocated blocks
 *      are added to the tree with their status.  The extents in the
 *      tree are ordered by logical block number.
 *
 *   -- operations on an extent status tree
 *      There are three important operations on an extent status tree:
 *      finding the next extent, adding an extent (a range of blocks),
 *      and removing an extent.
 *
 *   -- race on an extent status tree
 *      The extent status tree is protected by inode->i_es_lock.
 *
 *   -- memory consumption
 *      A fragmented extent tree will make the extent status tree cost
 *      too much memory.  Hence, we will reclaim written/unwritten/hole
 *      extents from the tree under heavy memory pressure.
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 *   -- overhead
 *      1. There is a cached extent for write access, so if writes are
 *      not very random, add-space operations run in O(1) time.
 *
 *   -- gain
 *      2. Code is much simpler, more readable, more maintainable and
 *      more efficient.
 *
 *
 * ==========================================================================
 * 4. TODO list
 *
 *   -- Refactor delayed space reservation
 *
 *   -- Extent-level locking
 */

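/*
 * Illustrative usage sketch (not part of the build): how a caller might
 * drive the three core operations on one inode's tree.  The flag name
 * EXTENT_STATUS_DELAYED and the ~0 placeholder pblk are assumed to follow
 * the conventions in extents_status.h and the delayed-allocation call
 * sites; the block numbers are made up.
 *
 *      struct extent_status es;
 *
 *      ext4_es_insert_extent(inode, 100, 16, ~0, EXTENT_STATUS_DELAYED);
 *      if (ext4_es_lookup_extent(inode, 105, &es))
 *              es_debug("found [%u/%u)\n", es.es_lblk, es.es_len);
 *      ext4_es_remove_extent(inode, 100, 16);
 */
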
static struct kmem_cache *ext4_es_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
                              ext4_lblk_t end);
static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
                                       int nr_to_scan);
static int ext4_es_reclaim_extents_count(struct super_block *sb);

int __init ext4_init_es(void)
{
        ext4_es_cachep = KMEM_CACHE(extent_status, SLAB_RECLAIM_ACCOUNT);
        if (ext4_es_cachep == NULL)
                return -ENOMEM;
        return 0;
}

void ext4_exit_es(void)
{
        if (ext4_es_cachep)
                kmem_cache_destroy(ext4_es_cachep);
}

void ext4_es_init_tree(struct ext4_es_tree *tree)
{
        tree->root = RB_ROOT;
        tree->cache_es = NULL;
}

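/*
 * Dump the whole tree to the kernel log, one extent per line, as
 * [lblk/len) pblk status.  Compiled in only when ES_DEBUG__ is defined;
 * otherwise the call expands to nothing.
 */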
#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
        struct ext4_es_tree *tree;
        struct rb_node *node;

        printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
        tree = &EXT4_I(inode)->i_es_tree;
        node = rb_first(&tree->root);
        while (node) {
                struct extent_status *es;
                es = rb_entry(node, struct extent_status, rb_node);
                printk(KERN_DEBUG " [%u/%u) %llu %llx",
                       es->es_lblk, es->es_len,
                       ext4_es_pblock(es), ext4_es_status(es));
                node = rb_next(node);
        }
        printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif

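/*
 * Return the last logical block covered by @es.  The BUG_ON catches a
 * length that would wrap around the ext4_lblk_t space.
 */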
static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
        BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
        return es->es_lblk + es->es_len - 1;
}

/*
 * Search through the tree for a delayed extent with a given offset.  If
 * it can't be found, try to find the next extent.
 */
static struct extent_status *__es_tree_search(struct rb_root *root,
                                              ext4_lblk_t lblk)
{
        struct rb_node *node = root->rb_node;
        struct extent_status *es = NULL;

        while (node) {
                es = rb_entry(node, struct extent_status, rb_node);
                if (lblk < es->es_lblk)
                        node = node->rb_left;
                else if (lblk > ext4_es_end(es))
                        node = node->rb_right;
                else
                        return es;
        }

        if (es && lblk < es->es_lblk)
                return es;

        if (es && lblk > ext4_es_end(es)) {
                node = rb_next(&es->rb_node);
                return node ? rb_entry(node, struct extent_status, rb_node) :
                              NULL;
        }

        return NULL;
}

/*
 * ext4_es_find_delayed_extent: find the 1st delayed extent covering @lblk
 * if it exists, otherwise, the next delayed extent after @lblk.
 *
 * @inode: the inode which owns delayed extents
 * @lblk: the offset where we start to search
 * @es: delayed extent that we found
 */
void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
                                 struct extent_status *es)
{
        struct ext4_es_tree *tree = NULL;
        struct extent_status *es1 = NULL;
        struct rb_node *node;

        BUG_ON(es == NULL);
        trace_ext4_es_find_delayed_extent_enter(inode, lblk);

        read_lock(&EXT4_I(inode)->i_es_lock);
        tree = &EXT4_I(inode)->i_es_tree;

        /* find extent in cache first */
        es->es_lblk = es->es_len = es->es_pblk = 0;
        if (tree->cache_es) {
                es1 = tree->cache_es;
                if (in_range(lblk, es1->es_lblk, es1->es_len)) {
                        es_debug("%u cached by [%u/%u) %llu %llx\n",
                                 lblk, es1->es_lblk, es1->es_len,
                                 ext4_es_pblock(es1), ext4_es_status(es1));
                        goto out;
                }
        }

        es1 = __es_tree_search(&tree->root, lblk);

out:
        if (es1 && !ext4_es_is_delayed(es1)) {
                while ((node = rb_next(&es1->rb_node)) != NULL) {
                        es1 = rb_entry(node, struct extent_status, rb_node);
                        if (ext4_es_is_delayed(es1))
                                break;
                }
        }

        if (es1 && ext4_es_is_delayed(es1)) {
                tree->cache_es = es1;
                es->es_lblk = es1->es_lblk;
                es->es_len = es1->es_len;
                es->es_pblk = es1->es_pblk;
        }

        read_unlock(&EXT4_I(inode)->i_es_lock);

        ext4_es_lru_add(inode);
        trace_ext4_es_find_delayed_extent_exit(inode, es);
}

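/*
 * Allocate a new extent_status structure from the slab cache.  GFP_ATOMIC
 * is used because callers hold i_es_lock.  Non-delayed extents are counted
 * in i_es_lru_nr, since only they can be reclaimed by the shrinker.
 */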
static struct extent_status *
ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
                     ext4_fsblk_t pblk)
{
        struct extent_status *es;
        es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
        if (es == NULL)
                return NULL;
        es->es_lblk = lblk;
        es->es_len = len;
        es->es_pblk = pblk;

        /*
         * We don't count delayed extents because we never try to
         * reclaim them.
         */
        if (!ext4_es_is_delayed(es))
                EXT4_I(inode)->i_es_lru_nr++;

        return es;
}

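/* Free an extent_status structure, keeping i_es_lru_nr in sync. */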
static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
        /* Decrease the LRU counter when this es is not delayed */
        if (!ext4_es_is_delayed(es)) {
                BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0);
                EXT4_I(inode)->i_es_lru_nr--;
        }

        kmem_cache_free(ext4_es_cachep, es);
}

/*
 * Check whether or not two extents can be merged.
 * Conditions:
 *  - logical block numbers are contiguous
 *  - physical block numbers are contiguous
 *  - status is equal
 */
static int ext4_es_can_be_merged(struct extent_status *es1,
                                 struct extent_status *es2)
{
        if (es1->es_lblk + es1->es_len != es2->es_lblk)
                return 0;

        if (ext4_es_status(es1) != ext4_es_status(es2))
                return 0;

        if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
            (ext4_es_pblock(es1) + es1->es_len != ext4_es_pblock(es2)))
                return 0;

        return 1;
}

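/*
 * If @es and its predecessor in the tree are mergeable, fold @es into
 * the predecessor and free it.  Return the surviving extent.
 */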
static struct extent_status *
ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
{
        struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
        struct extent_status *es1;
        struct rb_node *node;

        node = rb_prev(&es->rb_node);
        if (!node)
                return es;

        es1 = rb_entry(node, struct extent_status, rb_node);
        if (ext4_es_can_be_merged(es1, es)) {
                es1->es_len += es->es_len;
                rb_erase(&es->rb_node, &tree->root);
                ext4_es_free_extent(inode, es);
                es = es1;
        }

        return es;
}

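/*
 * Mirror of ext4_es_try_to_merge_left(): fold the successor into @es
 * when the two are mergeable.
 */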
static struct extent_status *
ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
{
        struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
        struct extent_status *es1;
        struct rb_node *node;

        node = rb_next(&es->rb_node);
        if (!node)
                return es;

        es1 = rb_entry(node, struct extent_status, rb_node);
        if (ext4_es_can_be_merged(es, es1)) {
                es->es_len += es1->es_len;
                rb_erase(node, &tree->root);
                ext4_es_free_extent(inode, es1);
        }

        return es;
}

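/*
 * Insert @newes into the tree, merging it into an adjacent extent where
 * possible instead of allocating a new node.  The new extent must not
 * overlap an existing one (callers remove the range first); an overlap
 * trips the BUG_ON below.  Called with i_es_lock held for writing.
 */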
static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
{
        struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
        struct rb_node **p = &tree->root.rb_node;
        struct rb_node *parent = NULL;
        struct extent_status *es;

        while (*p) {
                parent = *p;
                es = rb_entry(parent, struct extent_status, rb_node);

                if (newes->es_lblk < es->es_lblk) {
                        if (ext4_es_can_be_merged(newes, es)) {
                                /*
                                 * Here we can modify es_lblk directly
                                 * because it doesn't overlap.
                                 */
                                es->es_lblk = newes->es_lblk;
                                es->es_len += newes->es_len;
                                if (ext4_es_is_written(es) ||
                                    ext4_es_is_unwritten(es))
                                        ext4_es_store_pblock(es,
                                                             newes->es_pblk);
                                es = ext4_es_try_to_merge_left(inode, es);
                                goto out;
                        }
                        p = &(*p)->rb_left;
                } else if (newes->es_lblk > ext4_es_end(es)) {
                        if (ext4_es_can_be_merged(es, newes)) {
                                es->es_len += newes->es_len;
                                es = ext4_es_try_to_merge_right(inode, es);
                                goto out;
                        }
                        p = &(*p)->rb_right;
                } else {
                        BUG_ON(1);
                        return -EINVAL;
                }
        }

        es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
                                  newes->es_pblk);
        if (!es)
                return -ENOMEM;
        rb_link_node(&es->rb_node, parent, p);
        rb_insert_color(&es->rb_node, &tree->root);

out:
        tree->cache_es = es;
        return 0;
}

/*
 * ext4_es_insert_extent() adds a space to an extent status tree.
 *
 * ext4_es_insert_extent is called by ext4_da_write_begin and
 * ext4_es_remove_extent.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
                          ext4_lblk_t len, ext4_fsblk_t pblk,
                          unsigned long long status)
{
        struct extent_status newes;
        ext4_lblk_t end = lblk + len - 1;
        int err = 0;

        es_debug("add [%u/%u) %llu %llx to extent status tree of inode %lu\n",
                 lblk, len, pblk, status, inode->i_ino);

        if (!len)
                return 0;

        BUG_ON(end < lblk);

        newes.es_lblk = lblk;
        newes.es_len = len;
        ext4_es_store_pblock(&newes, pblk);
        ext4_es_store_status(&newes, status);
        trace_ext4_es_insert_extent(inode, &newes);

        write_lock(&EXT4_I(inode)->i_es_lock);
        err = __es_remove_extent(inode, lblk, end);
        if (err != 0)
                goto error;
        err = __es_insert_extent(inode, &newes);

error:
        write_unlock(&EXT4_I(inode)->i_es_lock);

        ext4_es_lru_add(inode);
        ext4_es_print_tree(inode);

        return err;
}

/*
 * ext4_es_lookup_extent() looks up an extent in the extent status tree.
 *
 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Return: 1 if found, 0 if not.
 */
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
                          struct extent_status *es)
{
        struct ext4_es_tree *tree;
        struct extent_status *es1 = NULL;
        struct rb_node *node;
        int found = 0;

        trace_ext4_es_lookup_extent_enter(inode, lblk);
        es_debug("lookup extent in block %u\n", lblk);

        tree = &EXT4_I(inode)->i_es_tree;
        read_lock(&EXT4_I(inode)->i_es_lock);

        /* find extent in cache first */
        es->es_lblk = es->es_len = es->es_pblk = 0;
        if (tree->cache_es) {
                es1 = tree->cache_es;
                if (in_range(lblk, es1->es_lblk, es1->es_len)) {
                        es_debug("%u cached by [%u/%u)\n",
                                 lblk, es1->es_lblk, es1->es_len);
                        found = 1;
                        goto out;
                }
        }

        node = tree->root.rb_node;
        while (node) {
                es1 = rb_entry(node, struct extent_status, rb_node);
                if (lblk < es1->es_lblk)
                        node = node->rb_left;
                else if (lblk > ext4_es_end(es1))
                        node = node->rb_right;
                else {
                        found = 1;
                        break;
                }
        }

out:
        if (found) {
                BUG_ON(!es1);
                es->es_lblk = es1->es_lblk;
                es->es_len = es1->es_len;
                es->es_pblk = es1->es_pblk;
        }

        read_unlock(&EXT4_I(inode)->i_es_lock);

        ext4_es_lru_add(inode);
        trace_ext4_es_lookup_extent_exit(inode, es, found);
        return found;
}

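/*
 * Remove the range [lblk, end] from the tree.  An extent that straddles
 * a boundary of the range is trimmed, and one that covers the whole
 * range is split in two, which may require inserting a new node and can
 * therefore fail with -ENOMEM.  Called with i_es_lock held for writing.
 */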
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
                              ext4_lblk_t end)
{
        struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
        struct rb_node *node;
        struct extent_status *es;
        struct extent_status orig_es;
        ext4_lblk_t len1, len2;
        ext4_fsblk_t block;
        int err = 0;

        es = __es_tree_search(&tree->root, lblk);
        if (!es)
                goto out;
        if (es->es_lblk > end)
                goto out;

        /* Simply invalidate cache_es. */
        tree->cache_es = NULL;

        orig_es.es_lblk = es->es_lblk;
        orig_es.es_len = es->es_len;
        orig_es.es_pblk = es->es_pblk;

        len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
        len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
        if (len1 > 0)
                es->es_len = len1;
        if (len2 > 0) {
                if (len1 > 0) {
                        struct extent_status newes;

                        newes.es_lblk = end + 1;
                        newes.es_len = len2;
                        if (ext4_es_is_written(&orig_es) ||
                            ext4_es_is_unwritten(&orig_es)) {
                                block = ext4_es_pblock(&orig_es) +
                                        orig_es.es_len - len2;
                                ext4_es_store_pblock(&newes, block);
                        }
                        ext4_es_store_status(&newes, ext4_es_status(&orig_es));
                        err = __es_insert_extent(inode, &newes);
                        if (err) {
                                es->es_lblk = orig_es.es_lblk;
                                es->es_len = orig_es.es_len;
                                goto out;
                        }
                } else {
                        es->es_lblk = end + 1;
                        es->es_len = len2;
                        if (ext4_es_is_written(es) ||
                            ext4_es_is_unwritten(es)) {
                                block = orig_es.es_pblk + orig_es.es_len - len2;
                                ext4_es_store_pblock(es, block);
                        }
                }
                goto out;
        }

        if (len1 > 0) {
                node = rb_next(&es->rb_node);
                if (node)
                        es = rb_entry(node, struct extent_status, rb_node);
                else
                        es = NULL;
        }

        while (es && ext4_es_end(es) <= end) {
                node = rb_next(&es->rb_node);
                rb_erase(&es->rb_node, &tree->root);
                ext4_es_free_extent(inode, es);
                if (!node) {
                        es = NULL;
                        break;
                }
                es = rb_entry(node, struct extent_status, rb_node);
        }

        if (es && es->es_lblk < end + 1) {
                ext4_lblk_t orig_len = es->es_len;

                len1 = ext4_es_end(es) - end;
                es->es_lblk = end + 1;
                es->es_len = len1;
                if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
                        block = es->es_pblk + orig_len - len1;
                        ext4_es_store_pblock(es, block);
                }
        }

out:
        return err;
}

/*
 * ext4_es_remove_extent() removes a space from an extent status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
                          ext4_lblk_t len)
{
        ext4_lblk_t end;
        int err = 0;

        trace_ext4_es_remove_extent(inode, lblk, len);
        es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
                 lblk, len, inode->i_ino);

        if (!len)
                return err;

        end = lblk + len - 1;
        BUG_ON(end < lblk);

        write_lock(&EXT4_I(inode)->i_es_lock);
        err = __es_remove_extent(inode, lblk, end);
        write_unlock(&EXT4_I(inode)->i_es_lock);
        ext4_es_print_tree(inode);
        return err;
}

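/*
 * Shrinker callback.  With sc->nr_to_scan == 0 this only reports how
 * many extents are reclaimable.  Otherwise it walks the per-sb inode
 * LRU list and reclaims non-delayed extents until nr_to_scan extents
 * have been freed, rotating scanned inodes to the tail of the list.
 */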
static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
        struct ext4_sb_info *sbi = container_of(shrink,
                                        struct ext4_sb_info, s_es_shrinker);
        struct ext4_inode_info *ei;
        struct list_head *cur, *tmp, scanned;
        int nr_to_scan = sc->nr_to_scan;
        int ret, nr_shrunk = 0;

        trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan);

        if (!nr_to_scan)
                return ext4_es_reclaim_extents_count(sbi->s_sb);

        INIT_LIST_HEAD(&scanned);

        spin_lock(&sbi->s_es_lru_lock);
        list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
                list_move_tail(cur, &scanned);

                ei = list_entry(cur, struct ext4_inode_info, i_es_lru);

                read_lock(&ei->i_es_lock);
                if (ei->i_es_lru_nr == 0) {
                        read_unlock(&ei->i_es_lock);
                        continue;
                }
                read_unlock(&ei->i_es_lock);

                write_lock(&ei->i_es_lock);
                ret = __es_try_to_reclaim_extents(ei, nr_to_scan);
                write_unlock(&ei->i_es_lock);

                nr_shrunk += ret;
                nr_to_scan -= ret;
                if (nr_to_scan == 0)
                        break;
        }
        list_splice_tail(&scanned, &sbi->s_es_lru);
        spin_unlock(&sbi->s_es_lru_lock);
        trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk);

        return ext4_es_reclaim_extents_count(sbi->s_sb);
}

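/* Set up the per-superblock LRU list and register the shrinker above. */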
void ext4_es_register_shrinker(struct super_block *sb)
{
        struct ext4_sb_info *sbi;

        sbi = EXT4_SB(sb);
        INIT_LIST_HEAD(&sbi->s_es_lru);
        spin_lock_init(&sbi->s_es_lru_lock);
        sbi->s_es_shrinker.shrink = ext4_es_shrink;
        sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
        register_shrinker(&sbi->s_es_shrinker);
}

void ext4_es_unregister_shrinker(struct super_block *sb)
{
        unregister_shrinker(&EXT4_SB(sb)->s_es_shrinker);
}

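/*
 * Mark @inode as recently used: add it to the per-sb LRU list, or move
 * it to the tail if it is already there.
 */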
void ext4_es_lru_add(struct inode *inode)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

        spin_lock(&sbi->s_es_lru_lock);
        if (list_empty(&ei->i_es_lru))
                list_add_tail(&ei->i_es_lru, &sbi->s_es_lru);
        else
                list_move_tail(&ei->i_es_lru, &sbi->s_es_lru);
        spin_unlock(&sbi->s_es_lru_lock);
}

void ext4_es_lru_del(struct inode *inode)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

        spin_lock(&sbi->s_es_lru_lock);
        if (!list_empty(&ei->i_es_lru))
                list_del_init(&ei->i_es_lru);
        spin_unlock(&sbi->s_es_lru_lock);
}

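/* Count reclaimable (non-delayed) extents across all inodes on the LRU. */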
static int ext4_es_reclaim_extents_count(struct super_block *sb)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_inode_info *ei;
        struct list_head *cur;
        int nr_cached = 0;

        spin_lock(&sbi->s_es_lru_lock);
        list_for_each(cur, &sbi->s_es_lru) {
                ei = list_entry(cur, struct ext4_inode_info, i_es_lru);
                read_lock(&ei->i_es_lock);
                nr_cached += ei->i_es_lru_nr;
                read_unlock(&ei->i_es_lock);
        }
        spin_unlock(&sbi->s_es_lru_lock);
        trace_ext4_es_reclaim_extents_count(sb, nr_cached);
        return nr_cached;
}

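/*
 * Reclaim up to @nr_to_scan non-delayed extents from one inode's tree.
 * Called with i_es_lock held for writing.  Returns the number of
 * extents freed.
 */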
static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
                                       int nr_to_scan)
{
        struct inode *inode = &ei->vfs_inode;
        struct ext4_es_tree *tree = &ei->i_es_tree;
        struct rb_node *node;
        struct extent_status *es;
        int nr_shrunk = 0;

        if (ei->i_es_lru_nr == 0)
                return 0;

        node = rb_first(&tree->root);
        while (node != NULL) {
                es = rb_entry(node, struct extent_status, rb_node);
                node = rb_next(&es->rb_node);
                /*
                 * We can't reclaim delayed extents from the status tree
                 * because fiemap, bigalloc, and seek_data/hole need to
                 * use them.
                 */
                if (!ext4_es_is_delayed(es)) {
                        rb_erase(&es->rb_node, &tree->root);
                        ext4_es_free_extent(inode, es);
                        nr_shrunk++;
                        if (--nr_to_scan == 0)
                                break;
                }
        }
        tree->cache_es = NULL;
        return nr_shrunk;
}