add patch fix-overflow-when-updating-superblock-backups
[ext4-patch-queue.git] / change-lru-to-round-rubin-in-extent-status-tree
blob e305919cf9143fbbf1c09b6b36789bead54b42cc
1 ext4: change lru to round-robin in extent status tree shrinker
3 From: Zheng Liu <wenqing.lz@taobao.com>
5 In this commit we discard the lru algorithm because it can take a
6 long time to maintain a lru list in the extent status tree shrinker,
7 and the shrinker can take a long time to scan this lru list in order
8 to reclaim some objects.
10 To reduce the latency, this commit does two things.  The first one
11 is to replace lru with round-robin.  After that we never need to keep
12 a lru list.  That means that the list needn't be sorted if the
13 shrinker cannot reclaim any objects in the first round.  The second
14 one is to shrink the length of the list.  After using round-robin
15 algorithm, the shrinker takes the first inode in the list and handles
16 it.  If this inode is skipped, it will be moved into the tail of the
17 list.  Otherwise it will be added back when it is touched again.
19 [ Changed the locking in __es_shrink to avoid the inode potentially
20   disappearing out from under us; this was suggested by Jan -- TYT ] 
22 Cc: Andreas Dilger <adilger.kernel@dilger.ca>
23 Cc: Jan Kara <jack@suse.cz>
24 Signed-off-by: Zheng Liu <wenqing.lz@taobao.com>
25 Signed-off-by: Theodore Ts'o <tytso@mit.edu>
26 ---
27  fs/ext4/ext4.h              |  10 +--
28  fs/ext4/extents.c           |   4 +-
29  fs/ext4/extents_status.c    | 217 +++++++++++++++++++++++++---------------------------------
30  fs/ext4/extents_status.h    |   7 +-
31  fs/ext4/inode.c             |   4 +-
32  fs/ext4/ioctl.c             |   4 +-
33  fs/ext4/super.c             |   7 +-
34  include/trace/events/ext4.h |  11 ++-
35  8 files changed, 113 insertions(+), 151 deletions(-)
37 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
38 index 612b079..4ce413a 100644
39 --- a/fs/ext4/ext4.h
40 +++ b/fs/ext4/ext4.h
41 @@ -890,10 +890,9 @@ struct ext4_inode_info {
42         /* extents status tree */
43         struct ext4_es_tree i_es_tree;
44         rwlock_t i_es_lock;
45 -       struct list_head i_es_lru;
46 +       struct list_head i_es_list;
47         unsigned int i_es_all_nr;       /* protected by i_es_lock */
48 -       unsigned int i_es_lru_nr;       /* protected by i_es_lock */
49 -       unsigned long i_touch_when;     /* jiffies of last accessing */
50 +       unsigned int i_es_shk_nr;       /* protected by i_es_lock */
52         /* ialloc */
53         ext4_group_t    i_last_alloc_group;
54 @@ -1339,10 +1338,11 @@ struct ext4_sb_info {
56         /* Reclaim extents from extent status tree */
57         struct shrinker s_es_shrinker;
58 -       struct list_head s_es_lru;
59 +       struct list_head s_es_list;
60 +       long s_es_nr_inode;
61         struct ext4_es_stats s_es_stats;
62         struct mb_cache *s_mb_cache;
63 -       spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
64 +       spinlock_t s_es_lock ____cacheline_aligned_in_smp;
66         /* Ratelimit ext4 messages. */
67         struct ratelimit_state s_err_ratelimit_state;
68 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
69 index cd504d9..8d20b12 100644
70 --- a/fs/ext4/extents.c
71 +++ b/fs/ext4/extents.c
72 @@ -4616,7 +4616,7 @@ out2:
74         trace_ext4_ext_map_blocks_exit(inode, flags, map,
75                                        err ? err : allocated);
76 -       ext4_es_lru_add(inode);
77 +       ext4_es_list_add(inode);
78         return err ? err : allocated;
79  }
81 @@ -5177,7 +5177,7 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
82                 error = ext4_fill_fiemap_extents(inode, start_blk,
83                                                  len_blks, fieinfo);
84         }
85 -       ext4_es_lru_add(inode);
86 +       ext4_es_list_add(inode);
87         return error;
88  }
90 diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
91 index 94e7855..e57ac7c 100644
92 --- a/fs/ext4/extents_status.c
93 +++ b/fs/ext4/extents_status.c
94 @@ -149,8 +149,8 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
95                               ext4_lblk_t end);
96  static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
97                                        int nr_to_scan);
98 -static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
99 -                           struct ext4_inode_info *locked_ei);
100 +static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
101 +                      struct ext4_inode_info *locked_ei);
103  int __init ext4_init_es(void)
105 @@ -314,9 +314,9 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
106          * We don't count delayed extent because we never try to reclaim them
107          */
108         if (!ext4_es_is_delayed(es)) {
109 -               EXT4_I(inode)->i_es_lru_nr++;
110 +               EXT4_I(inode)->i_es_shk_nr++;
111                 percpu_counter_inc(&EXT4_SB(inode->i_sb)->
112 -                                       s_es_stats.es_stats_lru_cnt);
113 +                                       s_es_stats.es_stats_shk_cnt);
114         }
116         EXT4_I(inode)->i_es_all_nr++;
117 @@ -330,12 +330,12 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
118         EXT4_I(inode)->i_es_all_nr--;
119         percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
121 -       /* Decrease the lru counter when this es is not delayed */
122 +       /* Decrease the shrink counter when this es is not delayed */
123         if (!ext4_es_is_delayed(es)) {
124 -               BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0);
125 -               EXT4_I(inode)->i_es_lru_nr--;
126 +               BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
127 +               EXT4_I(inode)->i_es_shk_nr--;
128                 percpu_counter_dec(&EXT4_SB(inode->i_sb)->
129 -                                       s_es_stats.es_stats_lru_cnt);
130 +                                       s_es_stats.es_stats_shk_cnt);
131         }
133         kmem_cache_free(ext4_es_cachep, es);
134 @@ -683,8 +683,8 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
135                 goto error;
136  retry:
137         err = __es_insert_extent(inode, &newes);
138 -       if (err == -ENOMEM && __ext4_es_shrink(EXT4_SB(inode->i_sb), 1,
139 -                                              EXT4_I(inode)))
140 +       if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
141 +                                         1, EXT4_I(inode)))
142                 goto retry;
143         if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
144                 err = 0;
145 @@ -841,8 +841,8 @@ retry:
146                                 es->es_lblk = orig_es.es_lblk;
147                                 es->es_len = orig_es.es_len;
148                                 if ((err == -ENOMEM) &&
149 -                                   __ext4_es_shrink(EXT4_SB(inode->i_sb), 1,
150 -                                                    EXT4_I(inode)))
151 +                                   __es_shrink(EXT4_SB(inode->i_sb),
152 +                                                       1, EXT4_I(inode)))
153                                         goto retry;
154                                 goto out;
155                         }
156 @@ -921,114 +921,112 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
157         return err;
160 -static int ext4_inode_touch_time_cmp(void *priv, struct list_head *a,
161 -                                    struct list_head *b)
162 +static inline void __ext4_es_list_add(struct ext4_sb_info *sbi,
163 +                                     struct ext4_inode_info *ei)
165 -       struct ext4_inode_info *eia, *eib;
166 -       eia = list_entry(a, struct ext4_inode_info, i_es_lru);
167 -       eib = list_entry(b, struct ext4_inode_info, i_es_lru);
168 +       if (list_empty(&ei->i_es_list)) {
169 +               list_add_tail(&ei->i_es_list, &sbi->s_es_list);
170 +               sbi->s_es_nr_inode++;
171 +       }
174 -       if (ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) &&
175 -           !ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED))
176 -               return 1;
177 -       if (!ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) &&
178 -           ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED))
179 -               return -1;
180 -       if (eia->i_touch_when == eib->i_touch_when)
181 -               return 0;
182 -       if (time_after(eia->i_touch_when, eib->i_touch_when))
183 -               return 1;
184 -       else
185 -               return -1;
186 +void ext4_es_list_add(struct inode *inode)
188 +       struct ext4_inode_info *ei = EXT4_I(inode);
189 +       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
191 +       if (!list_empty(&ei->i_es_list))
192 +               return;
194 +       spin_lock(&sbi->s_es_lock);
195 +       __ext4_es_list_add(sbi, ei);
196 +       spin_unlock(&sbi->s_es_lock);
199 -static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
200 -                           struct ext4_inode_info *locked_ei)
201 +void ext4_es_list_del(struct inode *inode)
203 +       struct ext4_inode_info *ei = EXT4_I(inode);
204 +       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
206 +       spin_lock(&sbi->s_es_lock);
207 +       if (!list_empty(&ei->i_es_list)) {
208 +               list_del_init(&ei->i_es_list);
209 +               WARN_ON_ONCE(sbi->s_es_nr_inode-- < 0);
210 +       }
211 +       spin_unlock(&sbi->s_es_lock);
214 +static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
215 +                      struct ext4_inode_info *locked_ei)
217         struct ext4_inode_info *ei;
218         struct ext4_es_stats *es_stats;
219 -       struct list_head *cur, *tmp;
220 -       LIST_HEAD(skipped);
221         ktime_t start_time;
222         u64 scan_time;
223 +       int nr_to_walk;
224         int nr_shrunk = 0;
225 -       int retried = 0, skip_precached = 1, nr_skipped = 0;
226 +       int retried = 0, nr_skipped = 0;
228         es_stats = &sbi->s_es_stats;
229         start_time = ktime_get();
230 -       spin_lock(&sbi->s_es_lru_lock);
232  retry:
233 -       list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
234 +       spin_lock(&sbi->s_es_lock);
235 +       nr_to_walk = sbi->s_es_nr_inode;
236 +       while (!list_empty(&sbi->s_es_list) && nr_to_walk-- > 0) {
237                 int shrunk;
239 -               /*
240 -                * If we have already reclaimed all extents from extent
241 -                * status tree, just stop the loop immediately.
242 -                */
243 -               if (percpu_counter_read_positive(
244 -                               &es_stats->es_stats_lru_cnt) == 0)
245 -                       break;
246 +               ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
247 +                                     i_es_list);
249 -               ei = list_entry(cur, struct ext4_inode_info, i_es_lru);
250 +               list_del_init(&ei->i_es_list);
251 +               sbi->s_es_nr_inode--;
252 +               if (ei->i_es_shk_nr == 0)
253 +                       continue;
255                 /*
256 -                * Skip the inode that is newer than the last_sorted
257 -                * time.  Normally we try hard to avoid shrinking
258 -                * precached inodes, but we will as a last resort.
259 +                * Normally we try hard to avoid shrinking precached inodes,
260 +                * but we will as a last resort.
261                  */
262 -               if ((es_stats->es_stats_last_sorted < ei->i_touch_when) ||
263 -                   (skip_precached && ext4_test_inode_state(&ei->vfs_inode,
264 -                                               EXT4_STATE_EXT_PRECACHED))) {
265 +               if ((!retried && ext4_test_inode_state(&ei->vfs_inode,
266 +                                      EXT4_STATE_EXT_PRECACHED)) ||
267 +                   ei == locked_ei ||
268 +                   !write_trylock(&ei->i_es_lock)) {
269                         nr_skipped++;
270 -                       list_move_tail(cur, &skipped);
271 +                       __ext4_es_list_add(sbi, ei);
272 +                       if (spin_is_contended(&sbi->s_es_lock)) {
273 +                               spin_unlock(&sbi->s_es_lock);
274 +                               spin_lock(&sbi->s_es_lock);
275 +                       }
276                         continue;
277                 }
279 -               if (ei->i_es_lru_nr == 0 || ei == locked_ei ||
280 -                   !write_trylock(&ei->i_es_lock))
281 -                       continue;
283 +               /* we only release s_es_lock once we have i_es_lock */
284 +               spin_unlock(&sbi->s_es_lock);
285                 shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
286 -               if (ei->i_es_lru_nr == 0)
287 -                       list_del_init(&ei->i_es_lru);
288                 write_unlock(&ei->i_es_lock);
290                 nr_shrunk += shrunk;
291                 nr_to_scan -= shrunk;
293                 if (nr_to_scan == 0)
294 -                       break;
295 +                       goto out;
296 +               spin_lock(&sbi->s_es_lock);
297         }
299 -       /* Move the newer inodes into the tail of the LRU list. */
300 -       list_splice_tail(&skipped, &sbi->s_es_lru);
301 -       INIT_LIST_HEAD(&skipped);
302 +       spin_unlock(&sbi->s_es_lock);
304         /*
305          * If we skipped any inodes, and we weren't able to make any
306 -        * forward progress, sort the list and try again.
307 +        * forward progress, try again to scan precached inodes.
308          */
309         if ((nr_shrunk == 0) && nr_skipped && !retried) {
310                 retried++;
311 -               list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp);
312 -               es_stats->es_stats_last_sorted = jiffies;
313 -               ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info,
314 -                                     i_es_lru);
315 -               /*
316 -                * If there are no non-precached inodes left on the
317 -                * list, start releasing precached extents.
318 -                */
319 -               if (ext4_test_inode_state(&ei->vfs_inode,
320 -                                         EXT4_STATE_EXT_PRECACHED))
321 -                       skip_precached = 0;
322                 goto retry;
323         }
325 -       spin_unlock(&sbi->s_es_lru_lock);
327         if (locked_ei && nr_shrunk == 0)
328                 nr_shrunk = __es_try_to_reclaim_extents(locked_ei, nr_to_scan);
330 +out:
331         scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
332         if (likely(es_stats->es_stats_scan_time))
333                 es_stats->es_stats_scan_time = (scan_time +
334 @@ -1043,7 +1041,7 @@ retry:
335         else
336                 es_stats->es_stats_shrunk = nr_shrunk;
338 -       trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time, skip_precached,
339 +       trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time,
340                              nr_skipped, retried);
341         return nr_shrunk;
343 @@ -1055,7 +1053,7 @@ static unsigned long ext4_es_count(struct shrinker *shrink,
344         struct ext4_sb_info *sbi;
346         sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
347 -       nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_lru_cnt);
348 +       nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
349         trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
350         return nr;
352 @@ -1068,13 +1066,13 @@ static unsigned long ext4_es_scan(struct shrinker *shrink,
353         int nr_to_scan = sc->nr_to_scan;
354         int ret, nr_shrunk;
356 -       ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_lru_cnt);
357 +       ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
358         trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);
360         if (!nr_to_scan)
361                 return ret;
363 -       nr_shrunk = __ext4_es_shrink(sbi, nr_to_scan, NULL);
364 +       nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);
366         trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
367         return nr_shrunk;
368 @@ -1102,28 +1100,24 @@ static int ext4_es_seq_shrinker_info_show(struct seq_file *seq, void *v)
369                 return 0;
371         /* here we just find an inode that has the max nr. of objects */
372 -       spin_lock(&sbi->s_es_lru_lock);
373 -       list_for_each_entry(ei, &sbi->s_es_lru, i_es_lru) {
374 +       spin_lock(&sbi->s_es_lock);
375 +       list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
376                 inode_cnt++;
377                 if (max && max->i_es_all_nr < ei->i_es_all_nr)
378                         max = ei;
379                 else if (!max)
380                         max = ei;
381         }
382 -       spin_unlock(&sbi->s_es_lru_lock);
383 +       spin_unlock(&sbi->s_es_lock);
385         seq_printf(seq, "stats:\n  %lld objects\n  %lld reclaimable objects\n",
386                    percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
387 -                  percpu_counter_sum_positive(&es_stats->es_stats_lru_cnt));
388 +                  percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
389         seq_printf(seq, "  %lu/%lu cache hits/misses\n",
390                    es_stats->es_stats_cache_hits,
391                    es_stats->es_stats_cache_misses);
392 -       if (es_stats->es_stats_last_sorted != 0)
393 -               seq_printf(seq, "  %u ms last sorted interval\n",
394 -                          jiffies_to_msecs(jiffies -
395 -                                           es_stats->es_stats_last_sorted));
396         if (inode_cnt)
397 -               seq_printf(seq, "  %d inodes on lru list\n", inode_cnt);
398 +               seq_printf(seq, "  %d inodes on list\n", inode_cnt);
400         seq_printf(seq, "average:\n  %llu us scan time\n",
401             div_u64(es_stats->es_stats_scan_time, 1000));
402 @@ -1132,7 +1126,7 @@ static int ext4_es_seq_shrinker_info_show(struct seq_file *seq, void *v)
403                 seq_printf(seq,
404                     "maximum:\n  %lu inode (%u objects, %u reclaimable)\n"
405                     "  %llu us max scan time\n",
406 -                   max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_lru_nr,
407 +                   max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_shk_nr,
408                     div_u64(es_stats->es_stats_max_scan_time, 1000));
410         return 0;
411 @@ -1181,9 +1175,9 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
413         int err;
415 -       INIT_LIST_HEAD(&sbi->s_es_lru);
416 -       spin_lock_init(&sbi->s_es_lru_lock);
417 -       sbi->s_es_stats.es_stats_last_sorted = 0;
418 +       INIT_LIST_HEAD(&sbi->s_es_list);
419 +       sbi->s_es_nr_inode = 0;
420 +       spin_lock_init(&sbi->s_es_lock);
421         sbi->s_es_stats.es_stats_shrunk = 0;
422         sbi->s_es_stats.es_stats_cache_hits = 0;
423         sbi->s_es_stats.es_stats_cache_misses = 0;
424 @@ -1192,7 +1186,7 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
425         err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL);
426         if (err)
427                 return err;
428 -       err = percpu_counter_init(&sbi->s_es_stats.es_stats_lru_cnt, 0, GFP_KERNEL);
429 +       err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL);
430         if (err)
431                 goto err1;
433 @@ -1210,7 +1204,7 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
434         return 0;
436  err2:
437 -       percpu_counter_destroy(&sbi->s_es_stats.es_stats_lru_cnt);
438 +       percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
439  err1:
440         percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
441         return err;
442 @@ -1221,37 +1215,10 @@ void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
443         if (sbi->s_proc)
444                 remove_proc_entry("es_shrinker_info", sbi->s_proc);
445         percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
446 -       percpu_counter_destroy(&sbi->s_es_stats.es_stats_lru_cnt);
447 +       percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
448         unregister_shrinker(&sbi->s_es_shrinker);
451 -void ext4_es_lru_add(struct inode *inode)
453 -       struct ext4_inode_info *ei = EXT4_I(inode);
454 -       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
456 -       ei->i_touch_when = jiffies;
458 -       if (!list_empty(&ei->i_es_lru))
459 -               return;
461 -       spin_lock(&sbi->s_es_lru_lock);
462 -       if (list_empty(&ei->i_es_lru))
463 -               list_add_tail(&ei->i_es_lru, &sbi->s_es_lru);
464 -       spin_unlock(&sbi->s_es_lru_lock);
467 -void ext4_es_lru_del(struct inode *inode)
469 -       struct ext4_inode_info *ei = EXT4_I(inode);
470 -       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
472 -       spin_lock(&sbi->s_es_lru_lock);
473 -       if (!list_empty(&ei->i_es_lru))
474 -               list_del_init(&ei->i_es_lru);
475 -       spin_unlock(&sbi->s_es_lru_lock);
478  static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
479                                        int nr_to_scan)
481 @@ -1263,7 +1230,7 @@ static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
482         static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
483                                       DEFAULT_RATELIMIT_BURST);
485 -       if (ei->i_es_lru_nr == 0)
486 +       if (ei->i_es_shk_nr == 0)
487                 return 0;
489         if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
490 diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
491 index efd5f97..0e6a33e 100644
492 --- a/fs/ext4/extents_status.h
493 +++ b/fs/ext4/extents_status.h
494 @@ -65,14 +65,13 @@ struct ext4_es_tree {
495  };
497  struct ext4_es_stats {
498 -       unsigned long es_stats_last_sorted;
499         unsigned long es_stats_shrunk;
500         unsigned long es_stats_cache_hits;
501         unsigned long es_stats_cache_misses;
502         u64 es_stats_scan_time;
503         u64 es_stats_max_scan_time;
504         struct percpu_counter es_stats_all_cnt;
505 -       struct percpu_counter es_stats_lru_cnt;
506 +       struct percpu_counter es_stats_shk_cnt;
507  };
509  extern int __init ext4_init_es(void);
510 @@ -151,7 +150,7 @@ static inline void ext4_es_store_pblock_status(struct extent_status *es,
512  extern int ext4_es_register_shrinker(struct ext4_sb_info *sbi);
513  extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi);
514 -extern void ext4_es_lru_add(struct inode *inode);
515 -extern void ext4_es_lru_del(struct inode *inode);
516 +extern void ext4_es_list_add(struct inode *inode);
517 +extern void ext4_es_list_del(struct inode *inode);
519  #endif /* _EXT4_EXTENTS_STATUS_H */
520 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
521 index eeaffd0..2424875 100644
522 --- a/fs/ext4/inode.c
523 +++ b/fs/ext4/inode.c
524 @@ -492,7 +492,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
526         /* Lookup extent status tree firstly */
527         if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
528 -               ext4_es_lru_add(inode);
529 +               ext4_es_list_add(inode);
530                 if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
531                         map->m_pblk = ext4_es_pblock(&es) +
532                                         map->m_lblk - es.es_lblk;
533 @@ -1404,7 +1404,7 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
535         /* Lookup extent status tree firstly */
536         if (ext4_es_lookup_extent(inode, iblock, &es)) {
537 -               ext4_es_lru_add(inode);
538 +               ext4_es_list_add(inode);
539                 if (ext4_es_is_hole(&es)) {
540                         retval = 0;
541                         down_read(&EXT4_I(inode)->i_data_sem);
542 diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
543 index bfda18a..7b377c4 100644
544 --- a/fs/ext4/ioctl.c
545 +++ b/fs/ext4/ioctl.c
546 @@ -78,8 +78,8 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
547         memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize));
548         ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
549         ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
550 -       ext4_es_lru_del(inode1);
551 -       ext4_es_lru_del(inode2);
552 +       ext4_es_list_del(inode1);
553 +       ext4_es_list_del(inode2);
555         isize = i_size_read(inode1);
556         i_size_write(inode1, i_size_read(inode2));
557 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
558 index 240bce4..0d67cda 100644
559 --- a/fs/ext4/super.c
560 +++ b/fs/ext4/super.c
561 @@ -880,10 +880,9 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
562         spin_lock_init(&ei->i_prealloc_lock);
563         ext4_es_init_tree(&ei->i_es_tree);
564         rwlock_init(&ei->i_es_lock);
565 -       INIT_LIST_HEAD(&ei->i_es_lru);
566 +       INIT_LIST_HEAD(&ei->i_es_list);
567         ei->i_es_all_nr = 0;
568 -       ei->i_es_lru_nr = 0;
569 -       ei->i_touch_when = 0;
570 +       ei->i_es_shk_nr = 0;
571         ei->i_reserved_data_blocks = 0;
572         ei->i_reserved_meta_blocks = 0;
573         ei->i_allocated_meta_blocks = 0;
574 @@ -973,7 +972,7 @@ void ext4_clear_inode(struct inode *inode)
575         dquot_drop(inode);
576         ext4_discard_preallocations(inode);
577         ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
578 -       ext4_es_lru_del(inode);
579 +       ext4_es_list_del(inode);
580         if (EXT4_I(inode)->jinode) {
581                 jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
582                                                EXT4_I(inode)->jinode);
583 diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
584 index d905f3a..7b3fe2f 100644
585 --- a/include/trace/events/ext4.h
586 +++ b/include/trace/events/ext4.h
587 @@ -2497,15 +2497,14 @@ TRACE_EVENT(ext4_collapse_range,
589  TRACE_EVENT(ext4_es_shrink,
590         TP_PROTO(struct super_block *sb, int nr_shrunk, u64 scan_time,
591 -                int skip_precached, int nr_skipped, int retried),
592 +                int nr_skipped, int retried),
594 -       TP_ARGS(sb, nr_shrunk, scan_time, skip_precached, nr_skipped, retried),
595 +       TP_ARGS(sb, nr_shrunk, scan_time, nr_skipped, retried),
597         TP_STRUCT__entry(
598                 __field(        dev_t,          dev             )
599                 __field(        int,            nr_shrunk       )
600                 __field(        unsigned long long, scan_time   )
601 -               __field(        int,            skip_precached  )
602                 __field(        int,            nr_skipped      )
603                 __field(        int,            retried         )
604         ),
605 @@ -2514,16 +2513,14 @@ TRACE_EVENT(ext4_es_shrink,
606                 __entry->dev            = sb->s_dev;
607                 __entry->nr_shrunk      = nr_shrunk;
608                 __entry->scan_time      = div_u64(scan_time, 1000);
609 -               __entry->skip_precached = skip_precached;
610                 __entry->nr_skipped     = nr_skipped;
611                 __entry->retried        = retried;
612         ),
614 -       TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu skip_precached %d "
615 +       TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu "
616                   "nr_skipped %d retried %d",
617                   MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_shrunk,
618 -                 __entry->scan_time, __entry->skip_precached,
619 -                 __entry->nr_skipped, __entry->retried)
620 +                 __entry->scan_time, __entry->nr_skipped, __entry->retried)
621  );
623  #endif /* _TRACE_EXT4_H */