1 ext4: change LRU to round-robin in extent status tree shrinker
3 From: Zheng Liu <wenqing.lz@taobao.com>
5 In this commit we discard the LRU algorithm for inodes with an extent
6 status tree because it takes significant effort to maintain an LRU list
7 in the extent status tree shrinker and the shrinker can take a long time
8 to scan this LRU list in order to reclaim some objects.
10 We replace the LRU ordering with a simple round-robin. With this change
11 we no longer need to maintain an LRU list, which means the list needn't
12 be sorted if the shrinker cannot reclaim any objects in the first round.
14 Cc: Andreas Dilger <adilger.kernel@dilger.ca>
15 Signed-off-by: Zheng Liu <wenqing.lz@taobao.com>
16 Signed-off-by: Jan Kara <jack@suse.cz>
17 Signed-off-by: Theodore Ts'o <tytso@mit.edu>
19 fs/ext4/ext4.h | 10 +-
20 fs/ext4/extents.c | 4 +-
21 fs/ext4/extents_status.c | 224 +++++++++++++++++++-------------------------
22 fs/ext4/extents_status.h | 7 +-
23 fs/ext4/inode.c | 4 +-
24 fs/ext4/ioctl.c | 4 +-
25 fs/ext4/super.c | 7 +-
26 include/trace/events/ext4.h | 11 +--
27 8 files changed, 118 insertions(+), 153 deletions(-)
29 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
30 index b9e9fdb3793a..ede6dd43fe13 100644
33 @@ -878,10 +878,9 @@ struct ext4_inode_info {
34 /* extents status tree */
35 struct ext4_es_tree i_es_tree;
37 - struct list_head i_es_lru;
38 + struct list_head i_es_list;
39 unsigned int i_es_all_nr; /* protected by i_es_lock */
40 - unsigned int i_es_lru_nr; /* protected by i_es_lock */
41 - unsigned long i_touch_when; /* jiffies of last accessing */
42 + unsigned int i_es_shk_nr; /* protected by i_es_lock */
45 ext4_group_t i_last_alloc_group;
46 @@ -1322,10 +1321,11 @@ struct ext4_sb_info {
48 /* Reclaim extents from extent status tree */
49 struct shrinker s_es_shrinker;
50 - struct list_head s_es_lru;
51 + struct list_head s_es_list;
53 struct ext4_es_stats s_es_stats;
54 struct mb_cache *s_mb_cache;
55 - spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
56 + spinlock_t s_es_lock ____cacheline_aligned_in_smp;
58 /* Ratelimit ext4 messages. */
59 struct ratelimit_state s_err_ratelimit_state;
60 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
61 index d0e502da8fbe..60569277584a 100644
62 --- a/fs/ext4/extents.c
63 +++ b/fs/ext4/extents.c
64 @@ -4618,7 +4618,7 @@ out2:
66 trace_ext4_ext_map_blocks_exit(inode, flags, map,
67 err ? err : allocated);
68 - ext4_es_lru_add(inode);
69 + ext4_es_list_add(inode);
70 return err ? err : allocated;
73 @@ -5177,7 +5177,7 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
74 error = ext4_fill_fiemap_extents(inode, start_blk,
77 - ext4_es_lru_add(inode);
78 + ext4_es_list_add(inode);
82 diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
83 index 94e7855ae71b..0193ca107396 100644
84 --- a/fs/ext4/extents_status.c
85 +++ b/fs/ext4/extents_status.c
86 @@ -149,8 +149,8 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
88 static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
90 -static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
91 - struct ext4_inode_info *locked_ei);
92 +static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
93 + struct ext4_inode_info *locked_ei);
95 int __init ext4_init_es(void)
97 @@ -298,6 +298,36 @@ out:
98 trace_ext4_es_find_delayed_extent_range_exit(inode, es);
101 +void ext4_es_list_add(struct inode *inode)
103 + struct ext4_inode_info *ei = EXT4_I(inode);
104 + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
106 + if (!list_empty(&ei->i_es_list))
109 + spin_lock(&sbi->s_es_lock);
110 + if (list_empty(&ei->i_es_list)) {
111 + list_add_tail(&ei->i_es_list, &sbi->s_es_list);
112 + sbi->s_es_nr_inode++;
114 + spin_unlock(&sbi->s_es_lock);
117 +void ext4_es_list_del(struct inode *inode)
119 + struct ext4_inode_info *ei = EXT4_I(inode);
120 + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
122 + spin_lock(&sbi->s_es_lock);
123 + if (!list_empty(&ei->i_es_list)) {
124 + list_del_init(&ei->i_es_list);
125 + sbi->s_es_nr_inode--;
126 + WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
128 + spin_unlock(&sbi->s_es_lock);
131 static struct extent_status *
132 ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
134 @@ -314,9 +344,9 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
135 * We don't count delayed extent because we never try to reclaim them
137 if (!ext4_es_is_delayed(es)) {
138 - EXT4_I(inode)->i_es_lru_nr++;
139 + EXT4_I(inode)->i_es_shk_nr++;
140 percpu_counter_inc(&EXT4_SB(inode->i_sb)->
141 - s_es_stats.es_stats_lru_cnt);
142 + s_es_stats.es_stats_shk_cnt);
145 EXT4_I(inode)->i_es_all_nr++;
146 @@ -330,12 +360,12 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
147 EXT4_I(inode)->i_es_all_nr--;
148 percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
150 - /* Decrease the lru counter when this es is not delayed */
151 + /* Decrease the shrink counter when this es is not delayed */
152 if (!ext4_es_is_delayed(es)) {
153 - BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0);
154 - EXT4_I(inode)->i_es_lru_nr--;
155 + BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
156 + EXT4_I(inode)->i_es_shk_nr--;
157 percpu_counter_dec(&EXT4_SB(inode->i_sb)->
158 - s_es_stats.es_stats_lru_cnt);
159 + s_es_stats.es_stats_shk_cnt);
162 kmem_cache_free(ext4_es_cachep, es);
163 @@ -683,8 +713,8 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
166 err = __es_insert_extent(inode, &newes);
167 - if (err == -ENOMEM && __ext4_es_shrink(EXT4_SB(inode->i_sb), 1,
169 + if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
172 if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
174 @@ -841,8 +871,8 @@ retry:
175 es->es_lblk = orig_es.es_lblk;
176 es->es_len = orig_es.es_len;
177 if ((err == -ENOMEM) &&
178 - __ext4_es_shrink(EXT4_SB(inode->i_sb), 1,
180 + __es_shrink(EXT4_SB(inode->i_sb),
185 @@ -914,6 +944,11 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
186 end = lblk + len - 1;
190 + * ext4_clear_inode() depends on us taking i_es_lock unconditionally
191 + * so that we are sure __es_shrink() is done with the inode before it
192 + * is reclaimed.
193 + */
194 write_lock(&EXT4_I(inode)->i_es_lock);
195 err = __es_remove_extent(inode, lblk, end);
196 write_unlock(&EXT4_I(inode)->i_es_lock);
197 @@ -921,114 +956,80 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
201 -static int ext4_inode_touch_time_cmp(void *priv, struct list_head *a,
202 - struct list_head *b)
204 - struct ext4_inode_info *eia, *eib;
205 - eia = list_entry(a, struct ext4_inode_info, i_es_lru);
206 - eib = list_entry(b, struct ext4_inode_info, i_es_lru);
208 - if (ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) &&
209 - !ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED))
211 - if (!ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) &&
212 - ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED))
214 - if (eia->i_touch_when == eib->i_touch_when)
216 - if (time_after(eia->i_touch_when, eib->i_touch_when))
222 -static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
223 - struct ext4_inode_info *locked_ei)
224 +static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
225 + struct ext4_inode_info *locked_ei)
227 struct ext4_inode_info *ei;
228 struct ext4_es_stats *es_stats;
229 - struct list_head *cur, *tmp;
230 - LIST_HEAD(skipped);
235 - int retried = 0, skip_precached = 1, nr_skipped = 0;
236 + int retried = 0, nr_skipped = 0;
238 es_stats = &sbi->s_es_stats;
239 start_time = ktime_get();
240 - spin_lock(&sbi->s_es_lru_lock);
243 - list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
244 + spin_lock(&sbi->s_es_lock);
245 + nr_to_walk = sbi->s_es_nr_inode;
246 + while (nr_to_walk-- > 0) {
250 - * If we have already reclaimed all extents from extent
251 - * status tree, just stop the loop immediately.
253 - if (percpu_counter_read_positive(
254 - &es_stats->es_stats_lru_cnt) == 0)
257 - ei = list_entry(cur, struct ext4_inode_info, i_es_lru);
258 + if (list_empty(&sbi->s_es_list)) {
259 + spin_unlock(&sbi->s_es_lock);
262 + ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
264 + /* Move the inode to the tail */
265 + list_move(&ei->i_es_list, sbi->s_es_list.prev);
268 - * Skip the inode that is newer than the last_sorted
269 - * time. Normally we try hard to avoid shrinking
270 - * precached inodes, but we will as a last resort.
271 + * Normally we try hard to avoid shrinking precached inodes,
272 + * but we will as a last resort.
274 - if ((es_stats->es_stats_last_sorted < ei->i_touch_when) ||
275 - (skip_precached && ext4_test_inode_state(&ei->vfs_inode,
276 - EXT4_STATE_EXT_PRECACHED))) {
277 + if (!retried && ext4_test_inode_state(&ei->vfs_inode,
278 + EXT4_STATE_EXT_PRECACHED)) {
280 - list_move_tail(cur, &skipped);
284 - if (ei->i_es_lru_nr == 0 || ei == locked_ei ||
285 - !write_trylock(&ei->i_es_lock))
286 + if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) {
291 + * Now we hold i_es_lock which protects us from inode reclaim
292 + * freeing inode under us
294 + spin_unlock(&sbi->s_es_lock);
296 shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
297 - if (ei->i_es_lru_nr == 0)
298 - list_del_init(&ei->i_es_lru);
299 write_unlock(&ei->i_es_lock);
302 nr_to_scan -= shrunk;
307 + spin_lock(&sbi->s_es_lock);
310 - /* Move the newer inodes into the tail of the LRU list. */
311 - list_splice_tail(&skipped, &sbi->s_es_lru);
312 - INIT_LIST_HEAD(&skipped);
313 + spin_unlock(&sbi->s_es_lock);
316 * If we skipped any inodes, and we weren't able to make any
317 - * forward progress, sort the list and try again.
318 + * forward progress, try again to scan precached inodes.
320 if ((nr_shrunk == 0) && nr_skipped && !retried) {
322 - list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp);
323 - es_stats->es_stats_last_sorted = jiffies;
324 - ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info,
327 - * If there are no non-precached inodes left on the
328 - * list, start releasing precached extents.
330 - if (ext4_test_inode_state(&ei->vfs_inode,
331 - EXT4_STATE_EXT_PRECACHED))
332 - skip_precached = 0;
336 - spin_unlock(&sbi->s_es_lru_lock);
338 if (locked_ei && nr_shrunk == 0)
339 nr_shrunk = __es_try_to_reclaim_extents(locked_ei, nr_to_scan);
342 scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
343 if (likely(es_stats->es_stats_scan_time))
344 es_stats->es_stats_scan_time = (scan_time +
345 @@ -1043,7 +1044,7 @@ retry:
347 es_stats->es_stats_shrunk = nr_shrunk;
349 - trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time, skip_precached,
350 + trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time,
351 nr_skipped, retried);
354 @@ -1055,7 +1056,7 @@ static unsigned long ext4_es_count(struct shrinker *shrink,
355 struct ext4_sb_info *sbi;
357 sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
358 - nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_lru_cnt);
359 + nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
360 trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
363 @@ -1068,13 +1069,13 @@ static unsigned long ext4_es_scan(struct shrinker *shrink,
364 int nr_to_scan = sc->nr_to_scan;
367 - ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_lru_cnt);
368 + ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
369 trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);
374 - nr_shrunk = __ext4_es_shrink(sbi, nr_to_scan, NULL);
375 + nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);
377 trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
379 @@ -1102,28 +1103,24 @@ static int ext4_es_seq_shrinker_info_show(struct seq_file *seq, void *v)
382 /* here we just find an inode that has the max nr. of objects */
383 - spin_lock(&sbi->s_es_lru_lock);
384 - list_for_each_entry(ei, &sbi->s_es_lru, i_es_lru) {
385 + spin_lock(&sbi->s_es_lock);
386 + list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
388 if (max && max->i_es_all_nr < ei->i_es_all_nr)
393 - spin_unlock(&sbi->s_es_lru_lock);
394 + spin_unlock(&sbi->s_es_lock);
396 seq_printf(seq, "stats:\n %lld objects\n %lld reclaimable objects\n",
397 percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
398 - percpu_counter_sum_positive(&es_stats->es_stats_lru_cnt));
399 + percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
400 seq_printf(seq, " %lu/%lu cache hits/misses\n",
401 es_stats->es_stats_cache_hits,
402 es_stats->es_stats_cache_misses);
403 - if (es_stats->es_stats_last_sorted != 0)
404 - seq_printf(seq, " %u ms last sorted interval\n",
405 - jiffies_to_msecs(jiffies -
406 - es_stats->es_stats_last_sorted));
408 - seq_printf(seq, " %d inodes on lru list\n", inode_cnt);
409 + seq_printf(seq, " %d inodes on list\n", inode_cnt);
411 seq_printf(seq, "average:\n %llu us scan time\n",
412 div_u64(es_stats->es_stats_scan_time, 1000));
413 @@ -1132,7 +1129,7 @@ static int ext4_es_seq_shrinker_info_show(struct seq_file *seq, void *v)
415 "maximum:\n %lu inode (%u objects, %u reclaimable)\n"
416 " %llu us max scan time\n",
417 - max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_lru_nr,
418 + max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_shk_nr,
419 div_u64(es_stats->es_stats_max_scan_time, 1000));
422 @@ -1181,9 +1178,9 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
426 - INIT_LIST_HEAD(&sbi->s_es_lru);
427 - spin_lock_init(&sbi->s_es_lru_lock);
428 - sbi->s_es_stats.es_stats_last_sorted = 0;
429 + INIT_LIST_HEAD(&sbi->s_es_list);
430 + sbi->s_es_nr_inode = 0;
431 + spin_lock_init(&sbi->s_es_lock);
432 sbi->s_es_stats.es_stats_shrunk = 0;
433 sbi->s_es_stats.es_stats_cache_hits = 0;
434 sbi->s_es_stats.es_stats_cache_misses = 0;
435 @@ -1192,7 +1189,7 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
436 err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL);
439 - err = percpu_counter_init(&sbi->s_es_stats.es_stats_lru_cnt, 0, GFP_KERNEL);
440 + err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL);
444 @@ -1210,7 +1207,7 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
448 - percpu_counter_destroy(&sbi->s_es_stats.es_stats_lru_cnt);
449 + percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
451 percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
453 @@ -1221,37 +1218,10 @@ void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
455 remove_proc_entry("es_shrinker_info", sbi->s_proc);
456 percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
457 - percpu_counter_destroy(&sbi->s_es_stats.es_stats_lru_cnt);
458 + percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
459 unregister_shrinker(&sbi->s_es_shrinker);
462 -void ext4_es_lru_add(struct inode *inode)
464 - struct ext4_inode_info *ei = EXT4_I(inode);
465 - struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
467 - ei->i_touch_when = jiffies;
469 - if (!list_empty(&ei->i_es_lru))
472 - spin_lock(&sbi->s_es_lru_lock);
473 - if (list_empty(&ei->i_es_lru))
474 - list_add_tail(&ei->i_es_lru, &sbi->s_es_lru);
475 - spin_unlock(&sbi->s_es_lru_lock);
478 -void ext4_es_lru_del(struct inode *inode)
480 - struct ext4_inode_info *ei = EXT4_I(inode);
481 - struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
483 - spin_lock(&sbi->s_es_lru_lock);
484 - if (!list_empty(&ei->i_es_lru))
485 - list_del_init(&ei->i_es_lru);
486 - spin_unlock(&sbi->s_es_lru_lock);
489 static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
492 @@ -1263,7 +1233,7 @@ static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
493 static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
494 DEFAULT_RATELIMIT_BURST);
496 - if (ei->i_es_lru_nr == 0)
497 + if (ei->i_es_shk_nr == 0)
500 if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
501 diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
502 index efd5f970b501..0e6a33e81e5f 100644
503 --- a/fs/ext4/extents_status.h
504 +++ b/fs/ext4/extents_status.h
505 @@ -65,14 +65,13 @@ struct ext4_es_tree {
508 struct ext4_es_stats {
509 - unsigned long es_stats_last_sorted;
510 unsigned long es_stats_shrunk;
511 unsigned long es_stats_cache_hits;
512 unsigned long es_stats_cache_misses;
513 u64 es_stats_scan_time;
514 u64 es_stats_max_scan_time;
515 struct percpu_counter es_stats_all_cnt;
516 - struct percpu_counter es_stats_lru_cnt;
517 + struct percpu_counter es_stats_shk_cnt;
520 extern int __init ext4_init_es(void);
521 @@ -151,7 +150,7 @@ static inline void ext4_es_store_pblock_status(struct extent_status *es,
523 extern int ext4_es_register_shrinker(struct ext4_sb_info *sbi);
524 extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi);
525 -extern void ext4_es_lru_add(struct inode *inode);
526 -extern void ext4_es_lru_del(struct inode *inode);
527 +extern void ext4_es_list_add(struct inode *inode);
528 +extern void ext4_es_list_del(struct inode *inode);
530 #endif /* _EXT4_EXTENTS_STATUS_H */
531 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
532 index d0e9cbb56f94..dd21ab011a2a 100644
533 --- a/fs/ext4/inode.c
534 +++ b/fs/ext4/inode.c
535 @@ -486,7 +486,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
537 /* Lookup extent status tree firstly */
538 if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
539 - ext4_es_lru_add(inode);
540 + ext4_es_list_add(inode);
541 if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
542 map->m_pblk = ext4_es_pblock(&es) +
543 map->m_lblk - es.es_lblk;
544 @@ -1388,7 +1388,7 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
546 /* Lookup extent status tree firstly */
547 if (ext4_es_lookup_extent(inode, iblock, &es)) {
548 - ext4_es_lru_add(inode);
549 + ext4_es_list_add(inode);
550 if (ext4_es_is_hole(&es)) {
552 down_read(&EXT4_I(inode)->i_data_sem);
553 diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
554 index bfda18a15592..7b377c41dd81 100644
555 --- a/fs/ext4/ioctl.c
556 +++ b/fs/ext4/ioctl.c
557 @@ -78,8 +78,8 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
558 memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize));
559 ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
560 ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
561 - ext4_es_lru_del(inode1);
562 - ext4_es_lru_del(inode2);
563 + ext4_es_list_del(inode1);
564 + ext4_es_list_del(inode2);
566 isize = i_size_read(inode1);
567 i_size_write(inode1, i_size_read(inode2));
568 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
569 index 1eda6ab0ef9d..6f64086ac495 100644
570 --- a/fs/ext4/super.c
571 +++ b/fs/ext4/super.c
572 @@ -880,10 +880,9 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
573 spin_lock_init(&ei->i_prealloc_lock);
574 ext4_es_init_tree(&ei->i_es_tree);
575 rwlock_init(&ei->i_es_lock);
576 - INIT_LIST_HEAD(&ei->i_es_lru);
577 + INIT_LIST_HEAD(&ei->i_es_list);
579 - ei->i_es_lru_nr = 0;
580 - ei->i_touch_when = 0;
581 + ei->i_es_shk_nr = 0;
582 ei->i_reserved_data_blocks = 0;
583 ei->i_reserved_meta_blocks = 0;
584 ei->i_allocated_meta_blocks = 0;
585 @@ -972,7 +971,7 @@ void ext4_clear_inode(struct inode *inode)
587 ext4_discard_preallocations(inode);
588 ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
589 - ext4_es_lru_del(inode);
590 + ext4_es_list_del(inode);
591 if (EXT4_I(inode)->jinode) {
592 jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
593 EXT4_I(inode)->jinode);
594 diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
595 index cd37a584ee88..6cfb841fea7c 100644
596 --- a/include/trace/events/ext4.h
597 +++ b/include/trace/events/ext4.h
598 @@ -2450,15 +2450,14 @@ TRACE_EVENT(ext4_collapse_range,
600 TRACE_EVENT(ext4_es_shrink,
601 TP_PROTO(struct super_block *sb, int nr_shrunk, u64 scan_time,
602 - int skip_precached, int nr_skipped, int retried),
603 + int nr_skipped, int retried),
605 - TP_ARGS(sb, nr_shrunk, scan_time, skip_precached, nr_skipped, retried),
606 + TP_ARGS(sb, nr_shrunk, scan_time, nr_skipped, retried),
609 __field( dev_t, dev )
610 __field( int, nr_shrunk )
611 __field( unsigned long long, scan_time )
612 - __field( int, skip_precached )
613 __field( int, nr_skipped )
614 __field( int, retried )
616 @@ -2467,16 +2466,14 @@ TRACE_EVENT(ext4_es_shrink,
617 __entry->dev = sb->s_dev;
618 __entry->nr_shrunk = nr_shrunk;
619 __entry->scan_time = div_u64(scan_time, 1000);
620 - __entry->skip_precached = skip_precached;
621 __entry->nr_skipped = nr_skipped;
622 __entry->retried = retried;
625 - TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu skip_precached %d "
626 + TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu "
627 "nr_skipped %d retried %d",
628 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_shrunk,
629 - __entry->scan_time, __entry->skip_precached,
630 - __entry->nr_skipped, __entry->retried)
631 + __entry->scan_time, __entry->nr_skipped, __entry->retried)
634 #endif /* _TRACE_EXT4_H */