1 vfs: add support for a lazytime mount option
3 Add a new mount option which enables a new "lazytime" mode. This mode
4 causes atime, mtime, and ctime updates to only be made to the
5 in-memory version of the inode. The on-disk times will only get
6 updated when (a) if the inode needs to be updated for some non-time
6 updated when (a) the inode needs to be updated for some non-time
7 related change, (b) userspace calls fsync(), syncfs() or sync(), or
10 This is OK according to POSIX because there are no guarantees after a
11 crash unless userspace explicitly requests durability via an fsync(2) call.
13 For workloads which feature a large number of random writes to a
14 preallocated file, the lazytime mount option significantly reduces
15 writes to the inode table. The repeated 4k writes to a single block
16 will result in undesirable stress on flash devices and SMR disk
17 drives. Even on conventional HDDs, the repeated writes to the inode
18 table block will trigger Adjacent Track Interference (ATI) remediation
19 latencies, which very negatively impact long tail latencies --- which
20 is a very big deal for web serving tiers (for example).
22 Google-Bug-Id: 18297052
24 Signed-off-by: Theodore Ts'o <tytso@mit.edu>
26 fs/ext4/inode.c | 6 +++++
27 fs/fs-writeback.c | 62 +++++++++++++++++++++++++++++++++++++++++---------
28 fs/gfs2/file.c | 4 ++--
29 fs/inode.c | 56 ++++++++++++++++++++++++++++++++-------------
32 fs/proc_namespace.c | 1 +
34 include/linux/backing-dev.h | 1 +
35 include/linux/fs.h | 5 ++++
36 include/trace/events/writeback.h | 60 +++++++++++++++++++++++++++++++++++++++++++++++-
37 include/uapi/linux/fs.h | 4 +++-
38 mm/backing-dev.c | 10 ++++++--
39 13 files changed, 186 insertions(+), 35 deletions(-)
41 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
42 index 5653fa4..628df5b 100644
45 @@ -4840,11 +4840,17 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
46 * If the inode is marked synchronous, we don't honour that here - doing
47 * so would cause a commit on atime updates, which we don't bother doing.
48 * We handle synchronous inodes at the highest possible level.
50 + * If only the I_DIRTY_TIME flag is set, we can skip everything. If
51 + * I_DIRTY_TIME and I_DIRTY_SYNC is set, the only inode fields we need
52 + * to copy into the on-disk inode structure are the timestamp fields.
54 void ext4_dirty_inode(struct inode *inode, int flags)
58 + if (flags == I_DIRTY_TIME)
60 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
63 diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
64 index 2d609a5..0046861 100644
65 --- a/fs/fs-writeback.c
66 +++ b/fs/fs-writeback.c
67 @@ -247,14 +247,19 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
71 +#define EXPIRE_DIRTY_ATIME 0x0001
74 * Move expired (dirtied before work->older_than_this) dirty inodes from
75 * @delaying_queue to @dispatch_queue.
77 static int move_expired_inodes(struct list_head *delaying_queue,
78 struct list_head *dispatch_queue,
80 struct wb_writeback_work *work)
82 + unsigned long *older_than_this = NULL;
83 + unsigned long expire_time;
85 struct list_head *pos, *node;
86 struct super_block *sb = NULL;
87 @@ -262,13 +267,21 @@ static int move_expired_inodes(struct list_head *delaying_queue,
91 + if ((flags & EXPIRE_DIRTY_ATIME) == 0)
92 + older_than_this = work->older_than_this;
93 + else if ((work->reason == WB_REASON_SYNC) == 0) {
94 + expire_time = jiffies - (HZ * 86400);
95 + older_than_this = &expire_time;
97 while (!list_empty(delaying_queue)) {
98 inode = wb_inode(delaying_queue->prev);
99 - if (work->older_than_this &&
100 - inode_dirtied_after(inode, *work->older_than_this))
101 + if (older_than_this &&
102 + inode_dirtied_after(inode, *older_than_this))
104 list_move(&inode->i_wb_list, &tmp);
106 + if (flags & EXPIRE_DIRTY_ATIME)
107 + set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
108 if (sb_is_blkdev_sb(inode->i_sb))
110 if (sb && sb != inode->i_sb)
111 @@ -309,9 +322,12 @@ out:
112 static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
116 assert_spin_locked(&wb->list_lock);
117 list_splice_init(&wb->b_more_io, &wb->b_io);
118 - moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
119 + moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
120 + moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
121 + EXPIRE_DIRTY_ATIME, work);
122 trace_writeback_queue_io(wb, work, moved);
125 @@ -435,6 +451,8 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
126 * updates after data IO completion.
128 redirty_tail(inode, wb);
129 + } else if (inode->i_state & I_DIRTY_TIME) {
130 + list_move(&inode->i_wb_list, &wb->b_dirty_time);
132 /* The inode is clean. Remove from writeback lists. */
133 list_del_init(&inode->i_wb_list);
134 @@ -481,7 +499,13 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
135 spin_lock(&inode->i_lock);
137 dirty = inode->i_state & I_DIRTY;
138 - inode->i_state &= ~I_DIRTY;
139 + if (((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) &&
140 + (inode->i_state & I_DIRTY_TIME)) ||
141 + (inode->i_state & I_DIRTY_TIME_EXPIRED)) {
142 + dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
143 + trace_writeback_lazytime(inode);
145 + inode->i_state &= ~dirty;
148 * Paired with smp_mb() in __mark_inode_dirty(). This allows
149 @@ -501,8 +525,10 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
151 spin_unlock(&inode->i_lock);
153 + if (dirty & I_DIRTY_TIME)
154 + mark_inode_dirty_sync(inode);
155 /* Don't write the inode if only I_DIRTY_PAGES was set */
156 - if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
157 + if (dirty & ~I_DIRTY_PAGES) {
158 int err = write_inode(inode, wbc);
161 @@ -550,7 +576,7 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
162 * make sure inode is on some writeback list and leave it there unless
163 * we have completely cleaned the inode.
165 - if (!(inode->i_state & I_DIRTY) &&
166 + if (!(inode->i_state & I_DIRTY_ALL) &&
167 (wbc->sync_mode != WB_SYNC_ALL ||
168 !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
170 @@ -565,7 +591,7 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
171 * If inode is clean, remove it from writeback lists. Otherwise don't
172 * touch it. See comment above for explanation.
174 - if (!(inode->i_state & I_DIRTY))
175 + if (!(inode->i_state & I_DIRTY_ALL))
176 list_del_init(&inode->i_wb_list);
177 spin_unlock(&wb->list_lock);
178 inode_sync_complete(inode);
179 @@ -707,7 +733,7 @@ static long writeback_sb_inodes(struct super_block *sb,
180 wrote += write_chunk - wbc.nr_to_write;
181 spin_lock(&wb->list_lock);
182 spin_lock(&inode->i_lock);
183 - if (!(inode->i_state & I_DIRTY))
184 + if (!(inode->i_state & I_DIRTY_ALL))
186 requeue_inode(inode, wb, &wbc);
187 inode_sync_complete(inode);
188 @@ -1145,16 +1171,20 @@ static noinline void block_dump___mark_inode_dirty(struct inode *inode)
189 * page->mapping->host, so the page-dirtying time is recorded in the internal
192 +#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
193 void __mark_inode_dirty(struct inode *inode, int flags)
195 struct super_block *sb = inode->i_sb;
196 struct backing_dev_info *bdi = NULL;
199 + trace_writeback_mark_inode_dirty(inode, flags);
202 * Don't do this for I_DIRTY_PAGES - that doesn't actually
203 * dirty the inode itself
205 - if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
206 + if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_TIME)) {
207 trace_writeback_dirty_inode_start(inode, flags);
209 if (sb->s_op->dirty_inode)
210 @@ -1162,6 +1192,9 @@ void __mark_inode_dirty(struct inode *inode, int flags)
212 trace_writeback_dirty_inode(inode, flags);
214 + if (flags & I_DIRTY_INODE)
215 + flags &= ~I_DIRTY_TIME;
216 + dirtytime = flags & I_DIRTY_TIME;
219 * Paired with smp_mb() in __writeback_single_inode() for the
220 @@ -1169,16 +1202,21 @@ void __mark_inode_dirty(struct inode *inode, int flags)
224 - if ((inode->i_state & flags) == flags)
225 + if (((inode->i_state & flags) == flags) ||
226 + (dirtytime && (inode->i_state & I_DIRTY_INODE)))
229 if (unlikely(block_dump))
230 block_dump___mark_inode_dirty(inode);
232 spin_lock(&inode->i_lock);
233 + if (dirtytime && (inode->i_state & I_DIRTY_INODE))
234 + goto out_unlock_inode;
235 if ((inode->i_state & flags) != flags) {
236 const int was_dirty = inode->i_state & I_DIRTY;
238 + if (flags & I_DIRTY_INODE)
239 + inode->i_state &= ~I_DIRTY_TIME;
240 inode->i_state |= flags;
243 @@ -1225,8 +1263,10 @@ void __mark_inode_dirty(struct inode *inode, int flags)
246 inode->dirtied_when = jiffies;
247 - list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
248 + list_move(&inode->i_wb_list, dirtytime ?
249 + &bdi->wb.b_dirty_time : &bdi->wb.b_dirty);
250 spin_unlock(&bdi->wb.list_lock);
251 + trace_writeback_dirty_inode_enqueue(inode);
254 bdi_wakeup_thread_delayed(bdi);
255 diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
256 index 6e600ab..15c44cf 100644
259 @@ -655,7 +655,7 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
261 struct address_space *mapping = file->f_mapping;
262 struct inode *inode = mapping->host;
263 - int sync_state = inode->i_state & I_DIRTY;
264 + int sync_state = inode->i_state & I_DIRTY_ALL;
265 struct gfs2_inode *ip = GFS2_I(inode);
266 int ret = 0, ret1 = 0;
268 @@ -668,7 +668,7 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
269 if (!gfs2_is_jdata(ip))
270 sync_state &= ~I_DIRTY_PAGES;
272 - sync_state &= ~I_DIRTY_SYNC;
273 + sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME);
276 ret = sync_inode_metadata(inode, 1);
277 diff --git a/fs/inode.c b/fs/inode.c
278 index aa149e7..4feb85c 100644
282 #include <linux/buffer_head.h> /* for inode_has_buffers */
283 #include <linux/ratelimit.h>
284 #include <linux/list_lru.h>
285 +#include <trace/events/writeback.h>
286 #include "internal.h"
290 * inode_sb_list_lock protects:
291 * sb->s_inodes, inode->i_sb_list
292 * bdi->wb.list_lock protects:
293 - * bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
294 + * bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_wb_list
295 * inode_hash_lock protects:
296 * inode_hashtable, inode->i_hash
298 @@ -416,7 +417,8 @@ static void inode_lru_list_add(struct inode *inode)
300 void inode_add_lru(struct inode *inode)
302 - if (!(inode->i_state & (I_DIRTY | I_SYNC | I_FREEING | I_WILL_FREE)) &&
303 + if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
304 + I_FREEING | I_WILL_FREE)) &&
305 !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
306 inode_lru_list_add(inode);
308 @@ -647,7 +649,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
309 spin_unlock(&inode->i_lock);
312 - if (inode->i_state & I_DIRTY && !kill_dirty) {
313 + if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
314 spin_unlock(&inode->i_lock);
317 @@ -1432,11 +1434,20 @@ static void iput_final(struct inode *inode)
319 void iput(struct inode *inode)
322 - BUG_ON(inode->i_state & I_CLEAR);
324 - if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
328 + BUG_ON(inode->i_state & I_CLEAR);
330 + if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
331 + if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
332 + atomic_inc(&inode->i_count);
333 + inode->i_state &= ~I_DIRTY_TIME;
334 + spin_unlock(&inode->i_lock);
335 + trace_writeback_lazytime_iput(inode);
336 + mark_inode_dirty_sync(inode);
343 @@ -1495,14 +1506,9 @@ static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
348 - * This does the actual work of updating an inodes time or version. Must have
349 - * had called mnt_want_write() before calling this.
351 -static int update_time(struct inode *inode, struct timespec *time, int flags)
352 +int generic_update_time(struct inode *inode, struct timespec *time, int flags)
354 - if (inode->i_op->update_time)
355 - return inode->i_op->update_time(inode, time, flags);
356 + int iflags = I_DIRTY_TIME;
359 inode->i_atime = *time;
360 @@ -1512,9 +1518,27 @@ static int update_time(struct inode *inode, struct timespec *time, int flags)
361 inode->i_ctime = *time;
363 inode->i_mtime = *time;
364 - mark_inode_dirty_sync(inode);
366 + if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION))
367 + iflags |= I_DIRTY_SYNC;
368 + __mark_inode_dirty(inode, iflags);
371 +EXPORT_SYMBOL(generic_update_time);
374 + * This does the actual work of updating an inodes time or version. Must have
375 + * had called mnt_want_write() before calling this.
377 +static int update_time(struct inode *inode, struct timespec *time, int flags)
379 + int (*update_time)(struct inode *, struct timespec *, int);
381 + update_time = inode->i_op->update_time ? inode->i_op->update_time :
382 + generic_update_time;
384 + return update_time(inode, time, flags);
388 * touch_atime - update the access time
389 diff --git a/fs/jfs/file.c b/fs/jfs/file.c
390 index 33aa0cc..10815f8 100644
393 @@ -39,7 +39,7 @@ int jfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
396 mutex_lock(&inode->i_mutex);
397 - if (!(inode->i_state & I_DIRTY) ||
398 + if (!(inode->i_state & I_DIRTY_ALL) ||
399 (datasync && !(inode->i_state & I_DIRTY_DATASYNC))) {
400 /* Make sure committed changes hit the disk */
401 jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1);
402 diff --git a/fs/libfs.c b/fs/libfs.c
403 index 005843c..b2ffdb0 100644
406 @@ -948,7 +948,7 @@ int __generic_file_fsync(struct file *file, loff_t start, loff_t end,
408 mutex_lock(&inode->i_mutex);
409 ret = sync_mapping_buffers(inode->i_mapping);
410 - if (!(inode->i_state & I_DIRTY))
411 + if (!(inode->i_state & I_DIRTY_ALL))
413 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
415 diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
416 index 0f96f71..8db932d 100644
417 --- a/fs/proc_namespace.c
418 +++ b/fs/proc_namespace.c
419 @@ -44,6 +44,7 @@ static int show_sb_opts(struct seq_file *m, struct super_block *sb)
420 { MS_SYNCHRONOUS, ",sync" },
421 { MS_DIRSYNC, ",dirsync" },
422 { MS_MANDLOCK, ",mand" },
423 + { MS_LAZYTIME, ",lazytime" },
426 const struct proc_fs_info *fs_infop;
427 diff --git a/fs/sync.c b/fs/sync.c
428 index 01d9f18..fbc98ee 100644
431 @@ -177,8 +177,16 @@ SYSCALL_DEFINE1(syncfs, int, fd)
433 int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync)
435 + struct inode *inode = file->f_mapping->host;
437 if (!file->f_op->fsync)
439 + if (!datasync && (inode->i_state & I_DIRTY_TIME)) {
440 + spin_lock(&inode->i_lock);
441 + inode->i_state &= ~I_DIRTY_TIME;
442 + spin_unlock(&inode->i_lock);
443 + mark_inode_dirty_sync(inode);
445 return file->f_op->fsync(file, start, end, datasync);
447 EXPORT_SYMBOL(vfs_fsync_range);
448 diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
449 index 5da6012..4cdf733 100644
450 --- a/include/linux/backing-dev.h
451 +++ b/include/linux/backing-dev.h
452 @@ -55,6 +55,7 @@ struct bdi_writeback {
453 struct list_head b_dirty; /* dirty inodes */
454 struct list_head b_io; /* parked for writeback */
455 struct list_head b_more_io; /* parked for more writeback */
456 + struct list_head b_dirty_time; /* time stamps are dirty */
457 spinlock_t list_lock; /* protects the b_* lists */
460 diff --git a/include/linux/fs.h b/include/linux/fs.h
461 index f90c028..5ca285f 100644
462 --- a/include/linux/fs.h
463 +++ b/include/linux/fs.h
464 @@ -1746,8 +1746,12 @@ struct super_operations {
465 #define __I_DIO_WAKEUP 9
466 #define I_DIO_WAKEUP (1 << I_DIO_WAKEUP)
467 #define I_LINKABLE (1 << 10)
468 +#define I_DIRTY_TIME (1 << 11)
469 +#define __I_DIRTY_TIME_EXPIRED 12
470 +#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED)
472 #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
473 +#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)
475 extern void __mark_inode_dirty(struct inode *, int);
476 static inline void mark_inode_dirty(struct inode *inode)
477 @@ -1910,6 +1914,7 @@ extern int current_umask(void);
479 extern void ihold(struct inode * inode);
480 extern void iput(struct inode *);
481 +extern int generic_update_time(struct inode *, struct timespec *, int);
483 static inline struct inode *file_inode(const struct file *f)
485 diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
486 index cee02d6..5ecb4c2 100644
487 --- a/include/trace/events/writeback.h
488 +++ b/include/trace/events/writeback.h
490 {I_FREEING, "I_FREEING"}, \
491 {I_CLEAR, "I_CLEAR"}, \
492 {I_SYNC, "I_SYNC"}, \
493 + {I_DIRTY_TIME, "I_DIRTY_TIME"}, \
494 + {I_DIRTY_TIME_EXPIRED, "I_DIRTY_TIME_EXPIRED"}, \
495 {I_REFERENCED, "I_REFERENCED"} \
498 @@ -68,6 +70,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
500 __array(char, name, 32)
501 __field(unsigned long, ino)
502 + __field(unsigned long, state)
503 __field(unsigned long, flags)
506 @@ -78,16 +81,25 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
507 strncpy(__entry->name,
508 bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
509 __entry->ino = inode->i_ino;
510 + __entry->state = inode->i_state;
511 __entry->flags = flags;
514 - TP_printk("bdi %s: ino=%lu flags=%s",
515 + TP_printk("bdi %s: ino=%lu state=%s flags=%s",
518 + show_inode_state(__entry->state),
519 show_inode_state(__entry->flags)
523 +DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,
525 + TP_PROTO(struct inode *inode, int flags),
527 + TP_ARGS(inode, flags)
530 DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,
532 TP_PROTO(struct inode *inode, int flags),
533 @@ -598,6 +610,52 @@ DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
534 TP_ARGS(inode, wbc, nr_to_write)
537 +DECLARE_EVENT_CLASS(writeback_lazytime_template,
538 + TP_PROTO(struct inode *inode),
543 + __field( dev_t, dev )
544 + __field(unsigned long, ino )
545 + __field(unsigned long, state )
546 + __field( __u16, mode )
547 + __field(unsigned long, dirtied_when )
551 + __entry->dev = inode->i_sb->s_dev;
552 + __entry->ino = inode->i_ino;
553 + __entry->state = inode->i_state;
554 + __entry->mode = inode->i_mode;
555 + __entry->dirtied_when = inode->dirtied_when;
558 + TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
559 + MAJOR(__entry->dev), MINOR(__entry->dev),
560 + __entry->ino, __entry->dirtied_when,
561 + show_inode_state(__entry->state), __entry->mode)
564 +DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime,
565 + TP_PROTO(struct inode *inode),
570 +DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime_iput,
571 + TP_PROTO(struct inode *inode),
576 +DEFINE_EVENT(writeback_lazytime_template, writeback_dirty_inode_enqueue,
578 + TP_PROTO(struct inode *inode),
583 #endif /* _TRACE_WRITEBACK_H */
585 /* This part must be outside protection */
586 diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
587 index 3735fa0..9b964a5 100644
588 --- a/include/uapi/linux/fs.h
589 +++ b/include/uapi/linux/fs.h
590 @@ -90,6 +90,7 @@ struct inodes_stat_t {
591 #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
592 #define MS_I_VERSION (1<<23) /* Update inode I_version field */
593 #define MS_STRICTATIME (1<<24) /* Always perform atime updates */
594 +#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
596 /* These sb flags are internal to the kernel */
597 #define MS_NOSEC (1<<28)
598 @@ -100,7 +101,8 @@ struct inodes_stat_t {
600 * Superblock flags that can be altered by MS_REMOUNT
602 -#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION)
603 +#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\
607 * Old magic mount flag and mask
608 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
609 index 0ae0df5..915feea 100644
610 --- a/mm/backing-dev.c
611 +++ b/mm/backing-dev.c
612 @@ -69,10 +69,10 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
613 unsigned long background_thresh;
614 unsigned long dirty_thresh;
615 unsigned long bdi_thresh;
616 - unsigned long nr_dirty, nr_io, nr_more_io;
617 + unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
620 - nr_dirty = nr_io = nr_more_io = 0;
621 + nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
622 spin_lock(&wb->list_lock);
623 list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
625 @@ -80,6 +80,9 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
627 list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
629 + list_for_each_entry(inode, &wb->b_dirty_time, i_wb_list)
630 + if (inode->i_state & I_DIRTY_TIME)
632 spin_unlock(&wb->list_lock);
634 global_dirty_limits(&background_thresh, &dirty_thresh);
635 @@ -98,6 +101,7 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
639 + "b_dirty_time: %10lu\n"
642 (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
643 @@ -111,6 +115,7 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
648 !list_empty(&bdi->bdi_list), bdi->state);
651 @@ -418,6 +423,7 @@ static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
652 INIT_LIST_HEAD(&wb->b_dirty);
653 INIT_LIST_HEAD(&wb->b_io);
654 INIT_LIST_HEAD(&wb->b_more_io);
655 + INIT_LIST_HEAD(&wb->b_dirty_time);
656 spin_lock_init(&wb->list_lock);
657 INIT_DELAYED_WORK(&wb->dwork, bdi_writeback_workfn);