/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <asm/atomic.h>

struct page;
struct device;
struct dentry;
/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
	BDI_pending,		/* On its way to being activated */
	BDI_wb_alloc,		/* Default embedded wb allocated */
	BDI_async_congested,	/* The async (write) queue is getting full */
	BDI_sync_congested,	/* The sync queue is getting full */
	BDI_registered,		/* bdi_register() was done */
	BDI_unused,		/* Available bits start here */
};

typedef int (congested_fn)(void *, int);

enum bdi_stat_item {
	BDI_RECLAIMABLE,
	BDI_WRITEBACK,
	NR_BDI_STAT_ITEMS,
};

#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
struct bdi_writeback {
	struct list_head list;			/* hangs off the bdi */

	struct backing_dev_info *bdi;		/* our parent bdi */

	unsigned long last_old_flush;		/* last old data flush */

	struct task_struct	*task;		/* writeback task */
	struct list_head	b_dirty;	/* dirty inodes */
	struct list_head	b_io;		/* parked for writeback */
	struct list_head	b_more_io;	/* parked for more writeback */
};
struct backing_dev_info {
	struct list_head bdi_list;
	struct rcu_head rcu_head;
	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
	unsigned long state;	/* Always use atomic bitops on this */
	unsigned int capabilities; /* Device capabilities */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */
	void (*unplug_io_fn)(struct backing_dev_info *, struct page *);

	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

	struct prop_local_percpu completions;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;  /* default writeback info for this bdi */
	spinlock_t wb_lock;	  /* protects update side of wb_list */
	struct list_head wb_list; /* the flusher threads hanging off this bdi */
	unsigned long wb_mask;	  /* bitmask of registered tasks */
	unsigned int wb_cnt;	  /* number of registered tasks */

	struct list_head work_list;

#ifdef CONFIG_DEBUG_FS
	struct dentry	*debug_dir;
	struct dentry	*debug_stats;
#endif
};
int bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
				long nr_pages);
int bdi_writeback_task(struct bdi_writeback *wb);
int bdi_has_dirty_io(struct backing_dev_info *bdi);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
	return !list_empty(&wb->b_dirty) ||
	       !list_empty(&wb->b_io) ||
	       !list_empty(&wb->b_more_io);
}
static inline void __add_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item, s64 amount)
{
	__percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline s64 bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __bdi_stat_sum(bdi, item);
	local_irq_restore(flags);

	return sum;
}
extern void bdi_writeout_inc(struct backing_dev_info *bdi);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
	return nr_cpu_ids * BDI_STAT_BATCH;
}

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs)
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 *
 * These flags let !MMU mmap() govern direct device mapping vs immediate
 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
 *
 * BDI_CAP_MAP_COPY:       Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT:     Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:       Can be mapped for reading
 * BDI_CAP_WRITE_MAP:      Can be mapped for writing
 * BDI_CAP_EXEC_MAP:       Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_MAP_COPY	0x00000004
#define BDI_CAP_MAP_DIRECT	0x00000008
#define BDI_CAP_READ_MAP	0x00000010
#define BDI_CAP_WRITE_MAP	0x00000020
#define BDI_CAP_EXEC_MAP	0x00000040
#define BDI_CAP_NO_ACCT_WB	0x00000080
#define BDI_CAP_SWAP_BACKED	0x00000100

#define BDI_CAP_VMFLAGS \
	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

#if defined(VM_MAYREAD) && \
	(BDI_CAP_READ_MAP != VM_MAYREAD || \
	 BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
	 BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif
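/*
 * Example (sketch only): a RAM-backed filesystem whose pages should neither
 * be written back nor dirty-accounted might describe its bdi roughly as
 *
 *	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY;
 *
 * The exact flag combination depends on the filesystem.
 */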
extern struct backing_dev_info default_backing_dev_info;
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page);

int writeback_in_progress(struct backing_dev_info *bdi);
static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);
	return (bdi->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << BDI_sync_congested) |
				  (1 << BDI_async_congested));
}
enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
{
	return bdi == &default_backing_dev_info;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
	return bdi_cap_swap_backed(mapping->backing_dev_info);
}
static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}

#endif		/* _LINUX_BACKING_DEV_H */