/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
16 #include <linux/cgroup.h>
/* Identifiers for the policies that can own a blkio group. */
enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
	BLKIO_POLICY_THROTL,		/* Throttling */
};
/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX
26 #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
28 #ifndef CONFIG_BLK_CGROUP
29 /* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */
30 extern struct cgroup_subsys blkio_subsys
;
31 #define blkio_subsys_id blkio_subsys.subsys_id
35 /* Total time spent (in ns) between request dispatch to the driver and
36 * request completion for IOs doen by this cgroup. This may not be
37 * accurate when NCQ is turned on. */
38 BLKIO_STAT_SERVICE_TIME
= 0,
39 /* Total bytes transferred */
40 BLKIO_STAT_SERVICE_BYTES
,
41 /* Total IOs serviced, post merge */
43 /* Total time spent waiting in scheduler queue in ns */
45 /* Number of IOs merged */
47 /* Number of IOs queued up */
49 /* All the single valued stats go below this */
52 #ifdef CONFIG_DEBUG_BLK_CGROUP
53 BLKIO_STAT_AVG_QUEUE_SIZE
,
55 BLKIO_STAT_EMPTY_TIME
,
56 BLKIO_STAT_GROUP_WAIT_TIME
,
69 /* blkg state flags */
70 enum blkg_state_flags
{
76 /* cgroup files owned by proportional weight policy */
77 enum blkcg_file_name_prop
{
78 BLKIO_PROP_weight
= 1,
79 BLKIO_PROP_weight_device
,
80 BLKIO_PROP_io_service_bytes
,
81 BLKIO_PROP_io_serviced
,
84 BLKIO_PROP_io_service_time
,
85 BLKIO_PROP_io_wait_time
,
88 BLKIO_PROP_avg_queue_size
,
89 BLKIO_PROP_group_wait_time
,
91 BLKIO_PROP_empty_time
,
/* cgroup files owned by throttle policy */
enum blkcg_file_name_throtl {
	BLKIO_THROTL_read_bps_device,
	BLKIO_THROTL_write_bps_device,
	BLKIO_THROTL_read_iops_device,
	BLKIO_THROTL_write_iops_device,
	BLKIO_THROTL_io_service_bytes,
	BLKIO_THROTL_io_serviced,
};
105 struct blkio_cgroup
{
106 struct cgroup_subsys_state css
;
109 struct hlist_head blkg_list
;
110 struct list_head policy_list
; /* list of blkio_policy_node */
113 struct blkio_group_stats
{
114 /* total disk time and nr sectors dispatched by this group */
117 uint64_t stat_arr
[BLKIO_STAT_QUEUED
+ 1][BLKIO_STAT_TOTAL
];
118 #ifdef CONFIG_DEBUG_BLK_CGROUP
119 /* Sum of number of IOs queued across all samples */
120 uint64_t avg_queue_size_sum
;
121 /* Count of samples taken for average */
122 uint64_t avg_queue_size_samples
;
123 /* How many times this group has been removed from service tree */
124 unsigned long dequeue
;
126 /* Total time spent waiting for it to be assigned a timeslice. */
127 uint64_t group_wait_time
;
128 uint64_t start_group_wait_time
;
130 /* Time spent idling for this blkio_group */
132 uint64_t start_idle_time
;
134 * Total time when we have requests queued and do not contain the
135 * current active queue.
138 uint64_t start_empty_time
;
144 /* An rcu protected unique identifier for the group */
146 struct hlist_node blkcg_node
;
147 unsigned short blkcg_id
;
148 /* Store cgroup path */
150 /* The device MKDEV(major, minor), this group has been created for */
152 /* policy which owns this blk group */
153 enum blkio_policy_id plid
;
155 /* Need to serialize the stats in the case of reset/update */
156 spinlock_t stats_lock
;
157 struct blkio_group_stats stats
;
160 struct blkio_policy_node
{
161 struct list_head node
;
163 /* This node belongs to max bw policy or porportional weight policy */
164 enum blkio_policy_id plid
;
165 /* cgroup file to which this rule belongs to */
171 * Rate read/write in terms of byptes per second
172 * Whether this rate represents read or write is determined
173 * by file type "fileid".
180 extern unsigned int blkcg_get_weight(struct blkio_cgroup
*blkcg
,
182 extern uint64_t blkcg_get_read_bps(struct blkio_cgroup
*blkcg
,
184 extern uint64_t blkcg_get_write_bps(struct blkio_cgroup
*blkcg
,
186 extern unsigned int blkcg_get_read_iops(struct blkio_cgroup
*blkcg
,
188 extern unsigned int blkcg_get_write_iops(struct blkio_cgroup
*blkcg
,
191 typedef void (blkio_unlink_group_fn
) (void *key
, struct blkio_group
*blkg
);
193 typedef void (blkio_update_group_weight_fn
) (void *key
,
194 struct blkio_group
*blkg
, unsigned int weight
);
195 typedef void (blkio_update_group_read_bps_fn
) (void * key
,
196 struct blkio_group
*blkg
, u64 read_bps
);
197 typedef void (blkio_update_group_write_bps_fn
) (void *key
,
198 struct blkio_group
*blkg
, u64 write_bps
);
199 typedef void (blkio_update_group_read_iops_fn
) (void *key
,
200 struct blkio_group
*blkg
, unsigned int read_iops
);
201 typedef void (blkio_update_group_write_iops_fn
) (void *key
,
202 struct blkio_group
*blkg
, unsigned int write_iops
);
204 struct blkio_policy_ops
{
205 blkio_unlink_group_fn
*blkio_unlink_group_fn
;
206 blkio_update_group_weight_fn
*blkio_update_group_weight_fn
;
207 blkio_update_group_read_bps_fn
*blkio_update_group_read_bps_fn
;
208 blkio_update_group_write_bps_fn
*blkio_update_group_write_bps_fn
;
209 blkio_update_group_read_iops_fn
*blkio_update_group_read_iops_fn
;
210 blkio_update_group_write_iops_fn
*blkio_update_group_write_iops_fn
;
213 struct blkio_policy_type
{
214 struct list_head list
;
215 struct blkio_policy_ops ops
;
216 enum blkio_policy_id plid
;
/* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);
223 static inline char *blkg_path(struct blkio_group
*blkg
)
233 struct blkio_policy_type
{
/* No-op stub: blkio controller compiled out. */
static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
/* No-op stub: blkio controller compiled out. */
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
/* Stub: no blk-cgroup support, so a group has no cgroup path. */
static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
/* Valid range and default for the proportional-weight policy's weight. */
#define BLKIO_WEIGHT_MIN	100
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500
247 #ifdef CONFIG_DEBUG_BLK_CGROUP
248 void blkiocg_update_avg_queue_size_stats(struct blkio_group
*blkg
);
249 void blkiocg_update_dequeue_stats(struct blkio_group
*blkg
,
250 unsigned long dequeue
);
251 void blkiocg_update_set_idle_time_stats(struct blkio_group
*blkg
);
252 void blkiocg_update_idle_time_stats(struct blkio_group
*blkg
);
253 void blkiocg_set_start_empty_time(struct blkio_group
*blkg
);
/*
 * Generate mark/clear/test helpers for one bit of blkio_group_stats->flags;
 * the bit index comes from the matching BLKG_<name> flag value.
 */
#define BLKG_FLAG_FNS(name)						\
static inline void blkio_mark_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags |= (1 << BLKG_##name);				\
}									\
static inline void blkio_clear_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags &= ~(1 << BLKG_##name);				\
}									\
static inline int blkio_blkg_##name(struct blkio_group_stats *stats)	\
{									\
	return (stats->flags & (1 << BLKG_##name)) != 0;		\
}
271 BLKG_FLAG_FNS(waiting)
272 BLKG_FLAG_FNS(idling
)
/* No-op stub used when debug blk-cgroup stats are compiled out. */
static inline void blkiocg_update_avg_queue_size_stats(
		struct blkio_group *blkg) {}
/* No-op stub used when debug blk-cgroup stats are compiled out. */
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
						unsigned long dequeue) {}
/* No-op stub used when debug blk-cgroup stats are compiled out. */
static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{}
/* No-op stub used when debug blk-cgroup stats are compiled out. */
static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
/* No-op stub used when debug blk-cgroup stats are compiled out. */
static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
286 #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
287 extern struct blkio_cgroup blkio_root_cgroup
;
288 extern struct blkio_cgroup
*cgroup_to_blkio_cgroup(struct cgroup
*cgroup
);
289 extern void blkiocg_add_blkio_group(struct blkio_cgroup
*blkcg
,
290 struct blkio_group
*blkg
, void *key
, dev_t dev
,
291 enum blkio_policy_id plid
);
292 extern int blkiocg_del_blkio_group(struct blkio_group
*blkg
);
293 extern struct blkio_group
*blkiocg_lookup_group(struct blkio_cgroup
*blkcg
,
295 void blkiocg_update_timeslice_used(struct blkio_group
*blkg
,
297 void blkiocg_update_dispatch_stats(struct blkio_group
*blkg
, uint64_t bytes
,
298 bool direction
, bool sync
);
299 void blkiocg_update_completion_stats(struct blkio_group
*blkg
,
300 uint64_t start_time
, uint64_t io_start_time
, bool direction
, bool sync
);
301 void blkiocg_update_io_merged_stats(struct blkio_group
*blkg
, bool direction
,
303 void blkiocg_update_io_add_stats(struct blkio_group
*blkg
,
304 struct blkio_group
*curr_blkg
, bool direction
, bool sync
);
305 void blkiocg_update_io_remove_stats(struct blkio_group
*blkg
,
306 bool direction
, bool sync
);
/* Stub: no blk-cgroup support, so no cgroup maps to a blkio cgroup. */
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
312 static inline void blkiocg_add_blkio_group(struct blkio_cgroup
*blkcg
,
313 struct blkio_group
*blkg
, void *key
, dev_t dev
,
314 enum blkio_policy_id plid
) {}
/* Stub: deletion trivially succeeds when blk-cgroup is compiled out. */
static inline int
blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
/* Stub: lookups always miss when blk-cgroup is compiled out. */
static inline struct blkio_group *
blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
/* No-op stat-update stub (blk-cgroup compiled out). */
static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
						unsigned long time) {}
/* No-op stat-update stub (blk-cgroup compiled out). */
static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync) {}
/* No-op stat-update stub (blk-cgroup compiled out). */
static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
		uint64_t start_time, uint64_t io_start_time, bool direction,
		bool sync) {}
/* No-op stat-update stub (blk-cgroup compiled out). */
static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
						bool direction, bool sync) {}
/* No-op stat-update stub (blk-cgroup compiled out). */
static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
		struct blkio_group *curr_blkg, bool direction, bool sync) {}
/* No-op stat-update stub (blk-cgroup compiled out). */
static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync) {}
335 #endif /* _BLK_CGROUP_H */