#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
	BLKIO_POLICY_THROTL,		/* Throttling */
};

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX
#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)

#ifndef CONFIG_BLK_CGROUP
/* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */
extern struct cgroup_subsys blkio_subsys;
#define blkio_subsys_id blkio_subsys.subsys_id
#endif

enum stat_type {
	/* Total time spent (in ns) between request dispatch to the driver and
	 * request completion for IOs done by this cgroup. This may not be
	 * accurate when NCQ is turned on. */
	BLKIO_STAT_SERVICE_TIME = 0,
	/* Total time spent waiting in scheduler queue in ns */
	BLKIO_STAT_WAIT_TIME,
	/* Number of IOs queued up */
	BLKIO_STAT_QUEUED,
	/* All the single valued stats go below this */
	BLKIO_STAT_TIME,
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Time not charged to this cgroup */
	BLKIO_STAT_UNACCOUNTED_TIME,
	BLKIO_STAT_AVG_QUEUE_SIZE,
	BLKIO_STAT_IDLE_TIME,
	BLKIO_STAT_EMPTY_TIME,
	BLKIO_STAT_GROUP_WAIT_TIME,
	BLKIO_STAT_DEQUEUE
#endif
};
/* Per cpu stats */
enum stat_type_cpu {
	BLKIO_STAT_CPU_SECTORS,
	/* Total bytes transferred */
	BLKIO_STAT_CPU_SERVICE_BYTES,
	/* Total IOs serviced, post merge */
	BLKIO_STAT_CPU_SERVICED,
	/* Number of IOs merged */
	BLKIO_STAT_CPU_MERGED,
	BLKIO_STAT_CPU_NR
};
/* blkg state flags */
enum blkg_state_flags {
	BLKG_waiting = 0,
	BLKG_idling,
	BLKG_empty,
};
/* cgroup files owned by proportional weight policy */
enum blkcg_file_name_prop {
	BLKIO_PROP_weight = 1,
	BLKIO_PROP_weight_device,
	BLKIO_PROP_io_service_bytes,
	BLKIO_PROP_io_serviced,
	BLKIO_PROP_unaccounted_time,
	BLKIO_PROP_io_service_time,
	BLKIO_PROP_io_wait_time,
	BLKIO_PROP_avg_queue_size,
	BLKIO_PROP_group_wait_time,
	BLKIO_PROP_empty_time,
};
/* cgroup files owned by throttle policy */
enum blkcg_file_name_throtl {
	BLKIO_THROTL_read_bps_device,
	BLKIO_THROTL_write_bps_device,
	BLKIO_THROTL_read_iops_device,
	BLKIO_THROTL_write_iops_device,
	BLKIO_THROTL_io_service_bytes,
	BLKIO_THROTL_io_serviced,
};
struct blkio_cgroup {
	struct cgroup_subsys_state css;
	struct hlist_head blkg_list;
	struct list_head policy_list; /* list of blkio_policy_node */
};
struct blkio_group_stats {
	/* total disk time and nr sectors dispatched by this group */
	uint64_t time;
	uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Time not charged to this cgroup */
	uint64_t unaccounted_time;

	/* Sum of number of IOs queued across all samples */
	uint64_t avg_queue_size_sum;
	/* Count of samples taken for average */
	uint64_t avg_queue_size_samples;
	/* How many times this group has been removed from service tree */
	unsigned long dequeue;

	/* Total time spent waiting for it to be assigned a timeslice. */
	uint64_t group_wait_time;
	uint64_t start_group_wait_time;

	/* Time spent idling for this blkio_group */
	uint64_t idle_time;
	uint64_t start_idle_time;
	/*
	 * Total time when we have requests queued and do not contain the
	 * current active queue.
	 */
	uint64_t empty_time;
	uint64_t start_empty_time;
	uint16_t flags;
#endif
};
/* Per cpu blkio group stats */
struct blkio_group_stats_cpu {
	uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
	struct u64_stats_sync syncp;
};
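/*
 * Illustrative sketch (not part of this header): writers of these per-cpu
 * counters are expected to bracket updates with the u64_stats_sync
 * primitives from <linux/u64_stats_sync.h>, so that 64-bit counters can be
 * read consistently on 32-bit SMP. Assuming "stats_cpu" points at this
 * CPU's struct blkio_group_stats_cpu:
 *
 *	u64_stats_update_begin(&stats_cpu->syncp);
 *	stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED][BLKIO_STAT_TOTAL]++;
 *	u64_stats_update_end(&stats_cpu->syncp);
 */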
struct blkio_group {
	/* An rcu protected unique identifier for the group */
	void *key;
	struct hlist_node blkcg_node;
	unsigned short blkcg_id;
	/* Store cgroup path */
	char path[128];
	/* The device MKDEV(major, minor) this group has been created for */
	dev_t dev;
	/* policy which owns this blk group */
	enum blkio_policy_id plid;

	/* Need to serialize the stats in the case of reset/update */
	spinlock_t stats_lock;
	struct blkio_group_stats stats;
	/* Per cpu stats pointer */
	struct blkio_group_stats_cpu __percpu *stats_cpu;
};
struct blkio_policy_node {
	struct list_head node;
	dev_t dev;
	/* This node belongs to max bw policy or proportional weight policy */
	enum blkio_policy_id plid;
	/* cgroup file to which this rule belongs */
	int fileid;

	union {
		unsigned int weight;
		/*
		 * Rate read/write in terms of bytes per second
		 * Whether this rate represents read or write is determined
		 * by file type "fileid".
		 */
		u64 bps;
		unsigned int iops;
	} val;
};
extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
				     dev_t dev);
extern uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg,
				     dev_t dev);
extern uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg,
				     dev_t dev);
extern unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg,
				     dev_t dev);
extern unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg,
				     dev_t dev);
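/*
 * Illustrative sketch (hypothetical caller): a policy looking up the rule
 * configured for a particular block device passes that device's dev_t,
 * e.g. for MKDEV(8, 16):
 *
 *	unsigned int weight = blkcg_get_weight(blkcg, MKDEV(8, 16));
 *	uint64_t read_bps = blkcg_get_read_bps(blkcg, MKDEV(8, 16));
 */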
typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
typedef void (blkio_update_group_weight_fn) (void *key,
			struct blkio_group *blkg, unsigned int weight);
typedef void (blkio_update_group_read_bps_fn) (void *key,
			struct blkio_group *blkg, u64 read_bps);
typedef void (blkio_update_group_write_bps_fn) (void *key,
			struct blkio_group *blkg, u64 write_bps);
typedef void (blkio_update_group_read_iops_fn) (void *key,
			struct blkio_group *blkg, unsigned int read_iops);
typedef void (blkio_update_group_write_iops_fn) (void *key,
			struct blkio_group *blkg, unsigned int write_iops);
struct blkio_policy_ops {
	blkio_unlink_group_fn *blkio_unlink_group_fn;
	blkio_update_group_weight_fn *blkio_update_group_weight_fn;
	blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
	blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
	blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
	blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
};
struct blkio_policy_type {
	struct list_head list;
	struct blkio_policy_ops ops;
	enum blkio_policy_id plid;
};

/* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);
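/*
 * Illustrative sketch (not part of this header): a policy fills in a
 * blkio_policy_type with its callbacks and registers it at init time,
 * e.g. (the example_* callback names are hypothetical):
 *
 *	static struct blkio_policy_type blkio_policy_example = {
 *		.ops = {
 *			.blkio_unlink_group_fn = example_unlink_blkio_group,
 *			.blkio_update_group_read_bps_fn =
 *						example_update_read_bps,
 *		},
 *		.plid = BLKIO_POLICY_THROTL,
 *	};
 *
 *	blkio_policy_register(&blkio_policy_example);
 *	...
 *	blkio_policy_unregister(&blkio_policy_example);
 */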
static inline char *blkg_path(struct blkio_group *blkg)
{
	return blkg->path;
}
#else

struct blkio_policy_type {
};
static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }

#endif
#define BLKIO_WEIGHT_MIN	10
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500
#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				unsigned long dequeue);
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg);
void blkiocg_set_start_empty_time(struct blkio_group *blkg);
#define BLKG_FLAG_FNS(name)						\
static inline void blkio_mark_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags |= (1 << BLKG_##name);				\
}									\
static inline void blkio_clear_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags &= ~(1 << BLKG_##name);				\
}									\
static inline int blkio_blkg_##name(struct blkio_group_stats *stats)	\
{									\
	return (stats->flags & (1 << BLKG_##name)) != 0;		\
}									\

BLKG_FLAG_FNS(waiting)
BLKG_FLAG_FNS(idling)
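/*
 * For reference, BLKG_FLAG_FNS(waiting) above expands to three helpers
 * operating on the "flags" word of struct blkio_group_stats:
 *
 *	static inline void blkio_mark_blkg_waiting(
 *			struct blkio_group_stats *stats)
 *	{
 *		stats->flags |= (1 << BLKG_waiting);
 *	}
 *	static inline void blkio_clear_blkg_waiting(
 *			struct blkio_group_stats *stats)
 *	{
 *		stats->flags &= ~(1 << BLKG_waiting);
 *	}
 *	static inline int blkio_blkg_waiting(struct blkio_group_stats *stats)
 *	{
 *		return (stats->flags & (1 << BLKG_waiting)) != 0;
 *	}
 */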
#else
static inline void blkiocg_update_avg_queue_size_stats(
						struct blkio_group *blkg) {}
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
						unsigned long dequeue) {}
static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{}
static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
#endif
#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
extern struct blkio_cgroup blkio_root_cgroup;
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
	struct blkio_group *blkg, void *key, dev_t dev,
	enum blkio_policy_id plid);
extern int blkio_alloc_blkg_stats(struct blkio_group *blkg);
extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
						void *key);
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
					unsigned long time,
					unsigned long unaccounted_time);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
					bool direction, bool sync);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync);
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
		struct blkio_group *curr_blkg, bool direction, bool sync);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
					bool direction, bool sync);
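/*
 * Illustrative sketch (not part of this header): a blk-cgroup aware
 * scheduler feeds these hooks as a request moves through its lifetime,
 * roughly:
 *
 *	blkiocg_update_io_add_stats(blkg, curr_blkg, rq_is_write, rq_is_sync);
 *	...
 *	blkiocg_update_io_remove_stats(blkg, rq_is_write, rq_is_sync);
 *	blkiocg_update_dispatch_stats(blkg, nr_bytes, rq_is_write, rq_is_sync);
 *	...
 *	blkiocg_update_completion_stats(blkg, start_time, io_start_time,
 *					rq_is_write, rq_is_sync);
 *
 * where rq_is_write, rq_is_sync, nr_bytes and the timestamps are supplied
 * by the caller (the names here are placeholders).
 */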
#else
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
static inline struct blkio_cgroup *
task_blkio_cgroup(struct task_struct *tsk) { return NULL; }

static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, void *key, dev_t dev,
		enum blkio_policy_id plid) {}

static inline int blkio_alloc_blkg_stats(struct blkio_group *blkg) { return 0; }

static inline int
blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }

static inline struct blkio_group *
blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
						unsigned long time,
						unsigned long unaccounted_time)
{}
static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync) {}
static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
		uint64_t start_time, uint64_t io_start_time, bool direction,
		bool sync) {}
static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
		bool direction, bool sync) {}
static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
		struct blkio_group *curr_blkg, bool direction, bool sync) {}
static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
		bool direction, bool sync) {}
#endif
#endif /* _BLK_CGROUP_H */