/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/* Must be called with blkcg->lock held */
static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
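
/*
 * Illustrative sketch (not part of this file): an IO policy that is handed a
 * struct cgroup can recover the blkio-specific state and its weight like
 * this. The helper name example_policy_init_group() is hypothetical.
 *
 *	static void example_policy_init_group(struct cgroup *cgroup)
 *	{
 *		struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
 *		unsigned int weight = blkcg->weight;
 *
 *		use 'weight' to seed the policy's per-group scheduling state
 *	}
 */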

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
				bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
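
/*
 * Example of the bucketing above (illustration only): a synchronous write of
 * one request accounted with
 *
 *	blkio_add_stat(stat, 1, true, true);
 *
 * bumps both stat[BLKIO_STAT_WRITE] and stat[BLKIO_STAT_SYNC], so Read/Write
 * partition the total by direction while Sync/Async partition the same total
 * by synchrony.
 */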

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
						struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
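
/*
 * Usage note (a sketch of the expected calling convention, inferred from the
 * two helpers above rather than taken from any particular caller): a policy
 * brackets an idling window with
 *
 *	blkiocg_update_set_idle_time_stats(blkg);	when idling starts
 *	blkiocg_update_idle_time_stats(blkg);		when the window ends
 *
 * so that the elapsed sched_clock() delta is accumulated in idle_time.
 */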

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * The group is already marked empty. This can happen if a cfqq got a
	 * new request in the parent group and moved to this group while being
	 * added to the service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_group *curr_blkg, bool direction,
			bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
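
/*
 * Illustrative call site (a sketch, not copied from any scheduler): when a
 * policy queues a request 'rq' onto group 'blkg' while 'curr_blkg' is being
 * serviced, it would account it roughly as
 *
 *	blkiocg_update_io_add_stats(blkg, curr_blkg,
 *				    rq_data_dir(rq) == WRITE,
 *				    rq_is_sync(rq));
 *
 * and undo the queued count with blkiocg_update_io_remove_stats() if the
 * request is later removed without being dispatched.
 */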

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
			sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
			direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
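
/*
 * Illustrative call site (a sketch only): at dispatch time a policy would
 * typically pass the request payload size so the per-type request/byte
 * counters and the sector count stay consistent, e.g.
 *
 *	blkiocg_update_dispatch_stats(blkg, blk_rq_bytes(rq),
 *				      rq_data_dir(rq) == WRITE,
 *				      rq_is_sync(rq));
 *
 * The sector count above is derived as bytes >> 9, i.e. 512-byte units.
 */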

void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
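
/*
 * Timeline implied by the function above (informational, inferred from the
 * arithmetic rather than from callers): start_time is when the request was
 * queued and io_start_time is when it was dispatched, both on the
 * sched_clock() timebase. wait_time then covers queue -> dispatch and
 * service_time covers dispatch -> completion, split per direction/sync.
 */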

void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
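
/*
 * Illustrative sketch of group creation by a policy (names such as
 * example_group and example_link_group are hypothetical, not from this
 * file): the policy embeds a blkio_group in its own per-group structure and
 * links it to the cgroup with its queue-private pointer as the lookup key.
 *
 *	struct example_group {
 *		struct blkio_group blkg;
 *		policy-private scheduling state follows
 *	};
 *
 *	static void example_link_group(struct blkio_cgroup *blkcg,
 *				       struct example_group *eg,
 *				       void *queue_key, dev_t dev)
 *	{
 *		blkiocg_add_blkio_group(blkcg, &eg->blkg, queue_key, dev);
 *	}
 *
 * The same queue_key is later passed to blkiocg_lookup_group() to find the
 * group again under rcu_read_lock().
 */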

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
}

/*
 * Returns 0 if the blkio_group was still on the cgroup list. Otherwise
 * returns 1, indicating that the blkio_group was unhashed by the time we
 * got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
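
/*
 * Illustrative use (a sketch; 'q' stands for whatever pointer the policy
 * registered as the key in blkiocg_add_blkio_group()):
 *
 *	rcu_read_lock();
 *	blkg = blkiocg_lookup_group(blkcg, q);
 *	if (blkg)
 *		use the group while still under rcu_read_lock()
 *	rcu_read_unlock();
 */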

#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				  struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION
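
/*
 * Note: the macro above expands to blkiocg_weight_read(), which backs the
 * read_u64 handler of the "weight" control file declared in blkio_files[]
 * below; reading the weight file simply reports blkcg->weight.
 */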

static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev);
		/* A per-device weight overrides the cgroup-wide weight */
		if (pn)
			continue;

		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
					blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);

	return 0;
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		/* queued stats should not be cleared */
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);

	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
				int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total)		\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
		struct cftype *cftype, struct cgroup_map_cb *cb)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
	uint64_t cgroup_total = 0;					\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev) {					\
			spin_lock_irq(&blkg->stats_lock);		\
			cgroup_total += blkio_get_stat(blkg, cb,	\
						blkg->dev, type);	\
			spin_unlock_irq(&blkg->stats_lock);		\
		}							\
	}								\
	if (show_total)							\
		cb->fill(cb, "Total", cgroup_total);			\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
SHOW_FUNCTION_PER_GROUP(group_wait_time, BLKIO_STAT_GROUP_WAIT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(idle_time, BLKIO_STAT_IDLE_TIME, 0);
SHOW_FUNCTION_PER_GROUP(empty_time, BLKIO_STAT_EMPTY_TIME, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP
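
/*
 * For reference (derived from the invocations above and from blkio_files[]
 * below): each SHOW_FUNCTION_PER_GROUP() instance becomes the read_map
 * handler of the matching control file, so a mounted blkio cgroup exposes
 * time, sectors, io_service_bytes, io_serviced, io_service_time,
 * io_wait_time, io_merged and io_queued files, plus the debug-only ones when
 * CONFIG_DEBUG_BLK_CGROUP is set. Entries are keyed
 * "major:minor [Read|Write|Sync|Async|Total]".
 */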

static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}

static int blkio_policy_parse_and_set(char *buf,
				      struct blkio_policy_node *newpn)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret;
	unsigned long major, minor, temp;
	int i = 0;
	dev_t dev;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent input of too many fields */
		if (i == 3)
			break;
	}

	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		return -EINVAL;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;

	newpn->dev = dev;

	if (s[1] == NULL)
		return -EINVAL;

	ret = strict_strtoul(s[1], 10, &temp);
	if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
	    temp > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	newpn->weight = temp;

	return 0;
}

unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev);
	if (pn)
		return pn->weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);
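
/*
 * Illustrative use (a sketch; 'blkg' is a group the policy already linked
 * with blkiocg_add_blkio_group()): a policy resolves the effective weight of
 * a group, honouring any per-device override, with
 *
 *	unsigned int weight = blkcg_get_weight(blkcg, blkg->dev);
 *
 * and falls back to blkcg->weight automatically when no override exists.
 */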

static int blkiocg_weight_device_write(struct cgroup *cgrp, struct cftype *cft,
				       const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	int keep_newpn = 0;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev);
	if (!pn) {
		if (newpn->weight != 0) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (newpn->weight == 0) {
		/* weight == 0 means deleting a specific weight */
		blkio_policy_delete_node(pn);
		kfree(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	pn->weight = newpn->weight;

update_io_group:
	/* update weight for each cfqg */
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (newpn->dev == blkg->dev) {
			list_for_each_entry(blkiop, &blkio_list, list)
				blkiop->ops.blkio_update_group_weight_fn(blkg,
					newpn->weight ?
					newpn->weight : blkcg->weight);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}
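
/*
 * Interface note (derived from blkio_policy_parse_and_set() above): the
 * weight_device file accepts "<major>:<minor> <weight>" per write, e.g. the
 * string "8:16 300" sets a per-device weight of 300 for device 8:16, and a
 * weight of 0 removes a previously set per-device override.
 */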

static int blkiocg_weight_device_read(struct cgroup *cgrp, struct cftype *cft,
				      struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	struct blkio_policy_node *pn;

	seq_printf(m, "dev\tweight\n");

	blkcg = cgroup_to_blkio_cgroup(cgrp);
	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				   MINOR(pn->dev), pn->weight);
		}
		spin_unlock_irq(&blkcg->lock);
	}

	return 0;
}

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.read_seq_string = blkiocg_weight_device_read,
		.write_string = blkiocg_weight_device_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_map = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_map = blkiocg_sectors_read,
	},
	{
		.name = "io_service_bytes",
		.read_map = blkiocg_io_service_bytes_read,
	},
	{
		.name = "io_serviced",
		.read_map = blkiocg_io_serviced_read,
	},
	{
		.name = "io_service_time",
		.read_map = blkiocg_io_service_time_read,
	},
	{
		.name = "io_wait_time",
		.read_map = blkiocg_io_wait_time_read,
	},
	{
		.name = "io_merged",
		.read_map = blkiocg_io_merged_read,
	},
	{
		.name = "io_queued",
		.read_map = blkiocg_io_queued_read,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_map = blkiocg_avg_queue_size_read,
	},
	{
		.name = "group_wait_time",
		.read_map = blkiocg_group_wait_time_read,
	},
	{
		.name = "idle_time",
		.read_map = blkiocg_idle_time_read,
	},
	{
		.name = "empty_time",
		.read_map = blkiocg_empty_time_read,
	},
	{
		.name = "dequeue",
		.read_map = blkiocg_dequeue_read,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as the associated cgroup
		 * is going away. Let all the IO controlling policies know
		 * about this event. Currently this is a static call to one IO
		 * controlling policy. Once we have more policies in place, we
		 * need some dynamic registration of callback function.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
*
957 blkiocg_create(struct cgroup_subsys
*subsys
, struct cgroup
*cgroup
)
959 struct blkio_cgroup
*blkcg
;
960 struct cgroup
*parent
= cgroup
->parent
;
963 blkcg
= &blkio_root_cgroup
;
967 /* Currently we do not support hierarchy deeper than two level (0,1) */
968 if (parent
!= cgroup
->top_cgroup
)
969 return ERR_PTR(-EINVAL
);
971 blkcg
= kzalloc(sizeof(*blkcg
), GFP_KERNEL
);
973 return ERR_PTR(-ENOMEM
);
975 blkcg
->weight
= BLKIO_WEIGHT_DEFAULT
;
977 spin_lock_init(&blkcg
->lock
);
978 INIT_HLIST_HEAD(&blkcg
->blkg_list
);
980 INIT_LIST_HEAD(&blkcg
->policy_list
);

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
			struct cgroup *prev, struct task_struct *tsk,
			bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);
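
/*
 * Illustrative registration sketch (hypothetical names, not from this file):
 * an IO scheduler that wants to participate in the blkio controller fills in
 * a blkio_policy_type with the callbacks used elsewhere in this file and
 * registers/unregisters it at module init/exit time.
 *
 *	static struct blkio_policy_type example_blkio_policy = {
 *		.ops = {
 *			.blkio_unlink_group_fn = example_unlink_blkio_group,
 *			.blkio_update_group_weight_fn =
 *						example_update_group_weight,
 *		},
 *	};
 *
 *	blkio_policy_register(&example_blkio_policy);
 *	blkio_policy_unregister(&example_blkio_policy);
 */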

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);

MODULE_LICENSE("GPL");