/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>	/* kzalloc(), kstrdup(), kfree() used below */
#include "blk-cgroup.h"
#include <linux/genhd.h>
#define MAX_KEY_LEN 100
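/*
 * Stat keys handed to userspace are built as "major:minor" plus an
 * optional sub-type suffix, e.g. "8:16 Read"; see blkio_get_key_name()
 * below for the exact format.
 */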
static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);
struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);
static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
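/*
 * blkio_subsys is registered with the cgroup core at module load time via
 * cgroup_load_subsys() in init_cgroup_blkio() at the bottom of this file.
 */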
/* Must be called with blkcg->lock held */
static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}
/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}
/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev)
			return pn;
	}

	return NULL;
}
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
void blkio_group_init(struct blkio_group *blkg)
{
	spin_lock_init(&blkg->stats_lock);
}
EXPORT_SYMBOL_GPL(blkio_group_init);
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
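/*
 * Illustrative: a single synchronous write accounted with
 * blkio_add_stat(stat, 1, true, true) bumps both the WRITE and the SYNC
 * buckets; READ/WRITE and SYNC/ASYNC are tracked as independent pairs.
 */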
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
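/*
 * The average queue depth reported to userspace is
 * avg_queue_size_sum / avg_queue_size_samples; the division itself happens
 * at read time in blkio_get_stat() below.
 */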
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
void blkiocg_set_start_empty_time(struct blkio_group *blkg, bool ignore)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * If ignore is set, we do not panic on the empty flag being set
	 * already. This is to avoid cases where there are superfluous
	 * timeslice complete events (e.g., forced_dispatch in CFQ) when no
	 * IOs are served, which could trigger the empty check incorrectly.
	 */
	BUG_ON(!ignore && blkio_blkg_empty(stats));
	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;	/* bytes to 512-byte sectors */
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
			sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
			direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
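/*
 * start_time is when the request entered the queue and io_start_time when
 * it was dispatched to the device, so wait time is io_start_time -
 * start_time and service time is completion (now) - io_start_time. The
 * time_after64() checks skip samples where sched_clock() values compared
 * across CPUs would appear to go backwards.
 */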
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			     struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}
/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
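/*
 * Illustrative caller: an IO policy (CFQ today) invokes
 * blkiocg_del_blkio_group() while freeing a group; a return value of 1
 * tells it that blkiocg_destroy() already unhashed the group from the
 * cgroup list.
 */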
/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				  struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION
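/*
 * SHOW_FUNCTION(weight) expands to blkiocg_weight_read(), which is wired
 * to the "weight" cftype in blkio_files[] below.
 */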
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev);
		/* a per-device rule overrides the cgroup-wide weight */
		if (pn)
			continue;

		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
					blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		/* queued stats are still live, preserve them across reset */
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
			       int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}
/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total)		\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
		struct cftype *cftype, struct cgroup_map_cb *cb)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
	uint64_t cgroup_total = 0;					\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev) {					\
			spin_lock_irq(&blkg->stats_lock);		\
			cgroup_total += blkio_get_stat(blkg, cb,	\
						blkg->dev, type);	\
			spin_unlock_irq(&blkg->stats_lock);		\
		}							\
	}								\
	if (show_total)							\
		cb->fill(cb, "Total", cgroup_total);			\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}
SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
SHOW_FUNCTION_PER_GROUP(group_wait_time, BLKIO_STAT_GROUP_WAIT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(idle_time, BLKIO_STAT_IDLE_TIME, 0);
SHOW_FUNCTION_PER_GROUP(empty_time, BLKIO_STAT_EMPTY_TIME, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP
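/*
 * Sample output (illustrative values) when reading blkio.io_service_bytes
 * for a group doing IO on device 8:16:
 *
 *	8:16 Read 131072
 *	8:16 Write 65536
 *	8:16 Sync 131072
 *	8:16 Async 65536
 *	8:16 Total 196608
 *	Total 196608
 *
 * blkio_get_stat() emits the per-device lines; since show_total is set,
 * the macro above appends the cgroup-wide "Total".
 */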
static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}
static int blkio_policy_parse_and_set(char *buf,
				      struct blkio_policy_node *newpn)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret, i = 0;
	unsigned long major, minor, temp;
	dev_t dev;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!strlen(p))
			continue;
		s[i++] = p;

		/* Prevent from inputing too many things */
		if (i == 3)
			break;
	}

	if (i == 1)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		return -EINVAL;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;

	newpn->dev = dev;

	if (s[1] == NULL)
		return -EINVAL;

	ret = strict_strtoul(s[1], 10, &temp);
	if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
	    temp > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	newpn->weight = temp;

	return 0;
}
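/*
 * The expected input is "major:minor weight", e.g. "8:16 512" to give
 * device 8:16 a weight of 512, or "8:16 0" to delete the per-device rule
 * (see blkiocg_weight_device_write() below).
 */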
unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev);
	if (pn)
		return pn->weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);
static int blkiocg_weight_device_write(struct cgroup *cgrp, struct cftype *cft,
				       const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	int keep_newpn = 0;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev);
	if (!pn) {
		if (newpn->weight != 0) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (newpn->weight == 0) {
		/* weight == 0 means deleting a specific weight */
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	pn->weight = newpn->weight;

update_io_group:
	/* update weight for each cfqg */
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (newpn->dev == blkg->dev) {
			list_for_each_entry(blkiop, &blkio_list, list)
				blkiop->ops.blkio_update_group_weight_fn(blkg,
					newpn->weight ?
						newpn->weight :
						blkcg->weight);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}
static int blkiocg_weight_device_read(struct cgroup *cgrp, struct cftype *cft,
				      struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	struct blkio_policy_node *pn;

	seq_printf(m, "dev\tweight\n");

	blkcg = cgroup_to_blkio_cgroup(cgrp);
	if (list_empty(&blkcg->policy_list))
		goto out;

	spin_lock_irq(&blkcg->lock);
	list_for_each_entry(pn, &blkcg->policy_list, node) {
		seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
			   MINOR(pn->dev), pn->weight);
	}
	spin_unlock_irq(&blkcg->lock);
out:
	return 0;
}
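/*
 * Usage from userspace (illustrative; mount point and group name are
 * examples only):
 *
 *	# echo "8:16 512" > /cgroup/blkio/grp1/blkio.weight_device
 *	# cat /cgroup/blkio/grp1/blkio.weight_device
 *	dev	weight
 *	8:16	512
 */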
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.read_seq_string = blkiocg_weight_device_read,
		.write_string = blkiocg_weight_device_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_map = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_map = blkiocg_sectors_read,
	},
	{
		.name = "io_service_bytes",
		.read_map = blkiocg_io_service_bytes_read,
	},
	{
		.name = "io_serviced",
		.read_map = blkiocg_io_serviced_read,
	},
	{
		.name = "io_service_time",
		.read_map = blkiocg_io_service_time_read,
	},
	{
		.name = "io_wait_time",
		.read_map = blkiocg_io_wait_time_read,
	},
	{
		.name = "io_merged",
		.read_map = blkiocg_io_merged_read,
	},
	{
		.name = "io_queued",
		.read_map = blkiocg_io_queued_read,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_map = blkiocg_avg_queue_size_read,
	},
	{
		.name = "group_wait_time",
		.read_map = blkiocg_group_wait_time_read,
	},
	{
		.name = "idle_time",
		.read_map = blkiocg_idle_time_read,
	},
	{
		.name = "empty_time",
		.read_map = blkiocg_empty_time_read,
	},
	{
		.name = "dequeue",
		.read_map = blkiocg_dequeue_read,
	},
#endif
};
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked as associated cgroup is going
	 * away. Let all the IO controlling policies know about this event.
	 *
	 * Currently this is a static call to one io controlling policy. Once
	 * we have more policies in place, we need some dynamic registration
	 * of callback functions.
	 */
	spin_lock(&blkio_list_lock);
	list_for_each_entry(blkiop, &blkio_list, list)
		blkiop->ops.blkio_unlink_group_fn(key, blkg);
	spin_unlock(&blkio_list_lock);
	goto remove_entry;

done:
	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
*
964 blkiocg_create(struct cgroup_subsys
*subsys
, struct cgroup
*cgroup
)
966 struct blkio_cgroup
*blkcg
, *parent_blkcg
;
968 if (!cgroup
->parent
) {
969 blkcg
= &blkio_root_cgroup
;
973 /* Currently we do not support hierarchy deeper than two level (0,1) */
974 parent_blkcg
= cgroup_to_blkio_cgroup(cgroup
->parent
);
975 if (css_depth(&parent_blkcg
->css
) > 0)
976 return ERR_PTR(-EINVAL
);
978 blkcg
= kzalloc(sizeof(*blkcg
), GFP_KERNEL
);
980 return ERR_PTR(-ENOMEM
);
982 blkcg
->weight
= BLKIO_WEIGHT_DEFAULT
;
984 spin_lock_init(&blkcg
->lock
);
985 INIT_HLIST_HEAD(&blkcg
->blkg_list
);
987 INIT_LIST_HEAD(&blkcg
->policy_list
);
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
			      struct cgroup *cgroup, struct task_struct *tsk,
			      bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}
static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
			   struct cgroup *prev, struct task_struct *tsk,
			   bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);
void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");