/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100
static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);
static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach_task(struct cgroup *, struct task_struct *);
static void blkiocg_attach_task(struct cgroup *, struct task_struct *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
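
/*
 * Illustration of the encoding above (not part of the original comments):
 * BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, BLKIO_THROTL_read_bps_device)
 * packs the owning policy id into the upper 16 bits of cft->private and the
 * per-policy file id into the lower 16 bits, so a single read/write handler
 * can recover both with BLKIOFILE_POLICY() and BLKIOFILE_ATTR().
 */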
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach_task = blkiocg_can_attach_task,
	.attach_task = blkiocg_attach_task,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}
static inline bool cftype_blkg_same_policy(struct cftype *cft,
					   struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}
/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
				     struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}
/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}
/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
			 enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);
static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->key,
							blkg, weight);
	}
}
static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
				int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
								blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
								blkg, bps);
	}
}
static inline void blkio_update_group_iops(struct blkio_group *blkg,
			unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
								blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
								blkg, iops);
	}
}
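
/*
 * Note (added): the three helpers above fan a configuration change out to
 * whichever registered policy (proportional weight or throttling) owns the
 * blkio_group; the blkg->plid check keeps, for example, a throttle limit
 * from being pushed into the proportional-weight scheduler.
 */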
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
				bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
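
/*
 * Note (added): each stat_arr row is indexed by stat_sub_type; the same
 * increment is recorded once on the READ/WRITE axis and once on the
 * SYNC/ASYNC axis, and the "Total" value reported to userspace is computed
 * at read time as READ + WRITE (see blkio_get_stat() below).
 */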
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
						struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else	/* CONFIG_DEBUG_BLK_CGROUP */
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_group *curr_blkg, bool direction,
			bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				unsigned long unaccounted_time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
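
/*
 * Note (added): dispatch and merge counters live in per-cpu
 * blkio_group_stats_cpu and are updated inside a u64_stats_sync section
 * rather than under blkg->stats_lock, so the fast path only pays for a
 * local interrupt-disable; readers sum the per-cpu values in
 * blkio_read_stat_cpu() below.
 */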
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
				direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
/*
 * This function allocates the per cpu stats for blkio_group. Should be called
 * from sleepable context as alloc_per_cpu() requires that.
 */
int blkio_alloc_blkg_stats(struct blkio_group *blkg)
{
	/* Allocate memory for per cpu stats */
	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	if (!blkg->stats_cpu)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, void *key, dev_t dev,
		enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
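
/*
 * Note (added): the opaque 'key' is whatever pointer the owning policy uses
 * to identify the request-queue context this group belongs to; it is only
 * compared against in blkiocg_lookup_group() and handed back through
 * blkio_unlink_group_fn() when the cgroup goes away.
 */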
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
}
/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blk_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;

	/*
	 * Note: On 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on 32bit arch
	 * as 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * reset stats is anyway more of a debug feature and this sounds a
	 * corner case. So I am not complicating the code yet until and
	 * unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);

		/* Reset Per cpu stats which don't take blkg->stats_lock */
		blkio_reset_stats_cpu(blkg);
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
				int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}
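
/*
 * Note (added): the helpers above produce per-device output of the form
 * "<major>:<minor> <Read|Write|Sync|Async|Total> <value>", while
 * single-valued stats such as time or sectors (diskname_only) are reported
 * as "<major>:<minor> <value>".
 */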
static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}
static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		val = blkio_read_stat_cpu(blkg, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
			blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.unaccounted_time, cb, dev);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	struct gendisk *disk = NULL;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent from inputing too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	/* For rule removal, do not check for device presence. */
	if (temp) {
		disk = get_gendisk(dev, &part);
		if (!disk || part) {
			ret = -ENODEV;
			goto out;
		}
	}

	newpn->dev = dev;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch(fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = temp;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = (unsigned int)temp;
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out:
	put_disk(disk);
	return ret;
}
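
/*
 * Illustrative input accepted by the parser above (not from the original
 * comments): a single "<major>:<minor> <value>" pair per write, e.g. writing
 * "8:16 1048576" to blkio.throttle.read_bps_device would cap reads from
 * device 8:16 at roughly 1MB/s, assuming the throttling policy is enabled;
 * the exact file names come from the blkio_files[] table below.
 */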
unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int weight;

	spin_lock_irqsave(&blkcg->lock, flags);

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device);
	if (pn)
		weight = pn->val.weight;
	else
		weight = blkcg->weight;

	spin_unlock_irqrestore(&blkcg->lock, flags);

	return weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);
uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	uint64_t bps = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device);
	if (pn)
		bps = pn->val.bps;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return bps;
}
uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	uint64_t bps = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device);
	if (pn)
		bps = pn->val.bps;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return bps;
}
unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int iops = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device);
	if (pn)
		iops = pn->val.iops;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return iops;
}
unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int iops = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device);
	if (pn)
		iops = pn->val.iops;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return iops;
}
/* Checks whether user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch(pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		switch(pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return 1;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return 1;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
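
/*
 * Note (added): a value of zero is the "delete this rule" command, e.g.
 * writing "8:16 0" to a per-device file removes any existing rule for that
 * device rather than installing a zero limit or weight (see
 * blkiocg_file_write() below).
 */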
static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
			struct blkio_policy_node *newpn)
{
	switch(oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch(newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
		}
		break;
	default:
		BUG();
	}
}
/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight, iops;
	u64 bps;

	switch(pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
				blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch(pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			iops = pn->val.iops ? pn->val.iops : (-1);
			blkio_update_group_iops(blkg, iops, pn->fileid);
		}
		break;
	default:
		BUG();
	}
}
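
/*
 * Note (added): observe the fallbacks above; a weight of 0 re-applies the
 * cgroup-wide blkcg->weight, and a bps/iops value of 0 is pushed down as -1,
 * i.e. the policy treats the device as unlimited again once a rule is
 * removed.
 */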
/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
				struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
				const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		kfree(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}
static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch(pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch(pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.iops);
		}
		break;
	default:
		BUG();
	}
}
/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
			struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}
static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch(name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			if (pcpu)
				cgroup_total += blkio_get_stat_cpu(blkg, cb,
						blkg->dev, type);
			else {
				spin_lock_irq(&blkg->stats_lock);
				cgroup_total += blkio_get_stat(blkg, cb,
						blkg->dev, type);
				spin_unlock_irq(&blkg->stats_lock);
			}
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}
/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch(name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
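
/*
 * Note (added): per-device rules win over the cgroup-wide weight; groups
 * that already have a BLKIO_PROP_weight_device rule are skipped above, so
 * writing to blkio.weight only retunes devices without an explicit override.
 */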
static u64 blkiocg_file_read_u64 (struct cgroup *cgrp, struct cftype *cft) {
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}
/*
 * We cannot support shared io contexts, as we have no mean to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}
static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);
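
/*
 * Usage sketch (added, illustrative; not part of this file): an IO policy
 * fills a blkio_policy_type with its plid and callbacks and registers it,
 * e.g.
 *
 *	static struct blkio_policy_type blkio_policy_throtl = {
 *		.ops = {
 *			.blkio_unlink_group_fn = my_unlink_blkio_group,
 *			.blkio_update_group_read_bps_fn =
 *					my_update_blkio_group_read_bps,
 *		},
 *		.plid = BLKIO_POLICY_THROTL,
 *	};
 *	blkio_policy_register(&blkio_policy_throtl);
 *
 * The callback names above are placeholders; the real implementations live
 * in the individual policy files (blk-throttle.c, cfq-iosched.c).
 */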
void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");