/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100
static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/* List of groups pending per cpu stats allocation */
static DEFINE_SPINLOCK(alloc_list_lock);
static LIST_HEAD(alloc_list);

static void blkio_stat_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}

struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkio_cgroup, css);
	return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the queue_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_policy_type *pol,
					    struct blkio_group *curr_blkg)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	if (blkio_blkg_waiting(&pd->stats))
		return;
	if (blkg == curr_blkg)
		return;
	pd->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&pd->stats);
}

/* This should be called with the queue_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	blkio_clear_blkg_empty(stats);
}
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);
	BUG_ON(blkio_blkg_idling(stats));

	stats->start_idle_time = sched_clock();
	blkio_mark_blkg_idling(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (blkio_blkg_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		blkio_clear_blkg_idling(stats);
	}
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_sum(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	blkio_update_group_wait_time(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (blkg_rwstat_sum(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&pd->stats.dequeue, dequeue);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);

#else	/* CONFIG_DEBUG_BLK_CGROUP */

static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_policy_type *pol,
					struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }

#endif	/* CONFIG_DEBUG_BLK_CGROUP */
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->queued, rw, 1);
	blkio_end_empty_time(stats);
	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->queued, rw, -1);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&stats->time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&stats->unaccounted_time, unaccounted_time);
#endif
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync)
{
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (pd->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	unsigned long long now = sched_clock();
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, rw,
				io_start_time - start_time);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->merged, rw, 1);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
/*
 * Worker for allocating per cpu stat for blk groups. This is scheduled on
 * the system_nrt_wq once there are some groups on the alloc_list waiting
 * for allocation.
 */
static void blkio_stat_alloc_fn(struct work_struct *work)
{
	static void *pcpu_stats[BLKIO_NR_POLICIES];
	struct delayed_work *dwork = to_delayed_work(work);
	struct blkio_group *blkg;
	int i;
	bool empty = false;

alloc_stats:
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		if (pcpu_stats[i] != NULL)
			continue;

		pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);

		/* Allocation failed. Try again after some time. */
		if (pcpu_stats[i] == NULL) {
			queue_delayed_work(system_nrt_wq, dwork,
					   msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&blkio_list_lock);
	spin_lock(&alloc_list_lock);

	/* cgroup got deleted or queue exited. */
	if (!list_empty(&alloc_list)) {
		blkg = list_first_entry(&alloc_list, struct blkio_group,
					alloc_node);
		for (i = 0; i < BLKIO_NR_POLICIES; i++) {
			struct blkg_policy_data *pd = blkg->pd[i];

			if (blkio_policy[i] && pd && !pd->stats_cpu)
				swap(pd->stats_cpu, pcpu_stats[i]);
		}

		list_del_init(&blkg->alloc_node);
	}

	empty = list_empty(&alloc_list);

	spin_unlock(&alloc_list_lock);
	spin_unlock_irq(&blkio_list_lock);

	if (!empty)
		goto alloc_stats;
}
/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkg_policy_data *pd = blkg->pd[i];

		if (pd) {
			free_percpu(pd->stats_cpu);
			kfree(pd);
		}
	}

	kfree(blkg);
}
/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	INIT_LIST_HEAD(&blkg->alloc_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);

	spin_lock(&alloc_list_lock);
	list_add(&blkg->alloc_node, &alloc_list);
	/* Queue per cpu stat allocation from worker thread. */
	queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
	spin_unlock(&alloc_list_lock);
out:
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);
static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	spin_lock(&alloc_list_lock);
	list_del_init(&blkg->alloc_node);
	spin_unlock(&alloc_list_lock);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}
532 * Put the reference taken at the time of creation so that when all
533 * queues are gone, group can be destroyed.
539 * XXX: This updates blkg policy data in-place for root blkg, which is
540 * necessary across elevator switch and policy registration as root blkgs
541 * aren't shot down. This broken and racy implementation is temporary.
542 * Eventually, blkg shoot down will be replaced by proper in-place update.
544 void update_root_blkg_pd(struct request_queue
*q
, enum blkio_policy_id plid
)
546 struct blkio_policy_type
*pol
= blkio_policy
[plid
];
547 struct blkio_group
*blkg
= blkg_lookup(&blkio_root_cgroup
, q
);
548 struct blkg_policy_data
*pd
;
553 kfree(blkg
->pd
[plid
]);
554 blkg
->pd
[plid
] = NULL
;
559 pd
= kzalloc(sizeof(*pd
) + pol
->pdata_size
, GFP_KERNEL
);
562 pd
->stats_cpu
= alloc_percpu(struct blkio_group_stats_cpu
);
563 WARN_ON_ONCE(!pd
->stats_cpu
);
567 pol
->ops
.blkio_init_group_fn(blkg
);
569 EXPORT_SYMBOL_GPL(update_root_blkg_pd
);
/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q.  If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);
static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access to only
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;

	if (pd->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
		blkg_stat_reset(&sc->sectors);
	}
}
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list) {
			struct blkg_policy_data *pd = blkg->pd[pol->plid];
			struct blkio_group_stats *stats = &pd->stats;

			/* queued stats shouldn't be cleared */
			blkg_rwstat_reset(&stats->merged);
			blkg_rwstat_reset(&stats->service_time);
			blkg_rwstat_reset(&stats->wait_time);
			blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
			blkg_stat_reset(&stats->unaccounted_time);
			blkg_stat_reset(&stats->avg_queue_size_sum);
			blkg_stat_reset(&stats->avg_queue_size_samples);
			blkg_stat_reset(&stats->dequeue);
			blkg_stat_reset(&stats->group_wait_time);
			blkg_stat_reset(&stats->idle_time);
			blkg_stat_reset(&stats->empty_time);
#endif
			blkio_reset_stats_cpu(blkg, pol->plid);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}
/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
		       u64 (*prfill)(struct seq_file *, struct blkg_policy_data *, int),
		       int pol, int data, bool show_total)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->pd[pol])
			total += prfill(sf, blkg->pd[pol], data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
{
	return __blkg_prfill_u64(sf, pd,
				 blkg_stat_read((void *)&pd->stats + off));
}

static u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)&pd->stats + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
/* print blkg_stat specified by BLKCG_STAT_PRIV() */
int blkcg_print_stat(struct cgroup *cgrp, struct cftype *cft,
		     struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), false);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_stat);

/* print blkg_rwstat specified by BLKCG_STAT_PRIV() */
int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
		       struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_rwstat);
static u64 blkg_prfill_cpu_stat(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	u64 v = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		v += blkg_stat_read((void *)sc + off);
	}

	return __blkg_prfill_u64(sf, pd, v);
}

static u64 blkg_prfill_cpu_rwstat(struct seq_file *sf,
				  struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat rwstat = { }, tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		tmp = blkg_rwstat_read((void *)sc + off);
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			rwstat.cnt[i] += tmp.cnt[i];
	}

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/* print per-cpu blkg_stat specified by BLKCG_STAT_PRIV() */
int blkcg_print_cpu_stat(struct cgroup *cgrp, struct cftype *cft,
			 struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_stat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), false);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_cpu_stat);

/* print per-cpu blkg_rwstat specified by BLKCG_STAT_PRIV() */
int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
			   struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_rwstat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_cpu_rwstat);
/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read locked and must be paired
 * with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkio_cgroup *blkcg, const char *input,
		   struct blkg_conf_ctx *ctx)
	__acquires(rcu)
{
	struct gendisk *disk;
	struct blkio_group *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);
/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(rcu)
{
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
struct cftype blkio_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
	{ }	/* terminate */
};
/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and responsible
 * for shooting down all blkgs associated with @cgroup.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}
static void blkiocg_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}
/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}
/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}
/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}
static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkio_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();

	if (blkiop->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, blkiop->cftypes));
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	if (blkiop->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, blkiop->cftypes);

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);