/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#define MAX_KEY_LEN 100
static DEFINE_MUTEX(blkcg_pol_mutex);
struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkcg_root);
static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
static bool blkcg_policy_enabled(struct request_queue *q,
                                 const struct blkcg_policy *pol)
{
        return pol && test_bit(pol->plid, q->blkcg_pols);
}
/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
        int i;

        if (!blkg)
                return;

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd = blkg->pd[i];

                if (!pd)
                        continue;

                if (pol && pol->pd_exit_fn)
                        pol->pd_exit_fn(blkg);

                kfree(pd);
        }

        blk_exit_rl(&blkg->rl);
        kfree(blkg);
}
/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
                                   gfp_t gfp_mask)
{
        struct blkcg_gq *blkg;
        int i;

        /* alloc and init base part */
        blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
        if (!blkg)
                return NULL;

        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
        blkg->refcnt = 1;

        /* root blkg uses @q->root_rl, init rl only for !root blkgs */
        if (blkcg != &blkcg_root) {
                if (blk_init_rl(&blkg->rl, q, gfp_mask))
                        goto err_free;
                blkg->rl.blkg = blkg;
        }

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd;

                if (!blkcg_policy_enabled(q, pol))
                        continue;

                /* alloc per-policy data and attach it to blkg */
                pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
                if (!pd)
                        goto err_free;

                blkg->pd[i] = pd;
                pd->blkg = blkg;

                /* invoke per-policy init */
                if (blkcg_policy_enabled(blkg->q, pol))
                        pol->pd_init_fn(blkg);
        }

        return blkg;

err_free:
        blkg_free(blkg);
        return NULL;
}
static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
                                      struct request_queue *q)
{
        struct blkcg_gq *blkg;

        blkg = rcu_dereference(blkcg->blkg_hint);
        if (blkg && blkg->q == q)
                return blkg;

        /*
         * Hint didn't match.  Look up from the radix tree.  Note that we
         * may not be holding queue_lock and thus are not sure whether
         * @blkg from blkg_tree has already been removed or not, so we
         * can't update hint to the lookup result.  Leave it to the caller.
         */
        blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
        if (blkg && blkg->q == q)
                return blkg;

        return NULL;
}
/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());

        if (unlikely(blk_queue_bypass(q)))
                return NULL;
        return __blkg_lookup(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
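/*
 * Example (editor's sketch, not part of the original source): a caller on
 * the IO submission path would typically wrap the lookup in an RCU
 * read-side critical section.  How the blkcg is obtained here
 * (bio_blkcg()) is an assumption for illustration only.
 *
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(bio_blkcg(bio), q);
 *	if (blkg) {
 *		// blkg and its per-policy data may only be used while the
 *		// RCU read lock is held
 *	}
 *	rcu_read_unlock();
 */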
/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
                                             struct request_queue *q,
                                             struct blkcg_gq *new_blkg)
{
        struct blkcg_gq *blkg;
        int ret;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /* lookup and update hint on success, see __blkg_lookup() for details */
        blkg = __blkg_lookup(blkcg, q);
        if (blkg) {
                rcu_assign_pointer(blkcg->blkg_hint, blkg);
                goto err_free;
        }

        /* blkg holds a reference to blkcg */
        if (!css_tryget(&blkcg->css)) {
                blkg = ERR_PTR(-EINVAL);
                goto err_free;
        }

        /* allocate */
        if (!new_blkg) {
                new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
                if (unlikely(!new_blkg)) {
                        blkg = ERR_PTR(-ENOMEM);
                        goto err_put;
                }
        }
        blkg = new_blkg;

        /* insert */
        spin_lock(&blkcg->lock);
        ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
        if (!ret) {
                hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
                list_add(&blkg->q_node, &q->blkg_list);
        }
        spin_unlock(&blkcg->lock);

        if (!ret)
                return blkg;

        /* @blkg failed to be fully initialized, use the usual release path */
        blkg_put(blkg);
        return ERR_PTR(ret);

err_put:
        css_put(&blkcg->css);
err_free:
        blkg_free(new_blkg);
        return blkg;
}
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q)
{
        /*
         * This could be the first entry point of blkcg implementation and
         * we shouldn't allow anything to go through for a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)))
                return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
        return __blkg_lookup_create(blkcg, q, NULL);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
static void blkg_destroy(struct blkcg_gq *blkg)
{
        struct blkcg *blkcg = blkg->blkcg;

        lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);

        /* Something wrong if we are trying to remove same group twice */
        WARN_ON_ONCE(list_empty(&blkg->q_node));
        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

        radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
        list_del_init(&blkg->q_node);
        hlist_del_init_rcu(&blkg->blkcg_node);

        /*
         * Both setting lookup hint to and clearing it from @blkg are done
         * under queue_lock.  If it's not pointing to @blkg now, it never
         * will.  Hint assignment itself can race safely.
         */
        if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
                rcu_assign_pointer(blkcg->blkg_hint, NULL);

        /*
         * Put the reference taken at the time of creation so that when all
         * queues are gone, group can be destroyed.
         */
        blkg_put(blkg);
}
/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
        struct blkcg_gq *blkg, *n;

        lockdep_assert_held(q->queue_lock);

        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                struct blkcg *blkcg = blkg->blkcg;

                spin_lock(&blkcg->lock);
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }

        /*
         * root blkg is destroyed.  Just clear the pointer since
         * root_rl does not take reference on root blkg.
         */
        q->root_blkg = NULL;
        q->root_rl.blkg = NULL;
}
static void blkg_rcu_free(struct rcu_head *rcu_head)
{
        blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}
void __blkg_release(struct blkcg_gq *blkg)
{
        /* release the extra blkcg reference this blkg has been holding */
        css_put(&blkg->blkcg->css);

        /*
         * A group is freed in rcu manner.  But having an rcu lock does not
         * mean that one can access all the fields of blkg and assume these
         * are valid.  For example, don't try to follow throtl_data and
         * request queue links.
         *
         * Having a reference to blkg under an rcu allows access to only
         * values local to groups like group stats and group rate limits.
         */
        call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);
/*
 * The next function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
                                         struct request_queue *q)
{
        struct list_head *ent;
        struct blkcg_gq *blkg;

        /*
         * Determine the current blkg list_head.  The first entry is
         * root_rl which is off @q->blkg_list and mapped to the head.
         */
        if (rl == &q->root_rl) {
                ent = &q->blkg_list;
                /* There are no more block groups, hence no request lists */
                if (list_empty(ent))
                        return NULL;
        } else {
                blkg = container_of(rl, struct blkcg_gq, rl);
                ent = &blkg->q_node;
        }

        /* walk to the next list_head, skip root blkcg */
        ent = ent->next;
        if (ent == &q->root_blkg->q_node)
                ent = ent->next;
        if (ent == &q->blkg_list)
                return NULL;

        blkg = container_of(ent, struct blkcg_gq, q_node);
        return &blkg->rl;
}
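/*
 * For reference, a sketch of the companion iterator (which lives in
 * include/linux/blkdev.h, not in this file; reproduced from memory, so
 * treat it as illustrative rather than authoritative):
 *
 *	#define blk_queue_for_each_rl(rl, q)	\
 *		for ((rl) = &(q)->root_rl; (rl);	\
 *		     (rl) = __blk_queue_next_rl((rl), (q)))
 *
 * It starts from @q->root_rl and stops once __blk_queue_next_rl() returns
 * %NULL.
 */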
static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
                             u64 val)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
        struct blkcg_gq *blkg;
        struct hlist_node *n;
        int i;

        mutex_lock(&blkcg_pol_mutex);
        spin_lock_irq(&blkcg->lock);

        /*
         * Note that stat reset is racy - it doesn't synchronize against
         * stat updates.  This is a debug feature which shouldn't exist
         * anyway.  If you get hit by a race, retry.
         */
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkcg_policy_enabled(blkg->q, pol) &&
                            pol->pd_reset_stats_fn)
                                pol->pd_reset_stats_fn(blkg);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        mutex_unlock(&blkcg_pol_mutex);
        return 0;
}
static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
        /* some drivers (floppy) instantiate a queue w/o disk registered */
        if (blkg->q->backing_dev_info.dev)
                return dev_name(blkg->q->backing_dev_info.dev);
        return NULL;
}
/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: whether to print the sum of the @prfill return values
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total)
{
        struct blkcg_gq *blkg;
        struct hlist_node *n;
        u64 total = 0;

        spin_lock_irq(&blkcg->lock);
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
                if (blkcg_policy_enabled(blkg->q, pol))
                        total += prfill(sf, blkg->pd[pol->plid], data);
        spin_unlock_irq(&blkcg->lock);

        if (show_total)
                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
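/*
 * Example (editor's sketch, not part of the original source): a policy
 * would typically build its cftype read function on top of
 * blkcg_print_blkgs() and one of the prfill helpers below.  The policy
 * (blkcg_policy_foo), its per-group structure (struct foo_pd) and the
 * cftype wiring are hypothetical.
 *
 *	static int foo_print_serviced(struct cgroup *cgrp, struct cftype *cft,
 *				      struct seq_file *sf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *
 *		blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat,
 *				  &blkcg_policy_foo,
 *				  offsetof(struct foo_pd, serviced), false);
 *		return 0;
 *	}
 *
 * where struct foo_pd embeds struct blkg_policy_data as its first member
 * and a struct blkg_stat named serviced.
 */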
/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname)
                return 0;

        seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
        return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat)
{
        static const char *rwstr[] = {
                [BLKG_RWSTAT_READ]      = "Read",
                [BLKG_RWSTAT_WRITE]     = "Write",
                [BLKG_RWSTAT_SYNC]      = "Sync",
                [BLKG_RWSTAT_ASYNC]     = "Async",
        };
        const char *dname = blkg_dev_name(pd->blkg);
        u64 v;
        int i;

        if (!dname)
                return 0;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
                           (unsigned long long)rwstat->cnt[i]);

        v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
        seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
        return v;
}
/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
        return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);
/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off)
{
        struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   const char *input, struct blkg_conf_ctx *ctx)
        __acquires(rcu) __acquires(disk->queue->queue_lock)
{
        struct gendisk *disk;
        struct blkcg_gq *blkg;
        unsigned int major, minor;
        unsigned long long v;
        int part, ret;

        if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
                return -EINVAL;

        disk = get_gendisk(MKDEV(major, minor), &part);
        if (!disk || part)
                return -EINVAL;

        rcu_read_lock();
        spin_lock_irq(disk->queue->queue_lock);

        if (blkcg_policy_enabled(disk->queue, pol))
                blkg = blkg_lookup_create(blkcg, disk->queue);
        else
                blkg = ERR_PTR(-EINVAL);

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                rcu_read_unlock();
                spin_unlock_irq(disk->queue->queue_lock);
                put_disk(disk);
                /*
                 * If queue was bypassing, we should retry.  Do so after a
                 * short msleep().  It isn't strictly necessary but queue
                 * can be bypassing for some time and it's always nice to
                 * avoid busy looping.
                 */
                if (ret == -EBUSY) {
                        msleep(10);
                        ret = restart_syscall();
                }
                return ret;
        }

        ctx->disk = disk;
        ctx->blkg = blkg;
        ctx->v = v;
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);
/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
        __releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
        spin_unlock_irq(ctx->disk->queue->queue_lock);
        rcu_read_unlock();
        put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
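/*
 * Example (editor's sketch, not part of the original source): a policy's
 * cftype write handler pairs the two helpers above like this.  The policy
 * (blkcg_policy_foo) and the way the parsed value is applied are
 * hypothetical.
 *
 *	static int foo_set_weight(struct cgroup *cgrp, struct cftype *cft,
 *				  const char *buf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		// ctx.blkg and ctx.v are valid here, with the RCU read lock
 *		// and the queue lock held by blkg_conf_prep()
 *		// ... apply ctx.v to ctx.blkg's policy data ...
 *
 *		blkg_conf_finish(&ctx);
 *		return 0;
 *	}
 */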
struct cftype blkcg_files[] = {
        {
                .name = "reset_stats",
                .write_u64 = blkcg_reset_stats,
        },
        { }     /* terminate */
};
/**
 * blkcg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and responsible
 * for shooting down all blkgs associated with @cgroup.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkcg_pre_destroy(struct cgroup *cgroup)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

        spin_lock_irq(&blkcg->lock);

        while (!hlist_empty(&blkcg->blkg_list)) {
                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
                                                    struct blkcg_gq, blkcg_node);
                struct request_queue *q = blkg->q;

                if (spin_trylock(q->queue_lock)) {
                        blkg_destroy(blkg);
                        spin_unlock(q->queue_lock);
                } else {
                        spin_unlock_irq(&blkcg->lock);
                        cpu_relax();
                        spin_lock_irq(&blkcg->lock);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        return 0;
}
static void blkcg_destroy(struct cgroup *cgroup)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

        if (blkcg != &blkcg_root)
                kfree(blkcg);
}
static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
{
        static atomic64_t id_seq = ATOMIC64_INIT(0);
        struct blkcg *blkcg;
        struct cgroup *parent = cgroup->parent;

        if (!parent) {
                blkcg = &blkcg_root;
                goto done;
        }

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
        blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
        spin_lock_init(&blkcg->lock);
        INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
        INIT_HLIST_HEAD(&blkcg->blkg_list);

        return &blkcg->css;
}
/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
        might_sleep();

        return blk_throtl_init(q);
}
/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);

        blk_throtl_drain(q);
}
/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        blkg_destroy_all(q);
        spin_unlock_irq(q->queue_lock);

        blk_throtl_exit(q);
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        cgroup_taskset_for_each(task, cgrp, tset) {
                task_lock(task);
                ioc = task->io_context;
                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                        ret = -EINVAL;
                task_unlock(task);
                if (ret)
                        break;
        }
        return ret;
}
struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .create = blkcg_create,
        .can_attach = blkcg_can_attach,
        .pre_destroy = blkcg_pre_destroy,
        .destroy = blkcg_destroy,
        .subsys_id = blkio_subsys_id,
        .base_cftypes = blkcg_files,
        .module = THIS_MODULE,

        /*
         * blkio subsystem is utterly broken in terms of hierarchy support.
         * It treats all cgroups equally regardless of where they're
         * located in the hierarchy - all cgroups are treated as if they're
         * right below the root.  Fix it and remove the following.
         */
        .broken_hierarchy = true,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol)
{
        LIST_HEAD(pds);
        struct blkcg_gq *blkg;
        struct blkg_policy_data *pd, *n;
        int cnt = 0, ret;
        bool preloaded;

        if (blkcg_policy_enabled(q, pol))
                return 0;

        /* preallocations for root blkg */
        blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
        if (!blkg)
                return -ENOMEM;

        preloaded = !radix_tree_preload(GFP_KERNEL);

        blk_queue_bypass_start(q);

        /* make sure the root blkg exists and count the existing blkgs */
        spin_lock_irq(q->queue_lock);

        rcu_read_lock();
        blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
        rcu_read_unlock();

        if (preloaded)
                radix_tree_preload_end();

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                goto out_unlock;
        }
        q->root_blkg = blkg;
        q->root_rl.blkg = blkg;

        list_for_each_entry(blkg, &q->blkg_list, q_node)
                cnt++;

        spin_unlock_irq(q->queue_lock);

        /* allocate policy_data for all existing blkgs */
        while (cnt--) {
                pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
                if (!pd) {
                        ret = -ENOMEM;
                        goto out_free;
                }
                list_add_tail(&pd->alloc_node, &pds);
        }

        /*
         * Install the allocated pds.  With @q bypassing, no new blkg
         * should have been created while the queue lock was dropped.
         */
        spin_lock_irq(q->queue_lock);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                if (WARN_ON(list_empty(&pds))) {
                        /* umm... this shouldn't happen, just abort */
                        ret = -ENOMEM;
                        goto out_unlock;
                }
                pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
                list_del_init(&pd->alloc_node);

                /* grab blkcg lock too while installing @pd on @blkg */
                spin_lock(&blkg->blkcg->lock);

                blkg->pd[pol->plid] = pd;
                pd->blkg = blkg;
                pol->pd_init_fn(blkg);

                spin_unlock(&blkg->blkcg->lock);
        }

        __set_bit(pol->plid, q->blkcg_pols);
        ret = 0;
out_unlock:
        spin_unlock_irq(q->queue_lock);
out_free:
        blk_queue_bypass_end(q);
        list_for_each_entry_safe(pd, n, &pds, alloc_node)
                kfree(pd);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
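/*
 * Example (editor's sketch, not part of the original source): an elevator
 * or other policy implementation would activate its policy while setting
 * up a queue and deactivate it again on teardown.  The names below
 * (foo_init_queue, blkcg_policy_foo) are hypothetical.
 *
 *	static int foo_init_queue(struct request_queue *q)
 *	{
 *		int ret;
 *
 *		ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 *		if (ret)
 *			return ret;
 *		// ... allocate and initialize the policy's own queue data ...
 *		return 0;
 *	}
 *
 * with the matching exit path calling
 * blkcg_deactivate_policy(q, &blkcg_policy_foo) before freeing that data.
 */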
/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol)
{
        struct blkcg_gq *blkg;

        if (!blkcg_policy_enabled(q, pol))
                return;

        blk_queue_bypass_start(q);
        spin_lock_irq(q->queue_lock);

        __clear_bit(pol->plid, q->blkcg_pols);

        /* if no policy is left, no need for blkgs - shoot them down */
        if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
                blkg_destroy_all(q);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                /* grab blkcg lock too while removing @pd from @blkg */
                spin_lock(&blkg->blkcg->lock);

                if (pol->pd_exit_fn)
                        pol->pd_exit_fn(blkg);

                kfree(blkg->pd[pol->plid]);
                blkg->pd[pol->plid] = NULL;

                spin_unlock(&blkg->blkcg->lock);
        }

        spin_unlock_irq(q->queue_lock);
        blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
        int i, ret;

        if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
                return -EINVAL;

        mutex_lock(&blkcg_pol_mutex);

        /* find an empty slot */
        ret = -ENOSPC;
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (!blkcg_policy[i])
                        break;
        if (i >= BLKCG_MAX_POLS)
                goto out_unlock;

        /* register and update blkgs */
        pol->plid = i;
        blkcg_policy[i] = pol;

        /* everything is in place, add intf files for the new policy */
        if (pol->cftypes)
                WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
        ret = 0;
out_unlock:
        mutex_unlock(&blkcg_pol_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
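/*
 * Example (editor's sketch, not part of the original source): a policy
 * module would typically define a struct blkcg_policy and register it from
 * its module init, unregistering it again on exit.  All names below
 * (blkcg_policy_foo, struct foo_pd, the callbacks and the cftypes array)
 * are hypothetical.
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_size	= sizeof(struct foo_pd),
 *		.cftypes	= foo_blkcg_files,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_exit_fn	= foo_pd_exit,
 *		.pd_reset_stats_fn = foo_pd_reset_stats,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		blkcg_policy_unregister(&blkcg_policy_foo);
 *	}
 */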
/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
        mutex_lock(&blkcg_pol_mutex);

        if (WARN_ON(blkcg_policy[pol->plid] != pol))
                goto out_unlock;

        /* kill the intf files first */
        if (pol->cftypes)
                cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);

        /* unregister and update blkgs */
        blkcg_policy[pol->plid] = NULL;
out_unlock:
        mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);