/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100
static DEFINE_MUTEX(blkcg_pol_mutex);
static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

struct blkio_cgroup blkio_root_cgroup = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKCG_MAX_POLS];
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkio_cgroup, css);
	return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->ops.blkio_exit_group_fn)
			pol->ops.blkio_exit_group_fn(blkg);

		kfree(pd);
	}

	kfree(blkg);
}
/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		css_put(&blkcg->css);
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);
out:
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
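/*
 * Caller sketch (illustrative, not part of the original file): both the RCU
 * read lock and the queue lock must be held around lookup/creation, as the
 * assertions above require.  blkg_conf_prep() below follows this pattern.
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q, false);
 *	if (!IS_ERR(blkg))
 *		... use blkg while the locks are held ...
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 */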
/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);
static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}
/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down.  This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q,
			 const struct blkio_policy_type *pol)
{
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	kfree(blkg->pd[pol->plid]);
	blkg->pd[pol->plid] = NULL;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	if (!pd)
		return;

	blkg->pd[pol->plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);
/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q.  If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		/* skip root? */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);
static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner.  But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid.  For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access only to
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct blkio_group *blkg;
	struct hlist_node *n;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkio_policy_type *pol = blkio_policy[i];

			if (pol && pol->ops.blkio_reset_group_stats_fn)
				pol->ops.blkio_reset_group_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}
static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}
/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
		       u64 (*prfill)(struct seq_file *, void *, int),
		       const struct blkio_policy_type *pol, int data,
		       bool show_total)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->pd[pol->plid])
			total += prfill(sf, blkg->pd[pol->plid]->pdata, data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
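/*
 * Usage sketch (illustrative, not part of the original file): a policy
 * would typically wrap blkcg_print_blkgs() in its read_seq_string handler.
 * "example_policy" and "struct example_pdata" below are hypothetical names.
 *
 *	static int example_print_stat(struct cgroup *cgrp, struct cftype *cft,
 *				      struct seq_file *sf)
 *	{
 *		struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
 *
 *		blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &example_policy,
 *				  offsetof(struct example_pdata, serviced),
 *				  true);
 *		return 0;
 *	}
 */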
/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pdata.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, void *pdata, u64 v)
{
	const char *dname = blkg_dev_name(pdata_to_blkg(pdata));

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pdata.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, void *pdata,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pdata_to_blkg(pdata));
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @off: offset to the blkg_stat in @pdata
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, void *pdata, int off)
{
	return __blkg_prfill_u64(sf, pdata, blkg_stat_read(pdata + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);
/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @off: offset to the blkg_rwstat in @pdata
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, void *pdata, int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read(pdata + off);

	return __blkg_prfill_rwstat(sf, pdata, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkio_cgroup *blkcg,
		   const struct blkio_policy_type *pol, const char *input,
		   struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkio_group *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	blkg = blkg_lookup_create(blkcg, disk->queue, false);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);
/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
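/*
 * Usage sketch (illustrative, not part of the original file): a policy's
 * per-device configuration write handler would pair the two helpers as
 * shown below.  Between the calls, ctx.blkg and ctx.v are valid and the
 * RCU read lock and queue lock are held.  "example_policy" and
 * example_apply_limit() are hypothetical names.
 *
 *	static int example_set_limit(struct cgroup *cgrp, struct cftype *cft,
 *				     const char *buf)
 *	{
 *		struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		example_apply_limit(ctx.blkg, ctx.v);
 *
 *		blkg_conf_finish(&ctx);
 *		return 0;
 *	}
 */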
struct cftype blkio_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
	{ }	/* terminate */
};
/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and responsible
 * for shooting down all blkgs associated with @cgroup.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}
static void blkiocg_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}
/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}
/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}
/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}
static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}
static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkio_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
/**
 * blkio_policy_register - register a blkcg policy
 * @blkiop: blkcg policy to register
 *
 * Register @blkiop with blkcg core.  Might sleep and @blkiop may be
 * modified on successful registration.  Returns 0 on success and -errno on
 * failure.
 */
int blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;
	int i, ret;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkio_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	blkiop->plid = i;
	blkio_policy[i] = blkiop;

	blkcg_bypass_start();
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop);
	blkcg_bypass_end();

	/* everything is in place, add intf files for the new policy */
	if (blkiop->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, blkiop->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkio_policy_register);
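/*
 * Registration sketch (illustrative, not part of the original file): a
 * policy implementation would typically fill in a blkio_policy_type and
 * register it from its init path.  All "example_*" names are hypothetical;
 * the .ops, .pdata_size and .cftypes fields are the ones used by this file.
 *
 *	static struct blkio_policy_type example_policy = {
 *		.ops = {
 *			.blkio_init_group_fn	= example_init_group,
 *			.blkio_exit_group_fn	= example_exit_group,
 *		},
 *		.pdata_size	= sizeof(struct example_pdata),
 *		.cftypes	= example_files,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return blkio_policy_register(&example_policy);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		blkio_policy_unregister(&example_policy);
 *	}
 */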
/**
 * blkio_policy_unregister - unregister a blkcg policy
 * @blkiop: blkcg policy to unregister
 *
 * Undo blkio_policy_register(@blkiop).  Might sleep.
 */
void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkio_policy[blkiop->plid] != blkiop))
		goto out_unlock;

	/* kill the intf files first */
	if (blkiop->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, blkiop->cftypes);

	/* unregister and update blkgs */
	blkio_policy[blkiop->plid] = NULL;

	blkcg_bypass_start();
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop);
	blkcg_bypass_end();
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);