/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/* List of groups pending per cpu stats allocation */
static DEFINE_SPINLOCK(alloc_list_lock);
static LIST_HEAD(alloc_list);

static void blkio_stat_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);
struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);
static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
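/*
 * Example: BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, BLKIO_THROTL_read_bps_device)
 * packs the owning policy into the high 16 bits of cft->private and the
 * per-file attribute into the low 16 bits; BLKIOFILE_POLICY() and
 * BLKIOFILE_ATTR() recover the two halves when the file is accessed.
 */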
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkio_cgroup, css);
	return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
static inline void blkio_update_group_weight(struct blkio_group *blkg,
					     int plid, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
							blkg, weight);
	}
}
static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
					  u64 bps, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								blkg, bps);
	}
}
static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   int plid, unsigned int iops,
					   int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								blkg, iops);
	}
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the queue_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_policy_type *pol,
					    struct blkio_group *curr_blkg)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	if (blkio_blkg_waiting(&pd->stats))
		return;
	if (blkg == curr_blkg)
		return;
	pd->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&pd->stats);
}
/* This should be called with the queue_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	blkio_clear_blkg_waiting(stats);
}
/* This should be called with the queue_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	blkio_clear_blkg_empty(stats);
}
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);
	BUG_ON(blkio_blkg_idling(stats));

	stats->start_idle_time = sched_clock();
	blkio_mark_blkg_idling(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (blkio_blkg_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		blkio_clear_blkg_idling(stats);
	}
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_sum(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	blkio_update_group_wait_time(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (blkg_rwstat_sum(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&pd->stats.dequeue, dequeue);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else	/* CONFIG_DEBUG_BLK_CGROUP */
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_policy_type *pol,
					struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->queued, rw, 1);
	blkio_end_empty_time(stats);
	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->queued, rw, -1);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&stats->time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&stats->unaccounted_time, unaccounted_time);
#endif
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync)
{
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (pd->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	unsigned long long now = sched_clock();
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, rw,
				io_start_time - start_time);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
/* Merged request accounting, updated under the queue lock. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->merged, rw, 1);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
/*
 * Worker for allocating per cpu stat for blk groups. This is scheduled on
 * the system_nrt_wq once there are some groups on the alloc_list waiting
 * for allocation.
 */
static void blkio_stat_alloc_fn(struct work_struct *work)
{
	static void *pcpu_stats[BLKIO_NR_POLICIES];
	struct delayed_work *dwork = to_delayed_work(work);
	struct blkio_group *blkg;
	int i;
	bool empty = false;

alloc_stats:
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		if (pcpu_stats[i] != NULL)
			continue;

		pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);

		/* Allocation failed. Try again after some time. */
		if (pcpu_stats[i] == NULL) {
			queue_delayed_work(system_nrt_wq, dwork,
					   msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&blkio_list_lock);
	spin_lock(&alloc_list_lock);

	/* cgroup got deleted or queue exited. */
	if (!list_empty(&alloc_list)) {
		blkg = list_first_entry(&alloc_list, struct blkio_group,
					alloc_node);
		for (i = 0; i < BLKIO_NR_POLICIES; i++) {
			struct blkg_policy_data *pd = blkg->pd[i];

			if (blkio_policy[i] && pd && !pd->stats_cpu)
				swap(pd->stats_cpu, pcpu_stats[i]);
		}

		list_del_init(&blkg->alloc_node);
	}

	empty = list_empty(&alloc_list);

	spin_unlock(&alloc_list_lock);
	spin_unlock_irq(&blkio_list_lock);

	if (!empty)
		goto alloc_stats;
}
/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkg_policy_data *pd = blkg->pd[i];

		if (pd) {
			free_percpu(pd->stats_cpu);
			kfree(pd);
		}
	}

	kfree(blkg);
}
/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	INIT_LIST_HEAD(&blkg->alloc_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		css_put(&blkcg->css);
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);

	spin_lock(&alloc_list_lock);
	list_add(&blkg->alloc_node, &alloc_list);
	/* Queue per cpu stat allocation from worker thread. */
	queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
	spin_unlock(&alloc_list_lock);
out:
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);
static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	spin_lock(&alloc_list_lock);
	list_del_init(&blkg->alloc_node);
	spin_unlock(&alloc_list_lock);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}
601 * Put the reference taken at the time of creation so that when all
602 * queues are gone, group can be destroyed.
608 * XXX: This updates blkg policy data in-place for root blkg, which is
609 * necessary across elevator switch and policy registration as root blkgs
610 * aren't shot down. This broken and racy implementation is temporary.
611 * Eventually, blkg shoot down will be replaced by proper in-place update.
613 void update_root_blkg_pd(struct request_queue
*q
, enum blkio_policy_id plid
)
615 struct blkio_policy_type
*pol
= blkio_policy
[plid
];
616 struct blkio_group
*blkg
= blkg_lookup(&blkio_root_cgroup
, q
);
617 struct blkg_policy_data
*pd
;
622 kfree(blkg
->pd
[plid
]);
623 blkg
->pd
[plid
] = NULL
;
628 pd
= kzalloc(sizeof(*pd
) + pol
->pdata_size
, GFP_KERNEL
);
631 pd
->stats_cpu
= alloc_percpu(struct blkio_group_stats_cpu
);
632 WARN_ON_ONCE(!pd
->stats_cpu
);
636 pol
->ops
.blkio_init_group_fn(blkg
);
638 EXPORT_SYMBOL_GPL(update_root_blkg_pd
);
/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q.  If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		/* skip root? */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);
static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access to only
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);
static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;

	if (pd->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
		blkg_stat_reset(&sc->sectors);
	}
}
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list) {
			struct blkg_policy_data *pd = blkg->pd[pol->plid];
			struct blkio_group_stats *stats = &pd->stats;

			/* queued stats shouldn't be cleared */
			blkg_rwstat_reset(&stats->merged);
			blkg_rwstat_reset(&stats->service_time);
			blkg_rwstat_reset(&stats->wait_time);
			blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
			blkg_stat_reset(&stats->unaccounted_time);
			blkg_stat_reset(&stats->avg_queue_size_sum);
			blkg_stat_reset(&stats->avg_queue_size_samples);
			blkg_stat_reset(&stats->dequeue);
			blkg_stat_reset(&stats->group_wait_time);
			blkg_stat_reset(&stats->idle_time);
			blkg_stat_reset(&stats->empty_time);
#endif
			blkio_reset_stats_cpu(blkg, pol->plid);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}
/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
static void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
			      u64 (*prfill)(struct seq_file *,
					    struct blkg_policy_data *, int),
			      int pol, int data, bool show_total)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->pd[pol])
			total += prfill(sf, blkg->pd[pol], data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
static u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			     u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
static u64 __blkg_prfill_rwstat(struct seq_file *sf,
				struct blkg_policy_data *pd,
				const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
{
	return __blkg_prfill_u64(sf, pd,
				 blkg_stat_read((void *)&pd->stats + off));
}

static u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)&pd->stats + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
/* print blkg_stat specified by BLKCG_STAT_PRIV() */
static int blkcg_print_stat(struct cgroup *cgrp, struct cftype *cft,
			    struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), false);
	return 0;
}
/* print blkg_rwstat specified by BLKCG_STAT_PRIV() */
static int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
			      struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), true);
	return 0;
}
static u64 blkg_prfill_cpu_stat(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	u64 v = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		v += blkg_stat_read((void *)sc + off);
	}

	return __blkg_prfill_u64(sf, pd, v);
}
static u64 blkg_prfill_cpu_rwstat(struct seq_file *sf,
				  struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat rwstat = { }, tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		tmp = blkg_rwstat_read((void *)sc + off);
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			rwstat.cnt[i] += tmp.cnt[i];
	}

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
/* print per-cpu blkg_stat specified by BLKCG_STAT_PRIV() */
static int blkcg_print_cpu_stat(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_stat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), false);
	return 0;
}
/* print per-cpu blkg_rwstat specified by BLKCG_STAT_PRIV() */
static int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
				  struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_rwstat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), true);
	return 0;
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
static u64 blkg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	u64 samples = blkg_stat_read(&pd->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = blkg_stat_read(&pd->stats.avg_queue_size_sum);
		do_div(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int blkcg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
				      struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_avg_queue_size,
			  BLKIO_POLICY_PROP, 0, false);
	return 0;
}
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
				      int fileid, struct blkio_cgroup *blkcg)
{
	struct gendisk *disk = NULL;
	struct blkio_group *blkg = NULL;
	struct blkg_policy_data *pd;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent from inputting too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		goto out;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}

	pd = blkg->pd[plid];

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out_unlock;

		pd->conf.weight = temp;
		blkio_update_group_weight(blkg, plid, temp ?: blkcg->weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			pd->conf.bps[READ] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_bps_device:
			pd->conf.bps[WRITE] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[READ] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[WRITE] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out_unlock:
	rcu_read_unlock();
out:
	put_disk(disk);

	/*
	 * If queue was bypassing, we should retry.  Do so after a short
	 * msleep().  It isn't strictly necessary but queue can be
	 * bypassing for some time and it's always nice to avoid busy
	 * looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}
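/*
 * Writes parsed above are of the form "<major>:<minor> <value>", one rule
 * per write, e.g. (device numbers illustrative):
 *
 *	echo "8:16 500" > blkio.weight_device
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * A value of 0 clears the per-device setting.
 */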
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
	kfree(buf);
	return ret;
}
1119 static u64
blkg_prfill_weight_device(struct seq_file
*sf
,
1120 struct blkg_policy_data
*pd
, int off
)
1122 if (!pd
->conf
.weight
)
1124 return __blkg_prfill_u64(sf
, pd
, pd
->conf
.weight
);
1127 static int blkcg_print_weight_device(struct cgroup
*cgrp
, struct cftype
*cft
,
1128 struct seq_file
*sf
)
1130 blkcg_print_blkgs(sf
, cgroup_to_blkio_cgroup(cgrp
),
1131 blkg_prfill_weight_device
, BLKIO_POLICY_PROP
, 0,
static int blkcg_print_weight(struct cgroup *cgrp, struct cftype *cft,
			      struct seq_file *sf)
{
	seq_printf(sf, "%u\n", cgroup_to_blkio_cgroup(cgrp)->weight);
	return 0;
}
static int blkcg_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkg_policy_data *pd = blkg->pd[BLKIO_POLICY_PROP];

		if (pd && !pd->conf.weight)
			blkio_update_group_weight(blkg, BLKIO_POLICY_PROP,
						  blkcg->weight);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
/* for blk-throttle conf */
#ifdef CONFIG_BLK_DEV_THROTTLING
static u64 blkg_prfill_conf_u64(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	u64 v = *(u64 *)((void *)&pd->conf + off);

	if (!v)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int blkcg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *sf)
{
	int off;

	switch (BLKIOFILE_ATTR(cft->private)) {
	case BLKIO_THROTL_read_bps_device:
		off = offsetof(struct blkio_group_conf, bps[READ]);
		break;
	case BLKIO_THROTL_write_bps_device:
		off = offsetof(struct blkio_group_conf, bps[WRITE]);
		break;
	case BLKIO_THROTL_read_iops_device:
		off = offsetof(struct blkio_group_conf, iops[READ]);
		break;
	case BLKIO_THROTL_write_iops_device:
		off = offsetof(struct blkio_group_conf, iops[WRITE]);
		break;
	default:
		return -EINVAL;
	}

	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
			  blkg_prfill_conf_u64, BLKIO_POLICY_THROTL,
			  off, false);
	return 0;
}
#endif	/* CONFIG_BLK_DEV_THROTTLING */
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkcg_print_weight_device,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.read_seq_string = blkcg_print_weight,
		.write_u64 = blkcg_set_weight,
	},
	{
		.name = "time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, time)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "sectors",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats_cpu, sectors)),
		.read_seq_string = blkcg_print_cpu_stat,
	},
	{
		.name = "io_service_bytes",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats_cpu, service_bytes)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
	{
		.name = "io_serviced",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats_cpu, serviced)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
	{
		.name = "io_service_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, service_time)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "io_wait_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, wait_time)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "io_merged",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, merged)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "io_queued",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, queued)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
				offsetof(struct blkio_group_stats_cpu, service_bytes)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
				offsetof(struct blkio_group_stats_cpu, serviced)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_seq_string = blkcg_print_avg_queue_size,
	},
	{
		.name = "group_wait_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, group_wait_time)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "idle_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, idle_time)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "empty_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, empty_time)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "dequeue",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, dequeue)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "unaccounted_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, unaccounted_time)),
		.read_seq_string = blkcg_print_stat,
	},
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
	{ }	/* terminate */
};
/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and responsible
 * for shooting down all blkgs associated with @cgroup.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}
static void blkiocg_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}
/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}
/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}
/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}
static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkio_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_register);
void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);