/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);
static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
                                                  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
                              struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
                           struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .create = blkiocg_create,
        .can_attach = blkiocg_can_attach,
        .attach = blkiocg_attach,
        .destroy = blkiocg_destroy,
        .populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
        /* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
        .subsys_id = blkio_subsys_id,
#endif
        .use_id = 1,
        .module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
        return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
                            struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
                        unsigned long time, unsigned long sectors)
{
        blkg->time += time;
        blkg->sectors += sectors;
}
EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_stats);
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
                        struct blkio_group *blkg, void *key, dev_t dev)
{
        unsigned long flags;

        spin_lock_irqsave(&blkcg->lock, flags);
        rcu_assign_pointer(blkg->key, key);
        blkg->blkcg_id = css_id(&blkcg->css);
        hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
        spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        /* Need to take css reference ? */
        cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
        blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
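/*
 * Illustrative only: a hypothetical policy that embeds a struct
 * blkio_group in its own per-group structure might hash it in roughly
 * like this (my_group, my_key and my_dev are made-up names):
 *
 *      struct my_group *mg = kzalloc(sizeof(*mg), GFP_ATOMIC);
 *
 *      if (mg)
 *              blkiocg_add_blkio_group(blkcg, &mg->blkg, my_key, my_dev);
 *
 * The opaque key is whatever pointer the policy will later pass to
 * blkiocg_lookup_group() to find this group again.
 */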
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        hlist_del_init_rcu(&blkg->blkcg_node);
        blkg->blkcg_id = 0;
}
/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blk_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        struct blkio_cgroup *blkcg;
        unsigned long flags;
        struct cgroup_subsys_state *css;
        int ret = 1;

        rcu_read_lock();
        css = css_lookup(&blkio_subsys, blkg->blkcg_id);
        if (!css)
                goto out;

        blkcg = container_of(css, struct blkio_cgroup, css);
        spin_lock_irqsave(&blkcg->lock, flags);
        if (!hlist_unhashed(&blkg->blkcg_node)) {
                __blkiocg_del_blkio_group(blkg);
                ret = 0;
        }
        spin_unlock_irqrestore(&blkcg->lock, flags);
out:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
        struct blkio_group *blkg;
        struct hlist_node *n;

        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
                if (blkg->key == key)
                        return blkg;
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
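/*
 * Illustrative only: per the comment above, a caller must hold
 * rcu_read_lock() across the lookup and any use of the result,
 * roughly like this:
 *
 *      rcu_read_lock();
 *      blkg = blkiocg_lookup_group(blkcg, key);
 *      if (blkg)
 *              ... use blkg, still under rcu_read_lock() ...
 *      rcu_read_unlock();
 */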
#define SHOW_FUNCTION(__VAR)                                            \
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,                \
                                  struct cftype *cftype)                \
{                                                                       \
        struct blkio_cgroup *blkcg;                                     \
                                                                        \
        blkcg = cgroup_to_blkio_cgroup(cgroup);                         \
        return (u64)blkcg->__VAR;                                       \
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION
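/*
 * For reference, SHOW_FUNCTION(weight) above expands to:
 *
 *      static u64 blkiocg_weight_read(struct cgroup *cgroup,
 *                                     struct cftype *cftype)
 *      {
 *              struct blkio_cgroup *blkcg;
 *
 *              blkcg = cgroup_to_blkio_cgroup(cgroup);
 *              return (u64)blkcg->weight;
 *      }
 *
 * which is the read_u64 handler wired up for the "weight" cftype below.
 */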
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
        struct blkio_cgroup *blkcg;
        struct blkio_group *blkg;
        struct hlist_node *n;
        struct blkio_policy_type *blkiop;

        if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
                return -EINVAL;

        blkcg = cgroup_to_blkio_cgroup(cgroup);
        spin_lock(&blkio_list_lock);
        spin_lock_irq(&blkcg->lock);
        blkcg->weight = (unsigned int)val;
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                list_for_each_entry(blkiop, &blkio_list, list)
                        blkiop->ops.blkio_update_group_weight_fn(blkg,
                                        blkcg->weight);
        }
        spin_unlock_irq(&blkcg->lock);
        spin_unlock(&blkio_list_lock);
        return 0;
}
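/*
 * Illustrative only: with the blkio controller mounted (the mount point
 * below is just an example), the per-cgroup weight is set from userspace
 * through the "blkio.weight" file created from blkio_files[]:
 *
 *      # mount -t cgroup -o blkio none /cgroup/blkio
 *      # echo 500 > /cgroup/blkio/grp1/blkio.weight
 *
 * Values outside [BLKIO_WEIGHT_MIN, BLKIO_WEIGHT_MAX] are rejected with
 * -EINVAL by the range check above.
 */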
#define SHOW_FUNCTION_PER_GROUP(__VAR)                                  \
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,                \
                        struct cftype *cftype, struct seq_file *m)      \
{                                                                       \
        struct blkio_cgroup *blkcg;                                     \
        struct blkio_group *blkg;                                       \
        struct hlist_node *n;                                           \
                                                                        \
        if (!cgroup_lock_live_group(cgroup))                            \
                return -ENODEV;                                         \
                                                                        \
        blkcg = cgroup_to_blkio_cgroup(cgroup);                         \
        rcu_read_lock();                                                \
        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
                if (blkg->dev)                                          \
                        seq_printf(m, "%u:%u %lu\n", MAJOR(blkg->dev),  \
                                   MINOR(blkg->dev), blkg->__VAR);      \
        }                                                               \
        rcu_read_unlock();                                              \
        cgroup_unlock();                                                \
        return 0;                                                       \
}

SHOW_FUNCTION_PER_GROUP(time);
SHOW_FUNCTION_PER_GROUP(sectors);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue);
#endif
#undef SHOW_FUNCTION_PER_GROUP
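/*
 * Per the seq_printf() format above, reading e.g. "blkio.sectors" yields
 * one "major:minor value" line per device the group has done IO on; the
 * numbers below are made up for illustration:
 *
 *      8:16 1024
 *      8:32 256
 */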
#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
                        unsigned long dequeue)
{
        blkg->dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_dequeue_stats);
#endif
struct cftype blkio_files[] = {
        {
                .name = "weight",
                .read_u64 = blkiocg_weight_read,
                .write_u64 = blkiocg_weight_write,
        },
        {
                .name = "time",
                .read_seq_string = blkiocg_time_read,
        },
        {
                .name = "sectors",
                .read_seq_string = blkiocg_sectors_read,
        },
#ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "dequeue",
                .read_seq_string = blkiocg_dequeue_read,
        },
#endif
};
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        return cgroup_add_files(cgroup, subsys, blkio_files,
                                ARRAY_SIZE(blkio_files));
}
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
        unsigned long flags;
        struct blkio_group *blkg;
        void *key;
        struct blkio_policy_type *blkiop;

        rcu_read_lock();
remove_entry:
        spin_lock_irqsave(&blkcg->lock, flags);

        if (hlist_empty(&blkcg->blkg_list)) {
                spin_unlock_irqrestore(&blkcg->lock, flags);
                goto done;
        }

        blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
                                blkcg_node);
        key = rcu_dereference(blkg->key);
        __blkiocg_del_blkio_group(blkg);

        spin_unlock_irqrestore(&blkcg->lock, flags);

        /*
         * This blkio_group is being unlinked as associated cgroup is going
         * away. Let all the IO controlling policies know about this event.
         *
         * Currently this is static call to one io controlling policy. Once
         * we have more policies in place, we need some dynamic registration
         * of callback function.
         */
        spin_lock(&blkio_list_lock);
        list_for_each_entry(blkiop, &blkio_list, list)
                blkiop->ops.blkio_unlink_group_fn(key, blkg);
        spin_unlock(&blkio_list_lock);
        goto remove_entry;
done:
        free_css_id(&blkio_subsys, &blkcg->css);
        rcu_read_unlock();
        if (blkcg != &blkio_root_cgroup)
                kfree(blkcg);
}
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg, *parent_blkcg;

        if (!cgroup->parent) {
                blkcg = &blkio_root_cgroup;
                goto done;
        }

        /* Currently we do not support hierarchy deeper than two level (0,1) */
        parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
        if (css_depth(&parent_blkcg->css) > 0)
                return ERR_PTR(-EINVAL);

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
        spin_lock_init(&blkcg->lock);
        INIT_HLIST_HEAD(&blkcg->blkg_list);

        return &blkcg->css;
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
                                struct cgroup *cgroup, struct task_struct *tsk,
                                bool threadgroup)
{
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        task_lock(tsk);
        ioc = tsk->io_context;
        if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                ret = -EINVAL;
        task_unlock(tsk);

        return ret;
}
static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
                        struct cgroup *prev, struct task_struct *tsk,
                        bool threadgroup)
{
        struct io_context *ioc;

        task_lock(tsk);
        ioc = tsk->io_context;
        if (ioc)
                ioc->cgroup_changed = 1;
        task_unlock(tsk);
}
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
        spin_lock(&blkio_list_lock);
        list_add_tail(&blkiop->list, &blkio_list);
        spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
        spin_lock(&blkio_list_lock);
        list_del_init(&blkiop->list);
        spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
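/*
 * Illustrative only: a rough sketch of how an IO scheduler might register
 * itself as a blkio policy. Only the ops referenced in this file
 * (blkio_unlink_group_fn, blkio_update_group_weight_fn) are shown; the
 * full struct blkio_policy_type lives in blk-cgroup.h, and the my_*
 * callbacks below are made-up names:
 *
 *      static struct blkio_policy_type blkio_policy_myiosched = {
 *              .ops = {
 *                      .blkio_unlink_group_fn = my_unlink_blkio_group,
 *                      .blkio_update_group_weight_fn = my_update_weight,
 *              },
 *      };
 *
 *      blkio_policy_register(&blkio_policy_myiosched);    // at init
 *      blkio_policy_unregister(&blkio_policy_myiosched);  // at exit
 */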
static int __init init_cgroup_blkio(void)
{
        return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
        cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");