blkio: Export some symbols from blkio as its user CFQ can be a module
block/blk-cgroup.c (from linux-2.6/btrfs-unstable.git)
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include "blk-cgroup.h"
#include "cfq-iosched.h"

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

bool blkiocg_css_tryget(struct blkio_cgroup *blkcg)
{
	if (!css_tryget(&blkcg->css))
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(blkiocg_css_tryget);

void blkiocg_css_put(struct blkio_cgroup *blkcg)
{
	css_put(&blkcg->css);
}
EXPORT_SYMBOL_GPL(blkiocg_css_put);

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
			unsigned long time, unsigned long sectors)
{
	blkg->time += time;
	blkg->sectors += sectors;
}
EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_stats);
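
/*
 * Illustrative sketch, not part of this file: a policy such as CFQ is
 * expected to call the helper above when a queue gives up the disk,
 * charging used slice time and transferred sectors to its group. The
 * names cfqg, slice_start and nr_sectors are assumptions made purely
 * for illustration:
 *
 *	unsigned long used_jiffies = jiffies - slice_start;
 *
 *	blkiocg_update_blkio_group_stats(&cfqg->blkg, used_jiffies,
 *						nr_sectors);
 */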

void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * Returns 0 if the blkio_group was still on the cgroup list. Otherwise
 * returns 1, indicating that the blkio_group was unhashed by the time we
 * got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
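
/*
 * Illustrative sketch, not part of this file: the void *key is opaque to
 * this layer; the policy picks it (CFQ can use its per-device data
 * pointer) and must pass the same value to add and lookup. The names
 * cfqd, cfqg and disk below are assumptions for illustration:
 *
 *	blkiocg_add_blkio_group(blkcg, &cfqg->blkg, cfqd, disk_devt(disk));
 *	...
 *	rcu_read_lock();
 *	blkg = blkiocg_lookup_group(blkcg, cfqd);
 *	rcu_read_unlock();
 */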

#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				  struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION

static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		cfq_update_blkio_group_weight(blkg, blkcg->weight);
	spin_unlock_irq(&blkcg->lock);
	return 0;
}
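
/*
 * From userspace the weight file behaves like any other cgroup attribute.
 * A minimal session, assuming the blkio controller is mounted at /cgroup:
 *
 *	# mkdir /cgroup/test
 *	# echo 500 > /cgroup/test/blkio.weight
 *	# cat /cgroup/test/blkio.weight
 *	500
 *
 * Values outside [BLKIO_WEIGHT_MIN, BLKIO_WEIGHT_MAX] are rejected with
 * -EINVAL by the range check above.
 */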

#define SHOW_FUNCTION_PER_GROUP(__VAR)					\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
			struct cftype *cftype, struct seq_file *m)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev)						\
			seq_printf(m, "%u:%u %lu\n", MAJOR(blkg->dev),	\
				MINOR(blkg->dev), blkg->__VAR);		\
	}								\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time);
SHOW_FUNCTION_PER_GROUP(sectors);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue);
#endif
#undef SHOW_FUNCTION_PER_GROUP
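
/*
 * The per-group files generated above emit one "major:minor value" line
 * per device the group has issued IO on. Sample output with invented
 * numbers, for a group that ran on sda (8:0) and sdb (8:16):
 *
 *	# cat /cgroup/test/blkio.time
 *	8:0 2428
 *	8:16 1532
 */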

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_dequeue_stats);
#endif

struct cftype blkio_files[] = {
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_seq_string = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_seq_string = blkiocg_sectors_read,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "dequeue",
		.read_seq_string = blkiocg_dequeue_read,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
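
/*
 * A sketch of how the files above become visible, assuming the blkio
 * controller is mounted at /cgroup:
 *
 *	# mount -t cgroup -o blkio none /cgroup
 *	# ls /cgroup
 *	blkio.sectors  blkio.time  blkio.weight  ...
 */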

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked because the associated cgroup
	 * is going away. Let all the IO controlling policies know about
	 * this event.
	 *
	 * Currently this is a static call to one IO controlling policy.
	 * Once we have more policies in place, we will need some dynamic
	 * registration of callback functions.
	 */
	cfq_unlink_blkio_group(key, blkg);
	goto remove_entry;
done:
	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support a hierarchy deeper than two levels (0, 1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it is the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}
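
/*
 * Example of the case rejected above, with an invented child function
 * and stack: two tasks sharing one io_context via clone(CLONE_IO) both
 * count in ioc->nr_tasks, so moving either task alone into another
 * cgroup fails with -EINVAL:
 *
 *	clone(child_fn, child_stack, CLONE_IO | SIGCHLD, NULL);
 */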

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.use_id = 1,
};