block/blk-throttle.c
/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */
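
/*
 * Example (a sketch, assuming HZ=1000): throtl_slice evaluates to 100
 * jiffies, so dispatch budget is granted, accounted and trimmed in 100ms
 * windows.
 */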
struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned int count;
	unsigned long min_disptime;
};

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
struct throtl_grp {
	/* List of throtl groups on the request queue */
	struct hlist_node tg_node;

	/* active throtl group service_tree member */
	struct rb_node rb_node;

	/*
	 * Dispatch time in jiffies. This is the estimated time when the
	 * group will unthrottle and is ready to dispatch more bios. It is
	 * used as a key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	struct blkio_group blkg;
	atomic_t ref;
	unsigned int flags;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Some throttle limits got updated for the group */
	bool limits_changed;
};
struct throtl_data
{
	/* List of throtl groups */
	struct hlist_head tg_list;

	/* service tree for active throtl groups */
	struct throtl_rb_root tg_service_tree;

	struct throtl_grp root_tg;
	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* Number of total undestroyed groups */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work throtl_work;

	atomic_t limits_changed;
};
enum tg_state_flags {
	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
};

#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}

THROTL_TG_FNS(on_rr);
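
/*
 * For reference, THROTL_TG_FNS(on_rr) above expands to:
 *
 *	static inline void throtl_mark_tg_on_rr(struct throtl_grp *tg)
 *	{
 *		(tg)->flags |= (1 << THROTL_TG_FLAG_on_rr);
 *	}
 *	static inline void throtl_clear_tg_on_rr(struct throtl_grp *tg)
 *	{
 *		(tg)->flags &= ~(1 << THROTL_TG_FLAG_on_rr);
 *	}
 *	static inline int throtl_tg_on_rr(const struct throtl_grp *tg)
 *	{
 *		return ((tg)->flags & (1 << THROTL_TG_FLAG_on_rr)) != 0;
 *	}
 */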
#define throtl_log_tg(td, tg, fmt, args...)				\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
			blkg_path(&(tg)->blkg), ##args);		\

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
{
	if (blkg)
		return container_of(blkg, struct throtl_grp, blkg);

	return NULL;
}

static inline int total_nr_queued(struct throtl_data *td)
{
	return (td->nr_queued[0] + td->nr_queued[1]);
}

static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
{
	atomic_inc(&tg->ref);
	return tg;
}

static void throtl_put_tg(struct throtl_grp *tg)
{
	BUG_ON(atomic_read(&tg->ref) <= 0);
	if (!atomic_dec_and_test(&tg->ref))
		return;
	kfree(tg);
}
static struct throtl_grp *throtl_find_alloc_tg(struct throtl_data *td,
			struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct throtl_grp *tg = NULL;
	void *key = td;
	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
	unsigned int major, minor;

	/*
	 * TODO: Speed up blkiocg_lookup_group() by maintaining a radix
	 * tree of blkgs (instead of traversing through the hash list all
	 * the time).
	 */

	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case.
	 */
	if (blkcg == &blkio_root_cgroup)
		tg = &td->root_tg;
	else
		tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));

	/* Fill in device details for root group */
	if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		tg->blkg.dev = MKDEV(major, minor);
		goto done;
	}

	if (tg)
		goto done;

	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
	if (!tg)
		goto done;

	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);

	/*
	 * Take the initial reference that will be released on destroy.
	 * This can be thought of as a joint reference by cgroup and
	 * request queue which will be dropped by either request queue
	 * exit or cgroup deletion path, depending on who is exiting first.
	 */
	atomic_set(&tg->ref, 1);

	/* Add group onto cgroup list */
	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
	blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
				MKDEV(major, minor), BLKIO_POLICY_THROTL);

	tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
	tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
	tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
	tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);

	hlist_add_head(&tg->tg_node, &td->tg_list);
	td->nr_undestroyed_grps++;
done:
	return tg;
}
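
/*
 * Note: the kzalloc_node() above uses GFP_ATOMIC because this path is
 * reached via throtl_get_tg() from blk_throtl_bio() with the queue lock
 * held, so a sleeping allocation is not an option here.
 */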
static struct throtl_grp *throtl_get_tg(struct throtl_data *td)
{
	struct cgroup *cgroup;
	struct throtl_grp *tg = NULL;

	rcu_read_lock();
	cgroup = task_cgroup(current, blkio_subsys_id);
	tg = throtl_find_alloc_tg(td, cgroup);
	if (!tg)
		tg = &td->root_tg;
	rcu_read_unlock();
	return tg;
}
static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_tg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

static void update_min_dispatch_time(struct throtl_rb_root *st)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(st);
	if (!tg)
		return;

	st->min_disptime = tg->disptime;
}
static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &st->rb);
}
static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	tg_service_tree_add(st, tg);
	throtl_mark_tg_on_rr(tg);
	st->count++;
}

static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!throtl_tg_on_rr(tg))
		__throtl_enqueue_tg(td, tg);
}

static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
	throtl_clear_tg_on_rr(tg);
}

static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (throtl_tg_on_rr(tg))
		__throtl_dequeue_tg(td, tg);
}
static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	/*
	 * If there are more bios pending, schedule more work.
	 */
	if (!total_nr_queued(td))
		return;

	BUG_ON(!st->count);

	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td->queue, 0);
	else
		throtl_schedule_delayed_work(td->queue,
				(st->min_disptime - jiffies));
}
static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}
static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}
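
/*
 * Worked example for the roundup above (a sketch, assuming HZ=1000 so
 * throtl_slice=100): roundup(jiffy_end, throtl_slice) aligns slice_end to
 * the next slice boundary, e.g. jiffy_end=1234 becomes 1300. This keeps
 * every slice an integral number of throtl_slice intervals long.
 */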
/* Determine if previously allocated or extended slice is complete or not */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return 0;

	return 1;
}
/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then time slices don't get renewed.
	 * Don't try to trim the slice if the slice is used up. A new slice
	 * will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low, resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner; then we need to reduce slice_end. A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */

	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
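
/*
 * Worked example for the trim math above (a sketch, assuming HZ=1000,
 * throtl_slice=100, bps=1048576 and 250 jiffies elapsed since
 * slice_start): nr_slices = 250/100 = 2, so bytes_trim = 1048576 * 100 *
 * 2 / 1000 = 209715 bytes of consumed budget are forgiven, slice_start
 * advances by 200 jiffies, and the 50-jiffy remainder stays accounted.
 */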
static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value: as the minimum iops
	 * can be 1, at most the elapsed jiffies should be the equivalent of
	 * 1 second, since we will allow dispatch after 1 second and after
	 * that the slice should have been trimmed.
	 */

	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}
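
/*
 * Worked example (a sketch, assuming HZ=1000, throtl_slice=100, iops=100
 * and 150 jiffies elapsed): jiffy_elapsed_rnd rounds up to 200, so
 * io_allowed = 100 * 200 / 1000 = 20 bios. With io_disp=20, the next bio
 * exceeds the budget and waits (21 * 1000)/100 + 1 - 150 = 61 jiffies.
 */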
static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}
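
/*
 * Worked example (a sketch, assuming HZ=1000, throtl_slice=100,
 * bps=1048576 and 100 jiffies elapsed): bytes_allowed = 1048576 * 100 /
 * 1000 = 104857. A 64KiB bio arriving with bytes_disp=65536 overshoots by
 * 65536 + 65536 - 104857 = 26215 bytes, giving a wait of 26215 * 1000 /
 * 1048576 = 25 jiffies, plus the round-up correction (0 here).
 */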
/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
				struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the
	 * first bio queued in the group bio list. So one should not be
	 * calling this function with a different bio if there are other
	 * bios queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice interval long since now.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);

	return 0;
}
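
/*
 * Note on the combined wait above: a bio must fit both limits, so the
 * group waits for the slower of the two. E.g. (a sketch) bps_wait=30 and
 * iops_wait=50 gives max_wait=50, and the slice is extended to cover that
 * wait so it does not expire in the meantime.
 */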
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	bool sync = bio->bi_rw & REQ_SYNC;

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	/*
	 * TODO: This will take blkg->stats_lock. Figure out a way
	 * to avoid this cost.
	 */
	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
}
static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
			struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	throtl_ref_get_tg(tg);
	tg->nr_queued[rw]++;
	td->nr_queued[rw]++;
	throtl_enqueue_tg(td, tg);
}
static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);
}
static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
				bool rw, struct bio_list *bl)
{
	struct bio *bio;

	bio = bio_list_pop(&tg->bio_lists[rw]);
	tg->nr_queued[rw]--;
	/* Drop bio reference on tg */
	throtl_put_tg(tg);

	BUG_ON(td->nr_queued[rw] <= 0);
	td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(td, tg, rw);
}
static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
				struct bio_list *bl)
{
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
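
/*
 * With the default throtl_grp_quantum of 8, max_nr_reads works out to
 * 8*3/4 = 6 and max_nr_writes to 8 - 6 = 2, i.e. at most 6 reads and
 * 2 writes per group per dispatch round.
 */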
static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_rb_root *st = &td->tg_service_tree;

	while (1) {
		tg = throtl_rb_first(st);

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1]) {
			tg_update_disptime(td, tg);
			throtl_enqueue_tg(td, tg);
		}

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}
static void throtl_process_limit_change(struct throtl_data *td)
{
	struct throtl_grp *tg;
	struct hlist_node *pos, *n;

	if (!atomic_read(&td->limits_changed))
		return;

	throtl_log(td, "limit changed =%d", atomic_read(&td->limits_changed));

	/*
	 * Make sure updates from the throtl_update_blkio_group_read_bps()
	 * group of functions to tg->limits_changed are visible. We do not
	 * want the update to td->limits_changed to be visible on this CPU
	 * while the update to tg->limits_changed is not visible yet. Hence
	 * the read barrier.
	 */
	smp_rmb();

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		if (throtl_tg_on_rr(tg) && tg->limits_changed) {
			throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
				" riops=%u wiops=%u", tg->bps[READ],
				tg->bps[WRITE], tg->iops[READ],
				tg->iops[WRITE]);
			tg_update_disptime(td, tg);
			tg->limits_changed = false;
		}
	}

	smp_mb__before_atomic_dec();
	atomic_dec(&td->limits_changed);
	smp_mb__after_atomic_dec();
}
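
/*
 * Barrier pairing in the limit-change protocol, as used above and in the
 * throtl_update_blkio_group_*() callbacks below:
 *
 *	updater (cgroup write)			worker (dispatch)
 *	----------------------			-----------------
 *	tg->bps[rw] = new value;		atomic_read(&td->limits_changed)
 *	smp_wmb();				smp_rmb();
 *	tg->limits_changed = true;		read tg->limits_changed
 *	smp_mb();				read tg->bps[rw]
 *	atomic_inc(&td->limits_changed);
 *
 * so once the worker sees the counter raised, it also sees which group
 * changed and its new limit.
 */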
/* Dispatch throttled bios. Should be called without queue lock held. */
static int throtl_dispatch(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;

	spin_lock_irq(q->queue_lock);

	throtl_process_limit_change(td);

	if (!total_nr_queued(td))
		goto out;

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%d read=%u write=%u",
			total_nr_queued(td), td->nr_queued[READ],
			td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

	if (nr_disp)
		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);
out:
	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to ensure
	 * immediate dispatch.
	 */
	if (nr_disp) {
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_unplug(q);
	}
	return nr_disp;
}
void blk_throtl_work(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					throtl_work.work);
	struct request_queue *q = td->queue;

	throtl_dispatch(q);
}
/* Call with queue lock held */
void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
{
	struct throtl_data *td = q->td;
	struct delayed_work *dwork = &td->throtl_work;

	if (total_nr_queued(td) > 0) {
		/*
		 * We might have a work scheduled to be executed in future.
		 * Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		kblockd_schedule_delayed_work(q, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
				delay, jiffies);
	}
}
EXPORT_SYMBOL(throtl_schedule_delayed_work);
static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	/* Something is wrong if we are trying to remove the same group twice */
	BUG_ON(hlist_unhashed(&tg->tg_node));

	hlist_del_init(&tg->tg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, the group can be destroyed.
	 */
	throtl_put_tg(tg);
	td->nr_undestroyed_grps--;
}
static void throtl_release_tgs(struct throtl_data *td)
{
	struct hlist_node *pos, *n;
	struct throtl_grp *tg;

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		/*
		 * If the cgroup removal path got to the blk_group first
		 * and removed it from the cgroup list, then it will take
		 * care of destroying the throtl group as well.
		 */
		if (!blkiocg_del_blkio_group(&tg->blkg))
			throtl_destroy_tg(td, tg);
	}
}

static void throtl_td_free(struct throtl_data *td)
{
	kfree(td);
}
/*
 * Blk cgroup controller notification saying that the blkio_group object is
 * being delinked as the associated cgroup object is going away. That also
 * means that no new IO will come in this group. So get rid of this group as
 * soon as any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). "key" is the rcu protected
 * pointer. That means "key" is a valid throtl_data pointer as long as we
 * are holding the rcu read lock.
 *
 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL, as even if the queue was going away, the cgroup
 * deletion path got to it first.
 */
void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
{
	unsigned long flags;
	struct throtl_data *td = key;

	spin_lock_irqsave(td->queue->queue_lock, flags);
	throtl_destroy_tg(td, tg_of_blkg(blkg));
	spin_unlock_irqrestore(td->queue->queue_lock, flags);
}
/*
 * For all update functions, key should be a valid pointer because these
 * update functions are called under blkcg_lock. That means blkg is valid
 * and in turn key is valid. The queue exit path cannot race because of
 * blkcg_lock.
 *
 * Cannot take the queue lock in the update functions, as taking the queue
 * lock under blkcg_lock is not allowed. On other paths we take blkcg_lock
 * under queue_lock.
 */
static void throtl_update_blkio_group_read_bps(void *key,
				struct blkio_group *blkg, u64 read_bps)
{
	struct throtl_data *td = key;

	tg_of_blkg(blkg)->bps[READ] = read_bps;
	/* Make sure read_bps is updated before setting limits_changed */
	smp_wmb();
	tg_of_blkg(blkg)->limits_changed = true;

	/* Make sure tg->limits_changed is updated before td->limits_changed */
	smp_mb__before_atomic_inc();
	atomic_inc(&td->limits_changed);
	smp_mb__after_atomic_inc();

	/* Schedule a work now to process the limit change */
	throtl_schedule_delayed_work(td->queue, 0);
}
static void throtl_update_blkio_group_write_bps(void *key,
				struct blkio_group *blkg, u64 write_bps)
{
	struct throtl_data *td = key;

	tg_of_blkg(blkg)->bps[WRITE] = write_bps;
	smp_wmb();
	tg_of_blkg(blkg)->limits_changed = true;
	smp_mb__before_atomic_inc();
	atomic_inc(&td->limits_changed);
	smp_mb__after_atomic_inc();
	throtl_schedule_delayed_work(td->queue, 0);
}

static void throtl_update_blkio_group_read_iops(void *key,
			struct blkio_group *blkg, unsigned int read_iops)
{
	struct throtl_data *td = key;

	tg_of_blkg(blkg)->iops[READ] = read_iops;
	smp_wmb();
	tg_of_blkg(blkg)->limits_changed = true;
	smp_mb__before_atomic_inc();
	atomic_inc(&td->limits_changed);
	smp_mb__after_atomic_inc();
	throtl_schedule_delayed_work(td->queue, 0);
}

static void throtl_update_blkio_group_write_iops(void *key,
			struct blkio_group *blkg, unsigned int write_iops)
{
	struct throtl_data *td = key;

	tg_of_blkg(blkg)->iops[WRITE] = write_iops;
	smp_wmb();
	tg_of_blkg(blkg)->limits_changed = true;
	smp_mb__before_atomic_inc();
	atomic_inc(&td->limits_changed);
	smp_mb__after_atomic_inc();
	throtl_schedule_delayed_work(td->queue, 0);
}
void throtl_shutdown_timer_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->throtl_work);
}

static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_unlink_group_fn = throtl_unlink_blkio_group,
		.blkio_update_group_read_bps_fn =
					throtl_update_blkio_group_read_bps,
		.blkio_update_group_write_bps_fn =
					throtl_update_blkio_group_write_bps,
		.blkio_update_group_read_iops_fn =
					throtl_update_blkio_group_read_iops,
		.blkio_update_group_write_iops_fn =
					throtl_update_blkio_group_write_iops,
	},
	.plid = BLKIO_POLICY_THROTL,
};
int blk_throtl_bio(struct request_queue *q, struct bio **biop)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	struct bio *bio = *biop;
	bool rw = bio_data_dir(bio), update_disptime = true;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		return 0;
	}

	spin_lock_irq(q->queue_lock);
	tg = throtl_get_tg(td);

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in the same direction.
		 * No need to update the dispatch time.
		 * Still update the disptime if rate limits on this group
		 * were changed.
		 */
		if (!tg->limits_changed)
			update_disptime = false;
		else
			tg->limits_changed = false;

		goto queue_bio;
	}

	/* Bio is within the rate limit of the group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);
		goto out;
	}

queue_bio:
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	throtl_add_bio_tg(q->td, tg, bio);
	*biop = NULL;

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

out:
	spin_unlock_irq(q->queue_lock);
	return 0;
}
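
/*
 * Caller-side sketch (an assumption about the call site, which lives
 * outside this file and is not shown here): the submitter passes &bio
 * down and must treat a NULL *biop on return as "bio queued for later
 * dispatch", since throtl_add_bio_tg() has taken ownership of it.
 */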
int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	struct throtl_grp *tg;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_HLIST_HEAD(&td->tg_list);
	td->tg_service_tree = THROTL_RB_ROOT;
	atomic_set(&td->limits_changed, 0);

	/* Init root group */
	tg = &td->root_tg;
	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);

	/* Practically unlimited BW */
	tg->bps[0] = tg->bps[1] = -1;
	tg->iops[0] = tg->iops[1] = -1;

	/*
	 * Set the root group reference to 2. One reference will be dropped
	 * when all groups on tg_list are being deleted during queue exit.
	 * The other reference will remain there as we don't want to delete
	 * this group: it is statically allocated and gets destroyed when
	 * throtl_data goes away.
	 */
	atomic_set(&tg->ref, 2);
	hlist_add_head(&tg->tg_node, &td->tg_list);
	td->nr_undestroyed_grps++;

	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

	rcu_read_lock();
	blkiocg_add_blkio_group(&blkio_root_cgroup, &tg->blkg, (void *)td,
					0, BLKIO_POLICY_THROTL);
	rcu_read_unlock();

	/* Attach throtl data to request queue */
	td->queue = q;
	q->td = td;
	return 0;
}
void blk_throtl_exit(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	bool wait = false;

	BUG_ON(!td);

	throtl_shutdown_timer_wq(q);

	spin_lock_irq(q->queue_lock);
	throtl_release_tgs(td);

	/* If there are other groups */
	if (td->nr_undestroyed_grps > 0)
		wait = true;

	spin_unlock_irq(q->queue_lock);

	/*
	 * Wait for tg->blkg->key accessors to exit their grace periods.
	 * Do this wait only if there are other undestroyed groups out
	 * there (other than the root group). This can happen if the cgroup
	 * deletion path claimed the responsibility of cleaning up a group
	 * before the queue cleanup code got to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are
	 * drivers which create/delete request queues hundreds of times
	 * during scan/boot, and synchronize_rcu() can take significant
	 * time and slow down boot.
	 */
	if (wait)
		synchronize_rcu();

	/*
	 * Just being safe: if, after the previous flush, somebody updated
	 * limits through the cgroup and another work item got queued,
	 * cancel it.
	 */
	throtl_shutdown_timer_wq(q);
	throtl_td_free(td);
}
static int __init throtl_init(void)
{
	blkio_policy_register(&blkio_policy_throtl);
	return 0;
}

module_init(throtl_init);