/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice and after that the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */
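/*
 * Note: the slice length scales with HZ, so the 100ms value holds for any
 * clock rate; e.g. HZ=1000 gives a 100-jiffy slice and HZ=250 a 25-jiffy
 * slice, both 100ms of wall-clock time.
 */
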
static struct blkio_policy_type blkio_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
					 unsigned long delay);

struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned int count;
	unsigned long min_disptime;
};

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

/* Per-cpu group stats */
struct tg_stats_cpu {
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
};

struct throtl_grp {
	/* active throtl group service_tree member */
	struct rb_node rb_node;

	/*
	 * Dispatch time in jiffies. This is the estimated time when the group
	 * will unthrottle and is ready to dispatch more bios. It is used as a
	 * key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bio's dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Some throttle limits got updated for the group */
	bool limits_changed;

	/* Per cpu stats pointer */
	struct tg_stats_cpu __percpu *stats_cpu;

	/* List of tgs waiting for per cpu stats memory to be allocated */
	struct list_head stats_alloc_node;
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_rb_root tg_service_tree;

	struct throtl_grp *root_tg;
	struct request_queue *queue;

	/* Total Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/*
	 * number of total undestroyed groups
	 */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work throtl_work;

	bool limits_changed;
};

/* list and work item to allocate percpu group stats */
static DEFINE_SPINLOCK(tg_stats_alloc_lock);
static LIST_HEAD(tg_stats_alloc_list);

static void tg_stats_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);

static inline struct throtl_grp *blkg_to_tg(struct blkio_group *blkg)
{
	return blkg_to_pdata(blkg, &blkio_policy_throtl);
}

static inline struct blkio_group *tg_to_blkg(struct throtl_grp *tg)
{
	return pdata_to_blkg(tg);
}

enum tg_state_flags {
	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
};

#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}

THROTL_TG_FNS(on_rr);
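/*
 * For reference, THROTL_TG_FNS(on_rr) above expands to three helpers:
 * throtl_mark_tg_on_rr(), throtl_clear_tg_on_rr() and throtl_tg_on_rr(),
 * which set, clear and test THROTL_TG_FLAG_on_rr in tg->flags respectively.
 */
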
#define throtl_log_tg(td, tg, fmt, args...)				\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
			  blkg_path(tg_to_blkg(tg)), ##args);

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)

static inline unsigned int total_nr_queued(struct throtl_data *td)
{
	return td->nr_queued[0] + td->nr_queued[1];
}

/*
 * Worker for allocating per cpu stat for tgs. This is scheduled on the
 * system_nrt_wq once there are some groups on the alloc_list waiting for
 * allocation.
 */
static void tg_stats_alloc_fn(struct work_struct *work)
{
	static struct tg_stats_cpu *stats_cpu;	/* this fn is non-reentrant */
	struct delayed_work *dwork = to_delayed_work(work);
	bool empty = false;

alloc_stats:
	if (!stats_cpu) {
		stats_cpu = alloc_percpu(struct tg_stats_cpu);
		if (!stats_cpu) {
			/* allocation failed, try again after some time */
			queue_delayed_work(system_nrt_wq, dwork,
					   msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&tg_stats_alloc_lock);

	if (!list_empty(&tg_stats_alloc_list)) {
		struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
							 struct throtl_grp,
							 stats_alloc_node);
		swap(tg->stats_cpu, stats_cpu);
		list_del_init(&tg->stats_alloc_node);
	}

	empty = list_empty(&tg_stats_alloc_list);
	spin_unlock_irq(&tg_stats_alloc_lock);
	if (!empty)
		goto alloc_stats;
}

static void throtl_init_blkio_group(struct blkio_group *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);

	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);
	tg->limits_changed = false;

	/* -1 means the limit is not set, i.e. unlimited */
	tg->bps[READ] = -1;
	tg->bps[WRITE] = -1;
	tg->iops[READ] = -1;
	tg->iops[WRITE] = -1;

	/*
	 * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
	 * but the percpu allocator can't be called from the IO path. Queue tg
	 * on tg_stats_alloc_list and allocate from the work item.
	 */
	spin_lock(&tg_stats_alloc_lock);
	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
	queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
	spin_unlock(&tg_stats_alloc_lock);
}

static void throtl_exit_blkio_group(struct blkio_group *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);

	spin_lock(&tg_stats_alloc_lock);
	list_del_init(&tg->stats_alloc_node);
	spin_unlock(&tg_stats_alloc_lock);

	free_percpu(tg->stats_cpu);
}

static void throtl_reset_group_stats(struct blkio_group *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	int cpu;

	if (tg->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
	}
}

static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
					   struct blkio_cgroup *blkcg)
{
	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case.
	 */
	if (blkcg == &blkio_root_cgroup)
		return td->root_tg;

	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
}

static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
						  struct blkio_cgroup *blkcg)
{
	struct request_queue *q = td->queue;
	struct throtl_grp *tg = NULL;

	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case.
	 */
	if (blkcg == &blkio_root_cgroup) {
		tg = td->root_tg;
	} else {
		struct blkio_group *blkg;

		blkg = blkg_lookup_create(blkcg, q, false);

		/* if %NULL and @q is alive, fall back to root_tg */
		if (!IS_ERR(blkg))
			tg = blkg_to_tg(blkg);
		else if (!blk_queue_dead(q))
			tg = td->root_tg;
	}

	return tg;
}

static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_tg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

static void update_min_dispatch_time(struct throtl_rb_root *st)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(st);
	if (!tg)
		return;

	st->min_disptime = tg->disptime;
}

static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &st->rb);
}

static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	tg_service_tree_add(st, tg);
	throtl_mark_tg_on_rr(tg);
	st->count++;
}

static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!throtl_tg_on_rr(tg))
		__throtl_enqueue_tg(td, tg);
}

static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
	throtl_clear_tg_on_rr(tg);
}

static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (throtl_tg_on_rr(tg))
		__throtl_dequeue_tg(td, tg);
}

static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	/*
	 * If there are more bios pending, schedule more work.
	 */
	if (!total_nr_queued(td))
		return;

	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
	else
		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
}

static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

/* Determine if the previously allocated or extended slice is complete or not */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return 0;

	return 1;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if the slice is used. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner, then we need to reduce slice_end. A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */
	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
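/*
 * Worked example (illustrative numbers only): with HZ=1000, throtl_slice=100
 * jiffies and bps=1048576, suppose 250 jiffies have elapsed since
 * slice_start. Then nr_slices = 250/100 = 2, bytes_trim =
 * 1048576*100*2/1000 = 209715, and slice_start advances by 200 jiffies,
 * forgetting the budget of the two fully used slices while keeping the
 * partial third slice accounted.
 */
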
static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value as the minimum iops can
	 * be 1; then at max the jiffies elapsed should be equivalent to 1
	 * second as we will allow dispatch after 1 second and after that the
	 * slice should have been trimmed.
	 */
	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}
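/*
 * Example (illustrative numbers only): with HZ=1000 and iops=10, a group
 * that has already dispatched 10 bios in this slice gets
 * jiffy_wait = ((10+1)*1000)/10 + 1 = 1101 jiffies for the 11th bio,
 * less the time already elapsed in the slice.
 */
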
static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}
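/*
 * Example (illustrative numbers only): with HZ=1000 and bps=8192, a bio
 * exceeding bytes_allowed by extra_bytes=4096 waits
 * div64_u64(4096*1000, 8192) = 500 jiffies, plus the slice-rounding
 * correction (jiffy_elapsed_rnd - jiffy_elapsed).
 */
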
static bool tg_no_rule_group(struct throtl_grp *tg, bool rw)
{
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
		return 1;
	return 0;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
				struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the first
	 * bio queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice long since now.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);

	return 0;
}

static void throtl_update_dispatch_stats(struct blkio_group *blkg, u64 bytes,
					 int rw)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct tg_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (tg->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on the same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(tg->stats_cpu);

	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
}

static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
			struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	blkg_get(tg_to_blkg(tg));
	tg->nr_queued[rw]++;
	td->nr_queued[rw]++;
	throtl_enqueue_tg(td, tg);
}

static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);
}

static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
				bool rw, struct bio_list *bl)
{
	struct bio *bio;

	bio = bio_list_pop(&tg->bio_lists[rw]);
	tg->nr_queued[rw]--;
	/* Drop bio reference on blkg */
	blkg_put(tg_to_blkg(tg));

	BUG_ON(td->nr_queued[rw] <= 0);
	td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(td, tg, rw);
}

static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
				struct bio_list *bl)
{
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
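/*
 * With the default throtl_grp_quantum of 8, max_nr_reads works out to
 * 8*3/4 = 6 and max_nr_writes to 8 - 6 = 2, giving the 75%/25%
 * read/write split mentioned above.
 */
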
static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_rb_root *st = &td->tg_service_tree;

	while (1) {
		tg = throtl_rb_first(st);

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1]) {
			tg_update_disptime(td, tg);
			throtl_enqueue_tg(td, tg);
		}

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

static void throtl_process_limit_change(struct throtl_data *td)
{
	struct request_queue *q = td->queue;
	struct blkio_group *blkg, *n;

	if (!td->limits_changed)
		return;

	xchg(&td->limits_changed, false);

	throtl_log(td, "limits changed");

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (!tg->limits_changed)
			continue;

		if (!xchg(&tg->limits_changed, false))
			continue;

		throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
			tg->iops[READ], tg->iops[WRITE]);

		/*
		 * Restart the slices for both READ and WRITES. It
		 * might happen that a group's limits are dropped
		 * suddenly and we don't want to account recently
		 * dispatched IO with the new low rate.
		 */
		throtl_start_new_slice(td, tg, 0);
		throtl_start_new_slice(td, tg, 1);

		if (throtl_tg_on_rr(tg))
			tg_update_disptime(td, tg);
	}
}

/* Dispatch throttled bios. Should be called without queue lock held. */
static int throtl_dispatch(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;

	spin_lock_irq(q->queue_lock);

	throtl_process_limit_change(td);

	if (!total_nr_queued(td))
		goto out;

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
			total_nr_queued(td), td->nr_queued[READ],
			td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

	if (nr_disp)
		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);
out:
	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to make sure
	 * they get dispatched immediately.
	 */
	if (nr_disp) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
	return nr_disp;
}

void blk_throtl_work(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      throtl_work.work);
	struct request_queue *q = td->queue;

	throtl_dispatch(q);
}

/* Call with queue lock held */
static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{
	struct delayed_work *dwork = &td->throtl_work;

	/* schedule work if limits changed even if no bio is queued */
	if (total_nr_queued(td) || td->limits_changed) {
		/*
		 * We might have a work scheduled to be executed in future.
		 * Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		queue_delayed_work(kthrotld_workqueue, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
				delay, jiffies);
	}
}

static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	struct throtl_grp *tg = (void *)pd->pdata;
	struct blkg_rwstat rwstat = { }, tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		tmp = blkg_rwstat_read((void *)sc + off);
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			rwstat.cnt[i] += tmp.cnt[i];
	}

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/* print per-cpu blkg_rwstat specified by BLKCG_STAT_PRIV() */
static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
			       struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), true);
	return 0;
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	u64 v = *(u64 *)((void *)pd->pdata + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	unsigned int v = *(unsigned int *)((void *)pd->pdata + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp), tg_prfill_conf_u64,
			  BLKIO_POLICY_THROTL, cft->private, false);
	return 0;
}

static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
			      struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp), tg_prfill_conf_uint,
			  BLKIO_POLICY_THROTL, cft->private, false);
	return 0;
}

static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
		       bool is_u64)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	int ret;

	ret = blkg_conf_prep(blkcg, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	tg = blkg_to_tg(ctx.blkg);
	if (tg) {
		struct throtl_data *td = ctx.blkg->q->td;

		if (!ctx.v)
			ctx.v = -1;

		if (is_u64)
			*(u64 *)((void *)tg + cft->private) = ctx.v;
		else
			*(unsigned int *)((void *)tg + cft->private) = ctx.v;

		/* XXX: we don't need the following deferred processing */
		xchg(&tg->limits_changed, true);
		xchg(&td->limits_changed, true);
		throtl_schedule_delayed_work(td, 0);

		ret = 0;
	}

	blkg_conf_finish(&ctx);
	return ret;
}

static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
			   const char *buf)
{
	return tg_set_conf(cgrp, cft, buf, true);
}

static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	return tg_set_conf(cgrp, cft, buf, false);
}

static struct cftype throtl_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ]),
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE]),
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ]),
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE]),
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
				offsetof(struct tg_stats_cpu, service_bytes)),
		.read_seq_string = tg_print_cpu_rwstat,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
				offsetof(struct tg_stats_cpu, serviced)),
		.read_seq_string = tg_print_cpu_rwstat,
	},
	{ }	/* terminate */
};
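/*
 * These cftypes surface under the blkio cgroup hierarchy with a "blkio."
 * prefix. A typical (illustrative) configuration from userspace writes
 * "major:minor value" to one of the files, e.g.:
 *
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * which caps reads on device 8:16 for that cgroup at 1MB/s; the full path
 * depends on where the blkio controller is mounted.
 */
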
static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->throtl_work);
}

static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_init_group_fn = throtl_init_blkio_group,
		.blkio_exit_group_fn = throtl_exit_blkio_group,
		.blkio_reset_group_stats_fn = throtl_reset_group_stats,
	},
	.plid = BLKIO_POLICY_THROTL,
	.pdata_size = sizeof(struct throtl_grp),
	.cftypes = throtl_files,
};
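/*
 * blk_throtl_bio - check if @bio needs to be throttled
 * @q: request_queue the bio is headed for
 * @bio: bio to check
 *
 * Returns %true if @bio was queued for later dispatch by the throttle
 * work, in which case the caller must not issue it further; %false means
 * the bio is within the group's rate limits (or exempt) and can be
 * dispatched directly.
 */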
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	bool rw = bio_data_dir(bio), update_disptime = true;
	struct blkio_cgroup *blkcg;
	bool throttled = false;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		goto out;
	}

	/* bio_associate_current() needs ioc, try creating */
	create_io_context(GFP_ATOMIC, q->node);

	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
	 * just update the dispatch stats in lockless manner and return.
	 */
	rcu_read_lock();
	blkcg = bio_blkio_cgroup(bio);
	tg = throtl_lookup_tg(td, blkcg);
	if (tg) {
		if (tg_no_rule_group(tg, rw)) {
			throtl_update_dispatch_stats(tg_to_blkg(tg),
						     bio->bi_size, bio->bi_rw);
			goto out_unlock_rcu;
		}
	}

	/*
	 * Either the group has not been allocated yet or it is not an
	 * unlimited IO group.
	 */
	spin_lock_irq(q->queue_lock);
	tg = throtl_lookup_create_tg(td, blkcg);
	if (unlikely(!tg))
		goto out_unlock;

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in the same dir. No
		 * need to update the dispatch time.
		 */
		update_disptime = false;
		goto queue_bio;
	}

	/* Bio is within the rate limit of the group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not queued
		 * for a long time and the slice keeps on extending and trim
		 * is not called for a long time. Now if limits are reduced
		 * suddenly we take into account all the IO dispatched so far
		 * at the new low rate and newly queued IO gets a really long
		 * dispatch time.
		 *
		 * So keep on trimming the slice even if the bio is not queued.
		 */
		throtl_trim_slice(td, tg, rw);
		goto out_unlock;
	}

queue_bio:
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	bio_associate_current(bio);
	throtl_add_bio_tg(q->td, tg, bio);
	throttled = true;

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out_unlock_rcu:
	rcu_read_unlock();
out:
	return throttled;
}

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct throtl_rb_root *st = &td->tg_service_tree;
	struct throtl_grp *tg;
	struct bio_list bl;
	struct bio *bio;

	WARN_ON_ONCE(!queue_is_locked(q));

	bio_list_init(&bl);

	while ((tg = throtl_rb_first(st))) {
		throtl_dequeue_tg(td, tg);

		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
		while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
	}
	spin_unlock_irq(q->queue_lock);

	while ((bio = bio_list_pop(&bl)))
		generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	struct blkio_group *blkg;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	td->tg_service_tree = THROTL_RB_ROOT;
	td->limits_changed = false;
	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

	q->td = td;
	td->queue = q;

	/* alloc and init root group. */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
	if (!IS_ERR(blkg))
		td->root_tg = blkg_to_tg(blkg);

	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (!td->root_tg) {
		kfree(td);
		return -ENOMEM;
	}
	return 0;
}

void blk_throtl_exit(struct request_queue *q)
{
	throtl_shutdown_wq(q);
	kfree(q->td);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	blkio_policy_register(&blkio_policy_throtl);
	return 0;
}

module_init(throtl_init);