/*
 * QEMU block throttling group infrastructure
 *
 * Copyright (C) Nodalink, EURL. 2014
 * Copyright (C) Igalia, S.L. 2015
 *
 * Authors:
 *   Benoît Canet <benoit.canet@nodalink.com>
 *   Alberto Garcia <berto@igalia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) version 3 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/throttle-groups.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "sysemu/qtest.h"
/* The ThrottleGroup structure (with its ThrottleState) is shared
 * among different BlockBackends and it is independent of the
 * AioContext, so in order to use it from different threads it needs
 * its own locking.
 *
 * This locking is however handled internally in this file, so it's
 * transparent to outside users.
 *
 * The whole ThrottleGroup structure is private and invisible to
 * outside users, who only use it through its ThrottleState.
 *
 * In addition to the ThrottleGroup structure, BlockBackendPublic has
 * fields that need to be accessed by other members of the group and
 * therefore also need to be protected by this lock. Once a
 * BlockBackend is registered in a group those fields can be accessed
 * by other threads at any time.
 *
 * Again, all this is handled internally and is mostly transparent to
 * the outside. The 'throttle_timers' field however has an additional
 * constraint because it may be temporarily invalid (see for example
 * blk_set_aio_context()). Therefore in this file a thread will
 * access some other BlockBackend's timers only after verifying that
 * that BlockBackend has throttled requests in the queue.
 */
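/* Summary of the locking used below: the global throttle_groups_lock protects
 * the list of groups and each group's reference count, while each group's
 * tg->lock protects the throttle state and round-robin scheduling data
 * (ts, head, tokens and any_timer_armed) shared by its members.  Both locks
 * are only held for short, non-blocking sections of code in this file.
 */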
typedef struct ThrottleGroup {
    char *name; /* This is constant during the lifetime of the group */

    QemuMutex lock; /* This lock protects the following four fields */
    ThrottleState ts;
    QLIST_HEAD(, BlockBackendPublic) head;
    BlockBackend *tokens[2];
    bool any_timer_armed[2];

    /* These two are protected by the global throttle_groups_lock */
    unsigned refcount;
    QTAILQ_ENTRY(ThrottleGroup) list;
} ThrottleGroup;
static QemuMutex throttle_groups_lock;
static QTAILQ_HEAD(, ThrottleGroup) throttle_groups =
    QTAILQ_HEAD_INITIALIZER(throttle_groups);
/* Increments the reference count of a ThrottleGroup given its name.
 *
 * If no ThrottleGroup is found with the given name a new one is
 * created.
 *
 * @name: the name of the ThrottleGroup
 * @ret:  the ThrottleState member of the ThrottleGroup
 */
ThrottleState *throttle_group_incref(const char *name)
{
    ThrottleGroup *tg = NULL;
    ThrottleGroup *iter;

    qemu_mutex_lock(&throttle_groups_lock);

    /* Look for an existing group with that name */
    QTAILQ_FOREACH(iter, &throttle_groups, list) {
        if (!strcmp(name, iter->name)) {
            tg = iter;
            break;
        }
    }

    /* Create a new one if not found */
    if (!tg) {
        tg = g_new0(ThrottleGroup, 1);
        tg->name = g_strdup(name);
        qemu_mutex_init(&tg->lock);
        throttle_init(&tg->ts);
        QLIST_INIT(&tg->head);

        QTAILQ_INSERT_TAIL(&throttle_groups, tg, list);
    }

    tg->refcount++;

    qemu_mutex_unlock(&throttle_groups_lock);

    return &tg->ts;
}
/* Decrease the reference count of a ThrottleGroup.
 *
 * When the reference count reaches zero the ThrottleGroup is
 * destroyed.
 *
 * @ts:  The ThrottleGroup to unref, given by its ThrottleState member
 */
void throttle_group_unref(ThrottleState *ts)
{
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);

    qemu_mutex_lock(&throttle_groups_lock);
    if (--tg->refcount == 0) {
        QTAILQ_REMOVE(&throttle_groups, tg, list);
        qemu_mutex_destroy(&tg->lock);
        g_free(tg->name);
        g_free(tg);
    }
    qemu_mutex_unlock(&throttle_groups_lock);
}
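/* In this file, throttle_group_incref() and throttle_group_unref() are used
 * by throttle_group_register_blk() and throttle_group_unregister_blk() below,
 * so each member of a group holds one reference to that group for as long as
 * it is registered in it.
 */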
/* Get the name from a BlockBackend's ThrottleGroup. The name (and the pointer)
 * is guaranteed to remain constant during the lifetime of the group.
 *
 * @blk:  a BlockBackend that is member of a throttling group
 * @ret:  the name of the group.
 */
const char *throttle_group_get_name(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    return tg->name;
}
/* Return the next BlockBackend in the round-robin sequence, simulating a
 * circular list.
 *
 * This assumes that tg->lock is held.
 *
 * @blk: the current BlockBackend
 * @ret: the next BlockBackend in the sequence
 */
static BlockBackend *throttle_group_next_blk(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    BlockBackendPublic *next = QLIST_NEXT(blkp, round_robin);

    if (!next) {
        next = QLIST_FIRST(&tg->head);
    }

    return blk_by_public(next);
}
/*
 * Return whether a BlockBackend has pending requests.
 *
 * This assumes that tg->lock is held.
 *
 * @blk: the BlockBackend
 * @is_write:  the type of operation (read/write)
 * @ret: whether the BlockBackend has pending requests.
 */
static inline bool blk_has_pending_reqs(BlockBackend *blk,
                                        bool is_write)
{
    const BlockBackendPublic *blkp = blk_get_public(blk);
    return blkp->pending_reqs[is_write];
}
/* Return the next BlockBackend in the round-robin sequence with pending I/O
 * requests.
 *
 * This assumes that tg->lock is held.
 *
 * @blk: the current BlockBackend
 * @is_write:  the type of operation (read/write)
 * @ret: the next BlockBackend with pending requests, or blk if there is
 *       none.
 */
static BlockBackend *next_throttle_token(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    BlockBackend *token, *start;

    start = token = tg->tokens[is_write];

    /* get the next blk in round-robin order */
    token = throttle_group_next_blk(token);
    while (token != start && !blk_has_pending_reqs(token, is_write)) {
        token = throttle_group_next_blk(token);
    }

    /* If no I/O is queued for scheduling on the next round-robin token
     * then decide that the token is the current blk because chances are
     * the current blk will get the current request queued.
     */
    if (token == start && !blk_has_pending_reqs(token, is_write)) {
        token = blk;
    }

    /* Either we return the original blk, or one with pending requests */
    assert(token == blk || blk_has_pending_reqs(token, is_write));

    return token;
}
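/* Example of the round-robin walk above (member names are only illustrative):
 * with group members A, B and C and tg->tokens[is_write] == B, the candidates
 * are visited in the order C, A, B and the first one with pending requests
 * becomes the new token; if none of them has queued requests the incoming
 * blk itself is chosen.
 */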
/* Check if the next I/O request for a BlockBackend needs to be throttled or
 * not. If there's no timer set in this group, set one and update the token
 * accordingly.
 *
 * This assumes that tg->lock is held.
 *
 * @blk: the current BlockBackend
 * @is_write:  the type of operation (read/write)
 * @ret: whether the I/O request needs to be throttled or not
 */
static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleTimers *tt = &blkp->throttle_timers;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    bool must_wait;

    if (atomic_read(&blkp->io_limits_disabled)) {
        return false;
    }

    /* Check if any of the timers in this group is already armed */
    if (tg->any_timer_armed[is_write]) {
        return true;
    }

    must_wait = throttle_schedule_timer(ts, tt, is_write);

    /* If a timer just got armed, set blk as the current token */
    if (must_wait) {
        tg->tokens[is_write] = blk;
        tg->any_timer_armed[is_write] = true;
    }

    return must_wait;
}
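/* The any_timer_armed[] flags ensure that at most one timer per direction
 * (read/write) is armed in the whole group at any time, and the BlockBackend
 * whose timer is armed is also recorded in tg->tokens[], so scheduling
 * resumes from that member when the timer fires.
 */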
/* Start the next pending I/O request for a BlockBackend.  Return whether
 * any request was actually pending.
 *
 * @blk: the current BlockBackend
 * @is_write:  the type of operation (read/write)
 */
static bool coroutine_fn throttle_group_co_restart_queue(BlockBackend *blk,
                                                         bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    bool ret;

    qemu_co_mutex_lock(&blkp->throttled_reqs_lock);
    ret = qemu_co_queue_next(&blkp->throttled_reqs[is_write]);
    qemu_co_mutex_unlock(&blkp->throttled_reqs_lock);

    return ret;
}
/* Look for the next pending I/O request and schedule it.
 *
 * This assumes that tg->lock is held.
 *
 * @blk: the current BlockBackend
 * @is_write:  the type of operation (read/write)
 */
static void schedule_next_request(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    bool must_wait;
    BlockBackend *token;

    /* Check if there's any pending request to schedule next */
    token = next_throttle_token(blk, is_write);
    if (!blk_has_pending_reqs(token, is_write)) {
        return;
    }

    /* Set a timer for the request if it needs to be throttled */
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* If it doesn't have to wait, queue it for immediate execution */
    if (!must_wait) {
        /* Give preference to requests from the current blk */
        if (qemu_in_coroutine() &&
            throttle_group_co_restart_queue(blk, is_write)) {
            token = blk;
        } else {
            ThrottleTimers *tt = &blk_get_public(token)->throttle_timers;
            int64_t now = qemu_clock_get_ns(tt->clock_type);
            timer_mod(tt->timers[is_write], now);
            tg->any_timer_armed[is_write] = true;
        }
        tg->tokens[is_write] = token;
    }
}
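/* There are two ways of kicking the next request above: if we are already
 * running in a coroutine and the current blk has something queued, it is
 * restarted directly; otherwise the token's timer is fired right away
 * (timer_mod with the current time), which wakes up the waiting request
 * from the timer callback in the token's AioContext.
 */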
/* Check if an I/O request needs to be throttled, wait and set a timer
 * if necessary, and schedule the next request using a round robin
 * algorithm.
 *
 * @blk: the current BlockBackend
 * @bytes:     the number of bytes for this I/O
 * @is_write:  the type of operation (read/write)
 */
void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
                                                        unsigned int bytes,
                                                        bool is_write)
{
    bool must_wait;
    BlockBackend *token;

    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);

    /* First we check if this I/O has to be throttled. */
    token = next_throttle_token(blk, is_write);
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* Wait if there's a timer set or queued requests of this type */
    if (must_wait || blkp->pending_reqs[is_write]) {
        blkp->pending_reqs[is_write]++;
        qemu_mutex_unlock(&tg->lock);
        qemu_co_mutex_lock(&blkp->throttled_reqs_lock);
        qemu_co_queue_wait(&blkp->throttled_reqs[is_write],
                           &blkp->throttled_reqs_lock);
        qemu_co_mutex_unlock(&blkp->throttled_reqs_lock);
        qemu_mutex_lock(&tg->lock);
        blkp->pending_reqs[is_write]--;
    }

    /* The I/O will be executed, so do the accounting */
    throttle_account(blkp->throttle_state, is_write, bytes);

    /* Schedule the next request */
    schedule_next_request(blk, is_write);

    qemu_mutex_unlock(&tg->lock);
}
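/* This is the entry point for throttled I/O: callers invoke it from a
 * coroutine before actually submitting the request.  Note that the bytes are
 * accounted only after any waiting is done, i.e. when the request is about
 * to be executed, and that letting one request through always attempts to
 * schedule the next queued one.
 */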
typedef struct {
    BlockBackend *blk;
    bool is_write;
} RestartData;

static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
{
    RestartData *data = opaque;
    BlockBackend *blk = data->blk;
    bool is_write = data->is_write;
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    bool empty_queue;

    empty_queue = !throttle_group_co_restart_queue(blk, is_write);

    /* If the request queue was empty then we have to take care of
     * scheduling the next one */
    if (empty_queue) {
        qemu_mutex_lock(&tg->lock);
        schedule_next_request(blk, is_write);
        qemu_mutex_unlock(&tg->lock);
    }
}
static void throttle_group_restart_queue(BlockBackend *blk, bool is_write)
{
    Coroutine *co;
    RestartData rd = {
        .blk = blk,
        .is_write = is_write
    };

    co = qemu_coroutine_create(throttle_group_restart_queue_entry, &rd);
    aio_co_enter(blk_get_aio_context(blk), co);
}
void throttle_group_restart_blk(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);

    if (blkp->throttle_state) {
        throttle_group_restart_queue(blk, 0);
        throttle_group_restart_queue(blk, 1);
    }
}
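/* Restarting both queues covers the two is_write values: 0 restarts the
 * queued read requests and 1 restarts the queued write requests.
 */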
/* Update the throttle configuration for a particular group. Similar
 * to throttle_config(), but guarantees atomicity within the
 * throttling group.
 *
 * @blk: a BlockBackend that is a member of the group
 * @cfg: the configuration to set
 */
void throttle_group_config(BlockBackend *blk, ThrottleConfig *cfg)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleTimers *tt = &blkp->throttle_timers;
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);
    /* throttle_config() cancels the timers */
    if (timer_pending(tt->timers[0])) {
        tg->any_timer_armed[0] = false;
    }
    if (timer_pending(tt->timers[1])) {
        tg->any_timer_armed[1] = false;
    }
    throttle_config(ts, tt, cfg);
    qemu_mutex_unlock(&tg->lock);

    throttle_group_restart_blk(blk);
}
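/* Since throttle_config() cancels any timer that this BlockBackend had
 * pending, the group-wide any_timer_armed[] flags are cleared first so that
 * a new timer can be armed later; the queued requests are then restarted so
 * they get re-evaluated against the new limits.
 */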
/* Get the throttle configuration from a particular group. Similar to
 * throttle_get_config(), but guarantees atomicity within the
 * throttling group.
 *
 * @blk: a BlockBackend that is a member of the group
 * @cfg: the configuration will be written here
 */
void throttle_group_get_config(BlockBackend *blk, ThrottleConfig *cfg)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);
    throttle_get_config(ts, cfg);
    qemu_mutex_unlock(&tg->lock);
}
/* ThrottleTimers callback. This wakes up a request that was waiting
 * because it had been throttled.
 *
 * @blk: the BlockBackend whose request had been throttled
 * @is_write:  the type of operation (read/write)
 */
static void timer_cb(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);

    /* The timer has just fired, so we can update the flag */
    qemu_mutex_lock(&tg->lock);
    tg->any_timer_armed[is_write] = false;
    qemu_mutex_unlock(&tg->lock);

    /* Run the request that was waiting for this timer */
    throttle_group_restart_queue(blk, is_write);
}
static void read_timer_cb(void *opaque)
{
    timer_cb(opaque, false);
}

static void write_timer_cb(void *opaque)
{
    timer_cb(opaque, true);
}
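/* read_timer_cb() and write_timer_cb() are the per-BlockBackend timer
 * callbacks registered with throttle_timers_init() below; the opaque pointer
 * they receive is the BlockBackend itself.
 */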
/* Register a BlockBackend in the throttling group, also initializing its
 * timers and updating its throttle_state pointer to point to it. If a
 * throttling group with that name does not exist yet, it will be created.
 *
 * @blk: the BlockBackend to insert
 * @groupname: the name of the group
 */
void throttle_group_register_blk(BlockBackend *blk, const char *groupname)
{
    int i;
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = throttle_group_incref(groupname);
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    int clock_type = QEMU_CLOCK_REALTIME;

    if (qtest_enabled()) {
        /* For testing block IO throttling only */
        clock_type = QEMU_CLOCK_VIRTUAL;
    }

    blkp->throttle_state = ts;

    qemu_mutex_lock(&tg->lock);
    /* If the ThrottleGroup is new set this BlockBackend as the token */
    for (i = 0; i < 2; i++) {
        if (!tg->tokens[i]) {
            tg->tokens[i] = blk;
        }
    }

    QLIST_INSERT_HEAD(&tg->head, blkp, round_robin);

    throttle_timers_init(&blkp->throttle_timers,
                         blk_get_aio_context(blk),
                         clock_type,
                         read_timer_cb,
                         write_timer_cb,
                         blk);

    qemu_mutex_unlock(&tg->lock);
}
/* Unregister a BlockBackend from its group, removing it from the list,
 * destroying the timers and setting the throttle_state pointer to NULL.
 *
 * The BlockBackend must not have pending throttled requests, so the caller has
 * to drain them first.
 *
 * The group will be destroyed if it's empty after this operation.
 *
 * @blk: the BlockBackend to remove
 */
void throttle_group_unregister_blk(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    int i;

    assert(blkp->pending_reqs[0] == 0 && blkp->pending_reqs[1] == 0);
    assert(qemu_co_queue_empty(&blkp->throttled_reqs[0]));
    assert(qemu_co_queue_empty(&blkp->throttled_reqs[1]));

    qemu_mutex_lock(&tg->lock);
    for (i = 0; i < 2; i++) {
        if (tg->tokens[i] == blk) {
            BlockBackend *token = throttle_group_next_blk(blk);
            /* Take care of the case where this is the last blk in the group */
            if (token == blk) {
                token = NULL;
            }
            tg->tokens[i] = token;
        }
    }

    /* remove the current blk from the list */
    QLIST_REMOVE(blkp, round_robin);
    throttle_timers_destroy(&blkp->throttle_timers);
    qemu_mutex_unlock(&tg->lock);

    throttle_group_unref(&tg->ts);
    blkp->throttle_state = NULL;
}
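/* The reference taken at registration time is dropped here; once the last
 * reference is gone the ThrottleGroup itself is destroyed (see
 * throttle_group_unref() above).
 */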
static void throttle_groups_init(void)
{
    qemu_mutex_init(&throttle_groups_lock);
}

block_init(throttle_groups_init);