/*
 * Dirty page rate limit implementation code
 *
 * Copyright (c) 2022 CHINA TELECOM CO.,LTD.
 *
 * Authors:
 *  Hyman Huang(黄勇) <huangy81@chinatelecom.cn>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
13 #include "qemu/osdep.h"
14 #include "qemu/main-loop.h"
15 #include "qapi/qapi-commands-migration.h"
16 #include "qapi/qmp/qdict.h"
17 #include "qapi/error.h"
18 #include "sysemu/dirtyrate.h"
19 #include "sysemu/dirtylimit.h"
20 #include "monitor/hmp.h"
21 #include "monitor/monitor.h"
22 #include "exec/memory.h"
23 #include "exec/target_page.h"
24 #include "hw/boards.h"
25 #include "sysemu/kvm.h"
27 #include "migration/misc.h"
/*
 * Dirtylimit stop working if dirty page rate error
 * value less than DIRTYLIMIT_TOLERANCE_RANGE
 */
#define DIRTYLIMIT_TOLERANCE_RANGE  25  /* MB/s */
/*
 * Plus or minus vcpu sleep time linearly if dirty
 * page rate error value percentage over
 * DIRTYLIMIT_LINEAR_ADJUSTMENT_PCT.
 * Otherwise, plus or minus a fixed vcpu sleep time.
 */
#define DIRTYLIMIT_LINEAR_ADJUSTMENT_PCT     50
/*
 * Max vcpu sleep time percentage during a cycle
 * composed of dirty ring full and sleep time.
 */
#define DIRTYLIMIT_THROTTLE_PCT_MAX 99
51 } *vcpu_dirty_rate_stat
;
/* Per-vCPU dirty page rate limit state. */
typedef struct VcpuDirtyLimitState {
    int cpu_index;   /* index of the vCPU this entry describes */
    bool enabled;    /* true while a limit is in force for this vCPU */
    /*
     * Quota dirty page rate, unit is MB/s
     * zero if not enabled.
     */
    uint64_t quota;
} VcpuDirtyLimitState;
64 VcpuDirtyLimitState
*states
;
65 /* Max cpus number configured by user */
67 /* Number of vcpu under dirtylimit */
71 /* protect dirtylimit_state */
72 static QemuMutex dirtylimit_mutex
;
74 /* dirtylimit thread quit if dirtylimit_quit is true */
75 static bool dirtylimit_quit
;
77 static void vcpu_dirty_rate_stat_collect(void)
81 int64_t period
= DIRTYLIMIT_CALC_TIME_MS
;
83 if (migrate_dirty_limit() &&
84 migration_is_active()) {
85 period
= migrate_vcpu_dirty_limit_period();
88 /* calculate vcpu dirtyrate */
89 vcpu_calculate_dirtyrate(period
,
94 for (i
= 0; i
< stat
.nvcpu
; i
++) {
95 vcpu_dirty_rate_stat
->stat
.rates
[i
].id
= i
;
96 vcpu_dirty_rate_stat
->stat
.rates
[i
].dirty_rate
=
97 stat
.rates
[i
].dirty_rate
;
103 static void *vcpu_dirty_rate_stat_thread(void *opaque
)
105 rcu_register_thread();
108 global_dirty_log_change(GLOBAL_DIRTY_LIMIT
, true);
110 while (qatomic_read(&vcpu_dirty_rate_stat
->running
)) {
111 vcpu_dirty_rate_stat_collect();
112 if (dirtylimit_in_service()) {
113 dirtylimit_process();
118 global_dirty_log_change(GLOBAL_DIRTY_LIMIT
, false);
120 rcu_unregister_thread();
124 int64_t vcpu_dirty_rate_get(int cpu_index
)
126 DirtyRateVcpu
*rates
= vcpu_dirty_rate_stat
->stat
.rates
;
127 return qatomic_read_i64(&rates
[cpu_index
].dirty_rate
);
130 void vcpu_dirty_rate_stat_start(void)
132 if (qatomic_read(&vcpu_dirty_rate_stat
->running
)) {
136 qatomic_set(&vcpu_dirty_rate_stat
->running
, 1);
137 qemu_thread_create(&vcpu_dirty_rate_stat
->thread
,
139 vcpu_dirty_rate_stat_thread
,
141 QEMU_THREAD_JOINABLE
);
144 void vcpu_dirty_rate_stat_stop(void)
146 qatomic_set(&vcpu_dirty_rate_stat
->running
, 0);
147 dirtylimit_state_unlock();
149 qemu_thread_join(&vcpu_dirty_rate_stat
->thread
);
151 dirtylimit_state_lock();
154 void vcpu_dirty_rate_stat_initialize(void)
156 MachineState
*ms
= MACHINE(qdev_get_machine());
157 int max_cpus
= ms
->smp
.max_cpus
;
159 vcpu_dirty_rate_stat
=
160 g_malloc0(sizeof(*vcpu_dirty_rate_stat
));
162 vcpu_dirty_rate_stat
->stat
.nvcpu
= max_cpus
;
163 vcpu_dirty_rate_stat
->stat
.rates
=
164 g_new0(DirtyRateVcpu
, max_cpus
);
166 vcpu_dirty_rate_stat
->running
= false;
169 void vcpu_dirty_rate_stat_finalize(void)
171 g_free(vcpu_dirty_rate_stat
->stat
.rates
);
172 vcpu_dirty_rate_stat
->stat
.rates
= NULL
;
174 g_free(vcpu_dirty_rate_stat
);
175 vcpu_dirty_rate_stat
= NULL
;
178 void dirtylimit_state_lock(void)
180 qemu_mutex_lock(&dirtylimit_mutex
);
183 void dirtylimit_state_unlock(void)
185 qemu_mutex_unlock(&dirtylimit_mutex
);
189 __attribute__((__constructor__
)) dirtylimit_mutex_init(void)
191 qemu_mutex_init(&dirtylimit_mutex
);
194 static inline VcpuDirtyLimitState
*dirtylimit_vcpu_get_state(int cpu_index
)
196 return &dirtylimit_state
->states
[cpu_index
];
199 void dirtylimit_state_initialize(void)
201 MachineState
*ms
= MACHINE(qdev_get_machine());
202 int max_cpus
= ms
->smp
.max_cpus
;
205 dirtylimit_state
= g_malloc0(sizeof(*dirtylimit_state
));
207 dirtylimit_state
->states
=
208 g_new0(VcpuDirtyLimitState
, max_cpus
);
210 for (i
= 0; i
< max_cpus
; i
++) {
211 dirtylimit_state
->states
[i
].cpu_index
= i
;
214 dirtylimit_state
->max_cpus
= max_cpus
;
215 trace_dirtylimit_state_initialize(max_cpus
);
218 void dirtylimit_state_finalize(void)
220 g_free(dirtylimit_state
->states
);
221 dirtylimit_state
->states
= NULL
;
223 g_free(dirtylimit_state
);
224 dirtylimit_state
= NULL
;
226 trace_dirtylimit_state_finalize();
229 bool dirtylimit_in_service(void)
231 return !!dirtylimit_state
;
234 bool dirtylimit_vcpu_index_valid(int cpu_index
)
236 MachineState
*ms
= MACHINE(qdev_get_machine());
238 return !(cpu_index
< 0 ||
239 cpu_index
>= ms
->smp
.max_cpus
);
/*
 * Estimate how long (in microseconds) it takes the observed peak dirty
 * rate to fill the KVM dirty ring.  max_dirtyrate is a sticky maximum
 * across calls so the estimate only ever gets tighter.
 *
 * Callers must guarantee dirtyrate > 0 on the first call, otherwise
 * the division below is by zero (both callers in this file do).
 */
static uint64_t dirtylimit_dirty_ring_full_time(uint64_t dirtyrate)
{
    static uint64_t max_dirtyrate;
    uint64_t dirty_ring_size_MiB;

    dirty_ring_size_MiB = qemu_target_pages_to_MiB(kvm_dirty_ring_size());

    if (max_dirtyrate < dirtyrate) {
        max_dirtyrate = dirtyrate;
    }

    /* MiB / (MiB/s) scaled to microseconds. */
    return dirty_ring_size_MiB * 1000000 / max_dirtyrate;
}
256 static inline bool dirtylimit_done(uint64_t quota
,
261 min
= MIN(quota
, current
);
262 max
= MAX(quota
, current
);
264 return ((max
- min
) <= DIRTYLIMIT_TOLERANCE_RANGE
) ? true : false;
268 dirtylimit_need_linear_adjustment(uint64_t quota
,
273 min
= MIN(quota
, current
);
274 max
= MAX(quota
, current
);
276 return ((max
- min
) * 100 / max
) > DIRTYLIMIT_LINEAR_ADJUSTMENT_PCT
;
279 static void dirtylimit_set_throttle(CPUState
*cpu
,
283 int64_t ring_full_time_us
= 0;
284 uint64_t sleep_pct
= 0;
285 uint64_t throttle_us
= 0;
288 cpu
->throttle_us_per_full
= 0;
292 ring_full_time_us
= dirtylimit_dirty_ring_full_time(current
);
294 if (dirtylimit_need_linear_adjustment(quota
, current
)) {
295 if (quota
< current
) {
296 sleep_pct
= (current
- quota
) * 100 / current
;
298 ring_full_time_us
* sleep_pct
/ (double)(100 - sleep_pct
);
299 cpu
->throttle_us_per_full
+= throttle_us
;
301 sleep_pct
= (quota
- current
) * 100 / quota
;
303 ring_full_time_us
* sleep_pct
/ (double)(100 - sleep_pct
);
304 cpu
->throttle_us_per_full
-= throttle_us
;
307 trace_dirtylimit_throttle_pct(cpu
->cpu_index
,
311 if (quota
< current
) {
312 cpu
->throttle_us_per_full
+= ring_full_time_us
/ 10;
314 cpu
->throttle_us_per_full
-= ring_full_time_us
/ 10;
319 * TODO: in the big kvm_dirty_ring_size case (eg: 65536, or other scenario),
320 * current dirty page rate may never reach the quota, we should stop
321 * increasing sleep time?
323 cpu
->throttle_us_per_full
= MIN(cpu
->throttle_us_per_full
,
324 ring_full_time_us
* DIRTYLIMIT_THROTTLE_PCT_MAX
);
326 cpu
->throttle_us_per_full
= MAX(cpu
->throttle_us_per_full
, 0);
329 static void dirtylimit_adjust_throttle(CPUState
*cpu
)
332 uint64_t current
= 0;
333 int cpu_index
= cpu
->cpu_index
;
335 quota
= dirtylimit_vcpu_get_state(cpu_index
)->quota
;
336 current
= vcpu_dirty_rate_get(cpu_index
);
338 if (!dirtylimit_done(quota
, current
)) {
339 dirtylimit_set_throttle(cpu
, quota
, current
);
345 void dirtylimit_process(void)
349 if (!qatomic_read(&dirtylimit_quit
)) {
350 dirtylimit_state_lock();
352 if (!dirtylimit_in_service()) {
353 dirtylimit_state_unlock();
358 if (!dirtylimit_vcpu_get_state(cpu
->cpu_index
)->enabled
) {
361 dirtylimit_adjust_throttle(cpu
);
363 dirtylimit_state_unlock();
367 void dirtylimit_change(bool start
)
370 qatomic_set(&dirtylimit_quit
, 0);
372 qatomic_set(&dirtylimit_quit
, 1);
376 void dirtylimit_set_vcpu(int cpu_index
,
380 trace_dirtylimit_set_vcpu(cpu_index
, quota
);
383 dirtylimit_state
->states
[cpu_index
].quota
= quota
;
384 if (!dirtylimit_vcpu_get_state(cpu_index
)->enabled
) {
385 dirtylimit_state
->limited_nvcpu
++;
388 dirtylimit_state
->states
[cpu_index
].quota
= 0;
389 if (dirtylimit_state
->states
[cpu_index
].enabled
) {
390 dirtylimit_state
->limited_nvcpu
--;
394 dirtylimit_state
->states
[cpu_index
].enabled
= enable
;
397 void dirtylimit_set_all(uint64_t quota
,
400 MachineState
*ms
= MACHINE(qdev_get_machine());
401 int max_cpus
= ms
->smp
.max_cpus
;
404 for (i
= 0; i
< max_cpus
; i
++) {
405 dirtylimit_set_vcpu(i
, quota
, enable
);
409 void dirtylimit_vcpu_execute(CPUState
*cpu
)
411 if (cpu
->throttle_us_per_full
) {
412 dirtylimit_state_lock();
414 if (dirtylimit_in_service() &&
415 dirtylimit_vcpu_get_state(cpu
->cpu_index
)->enabled
) {
416 dirtylimit_state_unlock();
417 trace_dirtylimit_vcpu_execute(cpu
->cpu_index
,
418 cpu
->throttle_us_per_full
);
420 g_usleep(cpu
->throttle_us_per_full
);
424 dirtylimit_state_unlock();
428 static void dirtylimit_init(void)
430 dirtylimit_state_initialize();
431 dirtylimit_change(true);
432 vcpu_dirty_rate_stat_initialize();
433 vcpu_dirty_rate_stat_start();
436 static void dirtylimit_cleanup(void)
438 vcpu_dirty_rate_stat_stop();
439 vcpu_dirty_rate_stat_finalize();
440 dirtylimit_change(false);
441 dirtylimit_state_finalize();
/*
 * dirty page rate limit is not allowed to set if migration
 * is running with dirty-limit capability enabled.
 */
static bool dirtylimit_is_allowed(void)
{
    if (migration_is_running() &&
        !migration_thread_is_self() &&
        migrate_dirty_limit() &&
        dirtylimit_in_service()) {
        return false;
    }
    return true;
}
459 void qmp_cancel_vcpu_dirty_limit(bool has_cpu_index
,
463 if (!kvm_enabled() || !kvm_dirty_ring_enabled()) {
467 if (has_cpu_index
&& !dirtylimit_vcpu_index_valid(cpu_index
)) {
468 error_setg(errp
, "incorrect cpu index specified");
472 if (!dirtylimit_is_allowed()) {
473 error_setg(errp
, "can't cancel dirty page rate limit while"
474 " migration is running");
478 if (!dirtylimit_in_service()) {
482 dirtylimit_state_lock();
485 dirtylimit_set_vcpu(cpu_index
, 0, false);
487 dirtylimit_set_all(0, false);
490 if (!dirtylimit_state
->limited_nvcpu
) {
491 dirtylimit_cleanup();
494 dirtylimit_state_unlock();
497 void hmp_cancel_vcpu_dirty_limit(Monitor
*mon
, const QDict
*qdict
)
499 int64_t cpu_index
= qdict_get_try_int(qdict
, "cpu_index", -1);
502 qmp_cancel_vcpu_dirty_limit(!!(cpu_index
!= -1), cpu_index
, &err
);
504 hmp_handle_error(mon
, err
);
508 monitor_printf(mon
, "[Please use 'info vcpu_dirty_limit' to query "
509 "dirty limit for virtual CPU]\n");
512 void qmp_set_vcpu_dirty_limit(bool has_cpu_index
,
517 if (!kvm_enabled() || !kvm_dirty_ring_enabled()) {
518 error_setg(errp
, "dirty page limit feature requires KVM with"
519 " accelerator property 'dirty-ring-size' set'");
523 if (has_cpu_index
&& !dirtylimit_vcpu_index_valid(cpu_index
)) {
524 error_setg(errp
, "incorrect cpu index specified");
528 if (!dirtylimit_is_allowed()) {
529 error_setg(errp
, "can't set dirty page rate limit while"
530 " migration is running");
535 qmp_cancel_vcpu_dirty_limit(has_cpu_index
, cpu_index
, errp
);
539 dirtylimit_state_lock();
541 if (!dirtylimit_in_service()) {
546 dirtylimit_set_vcpu(cpu_index
, dirty_rate
, true);
548 dirtylimit_set_all(dirty_rate
, true);
551 dirtylimit_state_unlock();
554 void hmp_set_vcpu_dirty_limit(Monitor
*mon
, const QDict
*qdict
)
556 int64_t dirty_rate
= qdict_get_int(qdict
, "dirty_rate");
557 int64_t cpu_index
= qdict_get_try_int(qdict
, "cpu_index", -1);
560 if (dirty_rate
< 0) {
561 error_setg(&err
, "invalid dirty page limit %" PRId64
, dirty_rate
);
565 qmp_set_vcpu_dirty_limit(!!(cpu_index
!= -1), cpu_index
, dirty_rate
, &err
);
568 hmp_handle_error(mon
, err
);
571 /* Return the max throttle time of each virtual CPU */
572 uint64_t dirtylimit_throttle_time_per_round(void)
578 if (cpu
->throttle_us_per_full
> max
) {
579 max
= cpu
->throttle_us_per_full
;
587 * Estimate average dirty ring full time of each virtaul CPU.
588 * Return 0 if guest doesn't dirty memory.
590 uint64_t dirtylimit_ring_full_time(void)
593 uint64_t curr_rate
= 0;
599 curr_rate
+= vcpu_dirty_rate_get(cpu
->cpu_index
);
603 if (!curr_rate
|| !nvcpus
) {
607 return dirtylimit_dirty_ring_full_time(curr_rate
/ nvcpus
);
610 static struct DirtyLimitInfo
*dirtylimit_query_vcpu(int cpu_index
)
612 DirtyLimitInfo
*info
= NULL
;
614 info
= g_malloc0(sizeof(*info
));
615 info
->cpu_index
= cpu_index
;
616 info
->limit_rate
= dirtylimit_vcpu_get_state(cpu_index
)->quota
;
617 info
->current_rate
= vcpu_dirty_rate_get(cpu_index
);
622 static struct DirtyLimitInfoList
*dirtylimit_query_all(void)
625 DirtyLimitInfo
*info
= NULL
;
626 DirtyLimitInfoList
*head
= NULL
, **tail
= &head
;
628 dirtylimit_state_lock();
630 if (!dirtylimit_in_service()) {
631 dirtylimit_state_unlock();
635 for (i
= 0; i
< dirtylimit_state
->max_cpus
; i
++) {
636 index
= dirtylimit_state
->states
[i
].cpu_index
;
637 if (dirtylimit_vcpu_get_state(index
)->enabled
) {
638 info
= dirtylimit_query_vcpu(index
);
639 QAPI_LIST_APPEND(tail
, info
);
643 dirtylimit_state_unlock();
648 struct DirtyLimitInfoList
*qmp_query_vcpu_dirty_limit(Error
**errp
)
650 return dirtylimit_query_all();
653 void hmp_info_vcpu_dirty_limit(Monitor
*mon
, const QDict
*qdict
)
655 DirtyLimitInfoList
*info
;
656 g_autoptr(DirtyLimitInfoList
) head
= NULL
;
659 if (!dirtylimit_in_service()) {
660 monitor_printf(mon
, "Dirty page limit not enabled!\n");
664 head
= qmp_query_vcpu_dirty_limit(&err
);
666 hmp_handle_error(mon
, err
);
670 for (info
= head
; info
!= NULL
; info
= info
->next
) {
671 monitor_printf(mon
, "vcpu[%"PRIi64
"], limit rate %"PRIi64
" (MB/s),"
672 " current rate %"PRIi64
" (MB/s)\n",
673 info
->value
->cpu_index
,
674 info
->value
->limit_rate
,
675 info
->value
->current_rate
);