/*
 * Dirtyrate implementation code
 *
 * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Authors:
 *  Chuan Zheng <zhengchuan@huawei.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "hw/core/cpu.h"
#include "qapi/error.h"
#include "exec/ramblock.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "qapi/qapi-commands-migration.h"
#include "ram.h"
#include "trace.h"
#include "dirtyrate.h"
#include "monitor/hmp.h"
#include "monitor/monitor.h"
#include "qapi/qmp/qdict.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "exec/memory.h"
#include "qemu/xxhash.h"

/*
 * total_dirty_pages is protected by the BQL and is used to count
 * dirty pages during the period between two calls of
 * memory_global_dirty_log_sync().
 */
uint64_t total_dirty_pages;

typedef struct DirtyPageRecord {
    uint64_t start_pages;
    uint64_t end_pages;
} DirtyPageRecord;

static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
static struct DirtyRateStat DirtyStat;
static DirtyRateMeasureMode dirtyrate_mode =
                DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;

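/*
 * Three measurement modes are implemented below:
 *
 *   page-sampling: hash randomly sampled pages at the start and the end
 *   of the measurement period; pages whose hash changed are counted as
 *   dirty and the ratio is scaled up to the size of the sampled RAM.
 *
 *   dirty-ring: read per-vCPU dirty page counts collected via the KVM
 *   dirty ring, yielding a per-vCPU rate and a VM-wide sum.
 *
 *   dirty-bitmap: track the global total_dirty_pages counter around a
 *   KVM dirty-bitmap log sync, yielding a VM-wide rate only.
 */
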
static int64_t dirty_stat_wait(int64_t msec, int64_t initial_time)
{
    int64_t current_time;

    current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if ((current_time - initial_time) >= msec) {
        msec = current_time - initial_time;
    } else {
        g_usleep((msec + initial_time - current_time) * 1000);
        /* g_usleep may overshoot */
        msec = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - initial_time;
    }

    return msec;
}

static inline void record_dirtypages(DirtyPageRecord *dirty_pages,
                                     CPUState *cpu, bool start)
{
    if (start) {
        dirty_pages[cpu->cpu_index].start_pages = cpu->dirty_pages;
    } else {
        dirty_pages[cpu->cpu_index].end_pages = cpu->dirty_pages;
    }
}

static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages,
                                      int64_t calc_time_ms)
{
    uint64_t increased_dirty_pages =
        dirty_pages.end_pages - dirty_pages.start_pages;

    /*
     * multiply by 1000ms/s _before_ converting down to megabytes
     * to avoid losing precision
     */
    return qemu_target_pages_to_MiB(increased_dirty_pages * 1000) /
        calc_time_ms;
}

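/*
 * Worked example (illustrative numbers, not from the original source):
 * with 4 KiB target pages, 256000 pages dirtied over a 1000 ms window
 * give qemu_target_pages_to_MiB(256000 * 1000) / 1000
 * = 1000000 MiB / 1000 ms = 1000 MiB/s.
 */
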
void global_dirty_log_change(unsigned int flag, bool start)
{
    Error *local_err = NULL;
    bool ret;

    bql_lock();
    if (start) {
        ret = memory_global_dirty_log_start(flag, &local_err);
        if (!ret) {
            error_report_err(local_err);
        }
    } else {
        memory_global_dirty_log_stop(flag);
    }
    bql_unlock();
}

/*
 * global_dirty_log_sync
 * 1. sync dirty log from kvm
 * 2. stop dirty tracking if needed.
 */
static void global_dirty_log_sync(unsigned int flag, bool one_shot)
{
    bql_lock();
    memory_global_dirty_log_sync(false);
    if (one_shot) {
        memory_global_dirty_log_stop(flag);
    }
    bql_unlock();
}

static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat)
{
    CPUState *cpu;
    int nvcpu = 0;

    CPU_FOREACH(cpu) {
        nvcpu++;
    }

    stat->nvcpu = nvcpu;
    stat->rates = g_new0(DirtyRateVcpu, nvcpu);

    return g_new0(DirtyPageRecord, nvcpu);
}

static void vcpu_dirty_stat_collect(DirtyPageRecord *records,
                                    bool start)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        record_dirtypages(records, cpu, start);
    }
}

int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms,
                                 VcpuStat *stat,
                                 unsigned int flag,
                                 bool one_shot)
{
    DirtyPageRecord *records;
    int64_t init_time_ms;
    int64_t duration;
    int64_t dirtyrate;
    int i = 0;
    unsigned int gen_id;

retry:
    init_time_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
        gen_id = cpu_list_generation_id_get();
        records = vcpu_dirty_stat_alloc(stat);
        vcpu_dirty_stat_collect(records, true);
    }

    duration = dirty_stat_wait(calc_time_ms, init_time_ms);

    global_dirty_log_sync(flag, one_shot);

    WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
        if (gen_id != cpu_list_generation_id_get()) {
            /* vCPU hotplug changed the CPU list; drop this round and retry */
            g_free(records);
            g_free(stat->rates);
            goto retry;
        }
        vcpu_dirty_stat_collect(records, false);
    }

    for (i = 0; i < stat->nvcpu; i++) {
        dirtyrate = do_calculate_dirtyrate(records[i], duration);

        stat->rates[i].id = i;
        stat->rates[i].dirty_rate = dirtyrate;

        trace_dirtyrate_do_calculate_vcpu(i, dirtyrate);
    }

    g_free(records);

    return duration;
}

static bool is_calc_time_valid(int64_t msec)
{
    if ((msec < MIN_CALC_TIME_MS) || (msec > MAX_CALC_TIME_MS)) {
        return false;
    }

    return true;
}

static bool is_sample_pages_valid(int64_t pages)
{
    return pages >= MIN_SAMPLE_PAGE_COUNT &&
           pages <= MAX_SAMPLE_PAGE_COUNT;
}

static int dirtyrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < DIRTY_RATE_STATUS__MAX);
    trace_dirtyrate_set_state(DirtyRateStatus_str(new_state));
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        return 0;
    } else {
        return -1;
    }
}

/* Decimal power of given time unit relative to one second */
static int time_unit_to_power(TimeUnit time_unit)
{
    switch (time_unit) {
    case TIME_UNIT_SECOND:
        return 0;
    case TIME_UNIT_MILLISECOND:
        return -3;
    default:
        assert(false); /* unreachable */
        return 0;
    }
}

static int64_t convert_time_unit(int64_t value, TimeUnit unit_from,
                                 TimeUnit unit_to)
{
    int power = time_unit_to_power(unit_from) -
                time_unit_to_power(unit_to);
    while (power < 0) {
        value /= 10;
        power += 1;
    }
    while (power > 0) {
        value *= 10;
        power -= 1;
    }
    return value;
}

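/*
 * Example (illustrative): convert_time_unit(5, TIME_UNIT_SECOND,
 * TIME_UNIT_MILLISECOND) yields power = 0 - (-3) = 3, so the value is
 * multiplied by 10 three times and 5000 is returned.
 */
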
static struct DirtyRateInfo *
query_dirty_rate_info(TimeUnit calc_time_unit)
{
    int i;
    int64_t dirty_rate = DirtyStat.dirty_rate;
    struct DirtyRateInfo *info = g_new0(DirtyRateInfo, 1);
    DirtyRateVcpuList *head = NULL, **tail = &head;

    info->status = CalculatingState;
    info->start_time = DirtyStat.start_time;
    info->calc_time = convert_time_unit(DirtyStat.calc_time_ms,
                                        TIME_UNIT_MILLISECOND,
                                        calc_time_unit);
    info->calc_time_unit = calc_time_unit;
    info->sample_pages = DirtyStat.sample_pages;
    info->mode = dirtyrate_mode;

    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) {
        info->has_dirty_rate = true;
        info->dirty_rate = dirty_rate;

        if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
            /*
             * set sample_pages with 0 to indicate page sampling
             * isn't enabled
             */
            info->sample_pages = 0;
            info->has_vcpu_dirty_rate = true;
            for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
                DirtyRateVcpu *rate = g_new0(DirtyRateVcpu, 1);
                rate->id = DirtyStat.dirty_ring.rates[i].id;
                rate->dirty_rate = DirtyStat.dirty_ring.rates[i].dirty_rate;
                QAPI_LIST_APPEND(tail, rate);
            }
            info->vcpu_dirty_rate = head;
        }

        if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
            info->sample_pages = 0;
        }
    }

    trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState));

    return info;
}

static void init_dirtyrate_stat(struct DirtyRateConfig config)
{
    DirtyStat.dirty_rate = -1;
    DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) / 1000;
    DirtyStat.calc_time_ms = config.calc_time_ms;
    DirtyStat.sample_pages = config.sample_pages_per_gigabytes;

    switch (config.mode) {
    case DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING:
        DirtyStat.page_sampling.total_dirty_samples = 0;
        DirtyStat.page_sampling.total_sample_count = 0;
        DirtyStat.page_sampling.total_block_mem_MB = 0;
        break;
    case DIRTY_RATE_MEASURE_MODE_DIRTY_RING:
        DirtyStat.dirty_ring.nvcpu = -1;
        DirtyStat.dirty_ring.rates = NULL;
        break;
    default:
        break;
    }
}

static void cleanup_dirtyrate_stat(struct DirtyRateConfig config)
{
    /* the last calc-dirty-rate QMP command used dirty ring mode */
    if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        g_free(DirtyStat.dirty_ring.rates);
        DirtyStat.dirty_ring.rates = NULL;
    }
}

static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
{
    DirtyStat.page_sampling.total_dirty_samples += info->sample_dirty_count;
    DirtyStat.page_sampling.total_sample_count += info->sample_pages_count;
    /* size of total pages in MB */
    DirtyStat.page_sampling.total_block_mem_MB +=
        qemu_target_pages_to_MiB(info->ramblock_pages);
}

static void update_dirtyrate(uint64_t msec)
{
    uint64_t dirtyrate;
    uint64_t total_dirty_samples = DirtyStat.page_sampling.total_dirty_samples;
    uint64_t total_sample_count = DirtyStat.page_sampling.total_sample_count;
    uint64_t total_block_mem_MB = DirtyStat.page_sampling.total_block_mem_MB;

    dirtyrate = total_dirty_samples * total_block_mem_MB *
                1000 / (total_sample_count * msec);

    DirtyStat.dirty_rate = dirtyrate;
}

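/*
 * The estimate above scales the observed dirty fraction up to the sampled
 * RAM size:
 *
 *   rate(MB/s) = (total_dirty_samples / total_sample_count)
 *                * total_block_mem_MB * 1000 / msec
 *
 * Illustrative numbers (not from the original source): 10 dirty samples
 * out of 400, 4096 MB of sampled RAM and msec = 1000 give
 * 10 * 4096 * 1000 / (400 * 1000) = 102 MB/s (integer division truncates).
 */
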
/*
 * Compute hash of a single page of size TARGET_PAGE_SIZE.
 */
static uint32_t compute_page_hash(void *ptr)
{
    size_t page_size = qemu_target_page_size();
    uint32_t i;
    uint64_t v1, v2, v3, v4;
    uint64_t res;
    const uint64_t *p = ptr;

    v1 = QEMU_XXHASH_SEED + XXH_PRIME64_1 + XXH_PRIME64_2;
    v2 = QEMU_XXHASH_SEED + XXH_PRIME64_2;
    v3 = QEMU_XXHASH_SEED + 0;
    v4 = QEMU_XXHASH_SEED - XXH_PRIME64_1;
    for (i = 0; i < page_size / 8; i += 4) {
        v1 = XXH64_round(v1, p[i + 0]);
        v2 = XXH64_round(v2, p[i + 1]);
        v3 = XXH64_round(v3, p[i + 2]);
        v4 = XXH64_round(v4, p[i + 3]);
    }
    res = XXH64_mergerounds(v1, v2, v3, v4);
    res += page_size;
    res = XXH64_avalanche(res);
    return (uint32_t)(res & UINT32_MAX);
}

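/*
 * Note: this is the xxh64 round/merge scheme from "qemu/xxhash.h" applied
 * to a whole page and truncated to 32 bits. Each loop iteration consumes
 * 32 bytes (4 lanes of 8 bytes), which divides every supported target
 * page size evenly, so no tail handling is needed.
 */
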
/*
 * get hash result for the sampled memory with length of TARGET_PAGE_SIZE
 * in ramblock, which starts from ramblock base address.
 */
static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
                                      uint64_t vfn)
{
    uint32_t hash;

    hash = compute_page_hash(info->ramblock_addr +
                             vfn * qemu_target_page_size());

    trace_get_ramblock_vfn_hash(info->idstr, vfn, hash);
    return hash;
}

static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
{
    unsigned int sample_pages_count;
    int i;
    GRand *rand;

    sample_pages_count = info->sample_pages_count;

    /* ramblock size less than one page, return success to skip this ramblock */
    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
        return true;
    }

    info->hash_result = g_try_malloc0_n(sample_pages_count,
                                        sizeof(uint32_t));
    if (!info->hash_result) {
        return false;
    }

    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
                                            sizeof(uint64_t));
    if (!info->sample_page_vfn) {
        g_free(info->hash_result);
        return false;
    }

    rand = g_rand_new();
    for (i = 0; i < sample_pages_count; i++) {
        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
                                                    info->ramblock_pages - 1);
        info->hash_result[i] = get_ramblock_vfn_hash(info,
                                                     info->sample_page_vfn[i]);
    }
    g_rand_free(rand);

    return true;
}

static void get_ramblock_dirty_info(RAMBlock *block,
                                    struct RamblockDirtyInfo *info,
                                    struct DirtyRateConfig *config)
{
    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;

    /* Right shift 30 bits to calc ramblock size in GB */
    info->sample_pages_count = (qemu_ram_get_used_length(block) *
                                sample_pages_per_gigabytes) >> 30;
    /* Right shift TARGET_PAGE_BITS to calc page count */
    info->ramblock_pages = qemu_ram_get_used_length(block) >>
                           qemu_target_page_bits();
    info->ramblock_addr = qemu_ram_get_host_addr(block);
    strcpy(info->idstr, qemu_ram_get_idstr(block));
}

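/*
 * Worked example (illustrative): a 4 GiB ramblock with the default 512
 * sample pages per GiB gets sample_pages_count = (4 GiB * 512) >> 30
 * = 2048 sampled pages, and with 4 KiB pages ramblock_pages
 * = 4 GiB >> 12 = 1048576.
 */
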
static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
{
    int i;

    if (!infos) {
        return;
    }

    for (i = 0; i < count; i++) {
        g_free(infos[i].sample_page_vfn);
        g_free(infos[i].hash_result);
    }
    g_free(infos);
}

static bool skip_sample_ramblock(RAMBlock *block)
{
    /*
     * Sample only blocks larger than MIN_RAMBLOCK_SIZE.
     */
    if (qemu_ram_get_used_length(block) < (MIN_RAMBLOCK_SIZE << 10)) {
        trace_skip_sample_ramblock(block->idstr,
                                   qemu_ram_get_used_length(block));
        return true;
    }

    return false;
}

static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
                                      struct DirtyRateConfig config,
                                      int *block_count)
{
    struct RamblockDirtyInfo *info = NULL;
    struct RamblockDirtyInfo *dinfo = NULL;
    RAMBlock *block = NULL;
    int total_count = 0;
    int index = 0;
    bool ret = false;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        total_count++;
    }

    dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo));
    if (dinfo == NULL) {
        goto out;
    }

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        if (index >= total_count) {
            break;
        }
        info = &dinfo[index];
        get_ramblock_dirty_info(block, info, &config);
        if (!save_ramblock_hash(info)) {
            goto out;
        }
        index++;
    }
    ret = true;

out:
    *block_count = index;
    *block_dinfo = dinfo;
    return ret;
}

static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
{
    uint32_t hash;
    int i;

    for (i = 0; i < info->sample_pages_count; i++) {
        hash = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
        if (hash != info->hash_result[i]) {
            trace_calc_page_dirty_rate(info->idstr, hash, info->hash_result[i]);
            info->sample_dirty_count++;
        }
    }
}

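/*
 * A sampled page whose hash changed is counted as dirty. A 32-bit hash
 * collision (or a page rewritten with identical content) can only hide a
 * dirtied page, so sampling may undercount, never overcount.
 */
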
static struct RamblockDirtyInfo *
find_block_matched(RAMBlock *block, int count,
                   struct RamblockDirtyInfo *infos)
{
    int i;

    for (i = 0; i < count; i++) {
        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
            break;
        }
    }

    if (i == count) {
        return NULL;
    }

    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
        infos[i].ramblock_pages !=
            (qemu_ram_get_used_length(block) >> qemu_target_page_bits())) {
        trace_find_page_matched(block->idstr);
        return NULL;
    }

    return &infos[i];
}

static bool compare_page_hash_info(struct RamblockDirtyInfo *info,
                                   int block_count)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    RAMBlock *block = NULL;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        block_dinfo = find_block_matched(block, block_count, info);
        if (block_dinfo == NULL) {
            continue;
        }
        calc_page_dirty_rate(block_dinfo);
        update_dirtyrate_stat(block_dinfo);
    }

    if (DirtyStat.page_sampling.total_sample_count == 0) {
        return false;
    }

    return true;
}

static inline void record_dirtypages_bitmap(DirtyPageRecord *dirty_pages,
                                            bool start)
{
    if (start) {
        dirty_pages->start_pages = total_dirty_pages;
    } else {
        dirty_pages->end_pages = total_dirty_pages;
    }
}

static inline void dirtyrate_manual_reset_protect(void)
{
    RAMBlock *block = NULL;

    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_MIGRATABLE(block) {
            memory_region_clear_dirty_bitmap(block->mr, 0,
                                             block->used_length);
        }
    }
}

static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
{
    int64_t start_time;
    DirtyPageRecord dirty_pages;
    Error *local_err = NULL;

    bql_lock();
    if (!memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE, &local_err)) {
        error_report_err(local_err);
    }

    /*
     * The first round of log sync may return all-1 bits when
     * KVM_DIRTY_LOG_INITIALLY_SET is enabled; skip it unconditionally
     * and start dirty tracking from the second round of log sync.
     */
    memory_global_dirty_log_sync(false);

    /*
     * Reset page protection manually and unconditionally; this makes
     * sure the KVM dirty log is cleared when the
     * KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE cap is enabled.
     */
    dirtyrate_manual_reset_protect();
    bql_unlock();

    record_dirtypages_bitmap(&dirty_pages, true);

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) / 1000;

    DirtyStat.calc_time_ms = dirty_stat_wait(config.calc_time_ms, start_time);

    /*
     * do two things.
     * 1. fetch dirty bitmap from kvm
     * 2. stop dirty tracking
     */
    global_dirty_log_sync(GLOBAL_DIRTY_DIRTY_RATE, true);

    record_dirtypages_bitmap(&dirty_pages, false);

    DirtyStat.dirty_rate = do_calculate_dirtyrate(dirty_pages,
                                                  DirtyStat.calc_time_ms);
}

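/*
 * In dirty-bitmap mode the rate is derived from the delta of the global
 * total_dirty_pages counter (accumulated under the BQL during dirty-log
 * sync) across the measurement window, so only a VM-wide rate is
 * available and sample_pages is reported as 0.
 */
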
static void calculate_dirtyrate_dirty_ring(struct DirtyRateConfig config)
{
    uint64_t dirtyrate = 0;
    uint64_t dirtyrate_sum = 0;
    int i = 0;

    /* start log sync */
    global_dirty_log_change(GLOBAL_DIRTY_DIRTY_RATE, true);

    DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) / 1000;

    /* calculate vcpu dirtyrate */
    DirtyStat.calc_time_ms = vcpu_calculate_dirtyrate(config.calc_time_ms,
                                                      &DirtyStat.dirty_ring,
                                                      GLOBAL_DIRTY_DIRTY_RATE,
                                                      true);

    /* calculate vm dirtyrate */
    for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
        dirtyrate = DirtyStat.dirty_ring.rates[i].dirty_rate;
        dirtyrate_sum += dirtyrate;
    }

    DirtyStat.dirty_rate = dirtyrate_sum;
}

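/*
 * The VM-wide rate in dirty-ring mode is the sum of the per-vCPU rates,
 * each derived from the vCPU's dirty_pages counter fed by the KVM dirty
 * ring.
 */
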
static void calculate_dirtyrate_sample_vm(struct DirtyRateConfig config)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    int block_count = 0;
    int64_t initial_time;

    rcu_read_lock();
    initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) / 1000;
    if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) {
        goto out;
    }
    rcu_read_unlock();

    DirtyStat.calc_time_ms = dirty_stat_wait(config.calc_time_ms,
                                             initial_time);

    rcu_read_lock();
    if (!compare_page_hash_info(block_dinfo, block_count)) {
        goto out;
    }

    update_dirtyrate(DirtyStat.calc_time_ms);

out:
    rcu_read_unlock();
    free_ramblock_dirty_info(block_dinfo, block_count);
}

static void calculate_dirtyrate(struct DirtyRateConfig config)
{
    if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
        calculate_dirtyrate_dirty_bitmap(config);
    } else if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        calculate_dirtyrate_dirty_ring(config);
    } else {
        calculate_dirtyrate_sample_vm(config);
    }

    trace_dirtyrate_calculate(DirtyStat.dirty_rate);
}

void *get_dirtyrate_thread(void *arg)
{
    struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg;
    int ret;

    rcu_register_thread();

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED,
                              DIRTY_RATE_STATUS_MEASURING);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
        return NULL;
    }

    calculate_dirtyrate(config);

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING,
                              DIRTY_RATE_STATUS_MEASURED);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
    }

    rcu_unregister_thread();
    return NULL;
}

void qmp_calc_dirty_rate(int64_t calc_time,
                         bool has_calc_time_unit,
                         TimeUnit calc_time_unit,
                         bool has_sample_pages,
                         int64_t sample_pages,
                         bool has_mode,
                         DirtyRateMeasureMode mode,
                         Error **errp)
{
    static struct DirtyRateConfig config;
    QemuThread thread;
    int ret;

    /*
     * If the dirty rate is already being measured, don't attempt to start.
     */
    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURING) {
        error_setg(errp, "the dirty rate is already being measured.");
        return;
    }

    int64_t calc_time_ms = convert_time_unit(
        calc_time,
        has_calc_time_unit ? calc_time_unit : TIME_UNIT_SECOND,
        TIME_UNIT_MILLISECOND
    );

    if (!is_calc_time_valid(calc_time_ms)) {
        error_setg(errp, "Calculation time is out of range [%dms, %dms].",
                   MIN_CALC_TIME_MS, MAX_CALC_TIME_MS);
        return;
    }

    if (!has_mode) {
        mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
    }

    if (has_sample_pages && mode != DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING) {
        error_setg(errp, "sample-pages is used only in page-sampling mode");
        return;
    }

    if (has_sample_pages) {
        if (!is_sample_pages_valid(sample_pages)) {
            error_setg(errp, "sample-pages is out of range[%d, %d].",
                       MIN_SAMPLE_PAGE_COUNT,
                       MAX_SAMPLE_PAGE_COUNT);
            return;
        }
    } else {
        sample_pages = DIRTYRATE_DEFAULT_SAMPLE_PAGES;
    }

    /*
     * dirty ring mode only works when kvm dirty ring is enabled;
     * dirty bitmap mode, conversely, only works when it is disabled.
     */
    if (((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) &&
         !kvm_dirty_ring_enabled()) ||
        ((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) &&
         kvm_dirty_ring_enabled())) {
        error_setg(errp, "mode %s is not enabled, use other method instead.",
                   DirtyRateMeasureMode_str(mode));
        return;
    }

    /*
     * Init calculation state as unstarted.
     */
    ret = dirtyrate_set_state(&CalculatingState, CalculatingState,
                              DIRTY_RATE_STATUS_UNSTARTED);
    if (ret == -1) {
        error_setg(errp, "init dirty rate calculation state failed.");
        return;
    }

    config.calc_time_ms = calc_time_ms;
    config.sample_pages_per_gigabytes = sample_pages;
    config.mode = mode;

    cleanup_dirtyrate_stat(config);

    /*
     * update dirty rate mode so that we can figure out what mode has
     * been used in last calculation
     */
    dirtyrate_mode = mode;

    init_dirtyrate_stat(config);

    qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread,
                       (void *)&config, QEMU_THREAD_DETACHED);
}

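/*
 * Example QMP usage (illustrative session, not part of the original
 * source):
 *
 *   {"execute": "calc-dirty-rate",
 *    "arguments": {"calc-time": 1, "mode": "page-sampling"}}
 *   ... wait at least the calc-time ...
 *   {"execute": "query-dirty-rate"}
 *
 * Once the detached worker thread finishes, query-dirty-rate reports
 * status "measured" and the dirty-rate in MB/s.
 */
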
struct DirtyRateInfo *qmp_query_dirty_rate(bool has_calc_time_unit,
                                           TimeUnit calc_time_unit,
                                           Error **errp)
{
    return query_dirty_rate_info(
        has_calc_time_unit ? calc_time_unit : TIME_UNIT_SECOND);
}

void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict)
{
    DirtyRateInfo *info = query_dirty_rate_info(TIME_UNIT_SECOND);

    monitor_printf(mon, "Status: %s\n",
                   DirtyRateStatus_str(info->status));
    monitor_printf(mon, "Start Time: %"PRIi64" (s)\n",
                   info->start_time);
    if (info->mode == DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING) {
        monitor_printf(mon, "Sample Pages: %"PRIu64" (per GB)\n",
                       info->sample_pages);
    }
    monitor_printf(mon, "Period: %"PRIi64" (sec)\n",
                   info->calc_time);
    monitor_printf(mon, "Mode: %s\n",
                   DirtyRateMeasureMode_str(info->mode));
    monitor_printf(mon, "Dirty rate: ");
    if (info->has_dirty_rate) {
        monitor_printf(mon, "%"PRIi64" (MB/s)\n", info->dirty_rate);
        if (info->has_vcpu_dirty_rate) {
            DirtyRateVcpuList *rate, *head = info->vcpu_dirty_rate;
            for (rate = head; rate != NULL; rate = rate->next) {
                monitor_printf(mon, "vcpu[%"PRIi64"], Dirty rate: %"PRIi64
                               " (MB/s)\n", rate->value->id,
                               rate->value->dirty_rate);
            }
        }
    } else {
        monitor_printf(mon, "(not ready)\n");
    }

    qapi_free_DirtyRateVcpuList(info->vcpu_dirty_rate);
    g_free(info);
}

void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict)
{
    int64_t sec = qdict_get_try_int(qdict, "second", 0);
    int64_t sample_pages = qdict_get_try_int(qdict, "sample_pages_per_GB", -1);
    bool has_sample_pages = (sample_pages != -1);
    bool dirty_ring = qdict_get_try_bool(qdict, "dirty_ring", false);
    bool dirty_bitmap = qdict_get_try_bool(qdict, "dirty_bitmap", false);
    DirtyRateMeasureMode mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
    Error *err = NULL;

    if (!sec) {
        monitor_printf(mon, "Incorrect period length specified!\n");
        return;
    }

    if (dirty_ring && dirty_bitmap) {
        monitor_printf(mon, "Either dirty ring or dirty bitmap "
                       "can be specified!\n");
        return;
    }

    if (dirty_bitmap) {
        mode = DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP;
    } else if (dirty_ring) {
        mode = DIRTY_RATE_MEASURE_MODE_DIRTY_RING;
    }

    qmp_calc_dirty_rate(sec, /* calc-time */
                        false, TIME_UNIT_SECOND, /* calc-time-unit */
                        has_sample_pages, sample_pages,
                        true, mode,
                        &err);
    if (err) {
        hmp_handle_error(mon, err);
        return;
    }

    monitor_printf(mon, "Starting dirty rate measurement with period %"PRIi64
                   " seconds\n", sec);
    monitor_printf(mon, "[Please use 'info dirty_rate' to check results]\n");
}