/*
 * Dirtyrate implement code
 *
 * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Authors:
 *  Chuan Zheng <zhengchuan@huawei.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
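
/*
 * Overview: this file implements the calc-dirty-rate machinery in three
 * measurement modes, selected by DirtyRateMeasureMode:
 *   - page-sampling: hash randomly sampled guest pages before and after
 *     the calculation period and compare the hashes.
 *   - dirty-ring:    read per-vCPU dirty page counters collected via the
 *     KVM dirty ring.
 *   - dirty-bitmap:  diff the global dirty page count collected via
 *     dirty bitmap log sync.
 */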
13 #include "qemu/osdep.h"
14 #include "qemu/error-report.h"
15 #include "hw/core/cpu.h"
16 #include "qapi/error.h"
17 #include "exec/ramblock.h"
18 #include "exec/target_page.h"
19 #include "qemu/rcu_queue.h"
20 #include "qemu/main-loop.h"
21 #include "qapi/qapi-commands-migration.h"
24 #include "dirtyrate.h"
25 #include "monitor/hmp.h"
26 #include "monitor/monitor.h"
27 #include "qapi/qmp/qdict.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/runstate.h"
30 #include "exec/memory.h"
31 #include "qemu/xxhash.h"
/*
 * total_dirty_pages is protected by BQL and is used
 * to stat dirty pages during the period of two
 * memory_global_dirty_log_sync calls.
 */
uint64_t total_dirty_pages;
typedef struct DirtyPageRecord {
    uint64_t start_pages;
    uint64_t end_pages;
} DirtyPageRecord;
static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
static struct DirtyRateStat DirtyStat;
static DirtyRateMeasureMode dirtyrate_mode =
                DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
static int64_t dirty_stat_wait(int64_t msec, int64_t initial_time)
{
    int64_t current_time;

    current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if ((current_time - initial_time) >= msec) {
        msec = current_time - initial_time;
    } else {
        g_usleep((msec + initial_time - current_time) * 1000);
        /* g_usleep may overshoot */
        msec = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - initial_time;
    }

    return msec;
}
static inline void record_dirtypages(DirtyPageRecord *dirty_pages,
                                     CPUState *cpu, bool start)
{
    if (start) {
        dirty_pages[cpu->cpu_index].start_pages = cpu->dirty_pages;
    } else {
        dirty_pages[cpu->cpu_index].end_pages = cpu->dirty_pages;
    }
}
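
/*
 * Worked example for the calculation below, assuming a 4 KiB target page
 * size: 25600 pages dirtied over a 1000 ms period is 25600 * 4 KiB =
 * 100 MiB of dirtied memory, so
 * qemu_target_pages_to_MiB(25600 * 1000) / 1000 = 100 MB/s.
 */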
static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages,
                                      int64_t calc_time_ms)
{
    uint64_t increased_dirty_pages =
        dirty_pages.end_pages - dirty_pages.start_pages;

    /*
     * multiply by 1000ms/s _before_ converting down to megabytes
     * to avoid losing precision
     */
    return qemu_target_pages_to_MiB(increased_dirty_pages * 1000) /
           calc_time_ms;
}
void global_dirty_log_change(unsigned int flag, bool start)
{
    Error *local_err = NULL;
    bool ret;

    bql_lock();
    if (start) {
        ret = memory_global_dirty_log_start(flag, &local_err);
        if (!ret) {
            error_report_err(local_err);
        }
    } else {
        memory_global_dirty_log_stop(flag);
    }
    bql_unlock();
}
/*
 * global_dirty_log_sync
 * 1. sync dirty log from kvm
 * 2. stop dirty tracking if needed.
 */
static void global_dirty_log_sync(unsigned int flag, bool one_shot)
{
    bql_lock();
    memory_global_dirty_log_sync(false);
    if (one_shot) {
        memory_global_dirty_log_stop(flag);
    }
    bql_unlock();
}
static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat)
{
    CPUState *cpu;
    int nvcpu = 0;

    CPU_FOREACH(cpu) {
        nvcpu++;
    }

    stat->nvcpu = nvcpu;
    stat->rates = g_new0(DirtyRateVcpu, nvcpu);

    return g_new0(DirtyPageRecord, nvcpu);
}
static void vcpu_dirty_stat_collect(DirtyPageRecord *records,
                                    bool start)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        record_dirtypages(records, cpu, start);
    }
}
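
/*
 * Per-vCPU measurement flow used by the dirty-ring mode: snapshot the
 * per-vCPU dirty page counters, wait for the calculation period, sync
 * (and optionally stop) dirty logging, snapshot the counters again, and
 * turn each vCPU's page delta into a rate over the measured duration.
 * The cpu list generation id guards against vCPU hotplug during the wait.
 */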
int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms,
                                 VcpuStat *stat,
                                 unsigned int flag,
                                 bool one_shot)
{
    DirtyPageRecord *records;
    int64_t init_time_ms;
    int64_t duration;
    int64_t dirtyrate;
    int i = 0;
    unsigned int gen_id;

retry:
    init_time_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
        gen_id = cpu_list_generation_id_get();
        records = vcpu_dirty_stat_alloc(stat);
        vcpu_dirty_stat_collect(records, true);
    }

    duration = dirty_stat_wait(calc_time_ms, init_time_ms);

    global_dirty_log_sync(flag, one_shot);

    WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
        if (gen_id != cpu_list_generation_id_get()) {
            /* vCPU list changed during the wait: discard and retry */
            g_free(records);
            g_free(stat->rates);
            goto retry;
        }
        vcpu_dirty_stat_collect(records, false);
    }

    for (i = 0; i < stat->nvcpu; i++) {
        dirtyrate = do_calculate_dirtyrate(records[i], duration);

        stat->rates[i].id = i;
        stat->rates[i].dirty_rate = dirtyrate;

        trace_dirtyrate_do_calculate_vcpu(i, dirtyrate);
    }

    g_free(records);

    return duration;
}
static bool is_calc_time_valid(int64_t msec)
{
    if ((msec < MIN_CALC_TIME_MS) || (msec > MAX_CALC_TIME_MS)) {
        return false;
    }

    return true;
}
static bool is_sample_pages_valid(int64_t pages)
{
    return pages >= MIN_SAMPLE_PAGE_COUNT &&
           pages <= MAX_SAMPLE_PAGE_COUNT;
}
static int dirtyrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < DIRTY_RATE_STATUS__MAX);
    trace_dirtyrate_set_state(DirtyRateStatus_str(new_state));
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        return 0;
    } else {
        return -1;
    }
}
/* Decimal power of given time unit relative to one second */
static int time_unit_to_power(TimeUnit time_unit)
{
    switch (time_unit) {
    case TIME_UNIT_SECOND:
        return 0;
    case TIME_UNIT_MILLISECOND:
        return -3;
    default:
        assert(false); /* unreachable */
        return 0;
    }
}
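
/*
 * Worked example for convert_time_unit() below: converting 60 seconds to
 * milliseconds gives power = 0 - (-3) = 3, so the value is multiplied by
 * 10 three times, yielding 60000 ms. Converting the other way divides by
 * 10 three times and truncates toward zero.
 */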
static int64_t convert_time_unit(int64_t value, TimeUnit unit_from,
                                 TimeUnit unit_to)
{
    int power = time_unit_to_power(unit_from) -
                time_unit_to_power(unit_to);
    while (power < 0) {
        value /= 10;
        power += 1;
    }
    while (power > 0) {
        value *= 10;
        power -= 1;
    }
    return value;
}
static struct DirtyRateInfo *
query_dirty_rate_info(TimeUnit calc_time_unit)
{
    int i;
    int64_t dirty_rate = DirtyStat.dirty_rate;
    struct DirtyRateInfo *info = g_new0(DirtyRateInfo, 1);
    DirtyRateVcpuList *head = NULL, **tail = &head;

    info->status = CalculatingState;
    info->start_time = DirtyStat.start_time;
    info->calc_time = convert_time_unit(DirtyStat.calc_time_ms,
                                        TIME_UNIT_MILLISECOND,
                                        calc_time_unit);
    info->calc_time_unit = calc_time_unit;
    info->sample_pages = DirtyStat.sample_pages;
    info->mode = dirtyrate_mode;

    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) {
        info->has_dirty_rate = true;
        info->dirty_rate = dirty_rate;

        if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
            /*
             * set sample_pages with 0 to indicate page sampling
             * isn't enabled
             */
            info->sample_pages = 0;
            info->has_vcpu_dirty_rate = true;
            for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
                DirtyRateVcpu *rate = g_new0(DirtyRateVcpu, 1);
                rate->id = DirtyStat.dirty_ring.rates[i].id;
                rate->dirty_rate = DirtyStat.dirty_ring.rates[i].dirty_rate;
                QAPI_LIST_APPEND(tail, rate);
            }
            info->vcpu_dirty_rate = head;
        }

        if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
            info->sample_pages = 0;
        }
    }

    trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState));

    return info;
}
static void init_dirtyrate_stat(struct DirtyRateConfig config)
{
    DirtyStat.dirty_rate = -1;
    DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) / 1000;
    DirtyStat.calc_time_ms = config.calc_time_ms;
    DirtyStat.sample_pages = config.sample_pages_per_gigabytes;

    switch (config.mode) {
    case DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING:
        DirtyStat.page_sampling.total_dirty_samples = 0;
        DirtyStat.page_sampling.total_sample_count = 0;
        DirtyStat.page_sampling.total_block_mem_MB = 0;
        break;
    case DIRTY_RATE_MEASURE_MODE_DIRTY_RING:
        DirtyStat.dirty_ring.nvcpu = -1;
        DirtyStat.dirty_ring.rates = NULL;
        break;
    default:
        break;
    }
}
static void cleanup_dirtyrate_stat(struct DirtyRateConfig config)
{
    /* last calc-dirty-rate qmp used dirty ring mode */
    if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        free(DirtyStat.dirty_ring.rates);
        DirtyStat.dirty_ring.rates = NULL;
    }
}
static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
{
    DirtyStat.page_sampling.total_dirty_samples += info->sample_dirty_count;
    DirtyStat.page_sampling.total_sample_count += info->sample_pages_count;
    /* size of total pages in MB */
    DirtyStat.page_sampling.total_block_mem_MB +=
        qemu_target_pages_to_MiB(info->ramblock_pages);
}
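
/*
 * Worked example for the page-sampling rate below: with 4096 MB of
 * sampled ramblock memory, 2048 sampled pages in total and 128 of them
 * found dirty after a 1000 ms period, the estimated rate is
 * 128 * 4096 * 1000 / (2048 * 1000) = 256 MB/s.
 */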
static void update_dirtyrate(uint64_t msec)
{
    uint64_t dirtyrate;
    uint64_t total_dirty_samples = DirtyStat.page_sampling.total_dirty_samples;
    uint64_t total_sample_count = DirtyStat.page_sampling.total_sample_count;
    uint64_t total_block_mem_MB = DirtyStat.page_sampling.total_block_mem_MB;

    dirtyrate = total_dirty_samples * total_block_mem_MB *
                1000 / (total_sample_count * msec);

    DirtyStat.dirty_rate = dirtyrate;
}
/*
 * Compute hash of a single page of size TARGET_PAGE_SIZE.
 */
static uint32_t compute_page_hash(void *ptr)
{
    size_t page_size = qemu_target_page_size();
    uint32_t i;
    uint64_t v1, v2, v3, v4;
    uint64_t res;
    const uint64_t *p = ptr;

    v1 = QEMU_XXHASH_SEED + XXH_PRIME64_1 + XXH_PRIME64_2;
    v2 = QEMU_XXHASH_SEED + XXH_PRIME64_2;
    v3 = QEMU_XXHASH_SEED + 0;
    v4 = QEMU_XXHASH_SEED - XXH_PRIME64_1;
    for (i = 0; i < page_size / 8; i += 4) {
        v1 = XXH64_round(v1, p[i + 0]);
        v2 = XXH64_round(v2, p[i + 1]);
        v3 = XXH64_round(v3, p[i + 2]);
        v4 = XXH64_round(v4, p[i + 3]);
    }
    res = XXH64_mergerounds(v1, v2, v3, v4);
    res += page_size;
    res = XXH64_avalanche(res);
    return (uint32_t)(res & UINT32_MAX);
}
/*
 * get hash result for the sampled memory with length of TARGET_PAGE_SIZE
 * in ramblock, which starts from ramblock base address.
 */
static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
                                      uint64_t vfn)
{
    uint32_t hash;

    hash = compute_page_hash(info->ramblock_addr +
                             vfn * qemu_target_page_size());

    trace_get_ramblock_vfn_hash(info->idstr, vfn, hash);
    return hash;
}
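
/*
 * Page-sampling bookkeeping: for every sampled ramblock a random set of
 * virtual frame numbers is drawn and the hash of each corresponding page
 * is recorded here. After the calculation period the same pages are
 * hashed again (calc_page_dirty_rate below) and every mismatch counts as
 * one dirtied sample page.
 */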
static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
{
    unsigned int sample_pages_count;
    GRand *rand;
    int i;

    sample_pages_count = info->sample_pages_count;

    /* ramblock size less than one page, return success to skip this ramblock */
    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
        return true;
    }

    info->hash_result = g_try_malloc0_n(sample_pages_count,
                                        sizeof(uint32_t));
    if (!info->hash_result) {
        return false;
    }

    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
                                            sizeof(uint64_t));
    if (!info->sample_page_vfn) {
        g_free(info->hash_result);
        return false;
    }

    rand = g_rand_new();
    for (i = 0; i < sample_pages_count; i++) {
        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
                                                    info->ramblock_pages - 1);
        info->hash_result[i] = get_ramblock_vfn_hash(info,
                                                     info->sample_page_vfn[i]);
    }
    g_rand_free(rand);

    return true;
}
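
/*
 * Worked example for the sample count computed below: a 4 GiB ramblock
 * with 512 sample pages per GiB yields (4 GiB * 512) >> 30 = 2048 sampled
 * pages. The value 512 is only illustrative; the actual value comes from
 * config->sample_pages_per_gigabytes.
 */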
static void get_ramblock_dirty_info(RAMBlock *block,
                                    struct RamblockDirtyInfo *info,
                                    struct DirtyRateConfig *config)
{
    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;

    /* Right shift 30 bits to calc ramblock size in GB */
    info->sample_pages_count = (qemu_ram_get_used_length(block) *
                                sample_pages_per_gigabytes) >> 30;
    /* Right shift TARGET_PAGE_BITS to calc page count */
    info->ramblock_pages = qemu_ram_get_used_length(block) >>
                           qemu_target_page_bits();
    info->ramblock_addr = qemu_ram_get_host_addr(block);
    strcpy(info->idstr, qemu_ram_get_idstr(block));
}
static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
{
    int i;

    if (!infos) {
        return;
    }

    for (i = 0; i < count; i++) {
        g_free(infos[i].sample_page_vfn);
        g_free(infos[i].hash_result);
    }
    g_free(infos);
}
static bool skip_sample_ramblock(RAMBlock *block)
{
    /*
     * Sample only blocks larger than MIN_RAMBLOCK_SIZE.
     */
    if (qemu_ram_get_used_length(block) < (MIN_RAMBLOCK_SIZE << 10)) {
        trace_skip_sample_ramblock(block->idstr,
                                   qemu_ram_get_used_length(block));
        return true;
    }

    return false;
}
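
/*
 * record_ramblock_hash_info() walks the migratable ramblocks twice: a
 * first pass counts how many blocks will be sampled so the
 * RamblockDirtyInfo array can be sized, and a second pass fills each
 * entry and records the sampled page hashes. On failure the partially
 * built array is still reported through block_count/block_dinfo so the
 * caller can free it.
 */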
static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
                                      struct DirtyRateConfig config,
                                      int *block_count)
{
    struct RamblockDirtyInfo *info = NULL;
    struct RamblockDirtyInfo *dinfo = NULL;
    RAMBlock *block = NULL;
    int total_count = 0;
    int index = 0;
    bool ret = false;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        total_count++;
    }

    dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo));
    if (dinfo == NULL) {
        goto out;
    }

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        if (index >= total_count) {
            break;
        }
        info = &dinfo[index];
        get_ramblock_dirty_info(block, info, &config);
        if (!save_ramblock_hash(info)) {
            goto out;
        }
        index++;
    }
    ret = true;

out:
    *block_count = index;
    *block_dinfo = dinfo;
    return ret;
}
static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
{
    uint32_t hash;
    int i;

    for (i = 0; i < info->sample_pages_count; i++) {
        hash = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
        if (hash != info->hash_result[i]) {
            trace_calc_page_dirty_rate(info->idstr, hash, info->hash_result[i]);
            info->sample_dirty_count++;
        }
    }
}
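
/*
 * A ramblock recorded before the calculation period is only reused if it
 * can still be matched after the period: same idstr, same host address
 * and same page count. Blocks that were resized or replaced in between
 * are skipped rather than compared against stale hashes.
 */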
static struct RamblockDirtyInfo *
find_block_matched(RAMBlock *block, int count,
                   struct RamblockDirtyInfo *infos)
{
    int i;

    for (i = 0; i < count; i++) {
        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
            break;
        }
    }

    if (i == count) {
        return NULL;
    }

    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
        infos[i].ramblock_pages !=
            (qemu_ram_get_used_length(block) >> qemu_target_page_bits())) {
        trace_find_page_matched(block->idstr);
        return NULL;
    }

    return &infos[i];
}
static bool compare_page_hash_info(struct RamblockDirtyInfo *info,
                                   int block_count)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    RAMBlock *block = NULL;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        block_dinfo = find_block_matched(block, block_count, info);
        if (block_dinfo == NULL) {
            continue;
        }
        calc_page_dirty_rate(block_dinfo);
        update_dirtyrate_stat(block_dinfo);
    }

    if (DirtyStat.page_sampling.total_sample_count == 0) {
        return false;
    }

    return true;
}
static inline void record_dirtypages_bitmap(DirtyPageRecord *dirty_pages,
                                            bool start)
{
    if (start) {
        dirty_pages->start_pages = total_dirty_pages;
    } else {
        dirty_pages->end_pages = total_dirty_pages;
    }
}
static inline void dirtyrate_manual_reset_protect(void)
{
    RAMBlock *block = NULL;

    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_MIGRATABLE(block) {
            memory_region_clear_dirty_bitmap(block->mr, 0,
                                             block->used_length);
        }
    }
}
static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
{
    int64_t start_time;
    DirtyPageRecord dirty_pages;
    Error *local_err = NULL;

    bql_lock();
    if (!memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE, &local_err)) {
        error_report_err(local_err);
    }

    /*
     * The first round of log sync may return all-1 bits when
     * KVM_DIRTY_LOG_INITIALLY_SET is enabled; skip it unconditionally
     * and start dirty tracking from the second round of log sync.
     */
    memory_global_dirty_log_sync(false);

    /*
     * Reset page protection manually and unconditionally.
     * This makes sure the kvm dirty log is cleared if the
     * KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE cap is enabled.
     */
    dirtyrate_manual_reset_protect();
    bql_unlock();

    record_dirtypages_bitmap(&dirty_pages, true);

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) / 1000;

    DirtyStat.calc_time_ms = dirty_stat_wait(config.calc_time_ms, start_time);

    /*
     * Do two things:
     * 1. fetch dirty bitmap from kvm
     * 2. stop dirty tracking
     */
    global_dirty_log_sync(GLOBAL_DIRTY_DIRTY_RATE, true);

    record_dirtypages_bitmap(&dirty_pages, false);

    DirtyStat.dirty_rate = do_calculate_dirtyrate(dirty_pages,
                                                  DirtyStat.calc_time_ms);
}
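
/*
 * Dirty-ring mode: each vCPU reports its own dirty page count through the
 * KVM dirty ring, so the per-vCPU rates computed by
 * vcpu_calculate_dirtyrate() are summed to obtain the VM-wide rate.
 */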
static void calculate_dirtyrate_dirty_ring(struct DirtyRateConfig config)
{
    uint64_t dirtyrate = 0;
    uint64_t dirtyrate_sum = 0;
    int i = 0;

    global_dirty_log_change(GLOBAL_DIRTY_DIRTY_RATE, true);

    DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) / 1000;

    /* calculate vcpu dirtyrate */
    DirtyStat.calc_time_ms = vcpu_calculate_dirtyrate(config.calc_time_ms,
                                                      &DirtyStat.dirty_ring,
                                                      GLOBAL_DIRTY_DIRTY_RATE,
                                                      true);

    /* calculate vm dirtyrate */
    for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
        dirtyrate = DirtyStat.dirty_ring.rates[i].dirty_rate;
        DirtyStat.dirty_ring.rates[i].dirty_rate = dirtyrate;
        dirtyrate_sum += dirtyrate;
    }

    DirtyStat.dirty_rate = dirtyrate_sum;
}
static void calculate_dirtyrate_sample_vm(struct DirtyRateConfig config)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    int block_count = 0;
    int64_t initial_time;

    rcu_read_lock();
    initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) / 1000;
    if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) {
        goto out;
    }
    rcu_read_unlock();

    DirtyStat.calc_time_ms = dirty_stat_wait(config.calc_time_ms,
                                             initial_time);

    rcu_read_lock();
    if (!compare_page_hash_info(block_dinfo, block_count)) {
        goto out;
    }

    update_dirtyrate(DirtyStat.calc_time_ms);

out:
    rcu_read_unlock();
    free_ramblock_dirty_info(block_dinfo, block_count);
}
static void calculate_dirtyrate(struct DirtyRateConfig config)
{
    if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
        calculate_dirtyrate_dirty_bitmap(config);
    } else if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        calculate_dirtyrate_dirty_ring(config);
    } else {
        calculate_dirtyrate_sample_vm(config);
    }

    trace_dirtyrate_calculate(DirtyStat.dirty_rate);
}
void *get_dirtyrate_thread(void *arg)
{
    struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg;
    int ret;

    rcu_register_thread();

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED,
                              DIRTY_RATE_STATUS_MEASURING);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
        return NULL;
    }

    calculate_dirtyrate(config);

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING,
                              DIRTY_RATE_STATUS_MEASURED);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
    }

    rcu_unregister_thread();
    return NULL;
}
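
/*
 * For reference, a typical QMP exchange with this command looks roughly
 * like the following (argument values are illustrative):
 *
 *   { "execute": "calc-dirty-rate",
 *     "arguments": { "calc-time": 1, "mode": "page-sampling" } }
 *   { "execute": "query-dirty-rate" }
 *
 * query-dirty-rate reports the status and, once it reaches "measured",
 * the dirty rate in MB/s.
 */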
void qmp_calc_dirty_rate(int64_t calc_time,
                         bool has_calc_time_unit,
                         TimeUnit calc_time_unit,
                         bool has_sample_pages,
                         int64_t sample_pages,
                         bool has_mode,
                         DirtyRateMeasureMode mode,
                         Error **errp)
{
    static struct DirtyRateConfig config;
    QemuThread thread;
    int ret;

    /*
     * If the dirty rate is already being measured, don't attempt to start.
     */
    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURING) {
        error_setg(errp, "the dirty rate is already being measured.");
        return;
    }

    int64_t calc_time_ms = convert_time_unit(
        calc_time,
        has_calc_time_unit ? calc_time_unit : TIME_UNIT_SECOND,
        TIME_UNIT_MILLISECOND
    );

    if (!is_calc_time_valid(calc_time_ms)) {
        error_setg(errp, "Calculation time is out of range [%dms, %dms].",
                         MIN_CALC_TIME_MS, MAX_CALC_TIME_MS);
        return;
    }

    if (!has_mode) {
        mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
    }

    if (has_sample_pages && mode != DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING) {
        error_setg(errp, "sample-pages is used only in page-sampling mode");
        return;
    }

    if (has_sample_pages) {
        if (!is_sample_pages_valid(sample_pages)) {
            error_setg(errp, "sample-pages is out of range[%d, %d].",
                             MIN_SAMPLE_PAGE_COUNT,
                             MAX_SAMPLE_PAGE_COUNT);
            return;
        }
    } else {
        sample_pages = DIRTYRATE_DEFAULT_SAMPLE_PAGES;
    }

    /*
     * dirty ring mode only works when kvm dirty ring is enabled.
     * on the contrary, dirty bitmap mode is not.
     */
    if (((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) &&
         !kvm_dirty_ring_enabled()) ||
        ((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) &&
         kvm_dirty_ring_enabled())) {
        error_setg(errp, "mode %s is not enabled, use other method instead.",
                         DirtyRateMeasureMode_str(mode));
        return;
    }

    /*
     * Init calculation state as unstarted.
     */
    ret = dirtyrate_set_state(&CalculatingState, CalculatingState,
                              DIRTY_RATE_STATUS_UNSTARTED);
    if (ret == -1) {
        error_setg(errp, "init dirty rate calculation state failed.");
        return;
    }

    config.calc_time_ms = calc_time_ms;
    config.sample_pages_per_gigabytes = sample_pages;
    config.mode = mode;

    cleanup_dirtyrate_stat(config);

    /*
     * update dirty rate mode so that we can figure out what mode has
     * been used in last calculation
     */
    dirtyrate_mode = mode;

    init_dirtyrate_stat(config);

    qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread,
                       (void *)&config, QEMU_THREAD_DETACHED);
}
struct DirtyRateInfo *qmp_query_dirty_rate(bool has_calc_time_unit,
                                           TimeUnit calc_time_unit,
                                           Error **errp)
{
    return query_dirty_rate_info(
        has_calc_time_unit ? calc_time_unit : TIME_UNIT_SECOND);
}
void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict)
{
    DirtyRateInfo *info = query_dirty_rate_info(TIME_UNIT_SECOND);

    monitor_printf(mon, "Status: %s\n",
                   DirtyRateStatus_str(info->status));
    monitor_printf(mon, "Start Time: %"PRIi64" (ms)\n",
                   info->start_time);
    if (info->mode == DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING) {
        monitor_printf(mon, "Sample Pages: %"PRIu64" (per GB)\n",
                       info->sample_pages);
    }
    monitor_printf(mon, "Period: %"PRIi64" (sec)\n",
                   info->calc_time);
    monitor_printf(mon, "Mode: %s\n",
                   DirtyRateMeasureMode_str(info->mode));
    monitor_printf(mon, "Dirty rate: ");
    if (info->has_dirty_rate) {
        monitor_printf(mon, "%"PRIi64" (MB/s)\n", info->dirty_rate);
        if (info->has_vcpu_dirty_rate) {
            DirtyRateVcpuList *rate, *head = info->vcpu_dirty_rate;
            for (rate = head; rate != NULL; rate = rate->next) {
                monitor_printf(mon, "vcpu[%"PRIi64"], Dirty rate: %"PRIi64
                               " (MB/s)\n", rate->value->id,
                               rate->value->dirty_rate);
            }
        }
    } else {
        monitor_printf(mon, "(not ready)\n");
    }

    qapi_free_DirtyRateVcpuList(info->vcpu_dirty_rate);
    g_free(info);
}
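
/*
 * HMP front end for calc-dirty-rate. From the monitor this is typically
 * driven as (command names per hmp-commands.hx, shown here for
 * illustration):
 *
 *   (qemu) calc_dirty_rate 1
 *   (qemu) info dirty_rate
 */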
void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict)
{
    int64_t sec = qdict_get_try_int(qdict, "second", 0);
    int64_t sample_pages = qdict_get_try_int(qdict, "sample_pages_per_GB", -1);
    bool has_sample_pages = (sample_pages != -1);
    bool dirty_ring = qdict_get_try_bool(qdict, "dirty_ring", false);
    bool dirty_bitmap = qdict_get_try_bool(qdict, "dirty_bitmap", false);
    DirtyRateMeasureMode mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
    Error *err = NULL;

    if (!sec) {
        monitor_printf(mon, "Incorrect period length specified!\n");
        return;
    }

    if (dirty_ring && dirty_bitmap) {
        monitor_printf(mon, "Either dirty ring or dirty bitmap "
                       "can be specified!\n");
        return;
    }

    if (dirty_bitmap) {
        mode = DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP;
    } else if (dirty_ring) {
        mode = DIRTY_RATE_MEASURE_MODE_DIRTY_RING;
    }

    qmp_calc_dirty_rate(sec, /* calc-time */
                        false, TIME_UNIT_SECOND, /* calc-time-unit */
                        has_sample_pages, sample_pages,
                        true, mode,
                        &err);
    if (err) {
        hmp_handle_error(mon, err);
        return;
    }

    monitor_printf(mon, "Starting dirty rate measurement with period %"PRIi64
                   " seconds\n", sec);
    monitor_printf(mon, "[Please use 'info dirty_rate' to check results]\n");
}