/*
 * Dirty rate measurement code
 *
 * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Authors:
 *  Chuan Zheng <zhengchuan@huawei.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <zlib.h>
#include "qapi/error.h"
#include "cpu.h"
#include "exec/ramblock.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "qapi/qapi-commands-migration.h"
#include "ram.h"
#include "trace.h"
#include "dirtyrate.h"
#include "monitor/hmp.h"
#include "monitor/monitor.h"
#include "qapi/qmp/qdict.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "exec/memory.h"

/*
 * total_dirty_pages is protected by the BQL and is used
 * to count dirty pages during the period between two
 * memory_global_dirty_log_sync() calls.
 */
uint64_t total_dirty_pages;

typedef struct DirtyPageRecord {
    uint64_t start_pages;
    uint64_t end_pages;
} DirtyPageRecord;

static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
static struct DirtyRateStat DirtyStat;
static DirtyRateMeasureMode dirtyrate_mode =
                DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;

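/*
 * Sleep until the requested sample period has elapsed and return the
 * time that actually passed (in ms). If more than msec has already
 * gone by since initial_time, return the real elapsed time instead
 * of sleeping.
 */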
static int64_t set_sample_page_period(int64_t msec, int64_t initial_time)
{
    int64_t current_time;

    current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if ((current_time - initial_time) >= msec) {
        msec = current_time - initial_time;
    } else {
        g_usleep((msec + initial_time - current_time) * 1000);
    }

    return msec;
}

static bool is_sample_period_valid(int64_t sec)
{
    if (sec < MIN_FETCH_DIRTYRATE_TIME_SEC ||
        sec > MAX_FETCH_DIRTYRATE_TIME_SEC) {
        return false;
    }

    return true;
}

static bool is_sample_pages_valid(int64_t pages)
{
    return pages >= MIN_SAMPLE_PAGE_COUNT &&
           pages <= MAX_SAMPLE_PAGE_COUNT;
}

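/*
 * Atomically move the calculation state machine from old_state to
 * new_state; returns 0 on success, -1 if another thread changed the
 * state in the meantime.
 */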
static int dirtyrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < DIRTY_RATE_STATUS__MAX);
    trace_dirtyrate_set_state(DirtyRateStatus_str(new_state));
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        return 0;
    } else {
        return -1;
    }
}

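/*
 * Build a DirtyRateInfo QAPI object from the global DirtyStat,
 * including per-vcpu rates when the last measurement used dirty ring
 * mode. The caller owns the returned object.
 */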
static struct DirtyRateInfo *query_dirty_rate_info(void)
{
    int i;
    int64_t dirty_rate = DirtyStat.dirty_rate;
    struct DirtyRateInfo *info = g_malloc0(sizeof(DirtyRateInfo));
    DirtyRateVcpuList *head = NULL, **tail = &head;

    info->status = CalculatingState;
    info->start_time = DirtyStat.start_time;
    info->calc_time = DirtyStat.calc_time;
    info->sample_pages = DirtyStat.sample_pages;
    info->mode = dirtyrate_mode;

    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) {
        info->has_dirty_rate = true;
        info->dirty_rate = dirty_rate;

        if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
            /*
             * Set sample_pages to 0 to indicate that page sampling
             * isn't enabled.
             */
            info->sample_pages = 0;
            info->has_vcpu_dirty_rate = true;
            for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
                DirtyRateVcpu *rate = g_malloc0(sizeof(DirtyRateVcpu));
                rate->id = DirtyStat.dirty_ring.rates[i].id;
                rate->dirty_rate = DirtyStat.dirty_ring.rates[i].dirty_rate;
                QAPI_LIST_APPEND(tail, rate);
            }
            info->vcpu_dirty_rate = head;
        }

        if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
            info->sample_pages = 0;
        }
    }

    trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState));

    return info;
}

static void init_dirtyrate_stat(int64_t start_time,
                                struct DirtyRateConfig config)
{
    DirtyStat.dirty_rate = -1;
    DirtyStat.start_time = start_time;
    DirtyStat.calc_time = config.sample_period_seconds;
    DirtyStat.sample_pages = config.sample_pages_per_gigabytes;

    switch (config.mode) {
    case DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING:
        DirtyStat.page_sampling.total_dirty_samples = 0;
        DirtyStat.page_sampling.total_sample_count = 0;
        DirtyStat.page_sampling.total_block_mem_MB = 0;
        break;
    case DIRTY_RATE_MEASURE_MODE_DIRTY_RING:
        DirtyStat.dirty_ring.nvcpu = -1;
        DirtyStat.dirty_ring.rates = NULL;
        break;
    default:
        break;
    }
}

static void cleanup_dirtyrate_stat(struct DirtyRateConfig config)
{
    /* the last calc-dirty-rate QMP command used dirty ring mode */
    if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        free(DirtyStat.dirty_ring.rates);
        DirtyStat.dirty_ring.rates = NULL;
    }
}

static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
{
    DirtyStat.page_sampling.total_dirty_samples += info->sample_dirty_count;
    DirtyStat.page_sampling.total_sample_count += info->sample_pages_count;
    /* size of total pages in MB */
    DirtyStat.page_sampling.total_block_mem_MB += (info->ramblock_pages *
                                                   TARGET_PAGE_SIZE) >> 20;
}

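/*
 * Derive the dirty rate in MB/s: the fraction of sampled pages that
 * changed (total_dirty_samples / total_sample_count) is assumed to
 * hold for the whole sampled memory (total_block_mem_MB), and the
 * result is scaled from the msec-long period to one second:
 *
 *   rate = dirty_samples / sample_count * block_mem_MB * 1000 / msec
 */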
static void update_dirtyrate(uint64_t msec)
{
    uint64_t dirtyrate;
    uint64_t total_dirty_samples = DirtyStat.page_sampling.total_dirty_samples;
    uint64_t total_sample_count = DirtyStat.page_sampling.total_sample_count;
    uint64_t total_block_mem_MB = DirtyStat.page_sampling.total_block_mem_MB;

    dirtyrate = total_dirty_samples * total_block_mem_MB *
                1000 / (total_sample_count * msec);

    DirtyStat.dirty_rate = dirtyrate;
}

/*
 * Get the hash result for the sampled memory of length TARGET_PAGE_SIZE
 * in the ramblock, where vfn is counted from the ramblock base address.
 */
static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
                                      uint64_t vfn)
{
    uint32_t crc;

    crc = crc32(0, (info->ramblock_addr +
                vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);

    trace_get_ramblock_vfn_hash(info->idstr, vfn, crc);
    return crc;
}

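/*
 * Pick sample_pages_count random page frames in the ramblock and
 * record a hash for each, so a later pass can detect which sampled
 * pages were dirtied during the measurement period.
 */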
static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
{
    unsigned int sample_pages_count;
    int i;
    GRand *rand;

    sample_pages_count = info->sample_pages_count;

    /* ramblock size less than one page, return success to skip this ramblock */
    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
        return true;
    }

    info->hash_result = g_try_malloc0_n(sample_pages_count,
                                        sizeof(uint32_t));
    if (!info->hash_result) {
        return false;
    }

    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
                                            sizeof(uint64_t));
    if (!info->sample_page_vfn) {
        g_free(info->hash_result);
        return false;
    }

    rand = g_rand_new();
    for (i = 0; i < sample_pages_count; i++) {
        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
                                                    info->ramblock_pages - 1);
        info->hash_result[i] = get_ramblock_vfn_hash(info,
                                                     info->sample_page_vfn[i]);
    }
    g_rand_free(rand);

    return true;
}

static void get_ramblock_dirty_info(RAMBlock *block,
                                    struct RamblockDirtyInfo *info,
                                    struct DirtyRateConfig *config)
{
    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;

    /* Right shift 30 bits to calc ramblock size in GB */
    info->sample_pages_count = (qemu_ram_get_used_length(block) *
                                sample_pages_per_gigabytes) >> 30;
    /* Right shift TARGET_PAGE_BITS to calc page count */
    info->ramblock_pages = qemu_ram_get_used_length(block) >>
                           TARGET_PAGE_BITS;
    info->ramblock_addr = qemu_ram_get_host_addr(block);
    strcpy(info->idstr, qemu_ram_get_idstr(block));
}

static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
{
    int i;

    if (!infos) {
        return;
    }

    for (i = 0; i < count; i++) {
        g_free(infos[i].sample_page_vfn);
        g_free(infos[i].hash_result);
    }
    g_free(infos);
}

static bool skip_sample_ramblock(RAMBlock *block)
{
    /*
     * Sample only blocks larger than MIN_RAMBLOCK_SIZE.
     */
    if (qemu_ram_get_used_length(block) < (MIN_RAMBLOCK_SIZE << 10)) {
        trace_skip_sample_ramblock(block->idstr,
                                   qemu_ram_get_used_length(block));
        return true;
    }

    return false;
}

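/*
 * Allocate one RamblockDirtyInfo per sampled ramblock and record the
 * initial page hashes. On return, *block_dinfo owns the array and
 * *block_count holds the number of entries actually filled in.
 */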
static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
                                      struct DirtyRateConfig config,
                                      int *block_count)
{
    struct RamblockDirtyInfo *info = NULL;
    struct RamblockDirtyInfo *dinfo = NULL;
    RAMBlock *block = NULL;
    int total_count = 0;
    int index = 0;
    bool ret = false;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        total_count++;
    }

    dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo));
    if (dinfo == NULL) {
        goto out;
    }

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        if (index >= total_count) {
            break;
        }
        info = &dinfo[index];
        get_ramblock_dirty_info(block, info, &config);
        if (!save_ramblock_hash(info)) {
            goto out;
        }
        index++;
    }
    ret = true;

out:
    *block_count = index;
    *block_dinfo = dinfo;
    return ret;
}

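/*
 * Re-hash every sampled page and count how many hashes changed since
 * save_ramblock_hash() recorded them; each mismatch is one dirtied
 * sample page.
 */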
static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
{
    uint32_t crc;
    int i;

    for (i = 0; i < info->sample_pages_count; i++) {
        crc = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
        if (crc != info->hash_result[i]) {
            trace_calc_page_dirty_rate(info->idstr, crc, info->hash_result[i]);
            info->sample_dirty_count++;
        }
    }
}

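/*
 * Find the recorded RamblockDirtyInfo that corresponds to block by
 * idstr, rejecting it if the block was resized or remapped between
 * the two sampling passes.
 */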
static struct RamblockDirtyInfo *
find_block_matched(RAMBlock *block, int count,
                   struct RamblockDirtyInfo *infos)
{
    int i;
    struct RamblockDirtyInfo *matched;

    for (i = 0; i < count; i++) {
        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
            break;
        }
    }

    if (i == count) {
        return NULL;
    }

    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
        infos[i].ramblock_pages !=
            (qemu_ram_get_used_length(block) >> TARGET_PAGE_BITS)) {
        trace_find_page_matched(block->idstr);
        return NULL;
    }

    matched = &infos[i];

    return matched;
}

static bool compare_page_hash_info(struct RamblockDirtyInfo *info,
                                   int block_count)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    RAMBlock *block = NULL;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        block_dinfo = find_block_matched(block, block_count, info);
        if (block_dinfo == NULL) {
            continue;
        }
        calc_page_dirty_rate(block_dinfo);
        update_dirtyrate_stat(block_dinfo);
    }

    if (DirtyStat.page_sampling.total_sample_count == 0) {
        return false;
    }

    return true;
}

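/*
 * Snapshot the per-vcpu dirty page counter, either as the starting
 * (start == true) or the ending value of the measurement period.
 */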
static inline void record_dirtypages(DirtyPageRecord *dirty_pages,
                                     CPUState *cpu, bool start)
{
    if (start) {
        dirty_pages[cpu->cpu_index].start_pages = cpu->dirty_pages;
    } else {
        dirty_pages[cpu->cpu_index].end_pages = cpu->dirty_pages;
    }
}

static void dirtyrate_global_dirty_log_start(void)
{
    qemu_mutex_lock_iothread();
    memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE);
    qemu_mutex_unlock_iothread();
}

static void dirtyrate_global_dirty_log_stop(void)
{
    qemu_mutex_lock_iothread();
    memory_global_dirty_log_sync();
    memory_global_dirty_log_stop(GLOBAL_DIRTY_DIRTY_RATE);
    qemu_mutex_unlock_iothread();
}

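/*
 * Convert a start/end dirty page count pair into a rate in MB/s over
 * the measured period DirtyStat.calc_time (in seconds).
 */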
static int64_t do_calculate_dirtyrate_vcpu(DirtyPageRecord dirty_pages)
{
    uint64_t memory_size_MB;
    int64_t time_s;
    uint64_t increased_dirty_pages =
        dirty_pages.end_pages - dirty_pages.start_pages;

    memory_size_MB = (increased_dirty_pages * TARGET_PAGE_SIZE) >> 20;
    time_s = DirtyStat.calc_time;

    return memory_size_MB / time_s;
}

static inline void record_dirtypages_bitmap(DirtyPageRecord *dirty_pages,
                                            bool start)
{
    if (start) {
        dirty_pages->start_pages = total_dirty_pages;
    } else {
        dirty_pages->end_pages = total_dirty_pages;
    }
}

static void do_calculate_dirtyrate_bitmap(DirtyPageRecord dirty_pages)
{
    DirtyStat.dirty_rate = do_calculate_dirtyrate_vcpu(dirty_pages);
}

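/*
 * Clear the dirty bitmap of every migratable ramblock so that dirty
 * tracking effectively restarts from a clean slate.
 */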
static inline void dirtyrate_manual_reset_protect(void)
{
    RAMBlock *block = NULL;

    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_MIGRATABLE(block) {
            memory_region_clear_dirty_bitmap(block->mr, 0,
                                             block->used_length);
        }
    }
}

static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
{
    int64_t msec = 0;
    int64_t start_time;
    DirtyPageRecord dirty_pages;

    qemu_mutex_lock_iothread();
    memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE);

    /*
     * The first round of log sync may return all 1 bits with
     * KVM_DIRTY_LOG_INITIALLY_SET enabled; skip it unconditionally
     * and start dirty tracking from the second round of log sync.
     */
    memory_global_dirty_log_sync();

    /*
     * Reset page protection manually and unconditionally. This
     * ensures the KVM dirty log is cleared when the
     * KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE cap is enabled.
     */
    dirtyrate_manual_reset_protect();
    qemu_mutex_unlock_iothread();

    record_dirtypages_bitmap(&dirty_pages, true);

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    DirtyStat.start_time = start_time / 1000;

    msec = config.sample_period_seconds * 1000;
    msec = set_sample_page_period(msec, start_time);
    DirtyStat.calc_time = msec / 1000;

    /*
     * dirtyrate_global_dirty_log_stop() does two things:
     * 1. fetch the dirty bitmap from kvm
     * 2. stop dirty tracking
     */
    dirtyrate_global_dirty_log_stop();

    record_dirtypages_bitmap(&dirty_pages, false);

    do_calculate_dirtyrate_bitmap(dirty_pages);
}

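/*
 * Dirty ring mode: sample each vcpu's dirty page counter at the start
 * and end of the period, derive a per-vcpu rate, and report the sum
 * as the overall dirty rate.
 */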
static void calculate_dirtyrate_dirty_ring(struct DirtyRateConfig config)
{
    CPUState *cpu;
    int64_t msec = 0;
    int64_t start_time;
    uint64_t dirtyrate = 0;
    uint64_t dirtyrate_sum = 0;
    DirtyPageRecord *dirty_pages;
    int nvcpu = 0;
    int i = 0;

    CPU_FOREACH(cpu) {
        nvcpu++;
    }

    dirty_pages = malloc(sizeof(*dirty_pages) * nvcpu);

    DirtyStat.dirty_ring.nvcpu = nvcpu;
    DirtyStat.dirty_ring.rates = malloc(sizeof(DirtyRateVcpu) * nvcpu);

    dirtyrate_global_dirty_log_start();

    CPU_FOREACH(cpu) {
        record_dirtypages(dirty_pages, cpu, true);
    }

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    DirtyStat.start_time = start_time / 1000;

    msec = config.sample_period_seconds * 1000;
    msec = set_sample_page_period(msec, start_time);
    DirtyStat.calc_time = msec / 1000;

    dirtyrate_global_dirty_log_stop();

    CPU_FOREACH(cpu) {
        record_dirtypages(dirty_pages, cpu, false);
    }

    for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
        dirtyrate = do_calculate_dirtyrate_vcpu(dirty_pages[i]);
        trace_dirtyrate_do_calculate_vcpu(i, dirtyrate);

        DirtyStat.dirty_ring.rates[i].id = i;
        DirtyStat.dirty_ring.rates[i].dirty_rate = dirtyrate;
        dirtyrate_sum += dirtyrate;
    }

    DirtyStat.dirty_rate = dirtyrate_sum;
    free(dirty_pages);
}

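/*
 * Page sampling mode: hash randomly sampled pages, wait out the
 * sample period, re-hash them, and extrapolate the dirty rate from
 * the fraction of samples whose hash changed.
 */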
static void calculate_dirtyrate_sample_vm(struct DirtyRateConfig config)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    int block_count = 0;
    int64_t msec = 0;
    int64_t initial_time;

    rcu_read_lock();
    initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) {
        goto out;
    }
    rcu_read_unlock();

    msec = config.sample_period_seconds * 1000;
    msec = set_sample_page_period(msec, initial_time);
    DirtyStat.start_time = initial_time / 1000;
    DirtyStat.calc_time = msec / 1000;

    rcu_read_lock();
    if (!compare_page_hash_info(block_dinfo, block_count)) {
        goto out;
    }

    update_dirtyrate(msec);

out:
    rcu_read_unlock();
    free_ramblock_dirty_info(block_dinfo, block_count);
}

static void calculate_dirtyrate(struct DirtyRateConfig config)
{
    if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
        calculate_dirtyrate_dirty_bitmap(config);
    } else if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        calculate_dirtyrate_dirty_ring(config);
    } else {
        calculate_dirtyrate_sample_vm(config);
    }

    trace_dirtyrate_calculate(DirtyStat.dirty_rate);
}

void *get_dirtyrate_thread(void *arg)
{
    struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg;
    int ret;

    rcu_register_thread();

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED,
                              DIRTY_RATE_STATUS_MEASURING);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
        return NULL;
    }

    calculate_dirtyrate(config);

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING,
                              DIRTY_RATE_STATUS_MEASURED);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
    }

    rcu_unregister_thread();
    return NULL;
}

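/*
 * Example QMP usage (a one-second page sampling measurement):
 *
 *   {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1}}
 *   ... wait for the period to elapse ...
 *   {"execute": "query-dirty-rate"}
 */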
void qmp_calc_dirty_rate(int64_t calc_time,
                         bool has_sample_pages,
                         int64_t sample_pages,
                         bool has_mode,
                         DirtyRateMeasureMode mode,
                         Error **errp)
{
    static struct DirtyRateConfig config;
    QemuThread thread;
    int ret;
    int64_t start_time;

    /*
     * If the dirty rate is already being measured, don't attempt to start.
     */
    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURING) {
        error_setg(errp, "the dirty rate is already being measured.");
        return;
    }

    if (!is_sample_period_valid(calc_time)) {
        error_setg(errp, "calc-time is out of range[%d, %d].",
                         MIN_FETCH_DIRTYRATE_TIME_SEC,
                         MAX_FETCH_DIRTYRATE_TIME_SEC);
        return;
    }

    if (!has_mode) {
        mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
    }

    if (has_sample_pages && mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        error_setg(errp, "either sample-pages or dirty-ring can be specified.");
        return;
    }

    if (has_sample_pages) {
        if (!is_sample_pages_valid(sample_pages)) {
            error_setg(errp, "sample-pages is out of range[%d, %d].",
                            MIN_SAMPLE_PAGE_COUNT,
                            MAX_SAMPLE_PAGE_COUNT);
            return;
        }
    } else {
        sample_pages = DIRTYRATE_DEFAULT_SAMPLE_PAGES;
    }

    /*
     * Dirty ring mode only works when the KVM dirty ring is enabled;
     * conversely, dirty bitmap mode only works when it is disabled.
     */
    if (((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) &&
        !kvm_dirty_ring_enabled()) ||
        ((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) &&
         kvm_dirty_ring_enabled())) {
        error_setg(errp, "mode %s is not enabled, use other method instead.",
                         DirtyRateMeasureMode_str(mode));
        return;
    }

    /*
     * Init calculation state as unstarted.
     */
    ret = dirtyrate_set_state(&CalculatingState, CalculatingState,
                              DIRTY_RATE_STATUS_UNSTARTED);
    if (ret == -1) {
        error_setg(errp, "init dirty rate calculation state failed.");
        return;
    }

    config.sample_period_seconds = calc_time;
    config.sample_pages_per_gigabytes = sample_pages;
    config.mode = mode;

    cleanup_dirtyrate_stat(config);

    /*
     * Update the dirty rate mode so that we can figure out what mode
     * was used in the last calculation.
     */
    dirtyrate_mode = mode;

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000;
    init_dirtyrate_stat(start_time, config);

    qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread,
                       (void *)&config, QEMU_THREAD_DETACHED);
}

struct DirtyRateInfo *qmp_query_dirty_rate(Error **errp)
{
    return query_dirty_rate_info();
}

void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict)
{
    DirtyRateInfo *info = query_dirty_rate_info();

    monitor_printf(mon, "Status: %s\n",
                   DirtyRateStatus_str(info->status));
    monitor_printf(mon, "Start Time: %"PRIi64" (ms)\n",
                   info->start_time);
    monitor_printf(mon, "Sample Pages: %"PRIu64" (per GB)\n",
                   info->sample_pages);
    monitor_printf(mon, "Period: %"PRIi64" (sec)\n",
                   info->calc_time);
    monitor_printf(mon, "Mode: %s\n",
                   DirtyRateMeasureMode_str(info->mode));
    monitor_printf(mon, "Dirty rate: ");
    if (info->has_dirty_rate) {
        monitor_printf(mon, "%"PRIi64" (MB/s)\n", info->dirty_rate);
        if (info->has_vcpu_dirty_rate) {
            DirtyRateVcpuList *rate, *head = info->vcpu_dirty_rate;
            for (rate = head; rate != NULL; rate = rate->next) {
                monitor_printf(mon, "vcpu[%"PRIi64"], Dirty rate: %"PRIi64
                               " (MB/s)\n", rate->value->id,
                               rate->value->dirty_rate);
            }
        }
    } else {
        monitor_printf(mon, "(not ready)\n");
    }

    qapi_free_DirtyRateVcpuList(info->vcpu_dirty_rate);
    g_free(info);
}

void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict)
{
    int64_t sec = qdict_get_try_int(qdict, "second", 0);
    int64_t sample_pages = qdict_get_try_int(qdict, "sample_pages_per_GB", -1);
    bool has_sample_pages = (sample_pages != -1);
    bool dirty_ring = qdict_get_try_bool(qdict, "dirty_ring", false);
    bool dirty_bitmap = qdict_get_try_bool(qdict, "dirty_bitmap", false);
    DirtyRateMeasureMode mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
    Error *err = NULL;

    if (!sec) {
        monitor_printf(mon, "Incorrect period length specified!\n");
        return;
    }

    if (dirty_ring && dirty_bitmap) {
        monitor_printf(mon, "Either dirty ring or dirty bitmap "
                       "can be specified!\n");
        return;
    }

    if (dirty_bitmap) {
        mode = DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP;
    } else if (dirty_ring) {
        mode = DIRTY_RATE_MEASURE_MODE_DIRTY_RING;
    }

    qmp_calc_dirty_rate(sec, has_sample_pages, sample_pages, true,
                        mode, &err);
    if (err) {
        hmp_handle_error(mon, err);
        return;
    }

    monitor_printf(mon, "Starting dirty rate measurement with period %"PRIi64
                   " seconds\n", sec);
    monitor_printf(mon, "[Please use 'info dirty_rate' to check results]\n");
}