/*
 * Dirtyrate implement code
 *
 * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Authors:
 *  Chuan Zheng <zhengchuan@huawei.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include <zlib.h>
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "exec/ramblock.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "qapi/qapi-commands-migration.h"
#include "migration.h"
#include "ram.h"
#include "trace.h"
#include "dirtyrate.h"
static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
static struct DirtyRateStat DirtyStat;
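
/*
 * Sleep out the remainder of the sample period.  Returns the actual
 * elapsed time in ms, which can exceed @msec if hashing the sample
 * pages already took longer than the requested period.
 */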
static int64_t set_sample_page_period(int64_t msec, int64_t initial_time)
{
    int64_t current_time;

    current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if ((current_time - initial_time) >= msec) {
        msec = current_time - initial_time;
    } else {
        g_usleep((msec + initial_time - current_time) * 1000);
    }

    return msec;
}
static bool is_sample_period_valid(int64_t sec)
{
    if (sec < MIN_FETCH_DIRTYRATE_TIME_SEC ||
        sec > MAX_FETCH_DIRTYRATE_TIME_SEC) {
        return false;
    }

    return true;
}
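
/*
 * Atomically advance the state machine from @old_state to @new_state;
 * returns -1 if another thread changed the state in the meantime.
 */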
static int dirtyrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < DIRTY_RATE_STATUS__MAX);
    trace_dirtyrate_set_state(DirtyRateStatus_str(new_state));
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        return 0;
    } else {
        return -1;
    }
}
static struct DirtyRateInfo *query_dirty_rate_info(void)
{
    int64_t dirty_rate = DirtyStat.dirty_rate;
    struct DirtyRateInfo *info = g_malloc0(sizeof(DirtyRateInfo));

    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) {
        info->dirty_rate = dirty_rate;
    } else {
        info->dirty_rate = -1;
    }

    info->status = CalculatingState;
    info->start_time = DirtyStat.start_time;
    info->calc_time = DirtyStat.calc_time;

    trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState));

    return info;
}
static void reset_dirtyrate_stat(void)
{
    DirtyStat.total_dirty_samples = 0;
    DirtyStat.total_sample_count = 0;
    DirtyStat.total_block_mem_MB = 0;
    DirtyStat.dirty_rate = -1;
    DirtyStat.start_time = 0;
    DirtyStat.calc_time = 0;
}
static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
{
    DirtyStat.total_dirty_samples += info->sample_dirty_count;
    DirtyStat.total_sample_count += info->sample_pages_count;
    /* size of total pages in MB */
    DirtyStat.total_block_mem_MB += (info->ramblock_pages *
                                     TARGET_PAGE_SIZE) >> 20;
}
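
/*
 * Extrapolate the sampled dirty fraction to the whole guest:
 *
 *   dirty_rate (MB/s) = total_dirty_samples * total_block_mem_MB * 1000
 *                       / (total_sample_count * msec)
 *
 * e.g. (illustrative values) 10 dirty pages out of 100 sampled over
 * 1000 ms of a 4096 MB guest gives
 * 10 * 4096 * 1000 / (100 * 1000) = 409 MB/s (integer division).
 */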
static void update_dirtyrate(uint64_t msec)
{
    uint64_t dirtyrate;
    uint64_t total_dirty_samples = DirtyStat.total_dirty_samples;
    uint64_t total_sample_count = DirtyStat.total_sample_count;
    uint64_t total_block_mem_MB = DirtyStat.total_block_mem_MB;

    dirtyrate = total_dirty_samples * total_block_mem_MB *
                1000 / (total_sample_count * msec);

    DirtyStat.dirty_rate = dirtyrate;
}
/*
 * get hash result for the sampled memory with length of TARGET_PAGE_SIZE
 * in ramblock, which starts from ramblock base address.
 */
static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
                                      uint64_t vfn)
{
    uint32_t crc;

    crc = crc32(0, (info->ramblock_addr +
                    vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);

    trace_get_ramblock_vfn_hash(info->idstr, vfn, crc);
    return crc;
}
static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
{
    unsigned int sample_pages_count;
    int i;
    GRand *rand;

    sample_pages_count = info->sample_pages_count;

    /* ramblock is smaller than one page; return success to skip it */
    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
        return true;
    }

    info->hash_result = g_try_malloc0_n(sample_pages_count,
                                        sizeof(uint32_t));
    if (!info->hash_result) {
        return false;
    }

    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
                                            sizeof(uint64_t));
    if (!info->sample_page_vfn) {
        g_free(info->hash_result);
        return false;
    }
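
    /*
     * Draw sample_pages_count page-frame numbers uniformly at random
     * (each draw is independent, so a frame can be picked more than
     * once) and record a CRC32 of each page as the "before" snapshot.
     */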
    rand = g_rand_new();
    for (i = 0; i < sample_pages_count; i++) {
        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
                                                    info->ramblock_pages - 1);
        info->hash_result[i] = get_ramblock_vfn_hash(info,
                                                     info->sample_page_vfn[i]);
    }
    g_rand_free(rand);

    return true;
}
static void get_ramblock_dirty_info(RAMBlock *block,
                                    struct RamblockDirtyInfo *info,
                                    struct DirtyRateConfig *config)
{
    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;

    /* Right shift 30 bits to calc ramblock size in GB */
    info->sample_pages_count = (qemu_ram_get_used_length(block) *
                                sample_pages_per_gigabytes) >> 30;
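    /*
     * e.g. assuming the default of 512 samples per GB, a 4 GiB
     * ramblock yields ((4 << 30) * 512) >> 30 = 2048 sampled pages
     * (illustrative values).
     */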
    /* Right shift TARGET_PAGE_BITS to calc page count */
    info->ramblock_pages = qemu_ram_get_used_length(block) >>
                           TARGET_PAGE_BITS;
    info->ramblock_addr = qemu_ram_get_host_addr(block);
    strcpy(info->idstr, qemu_ram_get_idstr(block));
}
static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
{
    int i;

    if (!infos) {
        return;
    }

    for (i = 0; i < count; i++) {
        g_free(infos[i].sample_page_vfn);
        g_free(infos[i].hash_result);
    }
    g_free(infos);
}
static bool skip_sample_ramblock(RAMBlock *block)
{
    /*
     * Sample only blocks larger than MIN_RAMBLOCK_SIZE (in KB, hence
     * the left shift by 10 to convert to bytes).
     */
    if (qemu_ram_get_used_length(block) < (MIN_RAMBLOCK_SIZE << 10)) {
        trace_skip_sample_ramblock(block->idstr,
                                   qemu_ram_get_used_length(block));
        return true;
    }

    return false;
}
static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
                                      struct DirtyRateConfig config,
                                      int *block_count)
{
    struct RamblockDirtyInfo *info = NULL;
    struct RamblockDirtyInfo *dinfo = NULL;
    RAMBlock *block = NULL;
    int total_count = 0;
    int index = 0;
    bool ret = false;
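
    /* First pass: count the ramblocks worth sampling to size the array. */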
    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        total_count++;
    }

    dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo));
    if (dinfo == NULL) {
        goto out;
    }
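
    /* Second pass: record each block's metadata and baseline page hashes. */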
    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        if (index >= total_count) {
            break;
        }
        info = &dinfo[index];
        get_ramblock_dirty_info(block, info, &config);
        if (!save_ramblock_hash(info)) {
            goto out;
        }
        index++;
    }
    ret = true;

out:
    *block_count = index;
    *block_dinfo = dinfo;
    return ret;
}
static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
{
    uint32_t crc;
    int i;

    for (i = 0; i < info->sample_pages_count; i++) {
        crc = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
        if (crc != info->hash_result[i]) {
            trace_calc_page_dirty_rate(info->idstr, crc, info->hash_result[i]);
            info->sample_dirty_count++;
        }
    }
}
static struct RamblockDirtyInfo *
find_block_matched(RAMBlock *block, int count,
                   struct RamblockDirtyInfo *infos)
{
    int i;
    struct RamblockDirtyInfo *matched;

    for (i = 0; i < count; i++) {
        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
            break;
        }
    }

    if (i == count) {
        return NULL;
    }

    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
        infos[i].ramblock_pages !=
            (qemu_ram_get_used_length(block) >> TARGET_PAGE_BITS)) {
        trace_find_page_matched(block->idstr);
        return NULL;
    }

    matched = &infos[i];

    return matched;
}
static bool compare_page_hash_info(struct RamblockDirtyInfo *info,
                                   int block_count)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    RAMBlock *block = NULL;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        block_dinfo = find_block_matched(block, block_count, info);
        if (block_dinfo == NULL) {
            continue;
        }
        calc_page_dirty_rate(block_dinfo);
        update_dirtyrate_stat(block_dinfo);
    }

    if (DirtyStat.total_sample_count == 0) {
        return false;
    }

    return true;
}
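
/*
 * One full measurement pass: snapshot hashes of randomly sampled pages,
 * sleep out the sample period, then re-hash the same pages.  Pages whose
 * CRC changed are counted as dirty and extrapolated to the whole guest.
 */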
static void calculate_dirtyrate(struct DirtyRateConfig config)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    int block_count = 0;
    int64_t msec = 0;
    int64_t initial_time;

    rcu_register_thread();
    reset_dirtyrate_stat();
    rcu_read_lock();
    initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) {
        goto out;
    }
    rcu_read_unlock();

    msec = config.sample_period_seconds * 1000;
    msec = set_sample_page_period(msec, initial_time);
    DirtyStat.start_time = initial_time / 1000;
    DirtyStat.calc_time = msec / 1000;

    rcu_read_lock();
    if (!compare_page_hash_info(block_dinfo, block_count)) {
        goto out;
    }

    update_dirtyrate(msec);

out:
    rcu_read_unlock();
    free_ramblock_dirty_info(block_dinfo, block_count);
    rcu_unregister_thread();
}
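
/*
 * Body of the detached worker thread: move the state machine to
 * "measuring", run one calculation pass, then mark it "measured".
 */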
void *get_dirtyrate_thread(void *arg)
{
    struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg;
    int ret;

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED,
                              DIRTY_RATE_STATUS_MEASURING);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
        return NULL;
    }

    calculate_dirtyrate(config);

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING,
                              DIRTY_RATE_STATUS_MEASURED);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
    }

    return NULL;
}
void qmp_calc_dirty_rate(int64_t calc_time, Error **errp)
{
    static struct DirtyRateConfig config;
    QemuThread thread;
    int ret;

    /*
     * If the dirty rate is already being measured, don't attempt to start.
     */
    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURING) {
        error_setg(errp, "the dirty rate is already being measured.");
        return;
    }

    if (!is_sample_period_valid(calc_time)) {
        error_setg(errp, "calc-time is out of range[%d, %d].",
                   MIN_FETCH_DIRTYRATE_TIME_SEC,
                   MAX_FETCH_DIRTYRATE_TIME_SEC);
        return;
    }

    /*
     * Init calculation state as unstarted.
     */
    ret = dirtyrate_set_state(&CalculatingState, CalculatingState,
                              DIRTY_RATE_STATUS_UNSTARTED);
    if (ret == -1) {
        error_setg(errp, "init dirty rate calculation state failed.");
        return;
    }

    config.sample_period_seconds = calc_time;
    config.sample_pages_per_gigabytes = DIRTYRATE_DEFAULT_SAMPLE_PAGES;
    qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread,
                       (void *)&config, QEMU_THREAD_DETACHED);
}
struct DirtyRateInfo *qmp_query_dirty_rate(Error **errp)
{
    return query_dirty_rate_info();
}
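
/*
 * Example QMP usage of the two commands above (return values are
 * illustrative, not from a real run):
 *
 *   -> {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1}}
 *   <- {"return": {}}
 *   -> {"execute": "query-dirty-rate"}
 *   <- {"return": {"status": "measured", "dirty-rate": 108,
 *                  "start-time": 1603026464, "calc-time": 1}}
 */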