/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
29 #include "qemu/osdep.h"
30 #include "qemu/cutils.h"
31 #include "qemu/bitops.h"
32 #include "qemu/bitmap.h"
33 #include "qemu/madvise.h"
34 #include "qemu/main-loop.h"
35 #include "io/channel-null.h"
36 #include "xbzrle.h"
37 #include "ram.h"
38 #include "migration.h"
39 #include "migration/register.h"
40 #include "migration/misc.h"
41 #include "qemu-file.h"
42 #include "postcopy-ram.h"
43 #include "page_cache.h"
44 #include "qemu/error-report.h"
45 #include "qapi/error.h"
46 #include "qapi/qapi-types-migration.h"
47 #include "qapi/qapi-events-migration.h"
48 #include "qapi/qmp/qerror.h"
49 #include "trace.h"
50 #include "exec/ram_addr.h"
51 #include "exec/target_page.h"
52 #include "qemu/rcu_queue.h"
53 #include "migration/colo.h"
54 #include "block.h"
55 #include "sysemu/cpu-throttle.h"
56 #include "savevm.h"
57 #include "qemu/iov.h"
58 #include "multifd.h"
59 #include "sysemu/runstate.h"
61 #include "hw/boards.h" /* for machine_dump_guest_core() */
63 #if defined(__linux__)
64 #include "qemu/userfaultfd.h"
65 #endif /* defined(__linux__) */
67 /***********************************************************/
68 /* ram save/restore */
/*
 * RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS; it
 * worked for pages that were filled with the same char. We switched
 * it to only search for the zero value. And to avoid confusion with
 * RAM_SAVE_FLAG_COMPRESS_PAGE just rename it.
 */
76 #define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */
77 #define RAM_SAVE_FLAG_ZERO 0x02
78 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
79 #define RAM_SAVE_FLAG_PAGE 0x08
80 #define RAM_SAVE_FLAG_EOS 0x10
81 #define RAM_SAVE_FLAG_CONTINUE 0x20
82 #define RAM_SAVE_FLAG_XBZRLE 0x40
/* 0x80 is reserved in migration.h; start with 0x100 next */
84 #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
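/*
 * Illustrative wire layout (a sketch; save_page_header() below is the
 * authoritative encoder): each page starts with one big-endian 64-bit
 * word that carries the page offset in its upper bits and the
 * RAM_SAVE_FLAG_* bits above in its low bits. When the page belongs to
 * a different RAMBlock than the previous one, RAM_SAVE_FLAG_CONTINUE is
 * clear and a length-prefixed block idstr follows that word.
 */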
86 XBZRLECacheStats xbzrle_counters;
88 /* struct contains XBZRLE cache and a static page
89 used by the compression */
90 static struct {
91 /* buffer used for XBZRLE encoding */
92 uint8_t *encoded_buf;
93 /* buffer for storing page content */
94 uint8_t *current_buf;
95 /* Cache for XBZRLE, Protected by lock. */
96 PageCache *cache;
97 QemuMutex lock;
98 /* it will store a page full of zeros */
99 uint8_t *zero_target_page;
100 /* buffer used for XBZRLE decoding */
101 uint8_t *decoded_buf;
102 } XBZRLE;
104 static void XBZRLE_cache_lock(void)
106 if (migrate_use_xbzrle()) {
107 qemu_mutex_lock(&XBZRLE.lock);
111 static void XBZRLE_cache_unlock(void)
113 if (migrate_use_xbzrle()) {
114 qemu_mutex_unlock(&XBZRLE.lock);
119 * xbzrle_cache_resize: resize the xbzrle cache
121 * This function is called from migrate_params_apply in main
122 * thread, possibly while a migration is in progress. A running
123 * migration may be using the cache and might finish during this call,
124 * hence changes to the cache are protected by XBZRLE.lock().
126 * Returns 0 for success or -1 for error
128 * @new_size: new cache size
129 * @errp: set *errp if the check failed, with reason
131 int xbzrle_cache_resize(uint64_t new_size, Error **errp)
133 PageCache *new_cache;
134 int64_t ret = 0;
136 /* Check for truncation */
137 if (new_size != (size_t)new_size) {
138 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
139 "exceeding address space");
140 return -1;
143 if (new_size == migrate_xbzrle_cache_size()) {
144 /* nothing to do */
145 return 0;
148 XBZRLE_cache_lock();
150 if (XBZRLE.cache != NULL) {
151 new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
152 if (!new_cache) {
153 ret = -1;
154 goto out;
157 cache_fini(XBZRLE.cache);
158 XBZRLE.cache = new_cache;
160 out:
161 XBZRLE_cache_unlock();
162 return ret;
165 bool ramblock_is_ignored(RAMBlock *block)
167 return !qemu_ram_is_migratable(block) ||
168 (migrate_ignore_shared() && qemu_ram_is_shared(block));
171 #undef RAMBLOCK_FOREACH
173 int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
175 RAMBlock *block;
176 int ret = 0;
178 RCU_READ_LOCK_GUARD();
180 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
181 ret = func(block, opaque);
182 if (ret) {
183 break;
186 return ret;
189 static void ramblock_recv_map_init(void)
191 RAMBlock *rb;
193 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
194 assert(!rb->receivedmap);
195 rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
199 int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
201 return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
202 rb->receivedmap);
205 bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
207 return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
210 void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
212 set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
215 void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
216 size_t nr)
218 bitmap_set_atomic(rb->receivedmap,
219 ramblock_recv_bitmap_offset(host_addr, rb),
220 nr);
223 #define RAMBLOCK_RECV_BITMAP_ENDING (0x0123456789abcdefULL)
/*
 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
 *
 * Returns the number of bytes sent (> 0) on success, or < 0 on error.
 */
230 int64_t ramblock_recv_bitmap_send(QEMUFile *file,
231 const char *block_name)
233 RAMBlock *block = qemu_ram_block_by_name(block_name);
234 unsigned long *le_bitmap, nbits;
235 uint64_t size;
237 if (!block) {
238 error_report("%s: invalid block name: %s", __func__, block_name);
239 return -1;
242 nbits = block->postcopy_length >> TARGET_PAGE_BITS;
    /*
     * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
     * machines we may need 4 more bytes for padding (see below
     * comment). So extend it a bit beforehand.
     */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    /*
     * Always use little endian when sending the bitmap. This is
     * required when source and destination VMs are not using the
     * same endianness. (Note: big endian won't work.)
     */
256 bitmap_to_le(le_bitmap, block->receivedmap, nbits);
258 /* Size of the bitmap, in bytes */
259 size = DIV_ROUND_UP(nbits, 8);
262 * size is always aligned to 8 bytes for 64bit machines, but it
263 * may not be true for 32bit machines. We need this padding to
264 * make sure the migration can survive even between 32bit and
265 * 64bit machines.
267 size = ROUND_UP(size, 8);
269 qemu_put_be64(file, size);
270 qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
272 * Mark as an end, in case the middle part is screwed up due to
273 * some "mysterious" reason.
275 qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
276 qemu_fflush(file);
278 g_free(le_bitmap);
280 if (qemu_file_get_error(file)) {
281 return qemu_file_get_error(file);
284 return size + sizeof(size);
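/*
 * Sketch of the stream produced above, assuming an N-bit receive bitmap:
 *
 *   be64 size                       -- DIV_ROUND_UP(N, 8), rounded up to 8
 *   size bytes of little-endian bitmap
 *   be64 RAMBLOCK_RECV_BITMAP_ENDING (0x0123456789abcdef)
 *
 * The returned value counts the bitmap payload plus the leading size
 * field, not the trailing end marker.
 */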
288 * An outstanding page request, on the source, having been received
289 * and queued
291 struct RAMSrcPageRequest {
292 RAMBlock *rb;
293 hwaddr offset;
294 hwaddr len;
296 QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
299 typedef struct {
301 * Cached ramblock/offset values if preempted. They're only meaningful if
302 * preempted==true below.
304 RAMBlock *ram_block;
305 unsigned long ram_page;
307 * Whether a postcopy preemption just happened. Will be reset after
308 * precopy recovered to background migration.
310 bool preempted;
311 } PostcopyPreemptState;
313 /* State of RAM for migration */
314 struct RAMState {
315 /* QEMUFile used for this migration */
316 QEMUFile *f;
317 /* UFFD file descriptor, used in 'write-tracking' migration */
318 int uffdio_fd;
319 /* Last block that we have visited searching for dirty pages */
320 RAMBlock *last_seen_block;
321 /* Last block from where we have sent data */
322 RAMBlock *last_sent_block;
323 /* Last dirty target page we have sent */
324 ram_addr_t last_page;
325 /* last ram version we have seen */
326 uint32_t last_version;
327 /* How many times we have dirty too many pages */
328 int dirty_rate_high_cnt;
329 /* these variables are used for bitmap sync */
330 /* last time we did a full bitmap_sync */
331 int64_t time_last_bitmap_sync;
332 /* bytes transferred at start_time */
333 uint64_t bytes_xfer_prev;
334 /* number of dirty pages since start_time */
335 uint64_t num_dirty_pages_period;
336 /* xbzrle misses since the beginning of the period */
337 uint64_t xbzrle_cache_miss_prev;
338 /* Amount of xbzrle pages since the beginning of the period */
339 uint64_t xbzrle_pages_prev;
340 /* Amount of xbzrle encoded bytes since the beginning of the period */
341 uint64_t xbzrle_bytes_prev;
342 /* Start using XBZRLE (e.g., after the first round). */
343 bool xbzrle_enabled;
344 /* Are we on the last stage of migration */
345 bool last_stage;
346 /* compression statistics since the beginning of the period */
347 /* amount of count that no free thread to compress data */
348 uint64_t compress_thread_busy_prev;
349 /* amount bytes after compression */
350 uint64_t compressed_size_prev;
351 /* amount of compressed pages */
352 uint64_t compress_pages_prev;
354 /* total handled target pages at the beginning of period */
355 uint64_t target_page_count_prev;
356 /* total handled target pages since start */
357 uint64_t target_page_count;
358 /* number of dirty bits in the bitmap */
359 uint64_t migration_dirty_pages;
360 /* Protects modification of the bitmap and migration dirty pages */
361 QemuMutex bitmap_mutex;
362 /* The RAMBlock used in the last src_page_requests */
363 RAMBlock *last_req_rb;
364 /* Queue of outstanding page requests from the destination */
365 QemuMutex src_page_req_mutex;
366 QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
    /* Postcopy preemption information */
369 PostcopyPreemptState postcopy_preempt_state;
371 * Current channel we're using on src VM. Only valid if postcopy-preempt
372 * is enabled.
374 unsigned int postcopy_channel;
376 typedef struct RAMState RAMState;
378 static RAMState *ram_state;
380 static NotifierWithReturnList precopy_notifier_list;
382 static void postcopy_preempt_reset(RAMState *rs)
384 memset(&rs->postcopy_preempt_state, 0, sizeof(PostcopyPreemptState));
387 /* Whether postcopy has queued requests? */
388 static bool postcopy_has_request(RAMState *rs)
390 return !QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests);
393 void precopy_infrastructure_init(void)
395 notifier_with_return_list_init(&precopy_notifier_list);
398 void precopy_add_notifier(NotifierWithReturn *n)
400 notifier_with_return_list_add(&precopy_notifier_list, n);
403 void precopy_remove_notifier(NotifierWithReturn *n)
405 notifier_with_return_remove(n);
408 int precopy_notify(PrecopyNotifyReason reason, Error **errp)
410 PrecopyNotifyData pnd;
411 pnd.reason = reason;
412 pnd.errp = errp;
414 return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
uint64_t ram_bytes_remaining(void)
{
    return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
                       0;
}
423 MigrationStats ram_counters;
425 static void ram_transferred_add(uint64_t bytes)
427 if (runstate_is_running()) {
428 ram_counters.precopy_bytes += bytes;
429 } else if (migration_in_postcopy()) {
430 ram_counters.postcopy_bytes += bytes;
431 } else {
432 ram_counters.downtime_bytes += bytes;
434 ram_counters.transferred += bytes;
437 void dirty_sync_missed_zero_copy(void)
439 ram_counters.dirty_sync_missed_zero_copy++;
442 /* used by the search for pages to send */
443 struct PageSearchStatus {
444 /* Current block being searched */
445 RAMBlock *block;
446 /* Current page to search from */
447 unsigned long page;
448 /* Set once we wrap around */
449 bool complete_round;
451 * [POSTCOPY-ONLY] Whether current page is explicitly requested by
452 * postcopy. When set, the request is "urgent" because the dest QEMU
453 * threads are waiting for us.
455 bool postcopy_requested;
    /*
     * [POSTCOPY-ONLY] The target channel to use to send the current page.
     *
     * Note: this may _not_ match the value of postcopy_requested
     * above. Imagine the case where the postcopy request is exactly
     * the page that we're sending in progress during precopy. In this
     * case we'll have postcopy_requested set to true but the target
     * channel will be the precopy channel (so that we don't split brain
     * on that specific page, since the precopy channel already carries
     * part of that page's data).
     *
     * Besides that specific use case, postcopy_target_channel should
     * always be equal to postcopy_requested, because by default we send
     * postcopy pages via the postcopy preempt channel.
     */
471 bool postcopy_target_channel;
473 typedef struct PageSearchStatus PageSearchStatus;
475 CompressionStats compression_counters;
477 struct CompressParam {
478 bool done;
479 bool quit;
480 bool zero_page;
481 QEMUFile *file;
482 QemuMutex mutex;
483 QemuCond cond;
484 RAMBlock *block;
485 ram_addr_t offset;
487 /* internally used fields */
488 z_stream stream;
489 uint8_t *originbuf;
491 typedef struct CompressParam CompressParam;
493 struct DecompressParam {
494 bool done;
495 bool quit;
496 QemuMutex mutex;
497 QemuCond cond;
498 void *des;
499 uint8_t *compbuf;
500 int len;
501 z_stream stream;
503 typedef struct DecompressParam DecompressParam;
505 static CompressParam *comp_param;
506 static QemuThread *compress_threads;
/*
 * comp_done_cond is used to wake up the migration thread when one of the
 * compression threads has finished compressing a page.
 * comp_done_lock is used together with comp_done_cond.
 */
511 static QemuMutex comp_done_lock;
512 static QemuCond comp_done_cond;
514 static QEMUFile *decomp_file;
515 static DecompressParam *decomp_param;
516 static QemuThread *decompress_threads;
517 static QemuMutex decomp_done_lock;
518 static QemuCond decomp_done_cond;
520 static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
521 ram_addr_t offset, uint8_t *source_buf);
523 static void postcopy_preempt_restore(RAMState *rs, PageSearchStatus *pss,
524 bool postcopy_requested);
526 static void *do_data_compress(void *opaque)
528 CompressParam *param = opaque;
529 RAMBlock *block;
530 ram_addr_t offset;
531 bool zero_page;
533 qemu_mutex_lock(&param->mutex);
534 while (!param->quit) {
535 if (param->block) {
536 block = param->block;
537 offset = param->offset;
538 param->block = NULL;
539 qemu_mutex_unlock(&param->mutex);
541 zero_page = do_compress_ram_page(param->file, &param->stream,
542 block, offset, param->originbuf);
544 qemu_mutex_lock(&comp_done_lock);
545 param->done = true;
546 param->zero_page = zero_page;
547 qemu_cond_signal(&comp_done_cond);
548 qemu_mutex_unlock(&comp_done_lock);
550 qemu_mutex_lock(&param->mutex);
551 } else {
552 qemu_cond_wait(&param->cond, &param->mutex);
555 qemu_mutex_unlock(&param->mutex);
557 return NULL;
560 static void compress_threads_save_cleanup(void)
562 int i, thread_count;
564 if (!migrate_use_compression() || !comp_param) {
565 return;
568 thread_count = migrate_compress_threads();
569 for (i = 0; i < thread_count; i++) {
        /*
         * We use it as an indicator of whether the thread has been
         * properly initialized.
         */
574 if (!comp_param[i].file) {
575 break;
578 qemu_mutex_lock(&comp_param[i].mutex);
579 comp_param[i].quit = true;
580 qemu_cond_signal(&comp_param[i].cond);
581 qemu_mutex_unlock(&comp_param[i].mutex);
583 qemu_thread_join(compress_threads + i);
584 qemu_mutex_destroy(&comp_param[i].mutex);
585 qemu_cond_destroy(&comp_param[i].cond);
586 deflateEnd(&comp_param[i].stream);
587 g_free(comp_param[i].originbuf);
588 qemu_fclose(comp_param[i].file);
589 comp_param[i].file = NULL;
591 qemu_mutex_destroy(&comp_done_lock);
592 qemu_cond_destroy(&comp_done_cond);
593 g_free(compress_threads);
594 g_free(comp_param);
595 compress_threads = NULL;
596 comp_param = NULL;
599 static int compress_threads_save_setup(void)
601 int i, thread_count;
603 if (!migrate_use_compression()) {
604 return 0;
606 thread_count = migrate_compress_threads();
607 compress_threads = g_new0(QemuThread, thread_count);
608 comp_param = g_new0(CompressParam, thread_count);
609 qemu_cond_init(&comp_done_cond);
610 qemu_mutex_init(&comp_done_lock);
611 for (i = 0; i < thread_count; i++) {
612 comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
613 if (!comp_param[i].originbuf) {
614 goto exit;
617 if (deflateInit(&comp_param[i].stream,
618 migrate_compress_level()) != Z_OK) {
619 g_free(comp_param[i].originbuf);
620 goto exit;
        /*
         * comp_param[i].file is just used as a dummy buffer to save data;
         * back it with a null channel so nothing is actually written out.
         */
626 comp_param[i].file = qemu_file_new_output(
627 QIO_CHANNEL(qio_channel_null_new()));
628 comp_param[i].done = true;
629 comp_param[i].quit = false;
630 qemu_mutex_init(&comp_param[i].mutex);
631 qemu_cond_init(&comp_param[i].cond);
632 qemu_thread_create(compress_threads + i, "compress",
633 do_data_compress, comp_param + i,
634 QEMU_THREAD_JOINABLE);
636 return 0;
638 exit:
639 compress_threads_save_cleanup();
640 return -1;
644 * save_page_header: write page header to wire
646 * If this is the 1st block, it also writes the block identification
648 * Returns the number of bytes written
650 * @f: QEMUFile where to send the data
651 * @block: block that contains the page we want to send
652 * @offset: offset inside the block for the page
653 * in the lower bits, it contains flags
655 static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
656 ram_addr_t offset)
658 size_t size, len;
660 if (block == rs->last_sent_block) {
661 offset |= RAM_SAVE_FLAG_CONTINUE;
663 qemu_put_be64(f, offset);
664 size = 8;
666 if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
667 len = strlen(block->idstr);
668 qemu_put_byte(f, len);
669 qemu_put_buffer(f, (uint8_t *)block->idstr, len);
670 size += 1 + len;
671 rs->last_sent_block = block;
673 return size;
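/*
 * Worked example (illustrative): sending the first page of a block whose
 * idstr is "pc.ram" costs 8 (offset/flags word) + 1 (idstr length) + 6
 * (idstr bytes) = 15 bytes; every following page of the same block gets
 * RAM_SAVE_FLAG_CONTINUE set and only pays the 8-byte word.
 */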
677 * mig_throttle_guest_down: throttle down the guest
679 * Reduce amount of guest cpu execution to hopefully slow down memory
680 * writes. If guest dirty memory rate is reduced below the rate at
681 * which we can transfer pages to the destination then we should be
682 * able to complete migration. Some workloads dirty memory way too
683 * fast and will not effectively converge, even with auto-converge.
685 static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
686 uint64_t bytes_dirty_threshold)
688 MigrationState *s = migrate_get_current();
689 uint64_t pct_initial = s->parameters.cpu_throttle_initial;
690 uint64_t pct_increment = s->parameters.cpu_throttle_increment;
691 bool pct_tailslow = s->parameters.cpu_throttle_tailslow;
692 int pct_max = s->parameters.max_cpu_throttle;
694 uint64_t throttle_now = cpu_throttle_get_percentage();
695 uint64_t cpu_now, cpu_ideal, throttle_inc;
697 /* We have not started throttling yet. Let's start it. */
698 if (!cpu_throttle_active()) {
699 cpu_throttle_set(pct_initial);
700 } else {
701 /* Throttling already on, just increase the rate */
702 if (!pct_tailslow) {
703 throttle_inc = pct_increment;
704 } else {
705 /* Compute the ideal CPU percentage used by Guest, which may
706 * make the dirty rate match the dirty rate threshold. */
707 cpu_now = 100 - throttle_now;
708 cpu_ideal = cpu_now * (bytes_dirty_threshold * 1.0 /
709 bytes_dirty_period);
710 throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment);
712 cpu_throttle_set(MIN(throttle_now + throttle_inc, pct_max));
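/*
 * Example of the tail-slow path above (numbers are illustrative): with the
 * guest currently throttled at 40% (cpu_now = 60) and the dirty rate at
 * twice the threshold (bytes_dirty_threshold / bytes_dirty_period = 0.5),
 * cpu_ideal = 60 * 0.5 = 30, so the step is MIN(60 - 30, pct_increment)
 * rather than a blind pct_increment jump.
 */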
716 void mig_throttle_counter_reset(void)
718 RAMState *rs = ram_state;
720 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
721 rs->num_dirty_pages_period = 0;
722 rs->bytes_xfer_prev = ram_counters.transferred;
726 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
728 * @rs: current RAM state
729 * @current_addr: address for the zero page
731 * Update the xbzrle cache to reflect a page that's been sent as all 0.
732 * The important thing is that a stale (not-yet-0'd) page be replaced
733 * by the new data.
734 * As a bonus, if the page wasn't in the cache it gets added so that
735 * when a small write is made into the 0'd page it gets XBZRLE sent.
737 static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
739 if (!rs->xbzrle_enabled) {
740 return;
743 /* We don't care if this fails to allocate a new cache page
744 * as long as it updated an old one */
745 cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
746 ram_counters.dirty_sync_count);
749 #define ENCODING_FLAG_XBZRLE 0x1
752 * save_xbzrle_page: compress and send current page
754 * Returns: 1 means that we wrote the page
755 * 0 means that page is identical to the one already sent
756 * -1 means that xbzrle would be longer than normal
758 * @rs: current RAM state
759 * @current_data: pointer to the address of the page contents
760 * @current_addr: addr of the page
761 * @block: block that contains the page we want to send
762 * @offset: offset inside the block for the page
764 static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
765 ram_addr_t current_addr, RAMBlock *block,
766 ram_addr_t offset)
768 int encoded_len = 0, bytes_xbzrle;
769 uint8_t *prev_cached_page;
771 if (!cache_is_cached(XBZRLE.cache, current_addr,
772 ram_counters.dirty_sync_count)) {
773 xbzrle_counters.cache_miss++;
774 if (!rs->last_stage) {
775 if (cache_insert(XBZRLE.cache, current_addr, *current_data,
776 ram_counters.dirty_sync_count) == -1) {
777 return -1;
778 } else {
779 /* update *current_data when the page has been
780 inserted into cache */
781 *current_data = get_cached_data(XBZRLE.cache, current_addr);
784 return -1;
788 * Reaching here means the page has hit the xbzrle cache, no matter what
789 * encoding result it is (normal encoding, overflow or skipping the page),
790 * count the page as encoded. This is used to calculate the encoding rate.
792 * Example: 2 pages (8KB) being encoded, first page encoding generates 2KB,
793 * 2nd page turns out to be skipped (i.e. no new bytes written to the
794 * page), the overall encoding rate will be 8KB / 2KB = 4, which has the
795 * skipped page included. In this way, the encoding rate can tell if the
796 * guest page is good for xbzrle encoding.
798 xbzrle_counters.pages++;
799 prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
801 /* save current buffer into memory */
802 memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
804 /* XBZRLE encoding (if there is no overflow) */
805 encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
806 TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
807 TARGET_PAGE_SIZE);
810 * Update the cache contents, so that it corresponds to the data
811 * sent, in all cases except where we skip the page.
813 if (!rs->last_stage && encoded_len != 0) {
814 memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
816 * In the case where we couldn't compress, ensure that the caller
817 * sends the data from the cache, since the guest might have
818 * changed the RAM since we copied it.
820 *current_data = prev_cached_page;
823 if (encoded_len == 0) {
824 trace_save_xbzrle_page_skipping();
825 return 0;
826 } else if (encoded_len == -1) {
827 trace_save_xbzrle_page_overflow();
828 xbzrle_counters.overflow++;
829 xbzrle_counters.bytes += TARGET_PAGE_SIZE;
830 return -1;
833 /* Send XBZRLE based compressed page */
834 bytes_xbzrle = save_page_header(rs, rs->f, block,
835 offset | RAM_SAVE_FLAG_XBZRLE);
836 qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
837 qemu_put_be16(rs->f, encoded_len);
838 qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
839 bytes_xbzrle += encoded_len + 1 + 2;
841 * Like compressed_size (please see update_compress_thread_counts),
842 * the xbzrle encoded bytes don't count the 8 byte header with
843 * RAM_SAVE_FLAG_CONTINUE.
845 xbzrle_counters.bytes += bytes_xbzrle - 8;
846 ram_transferred_add(bytes_xbzrle);
848 return 1;
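/*
 * On-the-wire form of an XBZRLE page, as emitted above (sketch):
 *
 *   page header with RAM_SAVE_FLAG_XBZRLE
 *   1 byte  ENCODING_FLAG_XBZRLE
 *   2 bytes encoded_len (big endian)
 *   encoded_len bytes of XBZRLE delta
 *
 * which is where the "+ 1 + 2" in the byte accounting comes from.
 */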
852 * migration_bitmap_find_dirty: find the next dirty page from start
854 * Returns the page offset within memory region of the start of a dirty page
856 * @rs: current RAM state
857 * @rb: RAMBlock where to search for dirty pages
858 * @start: page where we start the search
860 static inline
861 unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
862 unsigned long start)
864 unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
865 unsigned long *bitmap = rb->bmap;
867 if (ramblock_is_ignored(rb)) {
868 return size;
871 return find_next_bit(bitmap, size, start);
874 static void migration_clear_memory_region_dirty_bitmap(RAMBlock *rb,
875 unsigned long page)
877 uint8_t shift;
878 hwaddr size, start;
880 if (!rb->clear_bmap || !clear_bmap_test_and_clear(rb, page)) {
881 return;
884 shift = rb->clear_bmap_shift;
    /*
     * CLEAR_BITMAP_SHIFT_MIN should always guarantee this. It makes
     * things easier since the start address of each small chunk is then
     * always aligned to 64 pages, so the bitmap stays aligned to
     * unsigned long. We could even lift this restriction, but it is
     * simpler to keep it.
     */
893 assert(shift >= 6);
895 size = 1ULL << (TARGET_PAGE_BITS + shift);
896 start = QEMU_ALIGN_DOWN((ram_addr_t)page << TARGET_PAGE_BITS, size);
897 trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
898 memory_region_clear_dirty_bitmap(rb->mr, start, size);
901 static void
902 migration_clear_memory_region_dirty_bitmap_range(RAMBlock *rb,
903 unsigned long start,
904 unsigned long npages)
906 unsigned long i, chunk_pages = 1UL << rb->clear_bmap_shift;
907 unsigned long chunk_start = QEMU_ALIGN_DOWN(start, chunk_pages);
908 unsigned long chunk_end = QEMU_ALIGN_UP(start + npages, chunk_pages);
911 * Clear pages from start to start + npages - 1, so the end boundary is
912 * exclusive.
914 for (i = chunk_start; i < chunk_end; i += chunk_pages) {
915 migration_clear_memory_region_dirty_bitmap(rb, i);
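/*
 * Illustrative example: with 4 KiB target pages and a clear_bmap_shift of,
 * say, 18, one clear-bitmap chunk covers 1 << (12 + 18) bytes = 1 GiB, so
 * a request touching pages in [start, start + npages) is rounded out to
 * whole chunks before memory_region_clear_dirty_bitmap() is called.
 */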
/**
 * colo_bitmap_find_dirty: find contiguous dirty pages from start
 *
 * Returns the page offset within memory region of the start of the
 * contiguous dirty pages
 *
 * @rs: current RAM state
 * @rb: RAMBlock where to search for dirty pages
 * @start: page where we start the search
 * @num: the number of contiguous dirty pages
 */
930 static inline
931 unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
932 unsigned long start, unsigned long *num)
934 unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
935 unsigned long *bitmap = rb->bmap;
936 unsigned long first, next;
938 *num = 0;
940 if (ramblock_is_ignored(rb)) {
941 return size;
944 first = find_next_bit(bitmap, size, start);
945 if (first >= size) {
946 return first;
948 next = find_next_zero_bit(bitmap, size, first + 1);
949 assert(next >= first);
950 *num = next - first;
951 return first;
954 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
955 RAMBlock *rb,
956 unsigned long page)
958 bool ret;
    /*
     * Clear the dirty bitmap if needed. This _must_ be called before we
     * send any of the pages in the chunk, because we need to make sure
     * we can capture further page content changes when we sync the dirty
     * log the next time. So as long as we are going to send any of the
     * pages in the chunk, we clear the remote dirty bitmap for all of
     * them. Clearing it earlier won't be a problem, but too late will.
     */
968 migration_clear_memory_region_dirty_bitmap(rb, page);
970 ret = test_and_clear_bit(page, rb->bmap);
971 if (ret) {
972 rs->migration_dirty_pages--;
975 return ret;
978 static void dirty_bitmap_clear_section(MemoryRegionSection *section,
979 void *opaque)
981 const hwaddr offset = section->offset_within_region;
982 const hwaddr size = int128_get64(section->size);
983 const unsigned long start = offset >> TARGET_PAGE_BITS;
984 const unsigned long npages = size >> TARGET_PAGE_BITS;
985 RAMBlock *rb = section->mr->ram_block;
986 uint64_t *cleared_bits = opaque;
989 * We don't grab ram_state->bitmap_mutex because we expect to run
990 * only when starting migration or during postcopy recovery where
991 * we don't have concurrent access.
993 if (!migration_in_postcopy() && !migrate_background_snapshot()) {
994 migration_clear_memory_region_dirty_bitmap_range(rb, start, npages);
996 *cleared_bits += bitmap_count_one_with_offset(rb->bmap, start, npages);
997 bitmap_clear(rb->bmap, start, npages);
1001 * Exclude all dirty pages from migration that fall into a discarded range as
1002 * managed by a RamDiscardManager responsible for the mapped memory region of
1003 * the RAMBlock. Clear the corresponding bits in the dirty bitmaps.
1005 * Discarded pages ("logically unplugged") have undefined content and must
1006 * not get migrated, because even reading these pages for migration might
1007 * result in undesired behavior.
1009 * Returns the number of cleared bits in the RAMBlock dirty bitmap.
1011 * Note: The result is only stable while migrating (precopy/postcopy).
1013 static uint64_t ramblock_dirty_bitmap_clear_discarded_pages(RAMBlock *rb)
1015 uint64_t cleared_bits = 0;
1017 if (rb->mr && rb->bmap && memory_region_has_ram_discard_manager(rb->mr)) {
1018 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
1019 MemoryRegionSection section = {
1020 .mr = rb->mr,
1021 .offset_within_region = 0,
1022 .size = int128_make64(qemu_ram_get_used_length(rb)),
1025 ram_discard_manager_replay_discarded(rdm, &section,
1026 dirty_bitmap_clear_section,
1027 &cleared_bits);
1029 return cleared_bits;
1033 * Check if a host-page aligned page falls into a discarded range as managed by
1034 * a RamDiscardManager responsible for the mapped memory region of the RAMBlock.
1036 * Note: The result is only stable while migrating (precopy/postcopy).
1038 bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start)
1040 if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
1041 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
1042 MemoryRegionSection section = {
1043 .mr = rb->mr,
1044 .offset_within_region = start,
1045 .size = int128_make64(qemu_ram_pagesize(rb)),
1048 return !ram_discard_manager_is_populated(rdm, &section);
1050 return false;
1053 /* Called with RCU critical section */
1054 static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
1056 uint64_t new_dirty_pages =
1057 cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length);
1059 rs->migration_dirty_pages += new_dirty_pages;
1060 rs->num_dirty_pages_period += new_dirty_pages;
1064 * ram_pagesize_summary: calculate all the pagesizes of a VM
1066 * Returns a summary bitmap of the page sizes of all RAMBlocks
1068 * For VMs with just normal pages this is equivalent to the host page
1069 * size. If it's got some huge pages then it's the OR of all the
1070 * different page sizes.
1072 uint64_t ram_pagesize_summary(void)
1074 RAMBlock *block;
1075 uint64_t summary = 0;
1077 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1078 summary |= block->page_size;
1081 return summary;
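/*
 * Example (illustrative): a guest backed by ordinary 4 KiB pages plus a
 * 2 MiB hugetlbfs region yields summary = 0x1000 | 0x200000, i.e. the OR
 * of every RAMBlock page size in use.
 */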
1084 uint64_t ram_get_total_transferred_pages(void)
1086 return ram_counters.normal + ram_counters.duplicate +
1087 compression_counters.pages + xbzrle_counters.pages;
1090 static void migration_update_rates(RAMState *rs, int64_t end_time)
1092 uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
1093 double compressed_size;
1095 /* calculate period counters */
1096 ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
1097 / (end_time - rs->time_last_bitmap_sync);
1099 if (!page_count) {
1100 return;
1103 if (migrate_use_xbzrle()) {
1104 double encoded_size, unencoded_size;
1106 xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
1107 rs->xbzrle_cache_miss_prev) / page_count;
1108 rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
1109 unencoded_size = (xbzrle_counters.pages - rs->xbzrle_pages_prev) *
1110 TARGET_PAGE_SIZE;
1111 encoded_size = xbzrle_counters.bytes - rs->xbzrle_bytes_prev;
1112 if (xbzrle_counters.pages == rs->xbzrle_pages_prev || !encoded_size) {
1113 xbzrle_counters.encoding_rate = 0;
1114 } else {
1115 xbzrle_counters.encoding_rate = unencoded_size / encoded_size;
1117 rs->xbzrle_pages_prev = xbzrle_counters.pages;
1118 rs->xbzrle_bytes_prev = xbzrle_counters.bytes;
1121 if (migrate_use_compression()) {
1122 compression_counters.busy_rate = (double)(compression_counters.busy -
1123 rs->compress_thread_busy_prev) / page_count;
1124 rs->compress_thread_busy_prev = compression_counters.busy;
1126 compressed_size = compression_counters.compressed_size -
1127 rs->compressed_size_prev;
1128 if (compressed_size) {
1129 double uncompressed_size = (compression_counters.pages -
1130 rs->compress_pages_prev) * TARGET_PAGE_SIZE;
1132 /* Compression-Ratio = Uncompressed-size / Compressed-size */
1133 compression_counters.compression_rate =
1134 uncompressed_size / compressed_size;
1136 rs->compress_pages_prev = compression_counters.pages;
1137 rs->compressed_size_prev = compression_counters.compressed_size;
1142 static void migration_trigger_throttle(RAMState *rs)
1144 MigrationState *s = migrate_get_current();
1145 uint64_t threshold = s->parameters.throttle_trigger_threshold;
1147 uint64_t bytes_xfer_period = ram_counters.transferred - rs->bytes_xfer_prev;
1148 uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
1149 uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
1151 /* During block migration the auto-converge logic incorrectly detects
1152 * that ram migration makes no progress. Avoid this by disabling the
1153 * throttling logic during the bulk phase of block migration. */
1154 if (migrate_auto_converge() && !blk_mig_bulk_active()) {
1155 /* The following detection logic can be refined later. For now:
1156 Check to see if the ratio between dirtied bytes and the approx.
1157 amount of bytes that just got transferred since the last time
1158 we were in this routine reaches the threshold. If that happens
1159 twice, start or increase throttling. */
1161 if ((bytes_dirty_period > bytes_dirty_threshold) &&
1162 (++rs->dirty_rate_high_cnt >= 2)) {
1163 trace_migration_throttle();
1164 rs->dirty_rate_high_cnt = 0;
1165 mig_throttle_guest_down(bytes_dirty_period,
1166 bytes_dirty_threshold);
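/*
 * Example of the trigger above (illustrative numbers): with
 * throttle_trigger_threshold at 50 and 1 GiB transferred in the last
 * period, bytes_dirty_threshold is 512 MiB; dirtying more than that in
 * two sync periods starts or increases auto-converge throttling.
 */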
1171 static void migration_bitmap_sync(RAMState *rs)
1173 RAMBlock *block;
1174 int64_t end_time;
1176 ram_counters.dirty_sync_count++;
1178 if (!rs->time_last_bitmap_sync) {
1179 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1182 trace_migration_bitmap_sync_start();
1183 memory_global_dirty_log_sync();
1185 qemu_mutex_lock(&rs->bitmap_mutex);
1186 WITH_RCU_READ_LOCK_GUARD() {
1187 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1188 ramblock_sync_dirty_bitmap(rs, block);
1190 ram_counters.remaining = ram_bytes_remaining();
1192 qemu_mutex_unlock(&rs->bitmap_mutex);
1194 memory_global_after_dirty_log_sync();
1195 trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
1197 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    /* more than 1 second = 1000 milliseconds */
1200 if (end_time > rs->time_last_bitmap_sync + 1000) {
1201 migration_trigger_throttle(rs);
1203 migration_update_rates(rs, end_time);
1205 rs->target_page_count_prev = rs->target_page_count;
1207 /* reset period counters */
1208 rs->time_last_bitmap_sync = end_time;
1209 rs->num_dirty_pages_period = 0;
1210 rs->bytes_xfer_prev = ram_counters.transferred;
1212 if (migrate_use_events()) {
1213 qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
1217 static void migration_bitmap_sync_precopy(RAMState *rs)
1219 Error *local_err = NULL;
1222 * The current notifier usage is just an optimization to migration, so we
1223 * don't stop the normal migration process in the error case.
1225 if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
1226 error_report_err(local_err);
1227 local_err = NULL;
1230 migration_bitmap_sync(rs);
1232 if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
1233 error_report_err(local_err);
1237 static void ram_release_page(const char *rbname, uint64_t offset)
1239 if (!migrate_release_ram() || !migration_in_postcopy()) {
1240 return;
1243 ram_discard_range(rbname, offset, TARGET_PAGE_SIZE);
1247 * save_zero_page_to_file: send the zero page to the file
1249 * Returns the size of data written to the file, 0 means the page is not
1250 * a zero page
1252 * @rs: current RAM state
1253 * @file: the file where the data is saved
1254 * @block: block that contains the page we want to send
1255 * @offset: offset inside the block for the page
1257 static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
1258 RAMBlock *block, ram_addr_t offset)
1260 uint8_t *p = block->host + offset;
1261 int len = 0;
1263 if (buffer_is_zero(p, TARGET_PAGE_SIZE)) {
1264 len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
1265 qemu_put_byte(file, 0);
1266 len += 1;
1267 ram_release_page(block->idstr, offset);
1269 return len;
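/*
 * A zero page costs only the page header with RAM_SAVE_FLAG_ZERO plus a
 * single 0x00 byte on the wire, so the value returned above is either 0
 * (not a zero page) or header size + 1.
 */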
1273 * save_zero_page: send the zero page to the stream
1275 * Returns the number of pages written.
1277 * @rs: current RAM state
1278 * @block: block that contains the page we want to send
1279 * @offset: offset inside the block for the page
1281 static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
1283 int len = save_zero_page_to_file(rs, rs->f, block, offset);
1285 if (len) {
1286 ram_counters.duplicate++;
1287 ram_transferred_add(len);
1288 return 1;
1290 return -1;
/*
 * @pages: the number of pages written by the control path,
 *         < 0 - error
 *         > 0 - number of pages written
 *
 * Return true if the page has been saved, otherwise false is returned.
 */
1300 static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1301 int *pages)
1303 uint64_t bytes_xmit = 0;
1304 int ret;
1306 *pages = -1;
1307 ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
1308 &bytes_xmit);
1309 if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
1310 return false;
1313 if (bytes_xmit) {
1314 ram_transferred_add(bytes_xmit);
1315 *pages = 1;
1318 if (ret == RAM_SAVE_CONTROL_DELAYED) {
1319 return true;
1322 if (bytes_xmit > 0) {
1323 ram_counters.normal++;
1324 } else if (bytes_xmit == 0) {
1325 ram_counters.duplicate++;
1328 return true;
1332 * directly send the page to the stream
1334 * Returns the number of pages written.
1336 * @rs: current RAM state
1337 * @block: block that contains the page we want to send
1338 * @offset: offset inside the block for the page
1339 * @buf: the page to be sent
 * @async: send the page asynchronously
 */
1342 static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1343 uint8_t *buf, bool async)
1345 ram_transferred_add(save_page_header(rs, rs->f, block,
1346 offset | RAM_SAVE_FLAG_PAGE));
1347 if (async) {
1348 qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
1349 migrate_release_ram() &&
1350 migration_in_postcopy());
1351 } else {
1352 qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
1354 ram_transferred_add(TARGET_PAGE_SIZE);
1355 ram_counters.normal++;
1356 return 1;
1360 * ram_save_page: send the given page to the stream
1362 * Returns the number of pages written.
1363 * < 0 - error
1364 * >=0 - Number of pages written - this might legally be 0
1365 * if xbzrle noticed the page was the same.
1367 * @rs: current RAM state
1368 * @block: block that contains the page we want to send
1369 * @offset: offset inside the block for the page
1371 static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
1373 int pages = -1;
1374 uint8_t *p;
1375 bool send_async = true;
1376 RAMBlock *block = pss->block;
1377 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
1378 ram_addr_t current_addr = block->offset + offset;
1380 p = block->host + offset;
1381 trace_ram_save_page(block->idstr, (uint64_t)offset, p);
1383 XBZRLE_cache_lock();
1384 if (rs->xbzrle_enabled && !migration_in_postcopy()) {
1385 pages = save_xbzrle_page(rs, &p, current_addr, block,
1386 offset);
1387 if (!rs->last_stage) {
1388 /* Can't send this cached data async, since the cache page
1389 * might get updated before it gets to the wire
1391 send_async = false;
1395 /* XBZRLE overflow or normal page */
1396 if (pages == -1) {
1397 pages = save_normal_page(rs, block, offset, p, send_async);
1400 XBZRLE_cache_unlock();
1402 return pages;
1405 static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
1406 ram_addr_t offset)
1408 if (multifd_queue_page(rs->f, block, offset) < 0) {
1409 return -1;
1411 ram_counters.normal++;
1413 return 1;
1416 static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
1417 ram_addr_t offset, uint8_t *source_buf)
1419 RAMState *rs = ram_state;
1420 uint8_t *p = block->host + offset;
1421 int ret;
1423 if (save_zero_page_to_file(rs, f, block, offset)) {
1424 return true;
1427 save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
    /*
     * Copy it to an internal buffer to avoid it being modified by the VM,
     * so that we can catch errors during compression and decompression.
     */
1434 memcpy(source_buf, p, TARGET_PAGE_SIZE);
1435 ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
1436 if (ret < 0) {
1437 qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
1438 error_report("compressed data failed!");
1440 return false;
1443 static void
1444 update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
1446 ram_transferred_add(bytes_xmit);
1448 if (param->zero_page) {
1449 ram_counters.duplicate++;
1450 return;
1453 /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
1454 compression_counters.compressed_size += bytes_xmit - 8;
1455 compression_counters.pages++;
1458 static bool save_page_use_compression(RAMState *rs);
1460 static void flush_compressed_data(RAMState *rs)
1462 int idx, len, thread_count;
1464 if (!save_page_use_compression(rs)) {
1465 return;
1467 thread_count = migrate_compress_threads();
1469 qemu_mutex_lock(&comp_done_lock);
1470 for (idx = 0; idx < thread_count; idx++) {
1471 while (!comp_param[idx].done) {
1472 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
1475 qemu_mutex_unlock(&comp_done_lock);
1477 for (idx = 0; idx < thread_count; idx++) {
1478 qemu_mutex_lock(&comp_param[idx].mutex);
1479 if (!comp_param[idx].quit) {
1480 len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
            /*
             * It's safe to fetch zero_page without holding comp_done_lock,
             * as there is no further request submitted to the thread,
             * i.e., the thread should be waiting for a request at this point.
             */
1486 update_compress_thread_counts(&comp_param[idx], len);
1488 qemu_mutex_unlock(&comp_param[idx].mutex);
1492 static inline void set_compress_params(CompressParam *param, RAMBlock *block,
1493 ram_addr_t offset)
1495 param->block = block;
1496 param->offset = offset;
1499 static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
1500 ram_addr_t offset)
1502 int idx, thread_count, bytes_xmit = -1, pages = -1;
1503 bool wait = migrate_compress_wait_thread();
1505 thread_count = migrate_compress_threads();
1506 qemu_mutex_lock(&comp_done_lock);
1507 retry:
1508 for (idx = 0; idx < thread_count; idx++) {
1509 if (comp_param[idx].done) {
1510 comp_param[idx].done = false;
1511 bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
1512 qemu_mutex_lock(&comp_param[idx].mutex);
1513 set_compress_params(&comp_param[idx], block, offset);
1514 qemu_cond_signal(&comp_param[idx].cond);
1515 qemu_mutex_unlock(&comp_param[idx].mutex);
1516 pages = 1;
1517 update_compress_thread_counts(&comp_param[idx], bytes_xmit);
1518 break;
    /*
     * Wait for a free thread if the user specifies 'compress-wait-thread',
     * otherwise we will post the page out in the main thread as a normal page.
     */
1526 if (pages < 0 && wait) {
1527 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
1528 goto retry;
1530 qemu_mutex_unlock(&comp_done_lock);
1532 return pages;
1536 * find_dirty_block: find the next dirty page and update any state
1537 * associated with the search process.
1539 * Returns true if a page is found
1541 * @rs: current RAM state
1542 * @pss: data about the state of the current dirty page scan
1543 * @again: set to false if the search has scanned the whole of RAM
1545 static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
1548 * This is not a postcopy requested page, mark it "not urgent", and use
1549 * precopy channel to send it.
1551 pss->postcopy_requested = false;
1552 pss->postcopy_target_channel = RAM_CHANNEL_PRECOPY;
1554 pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
1555 if (pss->complete_round && pss->block == rs->last_seen_block &&
1556 pss->page >= rs->last_page) {
1558 * We've been once around the RAM and haven't found anything.
1559 * Give up.
1561 *again = false;
1562 return false;
1564 if (!offset_in_ramblock(pss->block,
1565 ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)) {
1566 /* Didn't find anything in this RAM Block */
1567 pss->page = 0;
1568 pss->block = QLIST_NEXT_RCU(pss->block, next);
1569 if (!pss->block) {
            /*
             * If memory migration starts over, we will meet a dirtied page
             * which may still exist in a compression thread's ring, so we
             * should flush the compressed data to make sure the new page
             * is not overwritten by the old one in the destination.
             *
             * Also, if xbzrle is on, stop using compression at this
             * point. In theory, xbzrle can do better than compression.
             */
1579 flush_compressed_data(rs);
1581 /* Hit the end of the list */
1582 pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
1583 /* Flag that we've looped */
1584 pss->complete_round = true;
1585 /* After the first round, enable XBZRLE. */
1586 if (migrate_use_xbzrle()) {
1587 rs->xbzrle_enabled = true;
1590 /* Didn't find anything this time, but try again on the new block */
1591 *again = true;
1592 return false;
1593 } else {
1594 /* Can go around again, but... */
1595 *again = true;
1596 /* We've found something so probably don't need to */
1597 return true;
1602 * unqueue_page: gets a page of the queue
1604 * Helper for 'get_queued_page' - gets a page off the queue
1606 * Returns the block of the page (or NULL if none available)
1608 * @rs: current RAM state
1609 * @offset: used to return the offset within the RAMBlock
1611 static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
1613 struct RAMSrcPageRequest *entry;
1614 RAMBlock *block = NULL;
1616 if (!postcopy_has_request(rs)) {
1617 return NULL;
1620 QEMU_LOCK_GUARD(&rs->src_page_req_mutex);
1623 * This should _never_ change even after we take the lock, because no one
1624 * should be taking anything off the request list other than us.
1626 assert(postcopy_has_request(rs));
1628 entry = QSIMPLEQ_FIRST(&rs->src_page_requests);
1629 block = entry->rb;
1630 *offset = entry->offset;
1632 if (entry->len > TARGET_PAGE_SIZE) {
1633 entry->len -= TARGET_PAGE_SIZE;
1634 entry->offset += TARGET_PAGE_SIZE;
1635 } else {
1636 memory_region_unref(block->mr);
1637 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
1638 g_free(entry);
1639 migration_consume_urgent_request();
1642 return block;
1645 #if defined(__linux__)
1647 * poll_fault_page: try to get next UFFD write fault page and, if pending fault
1648 * is found, return RAM block pointer and page offset
1650 * Returns pointer to the RAMBlock containing faulting page,
1651 * NULL if no write faults are pending
1653 * @rs: current RAM state
1654 * @offset: page offset from the beginning of the block
1656 static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
1658 struct uffd_msg uffd_msg;
1659 void *page_address;
1660 RAMBlock *block;
1661 int res;
1663 if (!migrate_background_snapshot()) {
1664 return NULL;
1667 res = uffd_read_events(rs->uffdio_fd, &uffd_msg, 1);
1668 if (res <= 0) {
1669 return NULL;
1672 page_address = (void *)(uintptr_t) uffd_msg.arg.pagefault.address;
1673 block = qemu_ram_block_from_host(page_address, false, offset);
1674 assert(block && (block->flags & RAM_UF_WRITEPROTECT) != 0);
1675 return block;
1679 * ram_save_release_protection: release UFFD write protection after
1680 * a range of pages has been saved
1682 * @rs: current RAM state
1683 * @pss: page-search-status structure
1684 * @start_page: index of the first page in the range relative to pss->block
1686 * Returns 0 on success, negative value in case of an error
1688 static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
1689 unsigned long start_page)
1691 int res = 0;
1693 /* Check if page is from UFFD-managed region. */
1694 if (pss->block->flags & RAM_UF_WRITEPROTECT) {
1695 void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS);
1696 uint64_t run_length = (pss->page - start_page) << TARGET_PAGE_BITS;
1698 /* Flush async buffers before un-protect. */
1699 qemu_fflush(rs->f);
1700 /* Un-protect memory range. */
1701 res = uffd_change_protection(rs->uffdio_fd, page_address, run_length,
1702 false, false);
1705 return res;
/*
 * ram_write_tracking_available: check if kernel supports required UFFD features
 *
 * Returns true if supported, false otherwise
 */
1712 bool ram_write_tracking_available(void)
1714 uint64_t uffd_features;
1715 int res;
1717 res = uffd_query_features(&uffd_features);
1718 return (res == 0 &&
1719 (uffd_features & UFFD_FEATURE_PAGEFAULT_FLAG_WP) != 0);
1722 /* ram_write_tracking_compatible: check if guest configuration is
1723 * compatible with 'write-tracking'
1725 * Returns true if compatible, false otherwise
1727 bool ram_write_tracking_compatible(void)
1729 const uint64_t uffd_ioctls_mask = BIT(_UFFDIO_WRITEPROTECT);
1730 int uffd_fd;
1731 RAMBlock *block;
1732 bool ret = false;
1734 /* Open UFFD file descriptor */
1735 uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, false);
1736 if (uffd_fd < 0) {
1737 return false;
1740 RCU_READ_LOCK_GUARD();
1742 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1743 uint64_t uffd_ioctls;
1745 /* Nothing to do with read-only and MMIO-writable regions */
1746 if (block->mr->readonly || block->mr->rom_device) {
1747 continue;
1749 /* Try to register block memory via UFFD-IO to track writes */
1750 if (uffd_register_memory(uffd_fd, block->host, block->max_length,
1751 UFFDIO_REGISTER_MODE_WP, &uffd_ioctls)) {
1752 goto out;
1754 if ((uffd_ioctls & uffd_ioctls_mask) != uffd_ioctls_mask) {
1755 goto out;
1758 ret = true;
1760 out:
1761 uffd_close_fd(uffd_fd);
1762 return ret;
1765 static inline void populate_read_range(RAMBlock *block, ram_addr_t offset,
1766 ram_addr_t size)
    /*
     * We read one byte of each page; this will preallocate page tables if
     * required and populate the shared zeropage on MAP_PRIVATE anonymous memory
     * where no page was populated yet. This might require adaptation when
     * supporting other mappings, like shmem.
     */
1774 for (; offset < size; offset += block->page_size) {
1775 char tmp = *((char *)block->host + offset);
1777 /* Don't optimize the read out */
1778 asm volatile("" : "+r" (tmp));
1782 static inline int populate_read_section(MemoryRegionSection *section,
1783 void *opaque)
1785 const hwaddr size = int128_get64(section->size);
1786 hwaddr offset = section->offset_within_region;
1787 RAMBlock *block = section->mr->ram_block;
1789 populate_read_range(block, offset, size);
1790 return 0;
1794 * ram_block_populate_read: preallocate page tables and populate pages in the
1795 * RAM block by reading a byte of each page.
1797 * Since it's solely used for userfault_fd WP feature, here we just
1798 * hardcode page size to qemu_real_host_page_size.
 * @rb: RAM block to populate
 */
1802 static void ram_block_populate_read(RAMBlock *rb)
1805 * Skip populating all pages that fall into a discarded range as managed by
1806 * a RamDiscardManager responsible for the mapped memory region of the
1807 * RAMBlock. Such discarded ("logically unplugged") parts of a RAMBlock
1808 * must not get populated automatically. We don't have to track
1809 * modifications via userfaultfd WP reliably, because these pages will
     * not be part of the migration stream either way -- see
     * ramblock_dirty_bitmap_clear_discarded_pages().
1813 * Note: The result is only stable while migrating (precopy/postcopy).
1815 if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
1816 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
1817 MemoryRegionSection section = {
1818 .mr = rb->mr,
1819 .offset_within_region = 0,
1820 .size = rb->mr->size,
1823 ram_discard_manager_replay_populated(rdm, &section,
1824 populate_read_section, NULL);
1825 } else {
1826 populate_read_range(rb, 0, rb->used_length);
1831 * ram_write_tracking_prepare: prepare for UFFD-WP memory tracking
1833 void ram_write_tracking_prepare(void)
1835 RAMBlock *block;
1837 RCU_READ_LOCK_GUARD();
1839 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1840 /* Nothing to do with read-only and MMIO-writable regions */
1841 if (block->mr->readonly || block->mr->rom_device) {
1842 continue;
1846 * Populate pages of the RAM block before enabling userfault_fd
1847 * write protection.
1849 * This stage is required since ioctl(UFFDIO_WRITEPROTECT) with
1850 * UFFDIO_WRITEPROTECT_MODE_WP mode setting would silently skip
1851 * pages with pte_none() entries in page table.
1853 ram_block_populate_read(block);
1858 * ram_write_tracking_start: start UFFD-WP memory tracking
1860 * Returns 0 for success or negative value in case of error
1862 int ram_write_tracking_start(void)
1864 int uffd_fd;
1865 RAMState *rs = ram_state;
1866 RAMBlock *block;
1868 /* Open UFFD file descriptor */
1869 uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, true);
1870 if (uffd_fd < 0) {
1871 return uffd_fd;
1873 rs->uffdio_fd = uffd_fd;
1875 RCU_READ_LOCK_GUARD();
1877 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1878 /* Nothing to do with read-only and MMIO-writable regions */
1879 if (block->mr->readonly || block->mr->rom_device) {
1880 continue;
1883 /* Register block memory with UFFD to track writes */
1884 if (uffd_register_memory(rs->uffdio_fd, block->host,
1885 block->max_length, UFFDIO_REGISTER_MODE_WP, NULL)) {
1886 goto fail;
1888 /* Apply UFFD write protection to the block memory range */
1889 if (uffd_change_protection(rs->uffdio_fd, block->host,
1890 block->max_length, true, false)) {
1891 goto fail;
1893 block->flags |= RAM_UF_WRITEPROTECT;
1894 memory_region_ref(block->mr);
1896 trace_ram_write_tracking_ramblock_start(block->idstr, block->page_size,
1897 block->host, block->max_length);
1900 return 0;
1902 fail:
1903 error_report("ram_write_tracking_start() failed: restoring initial memory state");
1905 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1906 if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
1907 continue;
        /*
         * In case some memory block failed to be write-protected,
         * remove protection and unregister all RAM blocks that succeeded.
         */
1913 uffd_change_protection(rs->uffdio_fd, block->host, block->max_length,
1914 false, false);
1915 uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);
1916 /* Cleanup flags and remove reference */
1917 block->flags &= ~RAM_UF_WRITEPROTECT;
1918 memory_region_unref(block->mr);
1921 uffd_close_fd(uffd_fd);
1922 rs->uffdio_fd = -1;
1923 return -1;
1927 * ram_write_tracking_stop: stop UFFD-WP memory tracking and remove protection
1929 void ram_write_tracking_stop(void)
1931 RAMState *rs = ram_state;
1932 RAMBlock *block;
1934 RCU_READ_LOCK_GUARD();
1936 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1937 if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
1938 continue;
1940 /* Remove protection and unregister all affected RAM blocks */
1941 uffd_change_protection(rs->uffdio_fd, block->host, block->max_length,
1942 false, false);
1943 uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);
1945 trace_ram_write_tracking_ramblock_stop(block->idstr, block->page_size,
1946 block->host, block->max_length);
1948 /* Cleanup flags and remove reference */
1949 block->flags &= ~RAM_UF_WRITEPROTECT;
1950 memory_region_unref(block->mr);
1953 /* Finally close UFFD file descriptor */
1954 uffd_close_fd(rs->uffdio_fd);
1955 rs->uffdio_fd = -1;
1958 #else
1959 /* No target OS support, stubs just fail or ignore */
1961 static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
1963 (void) rs;
1964 (void) offset;
1966 return NULL;
1969 static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
1970 unsigned long start_page)
1972 (void) rs;
1973 (void) pss;
1974 (void) start_page;
1976 return 0;
1979 bool ram_write_tracking_available(void)
1981 return false;
1984 bool ram_write_tracking_compatible(void)
1986 assert(0);
1987 return false;
1990 int ram_write_tracking_start(void)
1992 assert(0);
1993 return -1;
1996 void ram_write_tracking_stop(void)
1998 assert(0);
2000 #endif /* defined(__linux__) */
2003 * Check whether two addresses/offsets within the ramblock fall onto the same
2004 * host huge page. Returns true if so, false otherwise.
2006 static bool offset_on_same_huge_page(RAMBlock *rb, uint64_t addr1,
2007 uint64_t addr2)
2009 size_t page_size = qemu_ram_pagesize(rb);
2011 addr1 = ROUND_DOWN(addr1, page_size);
2012 addr2 = ROUND_DOWN(addr2, page_size);
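/*
 * Illustrative example (values made up): with 2 MiB huge pages,
 * ROUND_DOWN(0x3ff000, 0x200000) == ROUND_DOWN(0x200000, 0x200000) ==
 * 0x200000, so both offsets land on the same host huge page, whereas
 * 0x400000 would already belong to the next one.
 */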
2014 return addr1 == addr2;
2018 * Check whether a previously preempted precopy huge page contains the
2019 * currently requested page. Returns true if so, false otherwise.
2021 * This should happen very rarely, because it means that while sending
2022 * during background migration for postcopy we're sending exactly the page
2023 * that some vcpu on the destination node has faulted on. When it happens, we
2024 * probably don't need to do much but drop the request, because we know that
2025 * right after we restore the precopy stream it'll be serviced. It'll slightly
2026 * affect the order in which postcopy requests are serviced (e.g. it's the same
2027 * as moving the current request to the end of the queue) but it shouldn't be a
2028 * big deal. The most important thing is that we can _never_ try to send a
2029 * partially sent huge page on the POSTCOPY channel again, otherwise that huge
2030 * page will get "split brain" on two channels (PRECOPY, POSTCOPY).
2032 static bool postcopy_preempted_contains(RAMState *rs, RAMBlock *block,
2033 ram_addr_t offset)
2035 PostcopyPreemptState *state = &rs->postcopy_preempt_state;
2037 /* No preemption at all? */
2038 if (!state->preempted) {
2039 return false;
2042 /* Not even the same ramblock? */
2043 if (state->ram_block != block) {
2044 return false;
2047 return offset_on_same_huge_page(block, offset,
2048 state->ram_page << TARGET_PAGE_BITS);
2052 * get_queued_page: unqueue a page from the postcopy requests
2054 * Skips pages that are already sent (!dirty)
2056 * Returns true if a queued page is found
2058 * @rs: current RAM state
2059 * @pss: data about the state of the current dirty page scan
2061 static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
2063 RAMBlock *block;
2064 ram_addr_t offset;
2065 bool dirty;
2067 do {
2068 block = unqueue_page(rs, &offset);
2070 * We're sending this page, and since it's postcopy nothing else
2071 * will dirty it, and we must make sure it doesn't get sent again
2072 * even if this queue request was received after the background
2073 * search already sent it.
2075 if (block) {
2076 unsigned long page;
2078 page = offset >> TARGET_PAGE_BITS;
2079 dirty = test_bit(page, block->bmap);
2080 if (!dirty) {
2081 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
2082 page);
2083 } else {
2084 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
2088 } while (block && !dirty);
2090 if (block) {
2091 /* See comment above postcopy_preempted_contains() */
2092 if (postcopy_preempted_contains(rs, block, offset)) {
2093 trace_postcopy_preempt_hit(block->idstr, offset);
2095 * If what we preempted previously was exactly what we're
2096 * requesting right now, restore the preempted precopy
2097 * immediately, boosting its priority as it's requested by
2098 * postcopy.
2100 postcopy_preempt_restore(rs, pss, true);
2101 return true;
2103 } else {
2105 * Poll write faults too if background snapshot is enabled; that's
2106 * when vcpus may get blocked by the write-protected pages.
2108 block = poll_fault_page(rs, &offset);
2111 if (block) {
2113 * We want the background search to continue from the queued page
2114 * since the guest is likely to want other pages near to the page
2115 * it just requested.
2117 pss->block = block;
2118 pss->page = offset >> TARGET_PAGE_BITS;
2121 * This unqueued page would break the "one round" check, even if it
2122 * is really rare.
2124 pss->complete_round = false;
2125 /* Mark it an urgent request, meanwhile using POSTCOPY channel */
2126 pss->postcopy_requested = true;
2127 pss->postcopy_target_channel = RAM_CHANNEL_POSTCOPY;
2130 return !!block;
2134 * migration_page_queue_free: drop any remaining pages in the ram
2135 * request queue
2137 * It should be empty at the end anyway, but in error cases there may
2138 * be some left. In case any pages are left, we drop them.
2141 static void migration_page_queue_free(RAMState *rs)
2143 struct RAMSrcPageRequest *mspr, *next_mspr;
2144 /* This queue generally should be empty - but in the case of a failed
2145 * migration it might have some droppings in it.
2147 RCU_READ_LOCK_GUARD();
2148 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
2149 memory_region_unref(mspr->rb->mr);
2150 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
2151 g_free(mspr);
2156 * ram_save_queue_pages: queue the page for transmission
2158 * A request from postcopy destination for example.
2160 * Returns zero on success or negative on error
2162 * @rbname: Name of the RAMBlock of the request. NULL means the
2163 * same as the last one.
2164 * @start: starting address from the start of the RAMBlock
2165 * @len: length (in bytes) to send
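/*
 * Purely illustrative usage (block name and numbers are made up): a page
 * fault forwarded over the postcopy return path for a 4 KiB page at offset
 * 0x7c0000 of a block named "pc.ram" would arrive here roughly as
 *     ram_save_queue_pages("pc.ram", 0x7c0000, 0x1000);
 * queueing a single target page as an urgent request.
 */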
2167 int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
2169 RAMBlock *ramblock;
2170 RAMState *rs = ram_state;
2172 ram_counters.postcopy_requests++;
2173 RCU_READ_LOCK_GUARD();
2175 if (!rbname) {
2176 /* Reuse last RAMBlock */
2177 ramblock = rs->last_req_rb;
2179 if (!ramblock) {
2181 * Shouldn't happen, we can't reuse the last RAMBlock if
2182 * it's the 1st request.
2184 error_report("ram_save_queue_pages no previous block");
2185 return -1;
2187 } else {
2188 ramblock = qemu_ram_block_by_name(rbname);
2190 if (!ramblock) {
2191 /* We shouldn't be asked for a non-existent RAMBlock */
2192 error_report("ram_save_queue_pages no block '%s'", rbname);
2193 return -1;
2195 rs->last_req_rb = ramblock;
2197 trace_ram_save_queue_pages(ramblock->idstr, start, len);
2198 if (!offset_in_ramblock(ramblock, start + len - 1)) {
2199 error_report("%s request overrun start=" RAM_ADDR_FMT " len="
2200 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
2201 __func__, start, len, ramblock->used_length);
2202 return -1;
2205 struct RAMSrcPageRequest *new_entry =
2206 g_new0(struct RAMSrcPageRequest, 1);
2207 new_entry->rb = ramblock;
2208 new_entry->offset = start;
2209 new_entry->len = len;
2211 memory_region_ref(ramblock->mr);
2212 qemu_mutex_lock(&rs->src_page_req_mutex);
2213 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
2214 migration_make_urgent_request();
2215 qemu_mutex_unlock(&rs->src_page_req_mutex);
2217 return 0;
2220 static bool save_page_use_compression(RAMState *rs)
2222 if (!migrate_use_compression()) {
2223 return false;
2227 * If xbzrle is enabled (e.g., after first round of migration), stop
2228 * using the data compression. In theory, xbzrle can do better than
2229 * compression.
2231 if (rs->xbzrle_enabled) {
2232 return false;
2235 return true;
2239 * try to compress the page before posting it out, return true if the page
2240 * has been properly handled by compression, otherwise needs other
2241 * paths to handle it
2243 static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
2245 if (!save_page_use_compression(rs)) {
2246 return false;
2250 * When starting the process of a new block, the first page of
2251 * the block should be sent out before other pages in the same
2252 * block, and all the pages in the last block should have been sent
2253 * out. Keeping this order is important, because the 'cont' flag
2254 * is used to avoid resending the block name.
2256 * We post the first page as a normal page as compression will take
2257 * much CPU resource.
2259 if (block != rs->last_sent_block) {
2260 flush_compressed_data(rs);
2261 return false;
2264 if (compress_page_with_multi_thread(rs, block, offset) > 0) {
2265 return true;
2268 compression_counters.busy++;
2269 return false;
2273 * ram_save_target_page: save one target page
2275 * Returns the number of pages written
2277 * @rs: current RAM state
2278 * @pss: data about the page we want to send
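/*
 * The page is tried against each transport in turn: the control path
 * (control_save_page, used by RDMA) first, then the compression threads,
 * then the zero-page check, then multifd when it is usable, and finally the
 * plain ram_save_page() path.
 */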
2280 static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss)
2282 RAMBlock *block = pss->block;
2283 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
2284 int res;
2286 if (control_save_page(rs, block, offset, &res)) {
2287 return res;
2290 if (save_compress_page(rs, block, offset)) {
2291 return 1;
2294 res = save_zero_page(rs, block, offset);
2295 if (res > 0) {
2296 /* Must let xbzrle know, otherwise a previous (now 0'd) cached
2297 * page would be stale
2299 if (!save_page_use_compression(rs)) {
2300 XBZRLE_cache_lock();
2301 xbzrle_cache_zero_page(rs, block->offset + offset);
2302 XBZRLE_cache_unlock();
2304 return res;
2308 * Do not use multifd for:
2309 * 1. Compression, as the first page in a new block should be posted out
2310 * before sending the compressed page
2311 * 2. Postcopy, as one whole host page should be placed
2313 if (!save_page_use_compression(rs) && migrate_use_multifd()
2314 && !migration_in_postcopy()) {
2315 return ram_save_multifd_page(rs, block, offset);
2318 return ram_save_page(rs, pss);
2321 static bool postcopy_needs_preempt(RAMState *rs, PageSearchStatus *pss)
2323 MigrationState *ms = migrate_get_current();
2325 /* Not enabled eager preempt? Then never do that. */
2326 if (!migrate_postcopy_preempt()) {
2327 return false;
2330 /* If the user explicitly disabled breaking of huge page, skip */
2331 if (!ms->postcopy_preempt_break_huge) {
2332 return false;
2335 /* If the ramblock we're sending only uses small pages, never bother. */
2336 if (qemu_ram_pagesize(pss->block) == TARGET_PAGE_SIZE) {
2337 return false;
2340 /* Not in postcopy at all? */
2341 if (!migration_in_postcopy()) {
2342 return false;
2346 * If we're already handling a postcopy request, don't preempt as this page
2347 * has got the same high priority.
2349 if (pss->postcopy_requested) {
2350 return false;
2353 /* If there's postcopy requests, then check it up! */
2354 return postcopy_has_request(rs);
2357 /* Preempt precopy: stash the current precopy page into the preempt state */
2358 static void postcopy_do_preempt(RAMState *rs, PageSearchStatus *pss)
2360 PostcopyPreemptState *p_state = &rs->postcopy_preempt_state;
2362 trace_postcopy_preempt_triggered(pss->block->idstr, pss->page);
2365 * Time to preempt precopy. Cache current PSS into preempt state, so that
2366 * after handling the postcopy pages we can recover to it. We need to do
2367 * so because the dest VM will have part of the precopy huge page kept
2368 * over in its tmp huge page caches; better to move on with it when we can.
2370 p_state->ram_block = pss->block;
2371 p_state->ram_page = pss->page;
2372 p_state->preempted = true;
2375 /* Whether we're preempted by a postcopy request during sending a huge page */
2376 static bool postcopy_preempt_triggered(RAMState *rs)
2378 return rs->postcopy_preempt_state.preempted;
2381 static void postcopy_preempt_restore(RAMState *rs, PageSearchStatus *pss,
2382 bool postcopy_requested)
2384 PostcopyPreemptState *state = &rs->postcopy_preempt_state;
2386 assert(state->preempted);
2388 pss->block = state->ram_block;
2389 pss->page = state->ram_page;
2391 /* Whether this is a postcopy request? */
2392 pss->postcopy_requested = postcopy_requested;
2394 * When restoring a preempted page, the old data resides in PRECOPY
2395 * slow channel, even if postcopy_requested is set. So always use
2396 * PRECOPY channel here.
2398 pss->postcopy_target_channel = RAM_CHANNEL_PRECOPY;
2400 trace_postcopy_preempt_restored(pss->block->idstr, pss->page);
2402 /* Reset preempt state, most importantly, set preempted==false */
2403 postcopy_preempt_reset(rs);
2406 static void postcopy_preempt_choose_channel(RAMState *rs, PageSearchStatus *pss)
2408 MigrationState *s = migrate_get_current();
2409 unsigned int channel = pss->postcopy_target_channel;
2410 QEMUFile *next;
2412 if (channel != rs->postcopy_channel) {
2413 if (channel == RAM_CHANNEL_PRECOPY) {
2414 next = s->to_dst_file;
2415 } else {
2416 next = s->postcopy_qemufile_src;
2418 /* Update and cache the current channel */
2419 rs->f = next;
2420 rs->postcopy_channel = channel;
2423 * If channel switched, reset last_sent_block since the old sent block
2424 * may not be on the same channel.
2426 rs->last_sent_block = NULL;
2428 trace_postcopy_preempt_switch_channel(channel);
2431 trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page);
2434 /* We need to make sure rs->f always points to the default channel elsewhere */
2435 static void postcopy_preempt_reset_channel(RAMState *rs)
2437 if (migrate_postcopy_preempt() && migration_in_postcopy()) {
2438 rs->postcopy_channel = RAM_CHANNEL_PRECOPY;
2439 rs->f = migrate_get_current()->to_dst_file;
2440 trace_postcopy_preempt_reset_channel();
2445 * ram_save_host_page: save a whole host page
2447 * Starting at *offset send pages up to the end of the current host
2448 * page. It's valid for the initial offset to point into the middle of
2449 * a host page in which case the remainder of the hostpage is sent.
2450 * Only dirty target pages are sent. Note that the host page size may
2451 * be a huge page for this block.
2452 * The saving stops at the boundary of the used_length of the block
2453 * if the RAMBlock isn't a multiple of the host page size.
2455 * Returns the number of pages written or negative on error
2457 * @rs: current RAM state
2458 * @pss: data about the page we want to send
2460 static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
2462 int tmppages, pages = 0;
2463 size_t pagesize_bits =
2464 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
2465 unsigned long hostpage_boundary =
2466 QEMU_ALIGN_UP(pss->page + 1, pagesize_bits);
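/*
 * Example for illustration only: for a hugetlbfs-backed block with 2 MiB
 * host pages and 4 KiB target pages, pagesize_bits is 512, so pss->page
 * == 700 gives hostpage_boundary == 1024, the next multiple of 512 above
 * 700 + 1.
 */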
2467 unsigned long start_page = pss->page;
2468 int res;
2470 if (ramblock_is_ignored(pss->block)) {
2471 error_report("block %s should not be migrated !", pss->block->idstr);
2472 return 0;
2475 if (migrate_postcopy_preempt() && migration_in_postcopy()) {
2476 postcopy_preempt_choose_channel(rs, pss);
2479 do {
2480 if (postcopy_needs_preempt(rs, pss)) {
2481 postcopy_do_preempt(rs, pss);
2482 break;
2485 /* Check if the page is dirty and if so, send it */
2486 if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
2487 tmppages = ram_save_target_page(rs, pss);
2488 if (tmppages < 0) {
2489 return tmppages;
2492 pages += tmppages;
2494 * Allow rate limiting to happen in the middle of huge pages if
2495 * something is sent in the current iteration.
2497 if (pagesize_bits > 1 && tmppages > 0) {
2498 migration_rate_limit();
2501 pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
2502 } while ((pss->page < hostpage_boundary) &&
2503 offset_in_ramblock(pss->block,
2504 ((ram_addr_t)pss->page) << TARGET_PAGE_BITS));
2505 /* The offset we leave with is the min boundary of host page and block */
2506 pss->page = MIN(pss->page, hostpage_boundary);
2509 * When in postcopy preempt mode, flush the data as soon as possible for
2510 * postcopy requests, because we've already sent a whole huge page, so the
2511 * dst node should already have enough resources to atomically fill in
2512 * the current missing page.
2514 * More importantly, when using separate postcopy channel, we must do
2515 * explicit flush or it won't flush until the buffer is full.
2517 if (migrate_postcopy_preempt() && pss->postcopy_requested) {
2518 qemu_fflush(rs->f);
2521 res = ram_save_release_protection(rs, pss, start_page);
2522 return (res < 0 ? res : pages);
2526 * ram_find_and_save_block: finds a dirty page and sends it to f
2528 * Called within an RCU critical section.
2530 * Returns the number of pages written where zero means no dirty pages,
2531 * or negative on error
2533 * @rs: current RAM state
2535 * On systems where host-page-size > target-page-size it will send all the
2536 * pages in a host page that are dirty.
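/*
 * The search order is: queued postcopy/fault requests first (via
 * get_queued_page), then a previously preempted precopy page if one has to
 * be restored, and otherwise a linear scan for the next dirty bit starting
 * from the last block/page seen.
 */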
2538 static int ram_find_and_save_block(RAMState *rs)
2540 PageSearchStatus pss;
2541 int pages = 0;
2542 bool again, found;
2544 /* No dirty page as there is zero RAM */
2545 if (!ram_bytes_total()) {
2546 return pages;
2549 pss.block = rs->last_seen_block;
2550 pss.page = rs->last_page;
2551 pss.complete_round = false;
2553 if (!pss.block) {
2554 pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
2557 do {
2558 again = true;
2559 found = get_queued_page(rs, &pss);
2561 if (!found) {
2563 * Recover previous precopy ramblock/offset if postcopy has
2564 * preempted precopy. Otherwise find the next dirty bit.
2566 if (postcopy_preempt_triggered(rs)) {
2567 postcopy_preempt_restore(rs, &pss, false);
2568 found = true;
2569 } else {
2570 /* priority queue empty, so just search for something dirty */
2571 found = find_dirty_block(rs, &pss, &again);
2575 if (found) {
2576 pages = ram_save_host_page(rs, &pss);
2578 } while (!pages && again);
2580 rs->last_seen_block = pss.block;
2581 rs->last_page = pss.page;
2583 return pages;
2586 void acct_update_position(QEMUFile *f, size_t size, bool zero)
2588 uint64_t pages = size / TARGET_PAGE_SIZE;
2590 if (zero) {
2591 ram_counters.duplicate += pages;
2592 } else {
2593 ram_counters.normal += pages;
2594 ram_transferred_add(size);
2595 qemu_file_credit_transfer(f, size);
2599 static uint64_t ram_bytes_total_common(bool count_ignored)
2601 RAMBlock *block;
2602 uint64_t total = 0;
2604 RCU_READ_LOCK_GUARD();
2606 if (count_ignored) {
2607 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2608 total += block->used_length;
2610 } else {
2611 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2612 total += block->used_length;
2615 return total;
2618 uint64_t ram_bytes_total(void)
2620 return ram_bytes_total_common(false);
2623 static void xbzrle_load_setup(void)
2625 XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2628 static void xbzrle_load_cleanup(void)
2630 g_free(XBZRLE.decoded_buf);
2631 XBZRLE.decoded_buf = NULL;
2634 static void ram_state_cleanup(RAMState **rsp)
2636 if (*rsp) {
2637 migration_page_queue_free(*rsp);
2638 qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
2639 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
2640 g_free(*rsp);
2641 *rsp = NULL;
2645 static void xbzrle_cleanup(void)
2647 XBZRLE_cache_lock();
2648 if (XBZRLE.cache) {
2649 cache_fini(XBZRLE.cache);
2650 g_free(XBZRLE.encoded_buf);
2651 g_free(XBZRLE.current_buf);
2652 g_free(XBZRLE.zero_target_page);
2653 XBZRLE.cache = NULL;
2654 XBZRLE.encoded_buf = NULL;
2655 XBZRLE.current_buf = NULL;
2656 XBZRLE.zero_target_page = NULL;
2658 XBZRLE_cache_unlock();
2661 static void ram_save_cleanup(void *opaque)
2663 RAMState **rsp = opaque;
2664 RAMBlock *block;
2666 /* We don't use dirty log with background snapshots */
2667 if (!migrate_background_snapshot()) {
2668 /* the caller holds the iothread lock or is in a bh, so there is
2669 * no race writing the migration bitmap
2671 if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
2673 * do not stop dirty log without starting it, since
2674 * memory_global_dirty_log_stop will assert that
2675 * memory_global_dirty_log_start/stop are used in pairs
2677 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
2681 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2682 g_free(block->clear_bmap);
2683 block->clear_bmap = NULL;
2684 g_free(block->bmap);
2685 block->bmap = NULL;
2688 xbzrle_cleanup();
2689 compress_threads_save_cleanup();
2690 ram_state_cleanup(rsp);
2693 static void ram_state_reset(RAMState *rs)
2695 rs->last_seen_block = NULL;
2696 rs->last_sent_block = NULL;
2697 rs->last_page = 0;
2698 rs->last_version = ram_list.version;
2699 rs->xbzrle_enabled = false;
2700 postcopy_preempt_reset(rs);
2701 rs->postcopy_channel = RAM_CHANNEL_PRECOPY;
2704 #define MAX_WAIT 50 /* ms, half buffered_file limit */
2706 /* **** functions for postcopy ***** */
2708 void ram_postcopy_migrated_memory_release(MigrationState *ms)
2710 struct RAMBlock *block;
2712 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2713 unsigned long *bitmap = block->bmap;
2714 unsigned long range = block->used_length >> TARGET_PAGE_BITS;
2715 unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
2717 while (run_start < range) {
2718 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
2719 ram_discard_range(block->idstr,
2720 ((ram_addr_t)run_start) << TARGET_PAGE_BITS,
2721 ((ram_addr_t)(run_end - run_start))
2722 << TARGET_PAGE_BITS);
2723 run_start = find_next_zero_bit(bitmap, range, run_end + 1);
2729 * postcopy_send_discard_bm_ram: discard a RAMBlock
2731 * Callback from postcopy_each_ram_send_discard for each RAMBlock
2733 * @ms: current migration state
2734 * @block: RAMBlock to discard
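/*
 * For illustration only (a tiny, made-up bitmap): with dirty bits 0b00111100
 * the loop below finds one == 2 and zero == 6, so it sends a single discard
 * range of start 2, length 4 target pages.
 */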
2736 static void postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
2738 unsigned long end = block->used_length >> TARGET_PAGE_BITS;
2739 unsigned long current;
2740 unsigned long *bitmap = block->bmap;
2742 for (current = 0; current < end; ) {
2743 unsigned long one = find_next_bit(bitmap, end, current);
2744 unsigned long zero, discard_length;
2746 if (one >= end) {
2747 break;
2750 zero = find_next_zero_bit(bitmap, end, one + 1);
2752 if (zero >= end) {
2753 discard_length = end - one;
2754 } else {
2755 discard_length = zero - one;
2757 postcopy_discard_send_range(ms, one, discard_length);
2758 current = one + discard_length;
2762 static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block);
2765 * postcopy_each_ram_send_discard: discard all RAMBlocks
2767 * Utility for the outgoing postcopy code.
2768 * Calls postcopy_send_discard_bm_ram for each RAMBlock
2769 * passing it bitmap indexes and name.
2770 * (qemu_ram_foreach_block ends up passing unscaled lengths
2771 * which would mean postcopy code would have to deal with target page)
2773 * @ms: current migration state
2775 static void postcopy_each_ram_send_discard(MigrationState *ms)
2777 struct RAMBlock *block;
2779 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2780 postcopy_discard_send_init(ms, block->idstr);
2783 * Deal with TPS != HPS and huge pages. It discards any partially sent
2784 * host-page size chunks and marks any partially dirty host-page size
2785 * chunks as all dirty. In this case the host-page is the host-page
2786 * for the particular RAMBlock, i.e. it might be a huge page.
2788 postcopy_chunk_hostpages_pass(ms, block);
2791 * Postcopy sends chunks of bitmap over the wire, but it
2792 * just needs indexes at this point, which avoids it having
2793 * target-page-specific code.
2795 postcopy_send_discard_bm_ram(ms, block);
2796 postcopy_discard_send_finish(ms);
2801 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
2803 * Helper for postcopy_each_ram_send_discard; it is called for each
2804 * RAMBlock to canonicalize the block's dirty bitmap into host-page
2805 * sized chunks.
2807 * Postcopy requires that all target pages in a hostpage are dirty or
2808 * clean, not a mix. This function canonicalizes the bitmaps.
2810 * @ms: current migration state
2811 * @block: block that contains the page we want to canonicalize
2813 static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block)
2815 RAMState *rs = ram_state;
2816 unsigned long *bitmap = block->bmap;
2817 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
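/* e.g. 2 MiB huge pages with 4 KiB target pages give host_ratio == 512 */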
2818 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
2819 unsigned long run_start;
2821 if (block->page_size == TARGET_PAGE_SIZE) {
2822 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2823 return;
2826 /* Find a dirty page */
2827 run_start = find_next_bit(bitmap, pages, 0);
2829 while (run_start < pages) {
2832 * If the start of this run of pages is in the middle of a host
2833 * page, then we need to fixup this host page.
2835 if (QEMU_IS_ALIGNED(run_start, host_ratio)) {
2836 /* Find the end of this run */
2837 run_start = find_next_zero_bit(bitmap, pages, run_start + 1);
2839 * If the end isn't at the start of a host page, then the
2840 * run doesn't finish at the end of a host page
2841 * and we need to discard.
2845 if (!QEMU_IS_ALIGNED(run_start, host_ratio)) {
2846 unsigned long page;
2847 unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start,
2848 host_ratio);
2849 run_start = QEMU_ALIGN_UP(run_start, host_ratio);
2851 /* Clean up the bitmap */
2852 for (page = fixup_start_addr;
2853 page < fixup_start_addr + host_ratio; page++) {
2855 * Remark them as dirty, updating the count for any pages
2856 * that weren't previously dirty.
2858 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
2862 /* Find the next dirty page for the next iteration */
2863 run_start = find_next_bit(bitmap, pages, run_start);
2868 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
2870 * Transmit the set of pages to be discarded after precopy to the target.
2871 * These are pages that:
2872 * a) Have been previously transmitted but are now dirty again
2873 * b) Have never been transmitted; this ensures that
2874 * any pages on the destination that have been mapped by background
2875 * tasks get discarded (transparent huge pages is the specific concern)
2876 * Hopefully this is pretty sparse
2878 * @ms: current migration state
2880 void ram_postcopy_send_discard_bitmap(MigrationState *ms)
2882 RAMState *rs = ram_state;
2884 RCU_READ_LOCK_GUARD();
2886 /* This should be our last sync, the src is now paused */
2887 migration_bitmap_sync(rs);
2889 /* Easiest way to make sure we don't resume in the middle of a host-page */
2890 rs->last_seen_block = NULL;
2891 rs->last_sent_block = NULL;
2892 rs->last_page = 0;
2894 postcopy_each_ram_send_discard(ms);
2896 trace_ram_postcopy_send_discard_bitmap();
2900 * ram_discard_range: discard dirtied pages at the beginning of postcopy
2902 * Returns zero on success
2904 * @rbname: name of the RAMBlock of the request. NULL means the
2905 * same as the last one.
2906 * @start: RAMBlock starting page
2907 * @length: RAMBlock size
2909 int ram_discard_range(const char *rbname, uint64_t start, size_t length)
2911 trace_ram_discard_range(rbname, start, length);
2913 RCU_READ_LOCK_GUARD();
2914 RAMBlock *rb = qemu_ram_block_by_name(rbname);
2916 if (!rb) {
2917 error_report("ram_discard_range: Failed to find block '%s'", rbname);
2918 return -1;
2922 * On source VM, we don't need to update the received bitmap since
2923 * we don't even have one.
2925 if (rb->receivedmap) {
2926 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
2927 length >> qemu_target_page_bits());
2930 return ram_block_discard_range(rb, start, length);
2934 * For every allocation, we will try not to crash the VM if the
2935 * allocation fails.
2937 static int xbzrle_init(void)
2939 Error *local_err = NULL;
2941 if (!migrate_use_xbzrle()) {
2942 return 0;
2945 XBZRLE_cache_lock();
2947 XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
2948 if (!XBZRLE.zero_target_page) {
2949 error_report("%s: Error allocating zero page", __func__);
2950 goto err_out;
2953 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
2954 TARGET_PAGE_SIZE, &local_err);
2955 if (!XBZRLE.cache) {
2956 error_report_err(local_err);
2957 goto free_zero_page;
2960 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
2961 if (!XBZRLE.encoded_buf) {
2962 error_report("%s: Error allocating encoded_buf", __func__);
2963 goto free_cache;
2966 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
2967 if (!XBZRLE.current_buf) {
2968 error_report("%s: Error allocating current_buf", __func__);
2969 goto free_encoded_buf;
2972 /* We are all good */
2973 XBZRLE_cache_unlock();
2974 return 0;
2976 free_encoded_buf:
2977 g_free(XBZRLE.encoded_buf);
2978 XBZRLE.encoded_buf = NULL;
2979 free_cache:
2980 cache_fini(XBZRLE.cache);
2981 XBZRLE.cache = NULL;
2982 free_zero_page:
2983 g_free(XBZRLE.zero_target_page);
2984 XBZRLE.zero_target_page = NULL;
2985 err_out:
2986 XBZRLE_cache_unlock();
2987 return -ENOMEM;
2990 static int ram_state_init(RAMState **rsp)
2992 *rsp = g_try_new0(RAMState, 1);
2994 if (!*rsp) {
2995 error_report("%s: Init ramstate fail", __func__);
2996 return -1;
2999 qemu_mutex_init(&(*rsp)->bitmap_mutex);
3000 qemu_mutex_init(&(*rsp)->src_page_req_mutex);
3001 QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
3004 * Count the total number of pages used by ram blocks not including any
3005 * gaps due to alignment or unplugs.
3006 * This must match with the initial values of dirty bitmap.
3008 (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
3009 ram_state_reset(*rsp);
3011 return 0;
3014 static void ram_list_init_bitmaps(void)
3016 MigrationState *ms = migrate_get_current();
3017 RAMBlock *block;
3018 unsigned long pages;
3019 uint8_t shift;
3021 /* Skip setting bitmap if there is no RAM */
3022 if (ram_bytes_total()) {
3023 shift = ms->clear_bitmap_shift;
3024 if (shift > CLEAR_BITMAP_SHIFT_MAX) {
3025 error_report("clear_bitmap_shift (%u) too big, using "
3026 "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX);
3027 shift = CLEAR_BITMAP_SHIFT_MAX;
3028 } else if (shift < CLEAR_BITMAP_SHIFT_MIN) {
3029 error_report("clear_bitmap_shift (%u) too small, using "
3030 "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN);
3031 shift = CLEAR_BITMAP_SHIFT_MIN;
3034 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3035 pages = block->max_length >> TARGET_PAGE_BITS;
3037 * The initial dirty bitmap for migration must be set with all
3038 * ones to make sure we'll migrate every guest RAM page to
3039 * destination.
3040 * Here we set RAMBlock.bmap all to 1 because when we restart a
3041 * new migration after a failed migration, ram_list.
3042 * dirty_memory[DIRTY_MEMORY_MIGRATION] may not include the whole
3043 * guest memory.
3045 block->bmap = bitmap_new(pages);
3046 bitmap_set(block->bmap, 0, pages);
3047 block->clear_bmap_shift = shift;
3048 block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
3053 static void migration_bitmap_clear_discarded_pages(RAMState *rs)
3055 unsigned long pages;
3056 RAMBlock *rb;
3058 RCU_READ_LOCK_GUARD();
3060 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
3061 pages = ramblock_dirty_bitmap_clear_discarded_pages(rb);
3062 rs->migration_dirty_pages -= pages;
3066 static void ram_init_bitmaps(RAMState *rs)
3068 /* For memory_global_dirty_log_start below. */
3069 qemu_mutex_lock_iothread();
3070 qemu_mutex_lock_ramlist();
3072 WITH_RCU_READ_LOCK_GUARD() {
3073 ram_list_init_bitmaps();
3074 /* We don't use dirty log with background snapshots */
3075 if (!migrate_background_snapshot()) {
3076 memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
3077 migration_bitmap_sync_precopy(rs);
3080 qemu_mutex_unlock_ramlist();
3081 qemu_mutex_unlock_iothread();
3084 * After an eventual first bitmap sync, fixup the initial bitmap
3085 * containing all 1s to exclude any discarded pages from migration.
3087 migration_bitmap_clear_discarded_pages(rs);
3090 static int ram_init_all(RAMState **rsp)
3092 if (ram_state_init(rsp)) {
3093 return -1;
3096 if (xbzrle_init()) {
3097 ram_state_cleanup(rsp);
3098 return -1;
3101 ram_init_bitmaps(*rsp);
3103 return 0;
3106 static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
3108 RAMBlock *block;
3109 uint64_t pages = 0;
3112 * Postcopy is not using xbzrle/compression, so no need for that.
3113 * Also, since the source is already halted, we don't need to care
3114 * about dirty page logging either.
3117 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3118 pages += bitmap_count_one(block->bmap,
3119 block->used_length >> TARGET_PAGE_BITS);
3122 /* This may not be aligned with current bitmaps. Recalculate. */
3123 rs->migration_dirty_pages = pages;
3125 ram_state_reset(rs);
3127 /* Update RAMState cache of output QEMUFile */
3128 rs->f = out;
3130 trace_ram_state_resume_prepare(pages);
3134 * This function clears bits of the free pages reported by the caller from the
3135 * migration dirty bitmap. @addr is the host address corresponding to the
3136 * start of the continuous guest free pages, and @len is the total bytes of
3137 * those pages.
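/*
 * Worked example (numbers invented): with 4 KiB target pages, a hint
 * covering offsets 0x10000..0x18000 of a block clears npages == 8 bits
 * starting at bit 0x10 of that block's dirty bitmap, and subtracts however
 * many of those bits were previously set from migration_dirty_pages.
 */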
3139 void qemu_guest_free_page_hint(void *addr, size_t len)
3141 RAMBlock *block;
3142 ram_addr_t offset;
3143 size_t used_len, start, npages;
3144 MigrationState *s = migrate_get_current();
3146 /* This function is currently expected to be used during live migration */
3147 if (!migration_is_setup_or_active(s->state)) {
3148 return;
3151 for (; len > 0; len -= used_len, addr += used_len) {
3152 block = qemu_ram_block_from_host(addr, false, &offset);
3153 if (unlikely(!block || offset >= block->used_length)) {
3155 * The implementation might not support RAMBlock resize during
3156 * live migration, but it could happen in theory with future
3157 * updates. So we add a check here to capture that case.
3159 error_report_once("%s unexpected error", __func__);
3160 return;
3163 if (len <= block->used_length - offset) {
3164 used_len = len;
3165 } else {
3166 used_len = block->used_length - offset;
3169 start = offset >> TARGET_PAGE_BITS;
3170 npages = used_len >> TARGET_PAGE_BITS;
3172 qemu_mutex_lock(&ram_state->bitmap_mutex);
3174 * The skipped free pages are equivalent to having been sent from clear_bmap's
3175 * perspective, so clear the bits from the memory region bitmap which
3176 * are initially set. Otherwise those skipped pages will be sent in
3177 * the next round after syncing from the memory region bitmap.
3179 migration_clear_memory_region_dirty_bitmap_range(block, start, npages);
3180 ram_state->migration_dirty_pages -=
3181 bitmap_count_one_with_offset(block->bmap, start, npages);
3182 bitmap_clear(block->bmap, start, npages);
3183 qemu_mutex_unlock(&ram_state->bitmap_mutex);
3188 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
3189 * a long-running RCU critical section. When rcu-reclaims in the code
3190 * start to become numerous it will be necessary to reduce the
3191 * granularity of these critical sections.
3195 * ram_save_setup: Setup RAM for migration
3197 * Returns zero to indicate success and negative for error
3199 * @f: QEMUFile where to send the data
3200 * @opaque: RAMState pointer
3202 static int ram_save_setup(QEMUFile *f, void *opaque)
3204 RAMState **rsp = opaque;
3205 RAMBlock *block;
3206 int ret;
3208 if (compress_threads_save_setup()) {
3209 return -1;
3212 /* migration has already setup the bitmap, reuse it. */
3213 if (!migration_in_colo_state()) {
3214 if (ram_init_all(rsp) != 0) {
3215 compress_threads_save_cleanup();
3216 return -1;
3219 (*rsp)->f = f;
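/*
 * The setup section written below starts with the total RAM size (with
 * RAM_SAVE_FLAG_MEM_SIZE set) and then, for each migratable block: a
 * one-byte idstr length, the idstr itself, the used_length as a be64,
 * optionally the page size (when postcopy-ram is enabled and the block's
 * page size differs from the host page size), and optionally the memory
 * region address (when ignore-shared is enabled).
 */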
3221 WITH_RCU_READ_LOCK_GUARD() {
3222 qemu_put_be64(f, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE);
3224 RAMBLOCK_FOREACH_MIGRATABLE(block) {
3225 qemu_put_byte(f, strlen(block->idstr));
3226 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
3227 qemu_put_be64(f, block->used_length);
3228 if (migrate_postcopy_ram() && block->page_size !=
3229 qemu_host_page_size) {
3230 qemu_put_be64(f, block->page_size);
3232 if (migrate_ignore_shared()) {
3233 qemu_put_be64(f, block->mr->addr);
3238 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
3239 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
3241 ret = multifd_send_sync_main(f);
3242 if (ret < 0) {
3243 return ret;
3246 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3247 qemu_fflush(f);
3249 return 0;
3253 * ram_save_iterate: iterative stage for migration
3255 * Returns zero to indicate success and negative for error
3257 * @f: QEMUFile where to send the data
3258 * @opaque: RAMState pointer
3260 static int ram_save_iterate(QEMUFile *f, void *opaque)
3262 RAMState **temp = opaque;
3263 RAMState *rs = *temp;
3264 int ret = 0;
3265 int i;
3266 int64_t t0;
3267 int done = 0;
3269 if (blk_mig_bulk_active()) {
3270 /* Avoid transferring ram during bulk phase of block migration as
3271 * the bulk phase will usually take a long time and transferring
3272 * ram updates during that time is pointless. */
3273 goto out;
3277 * We'll take this lock a little bit long, but it's okay for two reasons.
3278 * Firstly, the only other thread that may take it is the one that calls
3279 * qemu_guest_free_page_hint(), which should be rare; secondly, see
3280 * MAX_WAIT (if curious, further see commit 4508bd9ed8053ce) below, which
3281 * guarantees that we'll at least release it on a regular basis.
3283 qemu_mutex_lock(&rs->bitmap_mutex);
3284 WITH_RCU_READ_LOCK_GUARD() {
3285 if (ram_list.version != rs->last_version) {
3286 ram_state_reset(rs);
3289 /* Read version before ram_list.blocks */
3290 smp_rmb();
3292 ram_control_before_iterate(f, RAM_CONTROL_ROUND);
3294 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
3295 i = 0;
3296 while ((ret = qemu_file_rate_limit(f)) == 0 ||
3297 postcopy_has_request(rs)) {
3298 int pages;
3300 if (qemu_file_get_error(f)) {
3301 break;
3304 pages = ram_find_and_save_block(rs);
3305 /* no more pages to send */
3306 if (pages == 0) {
3307 done = 1;
3308 break;
3311 if (pages < 0) {
3312 qemu_file_set_error(f, pages);
3313 break;
3316 rs->target_page_count += pages;
3319 * During postcopy, it is necessary to make sure one whole host
3320 * page is sent in one chunk.
3322 if (migrate_postcopy_ram()) {
3323 flush_compressed_data(rs);
3327 * we want to check in the 1st loop, just in case it was the 1st
3328 * time and we had to sync the dirty bitmap.
3329 * qemu_clock_get_ns() is a bit expensive, so we only check every
3330 * few iterations.
3332 if ((i & 63) == 0) {
3333 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
3334 1000000;
3335 if (t1 > MAX_WAIT) {
3336 trace_ram_save_iterate_big_wait(t1, i);
3337 break;
3340 i++;
3343 qemu_mutex_unlock(&rs->bitmap_mutex);
3345 postcopy_preempt_reset_channel(rs);
3348 * Must occur before EOS (or any QEMUFile operation)
3349 * because of RDMA protocol.
3351 ram_control_after_iterate(f, RAM_CONTROL_ROUND);
3353 out:
3354 if (ret >= 0
3355 && migration_is_setup_or_active(migrate_get_current()->state)) {
3356 ret = multifd_send_sync_main(rs->f);
3357 if (ret < 0) {
3358 return ret;
3361 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3362 qemu_fflush(f);
3363 ram_transferred_add(8);
3365 ret = qemu_file_get_error(f);
3367 if (ret < 0) {
3368 return ret;
3371 return done;
3375 * ram_save_complete: function called to send the remaining amount of ram
3377 * Returns zero to indicate success or negative on error
3379 * Called with iothread lock
3381 * @f: QEMUFile where to send the data
3382 * @opaque: RAMState pointer
3384 static int ram_save_complete(QEMUFile *f, void *opaque)
3386 RAMState **temp = opaque;
3387 RAMState *rs = *temp;
3388 int ret = 0;
3390 rs->last_stage = !migration_in_colo_state();
3392 WITH_RCU_READ_LOCK_GUARD() {
3393 if (!migration_in_postcopy()) {
3394 migration_bitmap_sync_precopy(rs);
3397 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
3399 /* try transferring iterative blocks of memory */
3401 /* flush all remaining blocks regardless of rate limiting */
3402 while (true) {
3403 int pages;
3405 pages = ram_find_and_save_block(rs);
3406 /* no more blocks to send */
3407 if (pages == 0) {
3408 break;
3410 if (pages < 0) {
3411 ret = pages;
3412 break;
3416 flush_compressed_data(rs);
3417 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
3420 if (ret < 0) {
3421 return ret;
3424 postcopy_preempt_reset_channel(rs);
3426 ret = multifd_send_sync_main(rs->f);
3427 if (ret < 0) {
3428 return ret;
3431 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3432 qemu_fflush(f);
3434 return 0;
3437 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
3438 uint64_t *res_precopy_only,
3439 uint64_t *res_compatible,
3440 uint64_t *res_postcopy_only)
3442 RAMState **temp = opaque;
3443 RAMState *rs = *temp;
3444 uint64_t remaining_size;
3446 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3448 if (!migration_in_postcopy() &&
3449 remaining_size < max_size) {
3450 qemu_mutex_lock_iothread();
3451 WITH_RCU_READ_LOCK_GUARD() {
3452 migration_bitmap_sync_precopy(rs);
3454 qemu_mutex_unlock_iothread();
3455 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3458 if (migrate_postcopy_ram()) {
3459 /* We can do postcopy, and all the data is postcopiable */
3460 *res_compatible += remaining_size;
3461 } else {
3462 *res_precopy_only += remaining_size;
3466 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
3468 unsigned int xh_len;
3469 int xh_flags;
3470 uint8_t *loaded_data;
3472 /* extract RLE header */
3473 xh_flags = qemu_get_byte(f);
3474 xh_len = qemu_get_be16(f);
3476 if (xh_flags != ENCODING_FLAG_XBZRLE) {
3477 error_report("Failed to load XBZRLE page - wrong compression!");
3478 return -1;
3481 if (xh_len > TARGET_PAGE_SIZE) {
3482 error_report("Failed to load XBZRLE page - len overflow!");
3483 return -1;
3485 loaded_data = XBZRLE.decoded_buf;
3486 /* load data and decode */
3487 /* it can change loaded_data to point to an internal buffer */
3488 qemu_get_buffer_in_place(f, &loaded_data, xh_len);
3490 /* decode RLE */
3491 if (xbzrle_decode_buffer(loaded_data, xh_len, host,
3492 TARGET_PAGE_SIZE) == -1) {
3493 error_report("Failed to load XBZRLE page - decode error!");
3494 return -1;
3497 return 0;
3501 * ram_block_from_stream: read a RAMBlock id from the migration stream
3503 * Must be called from within a rcu critical section.
3505 * Returns a pointer from within the RCU-protected ram_list.
3507 * @mis: the migration incoming state pointer
3508 * @f: QEMUFile where to read the data from
3509 * @flags: Page flags (mostly to see if it's a continuation of previous block)
3510 * @channel: the channel we're using
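/*
 * When RAM_SAVE_FLAG_CONTINUE is set the page belongs to the same block as
 * the previous page on this channel, so no block id follows in the stream
 * and the cached last_recv_block for the channel is reused.
 */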
3512 static inline RAMBlock *ram_block_from_stream(MigrationIncomingState *mis,
3513 QEMUFile *f, int flags,
3514 int channel)
3516 RAMBlock *block = mis->last_recv_block[channel];
3517 char id[256];
3518 uint8_t len;
3520 if (flags & RAM_SAVE_FLAG_CONTINUE) {
3521 if (!block) {
3522 error_report("Ack, bad migration stream!");
3523 return NULL;
3525 return block;
3528 len = qemu_get_byte(f);
3529 qemu_get_buffer(f, (uint8_t *)id, len);
3530 id[len] = 0;
3532 block = qemu_ram_block_by_name(id);
3533 if (!block) {
3534 error_report("Can't find block %s", id);
3535 return NULL;
3538 if (ramblock_is_ignored(block)) {
3539 error_report("block %s should not be migrated !", id);
3540 return NULL;
3543 mis->last_recv_block[channel] = block;
3545 return block;
3548 static inline void *host_from_ram_block_offset(RAMBlock *block,
3549 ram_addr_t offset)
3551 if (!offset_in_ramblock(block, offset)) {
3552 return NULL;
3555 return block->host + offset;
3558 static void *host_page_from_ram_block_offset(RAMBlock *block,
3559 ram_addr_t offset)
3561 /* Note: Explicitly no check against offset_in_ramblock(). */
3562 return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block->host + offset),
3563 block->page_size);
3566 static ram_addr_t host_page_offset_from_ram_block_offset(RAMBlock *block,
3567 ram_addr_t offset)
3569 return ((uintptr_t)block->host + offset) & (block->page_size - 1);
3572 static inline void *colo_cache_from_block_offset(RAMBlock *block,
3573 ram_addr_t offset, bool record_bitmap)
3575 if (!offset_in_ramblock(block, offset)) {
3576 return NULL;
3578 if (!block->colo_cache) {
3579 error_report("%s: colo_cache is NULL in block :%s",
3580 __func__, block->idstr);
3581 return NULL;
3585 * During a colo checkpoint, we need a bitmap of these migrated pages.
3586 * It helps us decide which pages in the ram cache should be flushed
3587 * into the VM's RAM later.
3589 if (record_bitmap &&
3590 !test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
3591 ram_state->migration_dirty_pages++;
3593 return block->colo_cache + offset;
3597 * ram_handle_compressed: handle the zero page case
3599 * If a page (or a whole RDMA chunk) has been
3600 * determined to be zero, then zap it.
3602 * @host: host address for the zero page
3603 * @ch: what the page is filled from. We only support zero
3604 * @size: size of the zero page
3606 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
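/*
 * Only touch the memory when it is not already zero: this avoids needlessly
 * allocating and dirtying pages on the destination for the common all-zero
 * case.
 */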
3608 if (ch != 0 || !buffer_is_zero(host, size)) {
3609 memset(host, ch, size);
3613 /* return the size after decompression, or negative value on error */
3614 static int
3615 qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
3616 const uint8_t *source, size_t source_len)
3618 int err;
3620 err = inflateReset(stream);
3621 if (err != Z_OK) {
3622 return -1;
3625 stream->avail_in = source_len;
3626 stream->next_in = (uint8_t *)source;
3627 stream->avail_out = dest_len;
3628 stream->next_out = dest;
3630 err = inflate(stream, Z_NO_FLUSH);
3631 if (err != Z_STREAM_END) {
3632 return -1;
3635 return stream->total_out;
3638 static void *do_data_decompress(void *opaque)
3640 DecompressParam *param = opaque;
3641 unsigned long pagesize;
3642 uint8_t *des;
3643 int len, ret;
3645 qemu_mutex_lock(&param->mutex);
3646 while (!param->quit) {
3647 if (param->des) {
3648 des = param->des;
3649 len = param->len;
3650 param->des = 0;
3651 qemu_mutex_unlock(&param->mutex);
3653 pagesize = TARGET_PAGE_SIZE;
3655 ret = qemu_uncompress_data(&param->stream, des, pagesize,
3656 param->compbuf, len);
3657 if (ret < 0 && migrate_get_current()->decompress_error_check) {
3658 error_report("decompress data failed");
3659 qemu_file_set_error(decomp_file, ret);
3662 qemu_mutex_lock(&decomp_done_lock);
3663 param->done = true;
3664 qemu_cond_signal(&decomp_done_cond);
3665 qemu_mutex_unlock(&decomp_done_lock);
3667 qemu_mutex_lock(&param->mutex);
3668 } else {
3669 qemu_cond_wait(&param->cond, &param->mutex);
3672 qemu_mutex_unlock(&param->mutex);
3674 return NULL;
3677 static int wait_for_decompress_done(void)
3679 int idx, thread_count;
3681 if (!migrate_use_compression()) {
3682 return 0;
3685 thread_count = migrate_decompress_threads();
3686 qemu_mutex_lock(&decomp_done_lock);
3687 for (idx = 0; idx < thread_count; idx++) {
3688 while (!decomp_param[idx].done) {
3689 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3692 qemu_mutex_unlock(&decomp_done_lock);
3693 return qemu_file_get_error(decomp_file);
3696 static void compress_threads_load_cleanup(void)
3698 int i, thread_count;
3700 if (!migrate_use_compression()) {
3701 return;
3703 thread_count = migrate_decompress_threads();
3704 for (i = 0; i < thread_count; i++) {
3706 * we use it as an indicator of whether the thread is
3707 * properly init'd or not
3709 if (!decomp_param[i].compbuf) {
3710 break;
3713 qemu_mutex_lock(&decomp_param[i].mutex);
3714 decomp_param[i].quit = true;
3715 qemu_cond_signal(&decomp_param[i].cond);
3716 qemu_mutex_unlock(&decomp_param[i].mutex);
3718 for (i = 0; i < thread_count; i++) {
3719 if (!decomp_param[i].compbuf) {
3720 break;
3723 qemu_thread_join(decompress_threads + i);
3724 qemu_mutex_destroy(&decomp_param[i].mutex);
3725 qemu_cond_destroy(&decomp_param[i].cond);
3726 inflateEnd(&decomp_param[i].stream);
3727 g_free(decomp_param[i].compbuf);
3728 decomp_param[i].compbuf = NULL;
3730 g_free(decompress_threads);
3731 g_free(decomp_param);
3732 decompress_threads = NULL;
3733 decomp_param = NULL;
3734 decomp_file = NULL;
3737 static int compress_threads_load_setup(QEMUFile *f)
3739 int i, thread_count;
3741 if (!migrate_use_compression()) {
3742 return 0;
3745 thread_count = migrate_decompress_threads();
3746 decompress_threads = g_new0(QemuThread, thread_count);
3747 decomp_param = g_new0(DecompressParam, thread_count);
3748 qemu_mutex_init(&decomp_done_lock);
3749 qemu_cond_init(&decomp_done_cond);
3750 decomp_file = f;
3751 for (i = 0; i < thread_count; i++) {
3752 if (inflateInit(&decomp_param[i].stream) != Z_OK) {
3753 goto exit;
3756 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
3757 qemu_mutex_init(&decomp_param[i].mutex);
3758 qemu_cond_init(&decomp_param[i].cond);
3759 decomp_param[i].done = true;
3760 decomp_param[i].quit = false;
3761 qemu_thread_create(decompress_threads + i, "decompress",
3762 do_data_decompress, decomp_param + i,
3763 QEMU_THREAD_JOINABLE);
3765 return 0;
3766 exit:
3767 compress_threads_load_cleanup();
3768 return -1;
3771 static void decompress_data_with_multi_threads(QEMUFile *f,
3772 void *host, int len)
3774 int idx, thread_count;
3776 thread_count = migrate_decompress_threads();
3777 QEMU_LOCK_GUARD(&decomp_done_lock);
3778 while (true) {
3779 for (idx = 0; idx < thread_count; idx++) {
3780 if (decomp_param[idx].done) {
3781 decomp_param[idx].done = false;
3782 qemu_mutex_lock(&decomp_param[idx].mutex);
3783 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
3784 decomp_param[idx].des = host;
3785 decomp_param[idx].len = len;
3786 qemu_cond_signal(&decomp_param[idx].cond);
3787 qemu_mutex_unlock(&decomp_param[idx].mutex);
3788 break;
3791 if (idx < thread_count) {
3792 break;
3793 } else {
3794 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3799 static void colo_init_ram_state(void)
3801 ram_state_init(&ram_state);
3805 * colo cache: this is for the secondary VM; we cache the whole
3806 * memory of the secondary VM. It is necessary to hold the global lock
3807 * to call this helper.
3809 int colo_init_ram_cache(void)
3811 RAMBlock *block;
3813 WITH_RCU_READ_LOCK_GUARD() {
3814 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3815 block->colo_cache = qemu_anon_ram_alloc(block->used_length,
3816 NULL, false, false);
3817 if (!block->colo_cache) {
3818 error_report("%s: Can't alloc memory for COLO cache of block %s,"
3819 "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
3820 block->used_length);
3821 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3822 if (block->colo_cache) {
3823 qemu_anon_ram_free(block->colo_cache, block->used_length);
3824 block->colo_cache = NULL;
3827 return -errno;
3829 if (!machine_dump_guest_core(current_machine)) {
3830 qemu_madvise(block->colo_cache, block->used_length,
3831 QEMU_MADV_DONTDUMP);
3837 * Record the dirty pages that were sent by the PVM; we use this dirty bitmap
3838 * to decide which pages in the cache should be flushed into the SVM's RAM. Here
3839 * we use the same name 'ram_bitmap' as for migration.
3841 if (ram_bytes_total()) {
3842 RAMBlock *block;
3844 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3845 unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
3846 block->bmap = bitmap_new(pages);
3850 colo_init_ram_state();
3851 return 0;
3854 /* TODO: duplicated with ram_init_bitmaps */
3855 void colo_incoming_start_dirty_log(void)
3857 RAMBlock *block = NULL;
3858 /* For memory_global_dirty_log_start below. */
3859 qemu_mutex_lock_iothread();
3860 qemu_mutex_lock_ramlist();
3862 memory_global_dirty_log_sync();
3863 WITH_RCU_READ_LOCK_GUARD() {
3864 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3865 ramblock_sync_dirty_bitmap(ram_state, block);
3866 /* Discard this dirty bitmap record */
3867 bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS);
3869 memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
3871 ram_state->migration_dirty_pages = 0;
3872 qemu_mutex_unlock_ramlist();
3873 qemu_mutex_unlock_iothread();
3876 /* It is necessary to hold the global lock to call this helper */
3877 void colo_release_ram_cache(void)
3879 RAMBlock *block;
3881 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
3882 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3883 g_free(block->bmap);
3884 block->bmap = NULL;
3887 WITH_RCU_READ_LOCK_GUARD() {
3888 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3889 if (block->colo_cache) {
3890 qemu_anon_ram_free(block->colo_cache, block->used_length);
3891 block->colo_cache = NULL;
3895 ram_state_cleanup(&ram_state);
3899 * ram_load_setup: Setup RAM for migration incoming side
3901 * Returns zero to indicate success and negative for error
3903 * @f: QEMUFile where to receive the data
3904 * @opaque: RAMState pointer
3906 static int ram_load_setup(QEMUFile *f, void *opaque)
3908 if (compress_threads_load_setup(f)) {
3909 return -1;
3912 xbzrle_load_setup();
3913 ramblock_recv_map_init();
3915 return 0;
3918 static int ram_load_cleanup(void *opaque)
3920 RAMBlock *rb;
3922 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
3923 qemu_ram_block_writeback(rb);
3926 xbzrle_load_cleanup();
3927 compress_threads_load_cleanup();
3929 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
3930 g_free(rb->receivedmap);
3931 rb->receivedmap = NULL;
3934 return 0;
3938 * ram_postcopy_incoming_init: allocate postcopy data structures
3940 * Returns 0 for success and negative if there was one error
3942 * @mis: current migration incoming state
3944 * Allocate data structures etc needed by incoming migration with
3945 * postcopy-ram. postcopy-ram's similarly named
3946 * postcopy_ram_incoming_init does the work.
3948 int ram_postcopy_incoming_init(MigrationIncomingState *mis)
3950 return postcopy_ram_incoming_init(mis);
3954 * ram_load_postcopy: load a page in postcopy case
3956 * Returns 0 for success or -errno in case of error
3958 * Called in postcopy mode by ram_load().
3959 * rcu_read_lock is taken prior to this being called.
3961 * @f: QEMUFile where to send the data
3962 * @channel: the channel to use for loading
3964 int ram_load_postcopy(QEMUFile *f, int channel)
3966 int flags = 0, ret = 0;
3967 bool place_needed = false;
3968 bool matches_target_page_size = false;
3969 MigrationIncomingState *mis = migration_incoming_get_current();
3970 PostcopyTmpPage *tmp_page = &mis->postcopy_tmp_pages[channel];
3972 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
3973 ram_addr_t addr;
3974 void *page_buffer = NULL;
3975 void *place_source = NULL;
3976 RAMBlock *block = NULL;
3977 uint8_t ch;
3978 int len;
3980 addr = qemu_get_be64(f);
3983 * If there is a qemu file error, we should stop here, since "addr"
3984 * may be invalid
3986 ret = qemu_file_get_error(f);
3987 if (ret) {
3988 break;
3991 flags = addr & ~TARGET_PAGE_MASK;
3992 addr &= TARGET_PAGE_MASK;
3994 trace_ram_load_postcopy_loop(channel, (uint64_t)addr, flags);
3995 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
3996 RAM_SAVE_FLAG_COMPRESS_PAGE)) {
3997 block = ram_block_from_stream(mis, f, flags, channel);
3998 if (!block) {
3999 ret = -EINVAL;
4000 break;
4004 * Relying on used_length is racy and can result in false positives.
4005 * We might place pages beyond used_length in case RAM was shrunk
4006 * while in postcopy, which is fine - trying to place via
4007 * UFFDIO_COPY/UFFDIO_ZEROPAGE will never segfault.
4009 if (!block->host || addr >= block->postcopy_length) {
4010 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
4011 ret = -EINVAL;
4012 break;
4014 tmp_page->target_pages++;
4015 matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
4017 * Postcopy requires that we place whole host pages atomically;
4018 * these may be huge pages for RAMBlocks that are backed by
4019 * hugetlbfs.
4020 * To make it atomic, the data is read into a temporary page
4021 * that's moved into place later.
4022 * The migration protocol uses, possibly smaller, target-pages
4023 * however the source ensures it always sends all the components
4024 * of a host page in one chunk.
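/*
 * For illustration (sizes are just an example): with a 2 MiB hugetlbfs block
 * and 4 KiB target pages, 512 target pages are accumulated in tmp_huge_page
 * and the whole host page is only placed once target_pages reaches
 * block->page_size / TARGET_PAGE_SIZE, i.e. on the 512th piece.
 */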
4026 page_buffer = tmp_page->tmp_huge_page +
4027 host_page_offset_from_ram_block_offset(block, addr);
4028 /* On the 1st target page of a host page, record the host page address */
4029 if (tmp_page->target_pages == 1) {
4030 tmp_page->host_addr =
4031 host_page_from_ram_block_offset(block, addr);
4032 } else if (tmp_page->host_addr !=
4033 host_page_from_ram_block_offset(block, addr)) {
4034 /* not the 1st TP within the HP */
4035 error_report("Non-same host page detected on channel %d: "
4036 "Target host page %p, received host page %p "
4037 "(rb %s offset 0x"RAM_ADDR_FMT" target_pages %d)",
4038 channel, tmp_page->host_addr,
4039 host_page_from_ram_block_offset(block, addr),
4040 block->idstr, addr, tmp_page->target_pages);
4041 ret = -EINVAL;
4042 break;
4046 * If it's the last part of a host page then we place the host
4047 * page
4049 if (tmp_page->target_pages ==
4050 (block->page_size / TARGET_PAGE_SIZE)) {
4051 place_needed = true;
4053 place_source = tmp_page->tmp_huge_page;
4056 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
4057 case RAM_SAVE_FLAG_ZERO:
4058 ch = qemu_get_byte(f);
4060 * We can skip setting page_buffer when
4061 * this is a zero page and (block->page_size == TARGET_PAGE_SIZE).
4063 if (ch || !matches_target_page_size) {
4064 memset(page_buffer, ch, TARGET_PAGE_SIZE);
4066 if (ch) {
4067 tmp_page->all_zero = false;
4069 break;
4071 case RAM_SAVE_FLAG_PAGE:
4072 tmp_page->all_zero = false;
4073 if (!matches_target_page_size) {
4074 /* For huge pages, we always use temporary buffer */
4075 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
4076 } else {
4078 * For small pages that matches target page size, we
4079 * avoid the qemu_file copy. Instead we directly use
4080 * the buffer of QEMUFile to place the page. Note: we
4081 * cannot do any QEMUFile operation before using that
4082 * buffer to make sure the buffer is valid when
4083 * placing the page.
4085 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
4086 TARGET_PAGE_SIZE);
4088 break;
4089 case RAM_SAVE_FLAG_COMPRESS_PAGE:
4090 tmp_page->all_zero = false;
4091 len = qemu_get_be32(f);
4092 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
4093 error_report("Invalid compressed data length: %d", len);
4094 ret = -EINVAL;
4095 break;
4097 decompress_data_with_multi_threads(f, page_buffer, len);
4098 break;
4100 case RAM_SAVE_FLAG_EOS:
4101 /* normal exit */
4102 multifd_recv_sync_main();
4103 break;
4104 default:
4105 error_report("Unknown combination of migration flags: 0x%x"
4106 " (postcopy mode)", flags);
4107 ret = -EINVAL;
4108 break;
4111 /* Got the whole host page, wait for decompress before placing. */
4112 if (place_needed) {
4113 ret |= wait_for_decompress_done();
4116 /* Detect for any possible file errors */
4117 if (!ret && qemu_file_get_error(f)) {
4118 ret = qemu_file_get_error(f);
4121 if (!ret && place_needed) {
4122 if (tmp_page->all_zero) {
4123 ret = postcopy_place_page_zero(mis, tmp_page->host_addr, block);
4124 } else {
4125 ret = postcopy_place_page(mis, tmp_page->host_addr,
4126 place_source, block);
4128 place_needed = false;
4129 postcopy_temp_page_reset(tmp_page);
4133 return ret;
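
/*
 * Illustration (editorial sketch, not compiled): for a hugetlbfs-backed
 * RAMBlock with 2 MiB host pages and 4 KiB target pages, the source sends
 * the 512 target pages of one host page back to back.  Each one lands at
 * its own offset inside tmp_page->tmp_huge_page, and only after the 512th
 * page (target_pages == block->page_size / TARGET_PAGE_SIZE) is the whole
 * host page placed atomically via postcopy_place_page() or
 * postcopy_place_page_zero().  Conceptually, the host-page boundary used
 * above is something like:
 *
 *     host_addr = block->host + QEMU_ALIGN_DOWN(addr, block->page_size);
 *
 * The exact helper is host_page_from_ram_block_offset(); the formula is
 * only meant to convey the alignment, not the implementation.
 */
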
static bool postcopy_is_advised(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
}

static bool postcopy_is_running(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
}
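
/*
 * Editorial note: both range checks above rely on the numeric ordering of
 * the PostcopyState enum - the incoming side only moves forward, so
 * "advised" means any state at or after POSTCOPY_INCOMING_ADVISE and
 * "running" means any state at or after POSTCOPY_INCOMING_LISTENING, in
 * both cases strictly before POSTCOPY_INCOMING_END.
 */
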
/*
 * Flush the content of the RAM cache into the SVM's memory.
 * Only flush the pages that have been dirtied by the PVM, the SVM, or both.
 */
void colo_flush_ram_cache(void)
{
    RAMBlock *block = NULL;
    void *dst_host;
    void *src_host;
    unsigned long offset = 0;

    memory_global_dirty_log_sync();
    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            ramblock_sync_dirty_bitmap(ram_state, block);
        }
    }

    trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
    WITH_RCU_READ_LOCK_GUARD() {
        block = QLIST_FIRST_RCU(&ram_list.blocks);

        while (block) {
            unsigned long num = 0;

            offset = colo_bitmap_find_dirty(ram_state, block, offset, &num);
            if (!offset_in_ramblock(block,
                                    ((ram_addr_t)offset) << TARGET_PAGE_BITS)) {
                offset = 0;
                num = 0;
                block = QLIST_NEXT_RCU(block, next);
            } else {
                unsigned long i = 0;

                for (i = 0; i < num; i++) {
                    migration_bitmap_clear_dirty(ram_state, block, offset + i);
                }
                dst_host = block->host
                         + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
                src_host = block->colo_cache
                         + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
                memcpy(dst_host, src_host, TARGET_PAGE_SIZE * num);
                offset += num;
            }
        }
    }
    trace_colo_flush_ram_cache_end();
}
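
/*
 * Illustration (editorial sketch): colo_bitmap_find_dirty() returns the
 * page index of the next run of dirty pages in "block" and its length in
 * "num"; the loop above then copies that run in one memcpy from the COLO
 * cache into guest RAM.  For example, with TARGET_PAGE_BITS == 12, a run
 * starting at page offset 0x30 with num == 4 copies 16 KiB starting at
 * byte offset 0x30000:
 *
 *     dst = block->host       + ((ram_addr_t)0x30 << TARGET_PAGE_BITS);
 *     src = block->colo_cache + ((ram_addr_t)0x30 << TARGET_PAGE_BITS);
 *     memcpy(dst, src, 4 * TARGET_PAGE_SIZE);
 *
 * When the returned offset falls outside the block, the walk resets the
 * offset and moves on to the next RAMBlock.
 */
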
/**
 * ram_load_precopy: load pages in precopy case
 *
 * Returns 0 for success or -errno in case of error
 *
 * Called in precopy mode by ram_load().
 * rcu_read_lock is taken prior to this being called.
 *
 * @f: QEMUFile to read the pages from
 */
static int ram_load_precopy(QEMUFile *f)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    int flags = 0, ret = 0, invalid_flags = 0, len = 0, i = 0;
    /* ADVISE is earlier; it shows the source has the postcopy capability on */
    bool postcopy_advised = postcopy_is_advised();
    if (!migrate_use_compression()) {
        invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
    }

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;
        void *host = NULL, *host_bak = NULL;
        uint8_t ch;

        /*
         * Yield periodically to let the main loop run, but an iteration of
         * the main loop is expensive, so only do it every so many iterations.
         */
        if ((i & 32767) == 0 && qemu_in_coroutine()) {
            aio_co_schedule(qemu_get_current_aio_context(),
                            qemu_coroutine_self());
            qemu_coroutine_yield();
        }
        i++;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & invalid_flags) {
            if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
                error_report("Received an unexpected compressed page");
            }

            ret = -EINVAL;
            break;
        }

        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
            RAMBlock *block = ram_block_from_stream(mis, f, flags,
                                                    RAM_CHANNEL_PRECOPY);

            host = host_from_ram_block_offset(block, addr);
            /*
             * After going into the COLO stage, we should not load pages
             * into the SVM's memory directly; we put them into colo_cache
             * first.
             * NOTE: We need to keep a copy of the SVM's RAM in colo_cache.
             * Previously, we copied all of this memory in the COLO
             * preparation stage, which required stopping the VM and was
             * time-consuming.  Here we optimize it by backing up every page
             * during the migration process while COLO is enabled.  Although
             * this affects the speed of the migration, it clearly reduces
             * the downtime compared to backing up all of the SVM's memory
             * in the COLO preparation stage.
             */
            if (migration_incoming_colo_enabled()) {
                if (migration_incoming_in_colo_state()) {
                    /* In COLO stage, put all pages into cache temporarily */
                    host = colo_cache_from_block_offset(block, addr, true);
                } else {
                    /*
                     * In the migration stage but before the COLO stage,
                     * put all pages into both the cache and the SVM's memory.
                     */
                    host_bak = colo_cache_from_block_offset(block, addr, false);
                }
            }
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            if (!migration_incoming_in_colo_state()) {
                ramblock_recv_bitmap_set(block, host);
            }

            trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
        }

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            /* Synchronize RAM block list */
            total_ram_bytes = addr;
            while (!ret && total_ram_bytes) {
                RAMBlock *block;
                char id[256];
                ram_addr_t length;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                block = qemu_ram_block_by_name(id);
                if (block && !qemu_ram_is_migratable(block)) {
                    error_report("block %s should not be migrated!", id);
                    ret = -EINVAL;
                } else if (block) {
                    if (length != block->used_length) {
                        Error *local_err = NULL;

                        ret = qemu_ram_resize(block, length,
                                              &local_err);
                        if (local_err) {
                            error_report_err(local_err);
                        }
                    }
                    /* For postcopy we need to check hugepage sizes match */
                    if (postcopy_advised && migrate_postcopy_ram() &&
                        block->page_size != qemu_host_page_size) {
                        uint64_t remote_page_size = qemu_get_be64(f);
                        if (remote_page_size != block->page_size) {
                            error_report("Mismatched RAM page size %s "
                                         "(local) %zd != %" PRId64,
                                         id, block->page_size,
                                         remote_page_size);
                            ret = -EINVAL;
                        }
                    }
                    if (migrate_ignore_shared()) {
                        hwaddr addr = qemu_get_be64(f);
                        if (ramblock_is_ignored(block) &&
                            block->mr->addr != addr) {
                            error_report("Mismatched GPAs for block %s "
                                         "%" PRId64 " != %" PRId64,
                                         id, (uint64_t)addr,
                                         (uint64_t)block->mr->addr);
                            ret = -EINVAL;
                        }
                    }
                    ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
                                          block->idstr);
                } else {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }

                total_ram_bytes -= length;
            }
            break;

        case RAM_SAVE_FLAG_ZERO:
            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_PAGE:
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
            decompress_data_with_multi_threads(f, host, len);
            break;

        case RAM_SAVE_FLAG_XBZRLE:
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            multifd_recv_sync_main();
            break;
        default:
            if (flags & RAM_SAVE_FLAG_HOOK) {
                ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
            } else {
                error_report("Unknown combination of migration flags: 0x%x",
                             flags);
                ret = -EINVAL;
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
        if (!ret && host_bak) {
            memcpy(host_bak, host, TARGET_PAGE_SIZE);
        }
    }

    ret |= wait_for_decompress_done();
    return ret;
}
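
/*
 * Editorial sketch of the RAM_SAVE_FLAG_MEM_SIZE record as read above.  The
 * page-aligned "addr" field of the header carries the total RAM size (the
 * sum of the used_length of the blocks that follow), and then one entry per
 * RAMBlock:
 *
 *     u8      len           length of the block id string
 *     u8[len] idstr         block name (NUL terminator added locally)
 *     be64    used_length   in bytes
 *     be64    page_size     only read when postcopy was advised,
 *                           postcopy-ram is enabled and the local block's
 *                           page size differs from qemu_host_page_size
 *     be64    GPA           only read when the ignore-shared capability
 *                           is enabled
 *
 * The loop ends once the advertised total has been consumed
 * (total_ram_bytes -= length until it reaches zero).
 */
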
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int ret = 0;
    static uint64_t seq_iter;
    /*
     * If the system is running in postcopy mode, page inserts into host
     * memory must be atomic.
     */
    bool postcopy_running = postcopy_is_running();

    seq_iter++;

    if (version_id != 4) {
        return -EINVAL;
    }

    /*
     * This RCU critical section can be very long running.
     * If RCU reclamations in this code start to become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
    WITH_RCU_READ_LOCK_GUARD() {
        if (postcopy_running) {
            /*
             * Note!  Here RAM_CHANNEL_PRECOPY is the precopy channel of
             * postcopy migration; we have another RAM_CHANNEL_POSTCOPY to
             * service fast page faults.
             */
            ret = ram_load_postcopy(f, RAM_CHANNEL_PRECOPY);
        } else {
            ret = ram_load_precopy(f);
        }
    }
    trace_ram_load_complete(ret, seq_iter);

    return ret;
}
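
/*
 * Illustration (editorial sketch): both load paths above decode the same
 * per-page header.  Because the offset is TARGET_PAGE_SIZE aligned, the
 * sender can pack the RAM_SAVE_FLAG_* bits into the low bits of a single
 * be64 word:
 *
 *     header = qemu_get_be64(f);
 *     flags  = header & ~TARGET_PAGE_MASK;    (low bits)
 *     offset = header & TARGET_PAGE_MASK;     (page-aligned offset)
 *
 * With 4 KiB target pages, a header of 0x123028 therefore means offset
 * 0x123000 with RAM_SAVE_FLAG_PAGE | RAM_SAVE_FLAG_CONTINUE, the latter
 * telling ram_block_from_stream() to keep using the previously announced
 * RAMBlock instead of re-reading its name from the stream.
 */
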
static bool ram_has_postcopy(void *opaque)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        if (ramblock_is_pmem(rb)) {
            info_report("Block: %s, host: %p is an nvdimm memory, postcopy "
                        "is not supported now!", rb->idstr, rb->host);
            return false;
        }
    }

    return migrate_postcopy_ram();
}

/* Sync all the dirty bitmaps with the destination VM. */
static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
{
    RAMBlock *block;
    QEMUFile *file = s->to_dst_file;
    int ramblock_count = 0;

    trace_ram_dirty_bitmap_sync_start();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        qemu_savevm_send_recv_bitmap(file, block->idstr);
        trace_ram_dirty_bitmap_request(block->idstr);
        ramblock_count++;
    }

    trace_ram_dirty_bitmap_sync_wait();

    /* Wait until all the ramblocks' dirty bitmaps are synced */
    while (ramblock_count--) {
        qemu_sem_wait(&s->rp_state.rp_sem);
    }

    trace_ram_dirty_bitmap_sync_complete();

    return 0;
}
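
/*
 * Editorial note: the handshake above is asynchronous.  One RECV_BITMAP
 * request is queued per RAMBlock, and the return path posts
 * s->rp_state.rp_sem once for every bitmap that gets reloaded (see
 * ram_dirty_bitmap_reload_notify() below), so waiting on the semaphore
 * exactly ramblock_count times guarantees that every block has been
 * processed before resume preparation continues.
 */
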
static void ram_dirty_bitmap_reload_notify(MigrationState *s)
{
    qemu_sem_post(&s->rp_state.rp_sem);
}

/*
 * Read the received bitmap and invert it to serve as the initial dirty
 * bitmap.  This is only used when a postcopy migration is paused but
 * wants to resume from a middle point.
 */
int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
{
    int ret = -EINVAL;
    /* from_dst_file is always valid because we're within rp_thread */
    QEMUFile *file = s->rp_state.from_dst_file;
    unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
    uint64_t local_size = DIV_ROUND_UP(nbits, 8);
    uint64_t size, end_mark;

    trace_ram_dirty_bitmap_reload_begin(block->idstr);

    if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: incorrect state %s", __func__,
                     MigrationStatus_str(s->state));
        return -EINVAL;
    }

    /*
     * Note: see comments in ramblock_recv_bitmap_send() on why we
     * need the endianness conversion, and the padding.
     */
    local_size = ROUND_UP(local_size, 8);

    /* Add padding */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    size = qemu_get_be64(file);

    /* The size of the bitmap should match our ramblock */
    if (size != local_size) {
        error_report("%s: ramblock '%s' bitmap size mismatch "
                     "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
                     block->idstr, size, local_size);
        ret = -EINVAL;
        goto out;
    }

    size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
    end_mark = qemu_get_be64(file);

    ret = qemu_file_get_error(file);
    if (ret || size != local_size) {
        error_report("%s: read bitmap failed for ramblock '%s': %d"
                     " (size 0x%"PRIx64", got: 0x%"PRIx64")",
                     __func__, block->idstr, ret, local_size, size);
        ret = -EIO;
        goto out;
    }

    if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
        error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIx64,
                     __func__, block->idstr, end_mark);
        ret = -EINVAL;
        goto out;
    }

    /*
     * Endianness conversion.  We are during postcopy (though paused).
     * The dirty bitmap won't change.  We can directly modify it.
     */
    bitmap_from_le(block->bmap, le_bitmap, nbits);

    /*
     * What we received is the "received bitmap".  Invert it to become
     * the initial dirty bitmap for this ramblock.
     */
    bitmap_complement(block->bmap, block->bmap, nbits);

    /* Clear dirty bits of discarded ranges that we don't want to migrate. */
    ramblock_dirty_bitmap_clear_discarded_pages(block);

    /* We'll recalculate migration_dirty_pages in ram_state_resume_prepare(). */
    trace_ram_dirty_bitmap_reload_complete(block->idstr);

    /*
     * We have successfully synced the bitmap for the current ramblock.
     * If this is the last one to sync, we need to notify the main send
     * thread.
     */
    ram_dirty_bitmap_reload_notify(s);

    ret = 0;
out:
    g_free(le_bitmap);
    return ret;
}
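
/*
 * Worked example (editorial): consider a four-page RAMBlock whose received
 * bitmap has bits 0, 1 and 3 set and bit 2 clear (page 2 never made it to
 * the destination, e.g. it was in flight when the connection broke).  After
 * bitmap_complement() only bit 2 is set in block->bmap, so only page 2 is
 * resent once the postcopy migration resumes.  The little-endian wire
 * format, the 64-bit size prefix and the RAMBLOCK_RECV_BITMAP_ENDING end
 * mark are all validated above before the bitmap is trusted.
 */
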
static int ram_resume_prepare(MigrationState *s, void *opaque)
{
    RAMState *rs = *(RAMState **)opaque;
    int ret;

    ret = ram_dirty_bitmap_sync_all(s, rs);
    if (ret) {
        return ret;
    }

    ram_state_resume_prepare(rs, s->to_dst_file);

    return 0;
}

void postcopy_preempt_shutdown_file(MigrationState *s)
{
    qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS);
    qemu_fflush(s->postcopy_qemufile_src);
}

static SaveVMHandlers savevm_ram_handlers = {
    .save_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .has_postcopy = ram_has_postcopy,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .save_cleanup = ram_save_cleanup,
    .load_setup = ram_load_setup,
    .load_cleanup = ram_load_cleanup,
    .resume_prepare = ram_resume_prepare,
};

static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
                                      size_t old_size, size_t new_size)
{
    PostcopyState ps = postcopy_state_get();
    ram_addr_t offset;
    RAMBlock *rb = qemu_ram_block_from_host(host, false, &offset);
    Error *err = NULL;

    if (ramblock_is_ignored(rb)) {
        return;
    }

    if (!migration_is_idle()) {
        /*
         * Precopy code on the source cannot deal with the size of RAM blocks
         * changing at random points in time - especially after sending the
         * RAM block sizes in the migration stream, they must no longer change.
         * Abort and indicate a proper reason.
         */
        error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr);
        migration_cancel(err);
        error_free(err);
    }

    switch (ps) {
    case POSTCOPY_INCOMING_ADVISE:
        /*
         * Update what ram_postcopy_incoming_init()->init_range() does at the
         * time postcopy was advised.  Syncing RAM blocks with the source will
         * result in RAM resizes.
         */
        if (old_size < new_size) {
            if (ram_discard_range(rb->idstr, old_size, new_size - old_size)) {
                error_report("RAM block '%s' discard of resized RAM failed",
                             rb->idstr);
            }
        }
        rb->postcopy_length = new_size;
        break;
    case POSTCOPY_INCOMING_NONE:
    case POSTCOPY_INCOMING_RUNNING:
    case POSTCOPY_INCOMING_END:
        /*
         * Once our guest is running, postcopy no longer cares about
         * resizes.  When growing, the new memory was not available on the
         * source, so no handling is needed.
         */
        break;
    default:
        error_report("RAM block '%s' resized during postcopy state: %d",
                     rb->idstr, ps);
        exit(-1);
    }
}

static RAMBlockNotifier ram_mig_ram_notifier = {
    .ram_block_resized = ram_mig_ram_block_resized,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
    ram_block_notifier_add(&ram_mig_ram_notifier);
}
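
/*
 * Editorial note: register_savevm_live() registers the handlers above as
 * the "ram" section with version 4, which is exactly what the version_id
 * check in ram_load() accepts; the two must be bumped together if the
 * stream format ever changes.  The RAMBlock notifier catches RAM blocks
 * being resized while a migration is active or postcopy has been advised
 * (see ram_mig_ram_block_resized() above).
 */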