/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/madvise.h"
#include "qemu/main-loop.h"
#include "ram-compress.h"
#include "migration.h"
#include "migration-stats.h"
#include "migration/register.h"
#include "migration/misc.h"
#include "qemu-file.h"
#include "postcopy-ram.h"
#include "page_cache.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-types-migration.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/qmp/qerror.h"
#include "exec/ram_addr.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "migration/colo.h"
#include "sysemu/cpu-throttle.h"
#include "sysemu/runstate.h"
#include "sysemu/dirtylimit.h"
#include "sysemu/kvm.h"
#include "hw/boards.h" /* for machine_dump_guest_core() */

#if defined(__linux__)
#include "qemu/userfaultfd.h"
#endif /* defined(__linux__) */
/***********************************************************/
/* ram save/restore */

/*
 * RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS; it
 * worked for pages that were filled with the same char.  We switched
 * it to only search for the zero value, and renamed it to avoid
 * confusion with RAM_SAVE_FLAG_COMPRESS_PAGE.
 *
 * RAM_SAVE_FLAG_FULL was obsoleted in 2009, so it can be reused now.
 */
#define RAM_SAVE_FLAG_FULL     0x01
#define RAM_SAVE_FLAG_ZERO     0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in rdma.h for RAM_SAVE_FLAG_HOOK */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
#define RAM_SAVE_FLAG_MULTIFD_FLUSH    0x200
/* We can't use any flag that is bigger than 0x200 */
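/*
 * Illustrative note (not in the original source): these flags are OR'ed
 * into the low bits of the page offset in each page header on the wire,
 * which is safe because offsets are TARGET_PAGE_SIZE aligned.  For example,
 * a header word of (0x42000 | RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_CONTINUE)
 * describes a zero page at offset 0x42000 of the same block as the
 * previously sent page.
 */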
XBZRLECacheStats xbzrle_counters;

/* used by the search for pages to send */
struct PageSearchStatus {
    /* The migration channel used for a specific host page */
    QEMUFile *pss_channel;
    /* Last block from where we have sent data */
    RAMBlock *last_sent_block;
    /* Current block being searched */
    RAMBlock *block;
    /* Current page to search from */
    unsigned long page;
    /* Set once we wrap around */
    bool complete_round;
    /* Whether we're sending a host page */
    bool host_page_sending;
    /* The start/end of current host page.  Invalid if host_page_sending==false */
    unsigned long host_page_start;
    unsigned long host_page_end;
};
typedef struct PageSearchStatus PageSearchStatus;

/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
    /* it will store a page full of zeros */
    uint8_t *zero_target_page;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
} XBZRLE;

static void XBZRLE_cache_lock(void)
{
    if (migrate_xbzrle()) {
        qemu_mutex_lock(&XBZRLE.lock);
    }
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_xbzrle()) {
        qemu_mutex_unlock(&XBZRLE.lock);
    }
}
/**
 * xbzrle_cache_resize: resize the xbzrle cache
 *
 * This function is called from migrate_params_apply in the main
 * thread, possibly while a migration is in progress.  A running
 * migration may be using the cache and might finish during this call,
 * hence changes to the cache are protected by XBZRLE.lock.
 *
 * Returns 0 for success or -1 for error
 *
 * @new_size: new cache size
 * @errp: set *errp if the check failed, with reason
 */
int xbzrle_cache_resize(uint64_t new_size, Error **errp)
{
    PageCache *new_cache;
    int64_t ret = 0;

    /* Check for truncation */
    if (new_size != (size_t)new_size) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return -1;
    }

    if (new_size == migrate_xbzrle_cache_size()) {
        /* nothing to do */
        return 0;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
        if (!new_cache) {
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }
out:
    XBZRLE_cache_unlock();
    return ret;
}
static bool postcopy_preempt_active(void)
{
    return migrate_postcopy_preempt() && migration_in_postcopy();
}

bool migrate_ram_is_ignored(RAMBlock *block)
{
    return !qemu_ram_is_migratable(block) ||
           (migrate_ignore_shared() && qemu_ram_is_shared(block)
            && qemu_ram_is_named_file(block));
}

#undef RAMBLOCK_FOREACH

int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        ret = func(block, opaque);
        if (ret) {
            break;
        }
    }
    return ret;
}

static void ramblock_recv_map_init(void)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        assert(!rb->receivedmap);
        rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
    }
}

int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
{
    return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
                    rb->receivedmap);
}

bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
{
    return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
}

void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
{
    set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
}

void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
                                    size_t nr)
{
    bitmap_set_atomic(rb->receivedmap,
                      ramblock_recv_bitmap_offset(host_addr, rb),
                      nr);
}
#define  RAMBLOCK_RECV_BITMAP_ENDING  (0x0123456789abcdefULL)

/*
 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
 *
 * Returns >0 if success with sent bytes, or <0 if error.
 */
int64_t ramblock_recv_bitmap_send(QEMUFile *file,
                                  const char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);
    unsigned long *le_bitmap, nbits;
    uint64_t size;

    if (!block) {
        error_report("%s: invalid block name: %s", __func__, block_name);
        return -1;
    }

    nbits = block->postcopy_length >> TARGET_PAGE_BITS;

    /*
     * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
     * machines we may need 4 more bytes for padding (see below
     * comment).  So extend it a bit beforehand.
     */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    /*
     * Always use little endian when sending the bitmap.  This is
     * required because the source and destination VMs may not be using
     * the same endianness.  (Note: big endian won't work.)
     */
    bitmap_to_le(le_bitmap, block->receivedmap, nbits);

    /* Size of the bitmap, in bytes */
    size = DIV_ROUND_UP(nbits, 8);

    /*
     * size is always aligned to 8 bytes for 64bit machines, but it
     * may not be true for 32bit machines.  We need this padding to
     * make sure the migration can survive even between 32bit and
     * 64bit machines.
     */
    size = ROUND_UP(size, 8);

    qemu_put_be64(file, size);
    qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
    /*
     * Mark as an end, in case the middle part is screwed up due to
     * some "mysterious" reason.
     */
    qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
    qemu_fflush(file);

    g_free(le_bitmap);

    if (qemu_file_get_error(file)) {
        return qemu_file_get_error(file);
    }

    return size + sizeof(size);
}
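/*
 * Illustrative wire layout for ramblock_recv_bitmap_send() (not in the
 * original source).  For a block with nbits received-page bits, the stream
 * carries:
 *
 *   8 bytes     be64 size (bitmap length in bytes, rounded up to 8)
 *   size bytes  little-endian bitmap
 *   8 bytes     be64 RAMBLOCK_RECV_BITMAP_ENDING marker
 *
 * and the function reports size + sizeof(size) bytes as sent.
 */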
/*
 * An outstanding page request, on the source, having been received
 * and queued
 */
struct RAMSrcPageRequest {
    RAMBlock *rb;
    hwaddr    offset;
    hwaddr    len;

    QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
};

/* State of RAM for migration */
struct RAMState {
    /*
     * PageSearchStatus structures for the channels when sending pages.
     * Protected by the bitmap_mutex.
     */
    PageSearchStatus pss[RAM_CHANNEL_MAX];
    /* UFFD file descriptor, used in 'write-tracking' migration */
    int uffdio_fd;
    /* total ram size in bytes */
    uint64_t ram_bytes_total;
    /* Last block that we have visited searching for dirty pages */
    RAMBlock *last_seen_block;
    /* Last dirty target page we have sent */
    ram_addr_t last_page;
    /* last ram version we have seen */
    uint32_t last_version;
    /* How many times we have dirty too many pages */
    int dirty_rate_high_cnt;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
    /* bytes transferred at start_time */
    uint64_t bytes_xfer_prev;
    /* number of dirty pages since start_time */
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;
    /* Amount of xbzrle pages since the beginning of the period */
    uint64_t xbzrle_pages_prev;
    /* Amount of xbzrle encoded bytes since the beginning of the period */
    uint64_t xbzrle_bytes_prev;
    /* Are we really using XBZRLE (e.g., after the first round). */
    bool xbzrle_started;
    /* Are we on the last stage of migration */
    bool last_stage;
    /* compression statistics since the beginning of the period */
    /* number of times there was no free thread to compress data */
    uint64_t compress_thread_busy_prev;
    /* amount of bytes after compression */
    uint64_t compressed_size_prev;
    /* amount of compressed pages */
    uint64_t compress_pages_prev;

    /* total handled target pages at the beginning of period */
    uint64_t target_page_count_prev;
    /* total handled target pages since start */
    uint64_t target_page_count;
    /* number of dirty bits in the bitmap */
    uint64_t migration_dirty_pages;
    /*
     * Protects:
     * - dirty/clear bitmap
     * - migration_dirty_pages
     * - pss structures
     */
    QemuMutex bitmap_mutex;
    /* The RAMBlock used in the last src_page_requests */
    RAMBlock *last_req_rb;
    /* Queue of outstanding page requests from the destination */
    QemuMutex src_page_req_mutex;
    QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;

    /*
     * This is only used when postcopy is in recovery phase, to communicate
     * between the migration thread and the return path thread on dirty
     * bitmap synchronizations.  This field is unused in other stages of
     * the migration.
     */
    unsigned int postcopy_bmap_sync_requested;
};
typedef struct RAMState RAMState;

static RAMState *ram_state;

static NotifierWithReturnList precopy_notifier_list;
/* Whether postcopy has queued requests? */
static bool postcopy_has_request(RAMState *rs)
{
    return !QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests);
}

void precopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&precopy_notifier_list);
}

void precopy_add_notifier(NotifierWithReturn *n)
{
    notifier_with_return_list_add(&precopy_notifier_list, n);
}

void precopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int precopy_notify(PrecopyNotifyReason reason, Error **errp)
{
    PrecopyNotifyData pnd;
    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
}

uint64_t ram_bytes_remaining(void)
{
    return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
                       0;
}

void ram_transferred_add(uint64_t bytes)
{
    if (runstate_is_running()) {
        stat64_add(&mig_stats.precopy_bytes, bytes);
    } else if (migration_in_postcopy()) {
        stat64_add(&mig_stats.postcopy_bytes, bytes);
    } else {
        stat64_add(&mig_stats.downtime_bytes, bytes);
    }
    stat64_add(&mig_stats.transferred, bytes);
}

struct MigrationOps {
    int (*ram_save_target_page)(RAMState *rs, PageSearchStatus *pss);
};
typedef struct MigrationOps MigrationOps;

MigrationOps *migration_ops;

static int ram_save_host_page_urgent(PageSearchStatus *pss);
/* NOTE: page is the PFN not real ram_addr_t. */
static void pss_init(PageSearchStatus *pss, RAMBlock *rb, ram_addr_t page)
{
    pss->block = rb;
    pss->page = page;
    pss->complete_round = false;
}

/*
 * Check whether two PSSs are actively sending the same page.  Return true
 * if it is, false otherwise.
 */
static bool pss_overlap(PageSearchStatus *pss1, PageSearchStatus *pss2)
{
    return pss1->host_page_sending && pss2->host_page_sending &&
           (pss1->host_page_start == pss2->host_page_start);
}

/**
 * save_page_header: write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns the number of bytes written
 *
 * @pss: current PSS channel status
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(PageSearchStatus *pss, QEMUFile *f,
                               RAMBlock *block, ram_addr_t offset)
{
    size_t size, len;
    bool same_block = (block == pss->last_sent_block);

    if (same_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    qemu_put_be64(f, offset);
    size = 8;

    if (!same_block) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
        pss->last_sent_block = block;
    }
    return size;
}
/**
 * mig_throttle_guest_down: throttle down the guest
 *
 * Reduce amount of guest cpu execution to hopefully slow down memory
 * writes.  If guest dirty memory rate is reduced below the rate at
 * which we can transfer pages to the destination then we should be
 * able to complete migration.  Some workloads dirty memory way too
 * fast and will not effectively converge, even with auto-converge.
 */
static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
                                    uint64_t bytes_dirty_threshold)
{
    uint64_t pct_initial = migrate_cpu_throttle_initial();
    uint64_t pct_increment = migrate_cpu_throttle_increment();
    bool pct_tailslow = migrate_cpu_throttle_tailslow();
    int pct_max = migrate_max_cpu_throttle();

    uint64_t throttle_now = cpu_throttle_get_percentage();
    uint64_t cpu_now, cpu_ideal, throttle_inc;

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        if (!pct_tailslow) {
            throttle_inc = pct_increment;
        } else {
            /* Compute the ideal CPU percentage used by Guest, which may
             * make the dirty rate match the dirty rate threshold. */
            cpu_now = 100 - throttle_now;
            cpu_ideal = cpu_now * (bytes_dirty_threshold * 1.0 /
                        bytes_dirty_period);
            throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment);
        }
        cpu_throttle_set(MIN(throttle_now + throttle_inc, pct_max));
    }
}
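/*
 * Worked example (illustrative, not from the original source): with
 * throttle_now = 20%, the guest currently gets cpu_now = 80%.  If the
 * period dirtied twice as many bytes as the threshold allows
 * (bytes_dirty_threshold / bytes_dirty_period = 0.5), then
 * cpu_ideal = 80 * 0.5 = 40, and with tailslow the increment is
 * MIN(80 - 40, pct_increment) rather than a blind pct_increment step.
 */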
void mig_throttle_counter_reset(void)
{
    RAMState *rs = ram_state;

    rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    rs->num_dirty_pages_period = 0;
    rs->bytes_xfer_prev = stat64_get(&mig_stats.transferred);
}
571 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
573 * @rs: current RAM state
574 * @current_addr: address for the zero page
576 * Update the xbzrle cache to reflect a page that's been sent as all 0.
577 * The important thing is that a stale (not-yet-0'd) page be replaced
579 * As a bonus, if the page wasn't in the cache it gets added so that
580 * when a small write is made into the 0'd page it gets XBZRLE sent.
582 static void xbzrle_cache_zero_page(RAMState
*rs
, ram_addr_t current_addr
)
584 /* We don't care if this fails to allocate a new cache page
585 * as long as it updated an old one */
586 cache_insert(XBZRLE
.cache
, current_addr
, XBZRLE
.zero_target_page
,
587 stat64_get(&mig_stats
.dirty_sync_count
));
590 #define ENCODING_FLAG_XBZRLE 0x1
593 * save_xbzrle_page: compress and send current page
595 * Returns: 1 means that we wrote the page
596 * 0 means that page is identical to the one already sent
597 * -1 means that xbzrle would be longer than normal
599 * @rs: current RAM state
600 * @pss: current PSS channel
601 * @current_data: pointer to the address of the page contents
602 * @current_addr: addr of the page
603 * @block: block that contains the page we want to send
604 * @offset: offset inside the block for the page
606 static int save_xbzrle_page(RAMState
*rs
, PageSearchStatus
*pss
,
607 uint8_t **current_data
, ram_addr_t current_addr
,
608 RAMBlock
*block
, ram_addr_t offset
)
610 int encoded_len
= 0, bytes_xbzrle
;
611 uint8_t *prev_cached_page
;
612 QEMUFile
*file
= pss
->pss_channel
;
613 uint64_t generation
= stat64_get(&mig_stats
.dirty_sync_count
);
615 if (!cache_is_cached(XBZRLE
.cache
, current_addr
, generation
)) {
616 xbzrle_counters
.cache_miss
++;
617 if (!rs
->last_stage
) {
618 if (cache_insert(XBZRLE
.cache
, current_addr
, *current_data
,
622 /* update *current_data when the page has been
623 inserted into cache */
624 *current_data
= get_cached_data(XBZRLE
.cache
, current_addr
);
631 * Reaching here means the page has hit the xbzrle cache, no matter what
632 * encoding result it is (normal encoding, overflow or skipping the page),
633 * count the page as encoded. This is used to calculate the encoding rate.
635 * Example: 2 pages (8KB) being encoded, first page encoding generates 2KB,
636 * 2nd page turns out to be skipped (i.e. no new bytes written to the
637 * page), the overall encoding rate will be 8KB / 2KB = 4, which has the
638 * skipped page included. In this way, the encoding rate can tell if the
639 * guest page is good for xbzrle encoding.
641 xbzrle_counters
.pages
++;
642 prev_cached_page
= get_cached_data(XBZRLE
.cache
, current_addr
);
644 /* save current buffer into memory */
645 memcpy(XBZRLE
.current_buf
, *current_data
, TARGET_PAGE_SIZE
);
647 /* XBZRLE encoding (if there is no overflow) */
648 encoded_len
= xbzrle_encode_buffer(prev_cached_page
, XBZRLE
.current_buf
,
649 TARGET_PAGE_SIZE
, XBZRLE
.encoded_buf
,
653 * Update the cache contents, so that it corresponds to the data
654 * sent, in all cases except where we skip the page.
656 if (!rs
->last_stage
&& encoded_len
!= 0) {
657 memcpy(prev_cached_page
, XBZRLE
.current_buf
, TARGET_PAGE_SIZE
);
659 * In the case where we couldn't compress, ensure that the caller
660 * sends the data from the cache, since the guest might have
661 * changed the RAM since we copied it.
663 *current_data
= prev_cached_page
;
666 if (encoded_len
== 0) {
667 trace_save_xbzrle_page_skipping();
669 } else if (encoded_len
== -1) {
670 trace_save_xbzrle_page_overflow();
671 xbzrle_counters
.overflow
++;
672 xbzrle_counters
.bytes
+= TARGET_PAGE_SIZE
;
676 /* Send XBZRLE based compressed page */
677 bytes_xbzrle
= save_page_header(pss
, pss
->pss_channel
, block
,
678 offset
| RAM_SAVE_FLAG_XBZRLE
);
679 qemu_put_byte(file
, ENCODING_FLAG_XBZRLE
);
680 qemu_put_be16(file
, encoded_len
);
681 qemu_put_buffer(file
, XBZRLE
.encoded_buf
, encoded_len
);
682 bytes_xbzrle
+= encoded_len
+ 1 + 2;
684 * Like compressed_size (please see update_compress_thread_counts),
685 * the xbzrle encoded bytes don't count the 8 byte header with
686 * RAM_SAVE_FLAG_CONTINUE.
688 xbzrle_counters
.bytes
+= bytes_xbzrle
- 8;
689 ram_transferred_add(bytes_xbzrle
);
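/*
 * Illustrative accounting note (not from the original source): an XBZRLE
 * page on the wire consists of the page header, one ENCODING_FLAG_XBZRLE
 * byte, a be16 encoded length, and then encoded_len bytes of delta data,
 * which is why bytes_xbzrle adds encoded_len + 1 + 2 on top of the header
 * size returned by save_page_header().
 */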
695 * pss_find_next_dirty: find the next dirty page of current ramblock
697 * This function updates pss->page to point to the next dirty page index
698 * within the ramblock to migrate, or the end of ramblock when nothing
699 * found. Note that when pss->host_page_sending==true it means we're
700 * during sending a host page, so we won't look for dirty page that is
701 * outside the host page boundary.
703 * @pss: the current page search status
705 static void pss_find_next_dirty(PageSearchStatus
*pss
)
707 RAMBlock
*rb
= pss
->block
;
708 unsigned long size
= rb
->used_length
>> TARGET_PAGE_BITS
;
709 unsigned long *bitmap
= rb
->bmap
;
711 if (migrate_ram_is_ignored(rb
)) {
712 /* Points directly to the end, so we know no dirty page */
718 * If during sending a host page, only look for dirty pages within the
719 * current host page being send.
721 if (pss
->host_page_sending
) {
722 assert(pss
->host_page_end
);
723 size
= MIN(size
, pss
->host_page_end
);
726 pss
->page
= find_next_bit(bitmap
, size
, pss
->page
);
729 static void migration_clear_memory_region_dirty_bitmap(RAMBlock
*rb
,
735 if (!rb
->clear_bmap
|| !clear_bmap_test_and_clear(rb
, page
)) {
739 shift
= rb
->clear_bmap_shift
;
741 * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
742 * can make things easier sometimes since then start address
743 * of the small chunk will always be 64 pages aligned so the
744 * bitmap will always be aligned to unsigned long. We should
745 * even be able to remove this restriction but I'm simply
750 size
= 1ULL << (TARGET_PAGE_BITS
+ shift
);
751 start
= QEMU_ALIGN_DOWN((ram_addr_t
)page
<< TARGET_PAGE_BITS
, size
);
752 trace_migration_bitmap_clear_dirty(rb
->idstr
, start
, size
, page
);
753 memory_region_clear_dirty_bitmap(rb
->mr
, start
, size
);
757 migration_clear_memory_region_dirty_bitmap_range(RAMBlock
*rb
,
759 unsigned long npages
)
761 unsigned long i
, chunk_pages
= 1UL << rb
->clear_bmap_shift
;
762 unsigned long chunk_start
= QEMU_ALIGN_DOWN(start
, chunk_pages
);
763 unsigned long chunk_end
= QEMU_ALIGN_UP(start
+ npages
, chunk_pages
);
766 * Clear pages from start to start + npages - 1, so the end boundary is
769 for (i
= chunk_start
; i
< chunk_end
; i
+= chunk_pages
) {
770 migration_clear_memory_region_dirty_bitmap(rb
, i
);
775 * colo_bitmap_find_diry:find contiguous dirty pages from start
777 * Returns the page offset within memory region of the start of the contiguout
780 * @rs: current RAM state
781 * @rb: RAMBlock where to search for dirty pages
782 * @start: page where we start the search
783 * @num: the number of contiguous dirty pages
786 unsigned long colo_bitmap_find_dirty(RAMState
*rs
, RAMBlock
*rb
,
787 unsigned long start
, unsigned long *num
)
789 unsigned long size
= rb
->used_length
>> TARGET_PAGE_BITS
;
790 unsigned long *bitmap
= rb
->bmap
;
791 unsigned long first
, next
;
795 if (migrate_ram_is_ignored(rb
)) {
799 first
= find_next_bit(bitmap
, size
, start
);
803 next
= find_next_zero_bit(bitmap
, size
, first
+ 1);
804 assert(next
>= first
);
809 static inline bool migration_bitmap_clear_dirty(RAMState
*rs
,
816 * Clear dirty bitmap if needed. This _must_ be called before we
817 * send any of the page in the chunk because we need to make sure
818 * we can capture further page content changes when we sync dirty
819 * log the next time. So as long as we are going to send any of
820 * the page in the chunk we clear the remote dirty bitmap for all.
821 * Clearing it earlier won't be a problem, but too late will.
823 migration_clear_memory_region_dirty_bitmap(rb
, page
);
825 ret
= test_and_clear_bit(page
, rb
->bmap
);
827 rs
->migration_dirty_pages
--;
833 static void dirty_bitmap_clear_section(MemoryRegionSection
*section
,
836 const hwaddr offset
= section
->offset_within_region
;
837 const hwaddr size
= int128_get64(section
->size
);
838 const unsigned long start
= offset
>> TARGET_PAGE_BITS
;
839 const unsigned long npages
= size
>> TARGET_PAGE_BITS
;
840 RAMBlock
*rb
= section
->mr
->ram_block
;
841 uint64_t *cleared_bits
= opaque
;
844 * We don't grab ram_state->bitmap_mutex because we expect to run
845 * only when starting migration or during postcopy recovery where
846 * we don't have concurrent access.
848 if (!migration_in_postcopy() && !migrate_background_snapshot()) {
849 migration_clear_memory_region_dirty_bitmap_range(rb
, start
, npages
);
851 *cleared_bits
+= bitmap_count_one_with_offset(rb
->bmap
, start
, npages
);
852 bitmap_clear(rb
->bmap
, start
, npages
);
856 * Exclude all dirty pages from migration that fall into a discarded range as
857 * managed by a RamDiscardManager responsible for the mapped memory region of
858 * the RAMBlock. Clear the corresponding bits in the dirty bitmaps.
860 * Discarded pages ("logically unplugged") have undefined content and must
861 * not get migrated, because even reading these pages for migration might
862 * result in undesired behavior.
864 * Returns the number of cleared bits in the RAMBlock dirty bitmap.
866 * Note: The result is only stable while migrating (precopy/postcopy).
868 static uint64_t ramblock_dirty_bitmap_clear_discarded_pages(RAMBlock
*rb
)
870 uint64_t cleared_bits
= 0;
872 if (rb
->mr
&& rb
->bmap
&& memory_region_has_ram_discard_manager(rb
->mr
)) {
873 RamDiscardManager
*rdm
= memory_region_get_ram_discard_manager(rb
->mr
);
874 MemoryRegionSection section
= {
876 .offset_within_region
= 0,
877 .size
= int128_make64(qemu_ram_get_used_length(rb
)),
880 ram_discard_manager_replay_discarded(rdm
, §ion
,
881 dirty_bitmap_clear_section
,
888 * Check if a host-page aligned page falls into a discarded range as managed by
889 * a RamDiscardManager responsible for the mapped memory region of the RAMBlock.
891 * Note: The result is only stable while migrating (precopy/postcopy).
893 bool ramblock_page_is_discarded(RAMBlock
*rb
, ram_addr_t start
)
895 if (rb
->mr
&& memory_region_has_ram_discard_manager(rb
->mr
)) {
896 RamDiscardManager
*rdm
= memory_region_get_ram_discard_manager(rb
->mr
);
897 MemoryRegionSection section
= {
899 .offset_within_region
= start
,
900 .size
= int128_make64(qemu_ram_pagesize(rb
)),
903 return !ram_discard_manager_is_populated(rdm
, §ion
);
908 /* Called with RCU critical section */
909 static void ramblock_sync_dirty_bitmap(RAMState
*rs
, RAMBlock
*rb
)
911 uint64_t new_dirty_pages
=
912 cpu_physical_memory_sync_dirty_bitmap(rb
, 0, rb
->used_length
);
914 rs
->migration_dirty_pages
+= new_dirty_pages
;
915 rs
->num_dirty_pages_period
+= new_dirty_pages
;
919 * ram_pagesize_summary: calculate all the pagesizes of a VM
921 * Returns a summary bitmap of the page sizes of all RAMBlocks
923 * For VMs with just normal pages this is equivalent to the host page
924 * size. If it's got some huge pages then it's the OR of all the
925 * different page sizes.
927 uint64_t ram_pagesize_summary(void)
930 uint64_t summary
= 0;
932 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
933 summary
|= block
->page_size
;
939 uint64_t ram_get_total_transferred_pages(void)
941 return stat64_get(&mig_stats
.normal_pages
) +
942 stat64_get(&mig_stats
.zero_pages
) +
943 compression_counters
.pages
+ xbzrle_counters
.pages
;
946 static void migration_update_rates(RAMState
*rs
, int64_t end_time
)
948 uint64_t page_count
= rs
->target_page_count
- rs
->target_page_count_prev
;
949 double compressed_size
;
951 /* calculate period counters */
952 stat64_set(&mig_stats
.dirty_pages_rate
,
953 rs
->num_dirty_pages_period
* 1000 /
954 (end_time
- rs
->time_last_bitmap_sync
));
960 if (migrate_xbzrle()) {
961 double encoded_size
, unencoded_size
;
963 xbzrle_counters
.cache_miss_rate
= (double)(xbzrle_counters
.cache_miss
-
964 rs
->xbzrle_cache_miss_prev
) / page_count
;
965 rs
->xbzrle_cache_miss_prev
= xbzrle_counters
.cache_miss
;
966 unencoded_size
= (xbzrle_counters
.pages
- rs
->xbzrle_pages_prev
) *
968 encoded_size
= xbzrle_counters
.bytes
- rs
->xbzrle_bytes_prev
;
969 if (xbzrle_counters
.pages
== rs
->xbzrle_pages_prev
|| !encoded_size
) {
970 xbzrle_counters
.encoding_rate
= 0;
972 xbzrle_counters
.encoding_rate
= unencoded_size
/ encoded_size
;
974 rs
->xbzrle_pages_prev
= xbzrle_counters
.pages
;
975 rs
->xbzrle_bytes_prev
= xbzrle_counters
.bytes
;
978 if (migrate_compress()) {
979 compression_counters
.busy_rate
= (double)(compression_counters
.busy
-
980 rs
->compress_thread_busy_prev
) / page_count
;
981 rs
->compress_thread_busy_prev
= compression_counters
.busy
;
983 compressed_size
= compression_counters
.compressed_size
-
984 rs
->compressed_size_prev
;
985 if (compressed_size
) {
986 double uncompressed_size
= (compression_counters
.pages
-
987 rs
->compress_pages_prev
) * TARGET_PAGE_SIZE
;
989 /* Compression-Ratio = Uncompressed-size / Compressed-size */
990 compression_counters
.compression_rate
=
991 uncompressed_size
/ compressed_size
;
993 rs
->compress_pages_prev
= compression_counters
.pages
;
994 rs
->compressed_size_prev
= compression_counters
.compressed_size
;
1000 * Enable dirty-limit to throttle down the guest
1002 static void migration_dirty_limit_guest(void)
1005 * dirty page rate quota for all vCPUs fetched from
1006 * migration parameter 'vcpu_dirty_limit'
1008 static int64_t quota_dirtyrate
;
1009 MigrationState
*s
= migrate_get_current();
1012 * If dirty limit already enabled and migration parameter
1013 * vcpu-dirty-limit untouched.
1015 if (dirtylimit_in_service() &&
1016 quota_dirtyrate
== s
->parameters
.vcpu_dirty_limit
) {
1020 quota_dirtyrate
= s
->parameters
.vcpu_dirty_limit
;
1023 * Set all vCPU a quota dirtyrate, note that the second
1024 * parameter will be ignored if setting all vCPU for the vm
1026 qmp_set_vcpu_dirty_limit(false, -1, quota_dirtyrate
, NULL
);
1027 trace_migration_dirty_limit_guest(quota_dirtyrate
);
1030 static void migration_trigger_throttle(RAMState
*rs
)
1032 uint64_t threshold
= migrate_throttle_trigger_threshold();
1033 uint64_t bytes_xfer_period
=
1034 stat64_get(&mig_stats
.transferred
) - rs
->bytes_xfer_prev
;
1035 uint64_t bytes_dirty_period
= rs
->num_dirty_pages_period
* TARGET_PAGE_SIZE
;
1036 uint64_t bytes_dirty_threshold
= bytes_xfer_period
* threshold
/ 100;
1038 /* During block migration the auto-converge logic incorrectly detects
1039 * that ram migration makes no progress. Avoid this by disabling the
1040 * throttling logic during the bulk phase of block migration. */
1041 if (blk_mig_bulk_active()) {
1046 * The following detection logic can be refined later. For now:
1047 * Check to see if the ratio between dirtied bytes and the approx.
1048 * amount of bytes that just got transferred since the last time
1049 * we were in this routine reaches the threshold. If that happens
1050 * twice, start or increase throttling.
1052 if ((bytes_dirty_period
> bytes_dirty_threshold
) &&
1053 (++rs
->dirty_rate_high_cnt
>= 2)) {
1054 rs
->dirty_rate_high_cnt
= 0;
1055 if (migrate_auto_converge()) {
1056 trace_migration_throttle();
1057 mig_throttle_guest_down(bytes_dirty_period
,
1058 bytes_dirty_threshold
);
1059 } else if (migrate_dirty_limit()) {
1060 migration_dirty_limit_guest();
1065 static void migration_bitmap_sync(RAMState
*rs
, bool last_stage
)
1070 stat64_add(&mig_stats
.dirty_sync_count
, 1);
1072 if (!rs
->time_last_bitmap_sync
) {
1073 rs
->time_last_bitmap_sync
= qemu_clock_get_ms(QEMU_CLOCK_REALTIME
);
1076 trace_migration_bitmap_sync_start();
1077 memory_global_dirty_log_sync(last_stage
);
1079 qemu_mutex_lock(&rs
->bitmap_mutex
);
1080 WITH_RCU_READ_LOCK_GUARD() {
1081 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
1082 ramblock_sync_dirty_bitmap(rs
, block
);
1084 stat64_set(&mig_stats
.dirty_bytes_last_sync
, ram_bytes_remaining());
1086 qemu_mutex_unlock(&rs
->bitmap_mutex
);
1088 memory_global_after_dirty_log_sync();
1089 trace_migration_bitmap_sync_end(rs
->num_dirty_pages_period
);
1091 end_time
= qemu_clock_get_ms(QEMU_CLOCK_REALTIME
);
1093 /* more than 1 second = 1000 millisecons */
1094 if (end_time
> rs
->time_last_bitmap_sync
+ 1000) {
1095 migration_trigger_throttle(rs
);
1097 migration_update_rates(rs
, end_time
);
1099 rs
->target_page_count_prev
= rs
->target_page_count
;
1101 /* reset period counters */
1102 rs
->time_last_bitmap_sync
= end_time
;
1103 rs
->num_dirty_pages_period
= 0;
1104 rs
->bytes_xfer_prev
= stat64_get(&mig_stats
.transferred
);
1106 if (migrate_events()) {
1107 uint64_t generation
= stat64_get(&mig_stats
.dirty_sync_count
);
1108 qapi_event_send_migration_pass(generation
);
1112 static void migration_bitmap_sync_precopy(RAMState
*rs
, bool last_stage
)
1114 Error
*local_err
= NULL
;
1117 * The current notifier usage is just an optimization to migration, so we
1118 * don't stop the normal migration process in the error case.
1120 if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC
, &local_err
)) {
1121 error_report_err(local_err
);
1125 migration_bitmap_sync(rs
, last_stage
);
1127 if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC
, &local_err
)) {
1128 error_report_err(local_err
);
1132 void ram_release_page(const char *rbname
, uint64_t offset
)
1134 if (!migrate_release_ram() || !migration_in_postcopy()) {
1138 ram_discard_range(rbname
, offset
, TARGET_PAGE_SIZE
);
1142 * save_zero_page_to_file: send the zero page to the file
1144 * Returns the size of data written to the file, 0 means the page is not
1147 * @pss: current PSS channel
1148 * @block: block that contains the page we want to send
1149 * @offset: offset inside the block for the page
1151 static int save_zero_page_to_file(PageSearchStatus
*pss
, QEMUFile
*file
,
1152 RAMBlock
*block
, ram_addr_t offset
)
1154 uint8_t *p
= block
->host
+ offset
;
1157 if (buffer_is_zero(p
, TARGET_PAGE_SIZE
)) {
1158 len
+= save_page_header(pss
, file
, block
, offset
| RAM_SAVE_FLAG_ZERO
);
1159 qemu_put_byte(file
, 0);
1161 ram_release_page(block
->idstr
, offset
);
1167 * save_zero_page: send the zero page to the stream
1169 * Returns the number of pages written.
1171 * @pss: current PSS channel
1172 * @block: block that contains the page we want to send
1173 * @offset: offset inside the block for the page
1175 static int save_zero_page(PageSearchStatus
*pss
, QEMUFile
*f
, RAMBlock
*block
,
1178 int len
= save_zero_page_to_file(pss
, f
, block
, offset
);
1181 stat64_add(&mig_stats
.zero_pages
, 1);
1182 ram_transferred_add(len
);
1189 * @pages: the number of pages written by the control path,
1191 * > 0 - number of pages written
1193 * Return true if the pages has been saved, otherwise false is returned.
1195 static bool control_save_page(PageSearchStatus
*pss
, RAMBlock
*block
,
1196 ram_addr_t offset
, int *pages
)
1200 ret
= rdma_control_save_page(pss
->pss_channel
, block
->offset
, offset
,
1202 if (ret
== RAM_SAVE_CONTROL_NOT_SUPP
) {
1206 if (ret
== RAM_SAVE_CONTROL_DELAYED
) {
1215 * directly send the page to the stream
1217 * Returns the number of pages written.
1219 * @pss: current PSS channel
1220 * @block: block that contains the page we want to send
1221 * @offset: offset inside the block for the page
1222 * @buf: the page to be sent
1223 * @async: send to page asyncly
1225 static int save_normal_page(PageSearchStatus
*pss
, RAMBlock
*block
,
1226 ram_addr_t offset
, uint8_t *buf
, bool async
)
1228 QEMUFile
*file
= pss
->pss_channel
;
1230 ram_transferred_add(save_page_header(pss
, pss
->pss_channel
, block
,
1231 offset
| RAM_SAVE_FLAG_PAGE
));
1233 qemu_put_buffer_async(file
, buf
, TARGET_PAGE_SIZE
,
1234 migrate_release_ram() &&
1235 migration_in_postcopy());
1237 qemu_put_buffer(file
, buf
, TARGET_PAGE_SIZE
);
1239 ram_transferred_add(TARGET_PAGE_SIZE
);
1240 stat64_add(&mig_stats
.normal_pages
, 1);
1245 * ram_save_page: send the given page to the stream
1247 * Returns the number of pages written.
1249 * >=0 - Number of pages written - this might legally be 0
1250 * if xbzrle noticed the page was the same.
1252 * @rs: current RAM state
1253 * @block: block that contains the page we want to send
1254 * @offset: offset inside the block for the page
1256 static int ram_save_page(RAMState
*rs
, PageSearchStatus
*pss
)
1260 bool send_async
= true;
1261 RAMBlock
*block
= pss
->block
;
1262 ram_addr_t offset
= ((ram_addr_t
)pss
->page
) << TARGET_PAGE_BITS
;
1263 ram_addr_t current_addr
= block
->offset
+ offset
;
1265 p
= block
->host
+ offset
;
1266 trace_ram_save_page(block
->idstr
, (uint64_t)offset
, p
);
1268 XBZRLE_cache_lock();
1269 if (rs
->xbzrle_started
&& !migration_in_postcopy()) {
1270 pages
= save_xbzrle_page(rs
, pss
, &p
, current_addr
,
1272 if (!rs
->last_stage
) {
1273 /* Can't send this cached data async, since the cache page
1274 * might get updated before it gets to the wire
1280 /* XBZRLE overflow or normal page */
1282 pages
= save_normal_page(pss
, block
, offset
, p
, send_async
);
1285 XBZRLE_cache_unlock();
1290 static int ram_save_multifd_page(QEMUFile
*file
, RAMBlock
*block
,
1293 if (multifd_queue_page(file
, block
, offset
) < 0) {
1296 stat64_add(&mig_stats
.normal_pages
, 1);
1302 update_compress_thread_counts(const CompressParam
*param
, int bytes_xmit
)
1304 ram_transferred_add(bytes_xmit
);
1306 if (param
->result
== RES_ZEROPAGE
) {
1307 stat64_add(&mig_stats
.zero_pages
, 1);
1311 /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
1312 compression_counters
.compressed_size
+= bytes_xmit
- 8;
1313 compression_counters
.pages
++;
1316 static bool save_page_use_compression(RAMState
*rs
);
1318 static int send_queued_data(CompressParam
*param
)
1320 PageSearchStatus
*pss
= &ram_state
->pss
[RAM_CHANNEL_PRECOPY
];
1321 MigrationState
*ms
= migrate_get_current();
1322 QEMUFile
*file
= ms
->to_dst_file
;
1325 RAMBlock
*block
= param
->block
;
1326 ram_addr_t offset
= param
->offset
;
1328 if (param
->result
== RES_NONE
) {
1332 assert(block
== pss
->last_sent_block
);
1334 if (param
->result
== RES_ZEROPAGE
) {
1335 assert(qemu_file_buffer_empty(param
->file
));
1336 len
+= save_page_header(pss
, file
, block
, offset
| RAM_SAVE_FLAG_ZERO
);
1337 qemu_put_byte(file
, 0);
1339 ram_release_page(block
->idstr
, offset
);
1340 } else if (param
->result
== RES_COMPRESS
) {
1341 assert(!qemu_file_buffer_empty(param
->file
));
1342 len
+= save_page_header(pss
, file
, block
,
1343 offset
| RAM_SAVE_FLAG_COMPRESS_PAGE
);
1344 len
+= qemu_put_qemu_file(file
, param
->file
);
1349 update_compress_thread_counts(param
, len
);
1354 static void ram_flush_compressed_data(RAMState
*rs
)
1356 if (!save_page_use_compression(rs
)) {
1360 flush_compressed_data(send_queued_data
);
1363 #define PAGE_ALL_CLEAN 0
1364 #define PAGE_TRY_AGAIN 1
1365 #define PAGE_DIRTY_FOUND 2
1367 * find_dirty_block: find the next dirty page and update any state
1368 * associated with the search process.
1371 * <0: An error happened
1372 * PAGE_ALL_CLEAN: no dirty page found, give up
1373 * PAGE_TRY_AGAIN: no dirty page found, retry for next block
1374 * PAGE_DIRTY_FOUND: dirty page found
1376 * @rs: current RAM state
1377 * @pss: data about the state of the current dirty page scan
1378 * @again: set to false if the search has scanned the whole of RAM
1380 static int find_dirty_block(RAMState
*rs
, PageSearchStatus
*pss
)
1382 /* Update pss->page for the next dirty bit in ramblock */
1383 pss_find_next_dirty(pss
);
1385 if (pss
->complete_round
&& pss
->block
== rs
->last_seen_block
&&
1386 pss
->page
>= rs
->last_page
) {
1388 * We've been once around the RAM and haven't found anything.
1391 return PAGE_ALL_CLEAN
;
1393 if (!offset_in_ramblock(pss
->block
,
1394 ((ram_addr_t
)pss
->page
) << TARGET_PAGE_BITS
)) {
1395 /* Didn't find anything in this RAM Block */
1397 pss
->block
= QLIST_NEXT_RCU(pss
->block
, next
);
1399 if (migrate_multifd() &&
1400 !migrate_multifd_flush_after_each_section()) {
1401 QEMUFile
*f
= rs
->pss
[RAM_CHANNEL_PRECOPY
].pss_channel
;
1402 int ret
= multifd_send_sync_main(f
);
1406 qemu_put_be64(f
, RAM_SAVE_FLAG_MULTIFD_FLUSH
);
1410 * If memory migration starts over, we will meet a dirtied page
1411 * which may still exists in compression threads's ring, so we
1412 * should flush the compressed data to make sure the new page
1413 * is not overwritten by the old one in the destination.
1415 * Also If xbzrle is on, stop using the data compression at this
1416 * point. In theory, xbzrle can do better than compression.
1418 ram_flush_compressed_data(rs
);
1420 /* Hit the end of the list */
1421 pss
->block
= QLIST_FIRST_RCU(&ram_list
.blocks
);
1422 /* Flag that we've looped */
1423 pss
->complete_round
= true;
1424 /* After the first round, enable XBZRLE. */
1425 if (migrate_xbzrle()) {
1426 rs
->xbzrle_started
= true;
1429 /* Didn't find anything this time, but try again on the new block */
1430 return PAGE_TRY_AGAIN
;
1432 /* We've found something */
1433 return PAGE_DIRTY_FOUND
;
1438 * unqueue_page: gets a page of the queue
1440 * Helper for 'get_queued_page' - gets a page off the queue
1442 * Returns the block of the page (or NULL if none available)
1444 * @rs: current RAM state
1445 * @offset: used to return the offset within the RAMBlock
1447 static RAMBlock
*unqueue_page(RAMState
*rs
, ram_addr_t
*offset
)
1449 struct RAMSrcPageRequest
*entry
;
1450 RAMBlock
*block
= NULL
;
1452 if (!postcopy_has_request(rs
)) {
1456 QEMU_LOCK_GUARD(&rs
->src_page_req_mutex
);
1459 * This should _never_ change even after we take the lock, because no one
1460 * should be taking anything off the request list other than us.
1462 assert(postcopy_has_request(rs
));
1464 entry
= QSIMPLEQ_FIRST(&rs
->src_page_requests
);
1466 *offset
= entry
->offset
;
1468 if (entry
->len
> TARGET_PAGE_SIZE
) {
1469 entry
->len
-= TARGET_PAGE_SIZE
;
1470 entry
->offset
+= TARGET_PAGE_SIZE
;
1472 memory_region_unref(block
->mr
);
1473 QSIMPLEQ_REMOVE_HEAD(&rs
->src_page_requests
, next_req
);
1475 migration_consume_urgent_request();
1481 #if defined(__linux__)
1483 * poll_fault_page: try to get next UFFD write fault page and, if pending fault
1484 * is found, return RAM block pointer and page offset
1486 * Returns pointer to the RAMBlock containing faulting page,
1487 * NULL if no write faults are pending
1489 * @rs: current RAM state
1490 * @offset: page offset from the beginning of the block
1492 static RAMBlock
*poll_fault_page(RAMState
*rs
, ram_addr_t
*offset
)
1494 struct uffd_msg uffd_msg
;
1499 if (!migrate_background_snapshot()) {
1503 res
= uffd_read_events(rs
->uffdio_fd
, &uffd_msg
, 1);
1508 page_address
= (void *)(uintptr_t) uffd_msg
.arg
.pagefault
.address
;
1509 block
= qemu_ram_block_from_host(page_address
, false, offset
);
1510 assert(block
&& (block
->flags
& RAM_UF_WRITEPROTECT
) != 0);
1515 * ram_save_release_protection: release UFFD write protection after
1516 * a range of pages has been saved
1518 * @rs: current RAM state
1519 * @pss: page-search-status structure
1520 * @start_page: index of the first page in the range relative to pss->block
1522 * Returns 0 on success, negative value in case of an error
1524 static int ram_save_release_protection(RAMState
*rs
, PageSearchStatus
*pss
,
1525 unsigned long start_page
)
1529 /* Check if page is from UFFD-managed region. */
1530 if (pss
->block
->flags
& RAM_UF_WRITEPROTECT
) {
1531 void *page_address
= pss
->block
->host
+ (start_page
<< TARGET_PAGE_BITS
);
1532 uint64_t run_length
= (pss
->page
- start_page
) << TARGET_PAGE_BITS
;
1534 /* Flush async buffers before un-protect. */
1535 qemu_fflush(pss
->pss_channel
);
1536 /* Un-protect memory range. */
1537 res
= uffd_change_protection(rs
->uffdio_fd
, page_address
, run_length
,
1544 /* ram_write_tracking_available: check if kernel supports required UFFD features
1546 * Returns true if supports, false otherwise
1548 bool ram_write_tracking_available(void)
1550 uint64_t uffd_features
;
1553 res
= uffd_query_features(&uffd_features
);
1555 (uffd_features
& UFFD_FEATURE_PAGEFAULT_FLAG_WP
) != 0);
1558 /* ram_write_tracking_compatible: check if guest configuration is
1559 * compatible with 'write-tracking'
1561 * Returns true if compatible, false otherwise
1563 bool ram_write_tracking_compatible(void)
1565 const uint64_t uffd_ioctls_mask
= BIT(_UFFDIO_WRITEPROTECT
);
1570 /* Open UFFD file descriptor */
1571 uffd_fd
= uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP
, false);
1576 RCU_READ_LOCK_GUARD();
1578 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
1579 uint64_t uffd_ioctls
;
1581 /* Nothing to do with read-only and MMIO-writable regions */
1582 if (block
->mr
->readonly
|| block
->mr
->rom_device
) {
1585 /* Try to register block memory via UFFD-IO to track writes */
1586 if (uffd_register_memory(uffd_fd
, block
->host
, block
->max_length
,
1587 UFFDIO_REGISTER_MODE_WP
, &uffd_ioctls
)) {
1590 if ((uffd_ioctls
& uffd_ioctls_mask
) != uffd_ioctls_mask
) {
1597 uffd_close_fd(uffd_fd
);
1601 static inline void populate_read_range(RAMBlock
*block
, ram_addr_t offset
,
1604 const ram_addr_t end
= offset
+ size
;
1607 * We read one byte of each page; this will preallocate page tables if
1608 * required and populate the shared zeropage on MAP_PRIVATE anonymous memory
1609 * where no page was populated yet. This might require adaption when
1610 * supporting other mappings, like shmem.
1612 for (; offset
< end
; offset
+= block
->page_size
) {
1613 char tmp
= *((char *)block
->host
+ offset
);
1615 /* Don't optimize the read out */
1616 asm volatile("" : "+r" (tmp
));
1620 static inline int populate_read_section(MemoryRegionSection
*section
,
1623 const hwaddr size
= int128_get64(section
->size
);
1624 hwaddr offset
= section
->offset_within_region
;
1625 RAMBlock
*block
= section
->mr
->ram_block
;
1627 populate_read_range(block
, offset
, size
);
1632 * ram_block_populate_read: preallocate page tables and populate pages in the
1633 * RAM block by reading a byte of each page.
1635 * Since it's solely used for userfault_fd WP feature, here we just
1636 * hardcode page size to qemu_real_host_page_size.
1638 * @block: RAM block to populate
1640 static void ram_block_populate_read(RAMBlock
*rb
)
1643 * Skip populating all pages that fall into a discarded range as managed by
1644 * a RamDiscardManager responsible for the mapped memory region of the
1645 * RAMBlock. Such discarded ("logically unplugged") parts of a RAMBlock
1646 * must not get populated automatically. We don't have to track
1647 * modifications via userfaultfd WP reliably, because these pages will
1648 * not be part of the migration stream either way -- see
1649 * ramblock_dirty_bitmap_exclude_discarded_pages().
1651 * Note: The result is only stable while migrating (precopy/postcopy).
1653 if (rb
->mr
&& memory_region_has_ram_discard_manager(rb
->mr
)) {
1654 RamDiscardManager
*rdm
= memory_region_get_ram_discard_manager(rb
->mr
);
1655 MemoryRegionSection section
= {
1657 .offset_within_region
= 0,
1658 .size
= rb
->mr
->size
,
1661 ram_discard_manager_replay_populated(rdm
, §ion
,
1662 populate_read_section
, NULL
);
1664 populate_read_range(rb
, 0, rb
->used_length
);
1669 * ram_write_tracking_prepare: prepare for UFFD-WP memory tracking
1671 void ram_write_tracking_prepare(void)
1675 RCU_READ_LOCK_GUARD();
1677 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
1678 /* Nothing to do with read-only and MMIO-writable regions */
1679 if (block
->mr
->readonly
|| block
->mr
->rom_device
) {
1684 * Populate pages of the RAM block before enabling userfault_fd
1687 * This stage is required since ioctl(UFFDIO_WRITEPROTECT) with
1688 * UFFDIO_WRITEPROTECT_MODE_WP mode setting would silently skip
1689 * pages with pte_none() entries in page table.
1691 ram_block_populate_read(block
);
1695 static inline int uffd_protect_section(MemoryRegionSection
*section
,
1698 const hwaddr size
= int128_get64(section
->size
);
1699 const hwaddr offset
= section
->offset_within_region
;
1700 RAMBlock
*rb
= section
->mr
->ram_block
;
1701 int uffd_fd
= (uintptr_t)opaque
;
1703 return uffd_change_protection(uffd_fd
, rb
->host
+ offset
, size
, true,
1707 static int ram_block_uffd_protect(RAMBlock
*rb
, int uffd_fd
)
1709 assert(rb
->flags
& RAM_UF_WRITEPROTECT
);
1711 /* See ram_block_populate_read() */
1712 if (rb
->mr
&& memory_region_has_ram_discard_manager(rb
->mr
)) {
1713 RamDiscardManager
*rdm
= memory_region_get_ram_discard_manager(rb
->mr
);
1714 MemoryRegionSection section
= {
1716 .offset_within_region
= 0,
1717 .size
= rb
->mr
->size
,
1720 return ram_discard_manager_replay_populated(rdm
, §ion
,
1721 uffd_protect_section
,
1722 (void *)(uintptr_t)uffd_fd
);
1724 return uffd_change_protection(uffd_fd
, rb
->host
,
1725 rb
->used_length
, true, false);
1729 * ram_write_tracking_start: start UFFD-WP memory tracking
1731 * Returns 0 for success or negative value in case of error
1733 int ram_write_tracking_start(void)
1736 RAMState
*rs
= ram_state
;
1739 /* Open UFFD file descriptor */
1740 uffd_fd
= uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP
, true);
1744 rs
->uffdio_fd
= uffd_fd
;
1746 RCU_READ_LOCK_GUARD();
1748 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
1749 /* Nothing to do with read-only and MMIO-writable regions */
1750 if (block
->mr
->readonly
|| block
->mr
->rom_device
) {
1754 /* Register block memory with UFFD to track writes */
1755 if (uffd_register_memory(rs
->uffdio_fd
, block
->host
,
1756 block
->max_length
, UFFDIO_REGISTER_MODE_WP
, NULL
)) {
1759 block
->flags
|= RAM_UF_WRITEPROTECT
;
1760 memory_region_ref(block
->mr
);
1762 /* Apply UFFD write protection to the block memory range */
1763 if (ram_block_uffd_protect(block
, uffd_fd
)) {
1767 trace_ram_write_tracking_ramblock_start(block
->idstr
, block
->page_size
,
1768 block
->host
, block
->max_length
);
1774 error_report("ram_write_tracking_start() failed: restoring initial memory state");
1776 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
1777 if ((block
->flags
& RAM_UF_WRITEPROTECT
) == 0) {
1780 uffd_unregister_memory(rs
->uffdio_fd
, block
->host
, block
->max_length
);
1781 /* Cleanup flags and remove reference */
1782 block
->flags
&= ~RAM_UF_WRITEPROTECT
;
1783 memory_region_unref(block
->mr
);
1786 uffd_close_fd(uffd_fd
);
1792 * ram_write_tracking_stop: stop UFFD-WP memory tracking and remove protection
1794 void ram_write_tracking_stop(void)
1796 RAMState
*rs
= ram_state
;
1799 RCU_READ_LOCK_GUARD();
1801 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
1802 if ((block
->flags
& RAM_UF_WRITEPROTECT
) == 0) {
1805 uffd_unregister_memory(rs
->uffdio_fd
, block
->host
, block
->max_length
);
1807 trace_ram_write_tracking_ramblock_stop(block
->idstr
, block
->page_size
,
1808 block
->host
, block
->max_length
);
1810 /* Cleanup flags and remove reference */
1811 block
->flags
&= ~RAM_UF_WRITEPROTECT
;
1812 memory_region_unref(block
->mr
);
1815 /* Finally close UFFD file descriptor */
1816 uffd_close_fd(rs
->uffdio_fd
);
1821 /* No target OS support, stubs just fail or ignore */
1823 static RAMBlock
*poll_fault_page(RAMState
*rs
, ram_addr_t
*offset
)
1831 static int ram_save_release_protection(RAMState
*rs
, PageSearchStatus
*pss
,
1832 unsigned long start_page
)
1841 bool ram_write_tracking_available(void)
1846 bool ram_write_tracking_compatible(void)
1852 int ram_write_tracking_start(void)
1858 void ram_write_tracking_stop(void)
1862 #endif /* defined(__linux__) */
1865 * get_queued_page: unqueue a page from the postcopy requests
1867 * Skips pages that are already sent (!dirty)
1869 * Returns true if a queued page is found
1871 * @rs: current RAM state
1872 * @pss: data about the state of the current dirty page scan
1874 static bool get_queued_page(RAMState
*rs
, PageSearchStatus
*pss
)
1881 block
= unqueue_page(rs
, &offset
);
1883 * We're sending this page, and since it's postcopy nothing else
1884 * will dirty it, and we must make sure it doesn't get sent again
1885 * even if this queue request was received after the background
1886 * search already sent it.
1891 page
= offset
>> TARGET_PAGE_BITS
;
1892 dirty
= test_bit(page
, block
->bmap
);
1894 trace_get_queued_page_not_dirty(block
->idstr
, (uint64_t)offset
,
1897 trace_get_queued_page(block
->idstr
, (uint64_t)offset
, page
);
1901 } while (block
&& !dirty
);
1905 * Poll write faults too if background snapshot is enabled; that's
1906 * when we have vcpus got blocked by the write protected pages.
1908 block
= poll_fault_page(rs
, &offset
);
1913 * We want the background search to continue from the queued page
1914 * since the guest is likely to want other pages near to the page
1915 * it just requested.
1918 pss
->page
= offset
>> TARGET_PAGE_BITS
;
1921 * This unqueued page would break the "one round" check, even is
1924 pss
->complete_round
= false;
1931 * migration_page_queue_free: drop any remaining pages in the ram
1934 * It should be empty at the end anyway, but in error cases there may
1935 * be some left. in case that there is any page left, we drop it.
1938 static void migration_page_queue_free(RAMState
*rs
)
1940 struct RAMSrcPageRequest
*mspr
, *next_mspr
;
1941 /* This queue generally should be empty - but in the case of a failed
1942 * migration might have some droppings in.
1944 RCU_READ_LOCK_GUARD();
1945 QSIMPLEQ_FOREACH_SAFE(mspr
, &rs
->src_page_requests
, next_req
, next_mspr
) {
1946 memory_region_unref(mspr
->rb
->mr
);
1947 QSIMPLEQ_REMOVE_HEAD(&rs
->src_page_requests
, next_req
);
1953 * ram_save_queue_pages: queue the page for transmission
1955 * A request from postcopy destination for example.
1957 * Returns zero on success or negative on error
1959 * @rbname: Name of the RAMBLock of the request. NULL means the
1960 * same that last one.
1961 * @start: starting address from the start of the RAMBlock
1962 * @len: length (in bytes) to send
1964 int ram_save_queue_pages(const char *rbname
, ram_addr_t start
, ram_addr_t len
)
1967 RAMState
*rs
= ram_state
;
1969 stat64_add(&mig_stats
.postcopy_requests
, 1);
1970 RCU_READ_LOCK_GUARD();
1973 /* Reuse last RAMBlock */
1974 ramblock
= rs
->last_req_rb
;
1978 * Shouldn't happen, we can't reuse the last RAMBlock if
1979 * it's the 1st request.
1981 error_report("ram_save_queue_pages no previous block");
1985 ramblock
= qemu_ram_block_by_name(rbname
);
1988 /* We shouldn't be asked for a non-existent RAMBlock */
1989 error_report("ram_save_queue_pages no block '%s'", rbname
);
1992 rs
->last_req_rb
= ramblock
;
1994 trace_ram_save_queue_pages(ramblock
->idstr
, start
, len
);
1995 if (!offset_in_ramblock(ramblock
, start
+ len
- 1)) {
1996 error_report("%s request overrun start=" RAM_ADDR_FMT
" len="
1997 RAM_ADDR_FMT
" blocklen=" RAM_ADDR_FMT
,
1998 __func__
, start
, len
, ramblock
->used_length
);
2003 * When with postcopy preempt, we send back the page directly in the
2006 if (postcopy_preempt_active()) {
2007 ram_addr_t page_start
= start
>> TARGET_PAGE_BITS
;
2008 size_t page_size
= qemu_ram_pagesize(ramblock
);
2009 PageSearchStatus
*pss
= &ram_state
->pss
[RAM_CHANNEL_POSTCOPY
];
2012 qemu_mutex_lock(&rs
->bitmap_mutex
);
2014 pss_init(pss
, ramblock
, page_start
);
2016 * Always use the preempt channel, and make sure it's there. It's
2017 * safe to access without lock, because when rp-thread is running
2018 * we should be the only one who operates on the qemufile
2020 pss
->pss_channel
= migrate_get_current()->postcopy_qemufile_src
;
2021 assert(pss
->pss_channel
);
2024 * It must be either one or multiple of host page size. Just
2025 * assert; if something wrong we're mostly split brain anyway.
2027 assert(len
% page_size
== 0);
2029 if (ram_save_host_page_urgent(pss
)) {
2030 error_report("%s: ram_save_host_page_urgent() failed: "
2031 "ramblock=%s, start_addr=0x"RAM_ADDR_FMT
,
2032 __func__
, ramblock
->idstr
, start
);
2037 * NOTE: after ram_save_host_page_urgent() succeeded, pss->page
2038 * will automatically be moved and point to the next host page
2039 * we're going to send, so no need to update here.
2041 * Normally QEMU never sends >1 host page in requests, so
2042 * logically we don't even need that as the loop should only
2043 * run once, but just to be consistent.
2047 qemu_mutex_unlock(&rs
->bitmap_mutex
);
2052 struct RAMSrcPageRequest
*new_entry
=
2053 g_new0(struct RAMSrcPageRequest
, 1);
2054 new_entry
->rb
= ramblock
;
2055 new_entry
->offset
= start
;
2056 new_entry
->len
= len
;
2058 memory_region_ref(ramblock
->mr
);
2059 qemu_mutex_lock(&rs
->src_page_req_mutex
);
2060 QSIMPLEQ_INSERT_TAIL(&rs
->src_page_requests
, new_entry
, next_req
);
2061 migration_make_urgent_request();
2062 qemu_mutex_unlock(&rs
->src_page_req_mutex
);
static bool save_page_use_compression(RAMState *rs)
{
    if (!migrate_compress()) {
        return false;
    }

    /*
     * If xbzrle is enabled (e.g., after first round of migration), stop
     * using the data compression. In theory, xbzrle can do better than
     * compression.
     */
    if (rs->xbzrle_started) {
        return false;
    }

    return true;
}

/*
 * try to compress the page before posting it out, return true if the page
 * has been properly handled by compression, otherwise needs other
 * paths to handle it
 */
static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
                               RAMBlock *block, ram_addr_t offset)
{
    if (!save_page_use_compression(rs)) {
        return false;
    }

    /*
     * When starting the process of a new block, the first page of
     * the block should be sent out before other pages in the same
     * block, and all the pages in last block should have been sent
     * out, keeping this order is important, because the 'cont' flag
     * is used to avoid resending the block name.
     *
     * We post the first page as a normal page as compression will take
     * much CPU resource.
     */
    if (block != pss->last_sent_block) {
        ram_flush_compressed_data(rs);
        return false;
    }

    if (compress_page_with_multi_thread(block, offset, send_queued_data) > 0) {
        return true;
    }

    compression_counters.busy++;
    return false;
}
/**
 * ram_save_target_page_legacy: save one target page
 *
 * Returns the number of pages written
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 */
static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
{
    RAMBlock *block = pss->block;
    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
    int res;

    if (control_save_page(pss, block, offset, &res)) {
        return res;
    }

    if (save_compress_page(rs, pss, block, offset)) {
        return 1;
    }

    res = save_zero_page(pss, pss->pss_channel, block, offset);
    if (res > 0) {
        /* Must let xbzrle know, otherwise a previous (now 0'd) cached
         * page would be stale
         */
        if (rs->xbzrle_started) {
            XBZRLE_cache_lock();
            xbzrle_cache_zero_page(rs, block->offset + offset);
            XBZRLE_cache_unlock();
        }
        return res;
    }

    /*
     * Do not use multifd in postcopy as one whole host page should be
     * placed.  Meanwhile postcopy requires atomic update of pages, so even
     * if host page size == guest page size the dest guest during run may
     * still see partially copied pages which is data corruption.
     */
    if (migrate_multifd() && !migration_in_postcopy()) {
        return ram_save_multifd_page(pss->pss_channel, block, offset);
    }

    return ram_save_page(rs, pss);
}
/* Should be called before sending a host page */
static void pss_host_page_prepare(PageSearchStatus *pss)
{
    /* How many guest pages are there in one host page? */
    size_t guest_pfns = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;

    pss->host_page_sending = true;
    if (guest_pfns <= 1) {
        /*
         * This covers both when guest psize == host psize, or when guest
         * has larger psize than the host (guest_pfns==0).
         *
         * For the latter, we always send one whole guest page per
         * iteration of the host page (example: an Alpha VM on x86 host
         * will have guest psize 8K while host psize 4K).
         */
        pss->host_page_start = pss->page;
        pss->host_page_end = pss->page + 1;
    } else {
        /*
         * The host page spans over multiple guest pages, we send them
         * within the same host page iteration.
         */
        pss->host_page_start = ROUND_DOWN(pss->page, guest_pfns);
        pss->host_page_end = ROUND_UP(pss->page + 1, guest_pfns);
    }
}

/*
 * Whether the page pointed by PSS is within the host page being sent.
 * Must be called after a previous pss_host_page_prepare().
 */
static bool pss_within_range(PageSearchStatus *pss)
{
    ram_addr_t ram_addr;

    assert(pss->host_page_sending);

    /* Over host-page boundary? */
    if (pss->page >= pss->host_page_end) {
        return false;
    }

    ram_addr = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;

    return offset_in_ramblock(pss->block, ram_addr);
}

static void pss_host_page_finish(PageSearchStatus *pss)
{
    pss->host_page_sending = false;
    /* This is not needed, but just to reset it */
    pss->host_page_start = pss->host_page_end = 0;
}
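/*
 * Illustration (not from the original source): with 4K target pages and a
 * 2M hugepage-backed block, guest_pfns is 512, so a pss->page of 1000
 * yields host_page_start == 512 and host_page_end == 1024; every dirty
 * target page inside that window is sent within the same host-page
 * iteration.
 */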
/*
 * Send an urgent host page specified by `pss'.  Need to be called with
 * bitmap_mutex held.
 *
 * Returns 0 if saving the host page succeeded, negative otherwise.
 */
static int ram_save_host_page_urgent(PageSearchStatus *pss)
{
    bool page_dirty, sent = false;
    RAMState *rs = ram_state;
    int ret = 0;

    trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page);
    pss_host_page_prepare(pss);

    /*
     * If precopy is sending the same page, let it be done in precopy, or
     * we could send the same page in two channels and none of them will
     * receive the whole page.
     */
    if (pss_overlap(pss, &ram_state->pss[RAM_CHANNEL_PRECOPY])) {
        trace_postcopy_preempt_hit(pss->block->idstr,
                                   pss->page << TARGET_PAGE_BITS);
        return 0;
    }

    do {
        page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page);

        if (page_dirty) {
            /* Be strict to return code; it must be 1, or what else? */
            if (migration_ops->ram_save_target_page(rs, pss) != 1) {
                error_report_once("%s: ram_save_target_page failed", __func__);
                ret = -1;
                goto out;
            }
            sent = true;
        }
        pss_find_next_dirty(pss);
    } while (pss_within_range(pss));
out:
    pss_host_page_finish(pss);
    /* For urgent requests, flush immediately if sent */
    if (sent) {
        qemu_fflush(pss->pss_channel);
    }
    return ret;
}
/**
 * ram_save_host_page: save a whole host page
 *
 * Starting at *offset send pages up to the end of the current host
 * page. It's valid for the initial offset to point into the middle of
 * a host page in which case the remainder of the hostpage is sent.
 * Only dirty target pages are sent. Note that the host page size may
 * be a huge page for this block.
 *
 * The saving stops at the boundary of the used_length of the block
 * if the RAMBlock isn't a multiple of the host page size.
 *
 * The caller must be with ram_state.bitmap_mutex held to call this
 * function.  Note that this function can temporarily release the lock, but
 * when the function is returned it'll make sure the lock is still held.
 *
 * Returns the number of pages written or negative on error
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 */
static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
{
    bool page_dirty, preempt_active = postcopy_preempt_active();
    int tmppages, pages = 0;
    size_t pagesize_bits =
        qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
    unsigned long start_page = pss->page;
    int res;

    if (migrate_ram_is_ignored(pss->block)) {
        error_report("block %s should not be migrated !", pss->block->idstr);
        return 0;
    }

    /* Update host page boundary information */
    pss_host_page_prepare(pss);

    do {
        page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page);

        /* Check the pages is dirty and if it is send it */
        if (page_dirty) {
            /*
             * Properly yield the lock only in postcopy preempt mode
             * because both migration thread and rp-return thread can
             * operate on the bitmaps.
             */
            if (preempt_active) {
                qemu_mutex_unlock(&rs->bitmap_mutex);
            }
            tmppages = migration_ops->ram_save_target_page(rs, pss);
            if (tmppages >= 0) {
                pages += tmppages;
                /*
                 * Allow rate limiting to happen in the middle of huge pages if
                 * something is sent in the current iteration.
                 */
                if (pagesize_bits > 1 && tmppages > 0) {
                    migration_rate_limit();
                }
            }
            if (preempt_active) {
                qemu_mutex_lock(&rs->bitmap_mutex);
            }
        } else {
            tmppages = 0;
        }

        if (tmppages < 0) {
            pss_host_page_finish(pss);
            return tmppages;
        }

        pss_find_next_dirty(pss);
    } while (pss_within_range(pss));

    pss_host_page_finish(pss);

    res = ram_save_release_protection(rs, pss, start_page);
    return (res < 0 ? res : pages);
}
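/*
 * Locking note: bitmap_mutex is held across the whole host-page walk above
 * and is only dropped around ram_save_target_page() when the postcopy
 * preempt channel is active, because in that mode the return-path thread
 * may also need to clear bits while serving urgent page requests.
 */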
/**
 * ram_find_and_save_block: finds a dirty page and sends it to f
 *
 * Called within an RCU critical section.
 *
 * Returns the number of pages written where zero means no dirty pages,
 * or negative on error
 *
 * @rs: current RAM state
 *
 * On systems where host-page-size > target-page-size it will send all the
 * pages in a host page that are dirty.
 */
static int ram_find_and_save_block(RAMState *rs)
{
    PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
    int pages = 0;

    /* No dirty page as there is zero RAM */
    if (!rs->ram_bytes_total) {
        return pages;
    }

    /*
     * Always keep last_seen_block/last_page valid during this procedure,
     * because find_dirty_block() relies on these values (e.g., we compare
     * last_seen_block with pss.block to see whether we searched all the
     * ramblocks) to detect the completion of migration.  Having NULL value
     * of last_seen_block can conditionally cause below loop to run forever.
     */
    if (!rs->last_seen_block) {
        rs->last_seen_block = QLIST_FIRST_RCU(&ram_list.blocks);
        rs->last_page = 0;
    }

    pss_init(pss, rs->last_seen_block, rs->last_page);

    while (true) {
        if (!get_queued_page(rs, pss)) {
            /* priority queue empty, so just search for something dirty */
            int res = find_dirty_block(rs, pss);
            if (res != PAGE_DIRTY_FOUND) {
                if (res == PAGE_ALL_CLEAN) {
                    break;
                } else if (res == PAGE_TRY_AGAIN) {
                    continue;
                } else if (res < 0) {
                    pages = res;
                    break;
                }
            }
        }
        pages = ram_save_host_page(rs, pss);
        if (pages) {
            break;
        }
    }

    rs->last_seen_block = pss->block;
    rs->last_page = pss->page;

    return pages;
}
static uint64_t ram_bytes_total_with_ignored(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        total += block->used_length;
    }
    return total;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        total += block->used_length;
    }
    return total;
}

static void xbzrle_load_setup(void)
{
    XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
}

static void xbzrle_load_cleanup(void)
{
    g_free(XBZRLE.decoded_buf);
    XBZRLE.decoded_buf = NULL;
}

static void ram_state_cleanup(RAMState **rsp)
{
    if (*rsp) {
        migration_page_queue_free(*rsp);
        qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
        qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
        g_free(*rsp);
        *rsp = NULL;
    }
}
static void xbzrle_cleanup(void)
{
    XBZRLE_cache_lock();

    cache_fini(XBZRLE.cache);
    g_free(XBZRLE.encoded_buf);
    g_free(XBZRLE.current_buf);
    g_free(XBZRLE.zero_target_page);
    XBZRLE.cache = NULL;
    XBZRLE.encoded_buf = NULL;
    XBZRLE.current_buf = NULL;
    XBZRLE.zero_target_page = NULL;

    XBZRLE_cache_unlock();
}
static void ram_save_cleanup(void *opaque)
{
    RAMState **rsp = opaque;
    RAMBlock *block;

    /* We don't use dirty log with background snapshots */
    if (!migrate_background_snapshot()) {
        /*
         * The caller holds the iothread lock or is in a bottom half, so
         * there is no write race against the migration bitmap.
         */
        if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
            /*
             * do not stop dirty log without starting it, since
             * memory_global_dirty_log_stop will assert that
             * memory_global_dirty_log_start/stop are used in pairs
             */
            memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
        }
    }

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        g_free(block->clear_bmap);
        block->clear_bmap = NULL;
        g_free(block->bmap);
        block->bmap = NULL;
    }

    xbzrle_cleanup();
    compress_threads_save_cleanup();
    ram_state_cleanup(rsp);
    g_free(migration_ops);
    migration_ops = NULL;
}
static void ram_state_reset(RAMState *rs)
{
    int i;

    for (i = 0; i < RAM_CHANNEL_MAX; i++) {
        rs->pss[i].last_sent_block = NULL;
    }

    rs->last_seen_block = NULL;
    rs->last_page = 0;
    rs->last_version = ram_list.version;
    rs->xbzrle_started = false;
}
#define MAX_WAIT 50 /* ms, half buffered_file limit */

/* **** functions for postcopy ***** */

void ram_postcopy_migrated_memory_release(MigrationState *ms)
{
    struct RAMBlock *block;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        unsigned long *bitmap = block->bmap;
        unsigned long range = block->used_length >> TARGET_PAGE_BITS;
        unsigned long run_start = find_next_zero_bit(bitmap, range, 0);

        while (run_start < range) {
            unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
            ram_discard_range(block->idstr,
                              ((ram_addr_t)run_start) << TARGET_PAGE_BITS,
                              ((ram_addr_t)(run_end - run_start))
                                << TARGET_PAGE_BITS);
            run_start = find_next_zero_bit(bitmap, range, run_end + 1);
        }
    }
}
/**
 * postcopy_send_discard_bm_ram: discard a RAMBlock
 *
 * Callback from postcopy_each_ram_send_discard for each RAMBlock
 *
 * @ms: current migration state
 * @block: RAMBlock to discard
 */
static void postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
{
    unsigned long end = block->used_length >> TARGET_PAGE_BITS;
    unsigned long current;
    unsigned long *bitmap = block->bmap;

    for (current = 0; current < end; ) {
        unsigned long one = find_next_bit(bitmap, end, current);
        unsigned long zero, discard_length;

        if (one >= end) {
            break;
        }

        zero = find_next_zero_bit(bitmap, end, one + 1);

        if (zero >= end) {
            discard_length = end - one;
        } else {
            discard_length = zero - one;
        }
        postcopy_discard_send_range(ms, one, discard_length);
        current = one + discard_length;
    }
}
static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block);

/**
 * postcopy_each_ram_send_discard: discard all RAMBlocks
 *
 * Utility for the outgoing postcopy code.
 *   Calls postcopy_send_discard_bm_ram for each RAMBlock
 *   passing it bitmap indexes and name.
 * (qemu_ram_foreach_block ends up passing unscaled lengths
 *  which would mean postcopy code would have to deal with target page)
 *
 * @ms: current migration state
 */
static void postcopy_each_ram_send_discard(MigrationState *ms)
{
    struct RAMBlock *block;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        postcopy_discard_send_init(ms, block->idstr);

        /*
         * Deal with TPS != HPS and huge pages.  It discards any partially
         * sent host-page size chunks and marks any partially dirty
         * host-page size chunks as all dirty.  In this case the host-page
         * is the host-page for the particular RAMBlock, i.e. it might be a
         * huge page.
         */
        postcopy_chunk_hostpages_pass(ms, block);

        /*
         * Postcopy sends chunks of bitmap over the wire, but it
         * just needs indexes at this point, avoids it having
         * target page specific code.
         */
        postcopy_send_discard_bm_ram(ms, block);
        postcopy_discard_send_finish(ms);
    }
}
/**
 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
 *
 * Helper for postcopy_chunk_hostpages; it's called twice to
 * canonicalize the two bitmaps, that are similar, but one is
 * inverted.
 *
 * Postcopy requires that all target pages in a hostpage are dirty or
 * clean, not a mix.  This function canonicalizes the bitmaps.
 *
 * @ms: current migration state
 * @block: block that contains the page we want to canonicalize
 */
static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block)
{
    RAMState *rs = ram_state;
    unsigned long *bitmap = block->bmap;
    unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
    unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
    unsigned long run_start;

    if (block->page_size == TARGET_PAGE_SIZE) {
        /* Easy case - TPS==HPS for a non-huge page RAMBlock */
        return;
    }

    /* Find a dirty page */
    run_start = find_next_bit(bitmap, pages, 0);

    while (run_start < pages) {

        /*
         * If the start of this run of pages is in the middle of a host
         * page, then we need to fixup this host page.
         */
        if (QEMU_IS_ALIGNED(run_start, host_ratio)) {
            /* Find the end of this run */
            run_start = find_next_zero_bit(bitmap, pages, run_start + 1);
            /*
             * If the end isn't at the start of a host page, then the
             * run doesn't finish at the end of a host page
             * and we need to discard.
             */
        }

        if (!QEMU_IS_ALIGNED(run_start, host_ratio)) {
            unsigned long page;
            unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start,
                                                             host_ratio);
            run_start = QEMU_ALIGN_UP(run_start, host_ratio);

            /* Clean up the bitmap */
            for (page = fixup_start_addr;
                 page < fixup_start_addr + host_ratio; page++) {
                /*
                 * Remark them as dirty, updating the count for any pages
                 * that weren't previously dirty.
                 */
                rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
            }
        }

        /* Find the next dirty page for the next iteration */
        run_start = find_next_bit(bitmap, pages, run_start);
    }
}
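/*
 * Illustration (not from the original source): with 4K target pages on a
 * 2M hugepage block (host_ratio == 512), a dirty run covering target pages
 * 510..513 straddles a hugepage boundary; the pass re-marks pages 0..511
 * and 512..1023 as fully dirty, so every hugepage ends up either completely
 * clean or completely dirty before the discard bitmap is transmitted.
 */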
/**
 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
 *
 * Transmit the set of pages to be discarded after precopy to the target
 * these are pages that:
 *     a) Have been previously transmitted but are now dirty again
 *     b) Pages that have never been transmitted, this ensures that
 *        any pages on the destination that have been mapped by background
 *        tasks get discarded (transparent huge pages is the specific concern)
 * Hopefully this is pretty sparse
 *
 * @ms: current migration state
 */
void ram_postcopy_send_discard_bitmap(MigrationState *ms)
{
    RAMState *rs = ram_state;

    RCU_READ_LOCK_GUARD();

    /* This should be our last sync, the src is now paused */
    migration_bitmap_sync(rs, false);

    /* Easiest way to make sure we don't resume in the middle of a host-page */
    rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
    rs->last_seen_block = NULL;
    rs->last_page = 0;

    postcopy_each_ram_send_discard(ms);

    trace_ram_postcopy_send_discard_bitmap();
}
/**
 * ram_discard_range: discard dirtied pages at the beginning of postcopy
 *
 * Returns zero on success
 *
 * @rbname: name of the RAMBlock of the request. NULL means the
 *          same that last one.
 * @start: RAMBlock starting page
 * @length: RAMBlock size
 */
int ram_discard_range(const char *rbname, uint64_t start, size_t length)
{
    trace_ram_discard_range(rbname, start, length);

    RCU_READ_LOCK_GUARD();
    RAMBlock *rb = qemu_ram_block_by_name(rbname);

    if (!rb) {
        error_report("ram_discard_range: Failed to find block '%s'", rbname);
        return -1;
    }

    /*
     * On source VM, we don't need to update the received bitmap since
     * we don't even have one.
     */
    if (rb->receivedmap) {
        bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
                     length >> qemu_target_page_bits());
    }

    return ram_block_discard_range(rb, start, length);
}
/*
 * For every allocation, we will try not to crash the VM if the
 * allocation failed.
 */
static int xbzrle_init(void)
{
    Error *local_err = NULL;

    if (!migrate_xbzrle()) {
        return 0;
    }

    XBZRLE_cache_lock();

    XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
    if (!XBZRLE.zero_target_page) {
        error_report("%s: Error allocating zero page", __func__);
        goto err_out;
    }

    XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
                              TARGET_PAGE_SIZE, &local_err);
    if (!XBZRLE.cache) {
        error_report_err(local_err);
        goto free_zero_page;
    }

    XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
    if (!XBZRLE.encoded_buf) {
        error_report("%s: Error allocating encoded_buf", __func__);
        goto free_cache;
    }

    XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
    if (!XBZRLE.current_buf) {
        error_report("%s: Error allocating current_buf", __func__);
        goto free_encoded_buf;
    }

    /* We are all good */
    XBZRLE_cache_unlock();
    return 0;

free_encoded_buf:
    g_free(XBZRLE.encoded_buf);
    XBZRLE.encoded_buf = NULL;
free_cache:
    cache_fini(XBZRLE.cache);
    XBZRLE.cache = NULL;
free_zero_page:
    g_free(XBZRLE.zero_target_page);
    XBZRLE.zero_target_page = NULL;
err_out:
    XBZRLE_cache_unlock();
    return -ENOMEM;
}
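/*
 * Note: the g_try_* allocators are used so that a failed (possibly large)
 * XBZRLE cache allocation degrades into a migration setup error rather than
 * aborting the VM; the labels above unwind in the reverse order of the
 * allocations before the cache lock is released.
 */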
static int ram_state_init(RAMState **rsp)
{
    *rsp = g_try_new0(RAMState, 1);

    if (!*rsp) {
        error_report("%s: Init ramstate fail", __func__);
        return -1;
    }

    qemu_mutex_init(&(*rsp)->bitmap_mutex);
    qemu_mutex_init(&(*rsp)->src_page_req_mutex);
    QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
    (*rsp)->ram_bytes_total = ram_bytes_total();

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     * This must match with the initial values of dirty bitmap.
     */
    (*rsp)->migration_dirty_pages = (*rsp)->ram_bytes_total >> TARGET_PAGE_BITS;
    ram_state_reset(*rsp);

    return 0;
}
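/*
 * migration_dirty_pages starts out as "every page dirty": it must agree
 * with ram_list_init_bitmaps() below, which fills each RAMBlock bitmap with
 * ones, so the counter and the per-block bitmaps stay consistent until the
 * first bitmap sync.
 */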
static void ram_list_init_bitmaps(void)
{
    MigrationState *ms = migrate_get_current();
    RAMBlock *block;
    unsigned long pages;
    uint8_t shift;

    /* Skip setting bitmap if there is no RAM */
    if (ram_bytes_total()) {
        shift = ms->clear_bitmap_shift;
        if (shift > CLEAR_BITMAP_SHIFT_MAX) {
            error_report("clear_bitmap_shift (%u) too big, using "
                         "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX);
            shift = CLEAR_BITMAP_SHIFT_MAX;
        } else if (shift < CLEAR_BITMAP_SHIFT_MIN) {
            error_report("clear_bitmap_shift (%u) too small, using "
                         "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN);
            shift = CLEAR_BITMAP_SHIFT_MIN;
        }

        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            pages = block->max_length >> TARGET_PAGE_BITS;
            /*
             * The initial dirty bitmap for migration must be set with all
             * ones to make sure we'll migrate every guest RAM page to the
             * destination.
             * Here we set RAMBlock.bmap all to 1 because when restarting a
             * new migration after a failed one, ram_list.
             * dirty_memory[DIRTY_MEMORY_MIGRATION] does not cover the whole
             * guest memory.
             */
            block->bmap = bitmap_new(pages);
            bitmap_set(block->bmap, 0, pages);
            block->clear_bmap_shift = shift;
            block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
        }
    }
}
static void migration_bitmap_clear_discarded_pages(RAMState *rs)
{
    RAMBlock *rb;
    unsigned long pages;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        pages = ramblock_dirty_bitmap_clear_discarded_pages(rb);
        rs->migration_dirty_pages -= pages;
    }
}

static void ram_init_bitmaps(RAMState *rs)
{
    qemu_mutex_lock_ramlist();

    WITH_RCU_READ_LOCK_GUARD() {
        ram_list_init_bitmaps();
        /* We don't use dirty log with background snapshots */
        if (!migrate_background_snapshot()) {
            memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
            migration_bitmap_sync_precopy(rs, false);
        }
    }
    qemu_mutex_unlock_ramlist();

    /*
     * After an eventual first bitmap sync, fixup the initial bitmap
     * containing all 1s to exclude any discarded pages from migration.
     */
    migration_bitmap_clear_discarded_pages(rs);
}

static int ram_init_all(RAMState **rsp)
{
    if (ram_state_init(rsp)) {
        return -1;
    }

    if (xbzrle_init()) {
        ram_state_cleanup(rsp);
        return -1;
    }

    ram_init_bitmaps(*rsp);

    return 0;
}
static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
{
    RAMBlock *block;
    uint64_t pages = 0;

    /*
     * Postcopy is not using xbzrle/compression, so no need for that.
     * Also, since the source is already halted, we don't need to care
     * about dirty page logging either.
     */

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        pages += bitmap_count_one(block->bmap,
                                  block->used_length >> TARGET_PAGE_BITS);
    }

    /* This may not be aligned with current bitmaps. Recalculate. */
    rs->migration_dirty_pages = pages;

    ram_state_reset(rs);

    /* Update RAMState cache of output QEMUFile */
    rs->pss[RAM_CHANNEL_PRECOPY].pss_channel = out;

    trace_ram_state_resume_prepare(pages);
}
/*
 * This function clears bits of the free pages reported by the caller from the
 * migration dirty bitmap. @addr is the host address corresponding to the
 * start of the continuous guest free pages, and @len is the total bytes of
 * those pages.
 */
void qemu_guest_free_page_hint(void *addr, size_t len)
{
    RAMBlock *block;
    ram_addr_t offset;
    size_t used_len, start, npages;
    MigrationState *s = migrate_get_current();

    /* This function is currently expected to be used during live migration */
    if (!migration_is_setup_or_active(s->state)) {
        return;
    }

    for (; len > 0; len -= used_len, addr += used_len) {
        block = qemu_ram_block_from_host(addr, false, &offset);
        if (unlikely(!block || offset >= block->used_length)) {
            /*
             * The implementation might not support RAMBlock resize during
             * live migration, but it could happen in theory with future
             * updates. So we add a check here to capture that case.
             */
            error_report_once("%s unexpected error", __func__);
            return;
        }

        if (len <= block->used_length - offset) {
            used_len = len;
        } else {
            used_len = block->used_length - offset;
        }

        start = offset >> TARGET_PAGE_BITS;
        npages = used_len >> TARGET_PAGE_BITS;

        qemu_mutex_lock(&ram_state->bitmap_mutex);
        /*
         * The skipped free pages are equivalent to having been sent from
         * clear_bmap's perspective, so clear the bits from the memory region
         * bitmap which are initially set. Otherwise those skipped pages will
         * be sent in the next round after syncing from the memory region
         * bitmap.
         */
        migration_clear_memory_region_dirty_bitmap_range(block, start, npages);
        ram_state->migration_dirty_pages -=
                      bitmap_count_one_with_offset(block->bmap, start, npages);
        bitmap_clear(block->bmap, start, npages);
        qemu_mutex_unlock(&ram_state->bitmap_mutex);
    }
}
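/*
 * Illustration (not from the original source): a hint covering 1M starting
 * 8K into a block clears npages == 256 bits beginning at start == 2 with 4K
 * target pages, and migration_dirty_pages only drops by however many of
 * those bits were actually still set.
 */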
3013 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
3014 * long-running RCU critical section. When rcu-reclaims in the code
3015 * start to become numerous it will be necessary to reduce the
3016 * granularity of these critical sections.
3020 * ram_save_setup: Setup RAM for migration
3022 * Returns zero to indicate success and negative for error
3024 * @f: QEMUFile where to send the data
3025 * @opaque: RAMState pointer
3027 static int ram_save_setup(QEMUFile
*f
, void *opaque
)
3029 RAMState
**rsp
= opaque
;
3033 if (compress_threads_save_setup()) {
3037 /* migration has already setup the bitmap, reuse it. */
3038 if (!migration_in_colo_state()) {
3039 if (ram_init_all(rsp
) != 0) {
3040 compress_threads_save_cleanup();
3044 (*rsp
)->pss
[RAM_CHANNEL_PRECOPY
].pss_channel
= f
;
3046 WITH_RCU_READ_LOCK_GUARD() {
3047 qemu_put_be64(f
, ram_bytes_total_with_ignored()
3048 | RAM_SAVE_FLAG_MEM_SIZE
);
3050 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
3051 qemu_put_byte(f
, strlen(block
->idstr
));
3052 qemu_put_buffer(f
, (uint8_t *)block
->idstr
, strlen(block
->idstr
));
3053 qemu_put_be64(f
, block
->used_length
);
3054 if (migrate_postcopy_ram() && block
->page_size
!=
3055 qemu_host_page_size
) {
3056 qemu_put_be64(f
, block
->page_size
);
3058 if (migrate_ignore_shared()) {
3059 qemu_put_be64(f
, block
->mr
->addr
);
3064 ret
= rdma_registration_start(f
, RAM_CONTROL_SETUP
);
3066 qemu_file_set_error(f
, ret
);
3069 ret
= rdma_registration_stop(f
, RAM_CONTROL_SETUP
);
3071 qemu_file_set_error(f
, ret
);
3074 migration_ops
= g_malloc0(sizeof(MigrationOps
));
3075 migration_ops
->ram_save_target_page
= ram_save_target_page_legacy
;
3077 qemu_mutex_unlock_iothread();
3078 ret
= multifd_send_sync_main(f
);
3079 qemu_mutex_lock_iothread();
3084 if (migrate_multifd() && !migrate_multifd_flush_after_each_section()) {
3085 qemu_put_be64(f
, RAM_SAVE_FLAG_MULTIFD_FLUSH
);
3088 qemu_put_be64(f
, RAM_SAVE_FLAG_EOS
);
3095 * ram_save_iterate: iterative stage for migration
3097 * Returns zero to indicate success and negative for error
3099 * @f: QEMUFile where to send the data
3100 * @opaque: RAMState pointer
3102 static int ram_save_iterate(QEMUFile
*f
, void *opaque
)
3104 RAMState
**temp
= opaque
;
3105 RAMState
*rs
= *temp
;
3111 if (blk_mig_bulk_active()) {
3112 /* Avoid transferring ram during bulk phase of block migration as
3113 * the bulk phase will usually take a long time and transferring
3114 * ram updates during that time is pointless. */
3119 * We'll take this lock a little bit long, but it's okay for two reasons.
3120 * Firstly, the only possible other thread to take it is who calls
3121 * qemu_guest_free_page_hint(), which should be rare; secondly, see
3122 * MAX_WAIT (if curious, further see commit 4508bd9ed8053ce) below, which
3123 * guarantees that we'll at least released it in a regular basis.
3125 qemu_mutex_lock(&rs
->bitmap_mutex
);
3126 WITH_RCU_READ_LOCK_GUARD() {
3127 if (ram_list
.version
!= rs
->last_version
) {
3128 ram_state_reset(rs
);
3131 /* Read version before ram_list.blocks */
3134 ret
= rdma_registration_start(f
, RAM_CONTROL_ROUND
);
3136 qemu_file_set_error(f
, ret
);
3139 t0
= qemu_clock_get_ns(QEMU_CLOCK_REALTIME
);
3141 while ((ret
= migration_rate_exceeded(f
)) == 0 ||
3142 postcopy_has_request(rs
)) {
3145 if (qemu_file_get_error(f
)) {
3149 pages
= ram_find_and_save_block(rs
);
3150 /* no more pages to sent */
3157 qemu_file_set_error(f
, pages
);
3161 rs
->target_page_count
+= pages
;
3164 * During postcopy, it is necessary to make sure one whole host
3165 * page is sent in one chunk.
3167 if (migrate_postcopy_ram()) {
3168 ram_flush_compressed_data(rs
);
3172 * we want to check in the 1st loop, just in case it was the 1st
3173 * time and we had to sync the dirty bitmap.
3174 * qemu_clock_get_ns() is a bit expensive, so we only check each
3177 if ((i
& 63) == 0) {
3178 uint64_t t1
= (qemu_clock_get_ns(QEMU_CLOCK_REALTIME
) - t0
) /
3180 if (t1
> MAX_WAIT
) {
3181 trace_ram_save_iterate_big_wait(t1
, i
);
3188 qemu_mutex_unlock(&rs
->bitmap_mutex
);
3191 * Must occur before EOS (or any QEMUFile operation)
3192 * because of RDMA protocol.
3194 ret
= rdma_registration_stop(f
, RAM_CONTROL_ROUND
);
3196 qemu_file_set_error(f
, ret
);
3201 && migration_is_setup_or_active(migrate_get_current()->state
)) {
3202 if (migrate_multifd() && migrate_multifd_flush_after_each_section()) {
3203 ret
= multifd_send_sync_main(rs
->pss
[RAM_CHANNEL_PRECOPY
].pss_channel
);
3209 qemu_put_be64(f
, RAM_SAVE_FLAG_EOS
);
3211 ram_transferred_add(8);
3213 ret
= qemu_file_get_error(f
);
/**
 * ram_save_complete: function called to send the remaining amount of ram
 *
 * Returns zero to indicate success or negative on error
 *
 * Called with iothread lock
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;
    int ret = 0;

    rs->last_stage = !migration_in_colo_state();

    WITH_RCU_READ_LOCK_GUARD() {
        if (!migration_in_postcopy()) {
            migration_bitmap_sync_precopy(rs, true);
        }

        ret = rdma_registration_start(f, RAM_CONTROL_FINISH);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            return ret;
        }

        /* try transferring iterative blocks of memory */

        /* flush all remaining blocks regardless of rate limiting */
        qemu_mutex_lock(&rs->bitmap_mutex);
        while (true) {
            int pages;

            pages = ram_find_and_save_block(rs);
            /* no more blocks to sent */
            if (pages == 0) {
                break;
            }
            if (pages < 0) {
                ret = pages;
                break;
            }
        }
        qemu_mutex_unlock(&rs->bitmap_mutex);

        ram_flush_compressed_data(rs);

        int ret = rdma_registration_stop(f, RAM_CONTROL_FINISH);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            return ret;
        }
    }

    if (ret < 0) {
        return ret;
    }

    ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel);
    if (ret < 0) {
        return ret;
    }

    if (migrate_multifd() && !migrate_multifd_flush_after_each_section()) {
        qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
    }
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    qemu_fflush(f);

    return 0;
}
static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy,
                                       uint64_t *can_postcopy)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;

    uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;

    if (migrate_postcopy_ram()) {
        /* We can do postcopy, and all the data is postcopiable */
        *can_postcopy += remaining_size;
    } else {
        *must_precopy += remaining_size;
    }
}

static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy,
                                    uint64_t *can_postcopy)
{
    MigrationState *s = migrate_get_current();
    RAMState **temp = opaque;
    RAMState *rs = *temp;

    uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;

    if (!migration_in_postcopy() && remaining_size < s->threshold_size) {
        qemu_mutex_lock_iothread();
        WITH_RCU_READ_LOCK_GUARD() {
            migration_bitmap_sync_precopy(rs, false);
        }
        qemu_mutex_unlock_iothread();
        remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
    }

    if (migrate_postcopy_ram()) {
        /* We can do postcopy, and all the data is postcopiable */
        *can_postcopy += remaining_size;
    } else {
        *must_precopy += remaining_size;
    }
}
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    unsigned int xh_len;
    int xh_flags;
    uint8_t *loaded_data;

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        error_report("Failed to load XBZRLE page - wrong compression!");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        error_report("Failed to load XBZRLE page - len overflow!");
        return -1;
    }
    loaded_data = XBZRLE.decoded_buf;
    /* load data and decode */
    /* it can change loaded_data to point to an internal buffer */
    qemu_get_buffer_in_place(f, &loaded_data, xh_len);

    /* decode RLE */
    if (xbzrle_decode_buffer(loaded_data, xh_len, host,
                             TARGET_PAGE_SIZE) == -1) {
        error_report("Failed to load XBZRLE page - decode error!");
        return -1;
    }

    return 0;
}
/**
 * ram_block_from_stream: read a RAMBlock id from the migration stream
 *
 * Must be called from within a rcu critical section.
 *
 * Returns a pointer from within the RCU-protected ram_list.
 *
 * @mis: the migration incoming state pointer
 * @f: QEMUFile where to read the data from
 * @flags: Page flags (mostly to see if it's a continuation of previous block)
 * @channel: the channel we're using
 */
static inline RAMBlock *ram_block_from_stream(MigrationIncomingState *mis,
                                              QEMUFile *f, int flags,
                                              int channel)
{
    RAMBlock *block = mis->last_recv_block[channel];
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            error_report("Ack, bad migration stream!");
            return NULL;
        }
        return block;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    block = qemu_ram_block_by_name(id);
    if (!block) {
        error_report("Can't find block %s", id);
        return NULL;
    }

    if (migrate_ram_is_ignored(block)) {
        error_report("block %s should not be migrated !", id);
        return NULL;
    }

    mis->last_recv_block[channel] = block;

    return block;
}
static inline void *host_from_ram_block_offset(RAMBlock *block,
                                               ram_addr_t offset)
{
    if (!offset_in_ramblock(block, offset)) {
        return NULL;
    }

    return block->host + offset;
}

static void *host_page_from_ram_block_offset(RAMBlock *block,
                                             ram_addr_t offset)
{
    /* Note: Explicitly no check against offset_in_ramblock(). */
    return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block->host + offset),
                                   block->page_size);
}

static ram_addr_t host_page_offset_from_ram_block_offset(RAMBlock *block,
                                                          ram_addr_t offset)
{
    return ((uintptr_t)block->host + offset) & (block->page_size - 1);
}

void colo_record_bitmap(RAMBlock *block, ram_addr_t *normal, uint32_t pages)
{
    qemu_mutex_lock(&ram_state->bitmap_mutex);
    for (int i = 0; i < pages; i++) {
        ram_addr_t offset = normal[i];
        ram_state->migration_dirty_pages += !test_and_set_bit(
                                                offset >> TARGET_PAGE_BITS,
                                                block->bmap);
    }
    qemu_mutex_unlock(&ram_state->bitmap_mutex);
}

static inline void *colo_cache_from_block_offset(RAMBlock *block,
                             ram_addr_t offset, bool record_bitmap)
{
    if (!offset_in_ramblock(block, offset)) {
        return NULL;
    }
    if (!block->colo_cache) {
        error_report("%s: colo_cache is NULL in block :%s",
                     __func__, block->idstr);
        return NULL;
    }

    /*
     * During colo checkpoint, we need bitmap of these migrated pages.
     * It helps us to decide which pages in ram cache should be flushed
     * into VM's RAM later.
     */
    if (record_bitmap) {
        colo_record_bitmap(block, &offset, 1);
    }
    return block->colo_cache + offset;
}
/**
 * ram_handle_compressed: handle the zero page case
 *
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 *
 * @host: host address for the zero page
 * @ch: what the page is filled from.  We only support zero
 * @size: size of the zero page
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !buffer_is_zero(host, size)) {
        memset(host, ch, size);
    }
}
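/*
 * Note: skipping the memset when the destination page is already zero avoids
 * writing to (and therefore allocating or dirtying) pages that the receiving
 * host has not touched yet, which helps keep untouched guest memory
 * unallocated on the destination.
 */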
3495 static void colo_init_ram_state(void)
3497 ram_state_init(&ram_state
);
3501 * colo cache: this is for secondary VM, we cache the whole
3502 * memory of the secondary VM, it is need to hold the global lock
3503 * to call this helper.
3505 int colo_init_ram_cache(void)
3509 WITH_RCU_READ_LOCK_GUARD() {
3510 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3511 block
->colo_cache
= qemu_anon_ram_alloc(block
->used_length
,
3512 NULL
, false, false);
3513 if (!block
->colo_cache
) {
3514 error_report("%s: Can't alloc memory for COLO cache of block %s,"
3515 "size 0x" RAM_ADDR_FMT
, __func__
, block
->idstr
,
3516 block
->used_length
);
3517 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3518 if (block
->colo_cache
) {
3519 qemu_anon_ram_free(block
->colo_cache
, block
->used_length
);
3520 block
->colo_cache
= NULL
;
3525 if (!machine_dump_guest_core(current_machine
)) {
3526 qemu_madvise(block
->colo_cache
, block
->used_length
,
3527 QEMU_MADV_DONTDUMP
);
3533 * Record the dirty pages that sent by PVM, we use this dirty bitmap together
3534 * with to decide which page in cache should be flushed into SVM's RAM. Here
3535 * we use the same name 'ram_bitmap' as for migration.
3537 if (ram_bytes_total()) {
3538 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3539 unsigned long pages
= block
->max_length
>> TARGET_PAGE_BITS
;
3540 block
->bmap
= bitmap_new(pages
);
3544 colo_init_ram_state();
3548 /* TODO: duplicated with ram_init_bitmaps */
3549 void colo_incoming_start_dirty_log(void)
3551 RAMBlock
*block
= NULL
;
3552 /* For memory_global_dirty_log_start below. */
3553 qemu_mutex_lock_iothread();
3554 qemu_mutex_lock_ramlist();
3556 memory_global_dirty_log_sync(false);
3557 WITH_RCU_READ_LOCK_GUARD() {
3558 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3559 ramblock_sync_dirty_bitmap(ram_state
, block
);
3560 /* Discard this dirty bitmap record */
3561 bitmap_zero(block
->bmap
, block
->max_length
>> TARGET_PAGE_BITS
);
3563 memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION
);
3565 ram_state
->migration_dirty_pages
= 0;
3566 qemu_mutex_unlock_ramlist();
3567 qemu_mutex_unlock_iothread();
3570 /* It is need to hold the global lock to call this helper */
3571 void colo_release_ram_cache(void)
3575 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION
);
3576 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3577 g_free(block
->bmap
);
3581 WITH_RCU_READ_LOCK_GUARD() {
3582 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3583 if (block
->colo_cache
) {
3584 qemu_anon_ram_free(block
->colo_cache
, block
->used_length
);
3585 block
->colo_cache
= NULL
;
3589 ram_state_cleanup(&ram_state
);
/**
 * ram_load_setup: Setup RAM for migration incoming side
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to receive the data
 * @opaque: RAMState pointer
 */
static int ram_load_setup(QEMUFile *f, void *opaque)
{
    xbzrle_load_setup();
    ramblock_recv_map_init();

    return 0;
}

static int ram_load_cleanup(void *opaque)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        qemu_ram_block_writeback(rb);
    }

    xbzrle_load_cleanup();

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        g_free(rb->receivedmap);
        rb->receivedmap = NULL;
    }

    return 0;
}

/**
 * ram_postcopy_incoming_init: allocate postcopy data structures
 *
 * Returns 0 for success and negative if there was one error
 *
 * @mis: current migration incoming state
 *
 * Allocate data structures etc needed by incoming migration with
 * postcopy-ram. postcopy-ram's similarly names
 * postcopy_ram_incoming_init does the work.
 */
int ram_postcopy_incoming_init(MigrationIncomingState *mis)
{
    return postcopy_ram_incoming_init(mis);
}
3643 * ram_load_postcopy: load a page in postcopy case
3645 * Returns 0 for success or -errno in case of error
3647 * Called in postcopy mode by ram_load().
3648 * rcu_read_lock is taken prior to this being called.
3650 * @f: QEMUFile where to send the data
3651 * @channel: the channel to use for loading
3653 int ram_load_postcopy(QEMUFile
*f
, int channel
)
3655 int flags
= 0, ret
= 0;
3656 bool place_needed
= false;
3657 bool matches_target_page_size
= false;
3658 MigrationIncomingState
*mis
= migration_incoming_get_current();
3659 PostcopyTmpPage
*tmp_page
= &mis
->postcopy_tmp_pages
[channel
];
3661 while (!ret
&& !(flags
& RAM_SAVE_FLAG_EOS
)) {
3663 void *page_buffer
= NULL
;
3664 void *place_source
= NULL
;
3665 RAMBlock
*block
= NULL
;
3669 addr
= qemu_get_be64(f
);
3672 * If qemu file error, we should stop here, and then "addr"
3675 ret
= qemu_file_get_error(f
);
3680 flags
= addr
& ~TARGET_PAGE_MASK
;
3681 addr
&= TARGET_PAGE_MASK
;
3683 trace_ram_load_postcopy_loop(channel
, (uint64_t)addr
, flags
);
3684 if (flags
& (RAM_SAVE_FLAG_ZERO
| RAM_SAVE_FLAG_PAGE
|
3685 RAM_SAVE_FLAG_COMPRESS_PAGE
)) {
3686 block
= ram_block_from_stream(mis
, f
, flags
, channel
);
3693 * Relying on used_length is racy and can result in false positives.
3694 * We might place pages beyond used_length in case RAM was shrunk
3695 * while in postcopy, which is fine - trying to place via
3696 * UFFDIO_COPY/UFFDIO_ZEROPAGE will never segfault.
3698 if (!block
->host
|| addr
>= block
->postcopy_length
) {
3699 error_report("Illegal RAM offset " RAM_ADDR_FMT
, addr
);
3703 tmp_page
->target_pages
++;
3704 matches_target_page_size
= block
->page_size
== TARGET_PAGE_SIZE
;
3706 * Postcopy requires that we place whole host pages atomically;
3707 * these may be huge pages for RAMBlocks that are backed by
3709 * To make it atomic, the data is read into a temporary page
3710 * that's moved into place later.
3711 * The migration protocol uses, possibly smaller, target-pages
3712 * however the source ensures it always sends all the components
3713 * of a host page in one chunk.
3715 page_buffer
= tmp_page
->tmp_huge_page
+
3716 host_page_offset_from_ram_block_offset(block
, addr
);
3717 /* If all TP are zero then we can optimise the place */
3718 if (tmp_page
->target_pages
== 1) {
3719 tmp_page
->host_addr
=
3720 host_page_from_ram_block_offset(block
, addr
);
3721 } else if (tmp_page
->host_addr
!=
3722 host_page_from_ram_block_offset(block
, addr
)) {
3723 /* not the 1st TP within the HP */
3724 error_report("Non-same host page detected on channel %d: "
3725 "Target host page %p, received host page %p "
3726 "(rb %s offset 0x"RAM_ADDR_FMT
" target_pages %d)",
3727 channel
, tmp_page
->host_addr
,
3728 host_page_from_ram_block_offset(block
, addr
),
3729 block
->idstr
, addr
, tmp_page
->target_pages
);
3735 * If it's the last part of a host page then we place the host
3738 if (tmp_page
->target_pages
==
3739 (block
->page_size
/ TARGET_PAGE_SIZE
)) {
3740 place_needed
= true;
3742 place_source
= tmp_page
->tmp_huge_page
;
3745 switch (flags
& ~RAM_SAVE_FLAG_CONTINUE
) {
3746 case RAM_SAVE_FLAG_ZERO
:
3747 ch
= qemu_get_byte(f
);
3749 * Can skip to set page_buffer when
3750 * this is a zero page and (block->page_size == TARGET_PAGE_SIZE).
3752 if (ch
|| !matches_target_page_size
) {
3753 memset(page_buffer
, ch
, TARGET_PAGE_SIZE
);
3756 tmp_page
->all_zero
= false;
3760 case RAM_SAVE_FLAG_PAGE
:
3761 tmp_page
->all_zero
= false;
3762 if (!matches_target_page_size
) {
3763 /* For huge pages, we always use temporary buffer */
3764 qemu_get_buffer(f
, page_buffer
, TARGET_PAGE_SIZE
);
3767 * For small pages that matches target page size, we
3768 * avoid the qemu_file copy. Instead we directly use
3769 * the buffer of QEMUFile to place the page. Note: we
3770 * cannot do any QEMUFile operation before using that
3771 * buffer to make sure the buffer is valid when
3774 qemu_get_buffer_in_place(f
, (uint8_t **)&place_source
,
3778 case RAM_SAVE_FLAG_COMPRESS_PAGE
:
3779 tmp_page
->all_zero
= false;
3780 len
= qemu_get_be32(f
);
3781 if (len
< 0 || len
> compressBound(TARGET_PAGE_SIZE
)) {
3782 error_report("Invalid compressed data length: %d", len
);
3786 decompress_data_with_multi_threads(f
, page_buffer
, len
);
3788 case RAM_SAVE_FLAG_MULTIFD_FLUSH
:
3789 multifd_recv_sync_main();
3791 case RAM_SAVE_FLAG_EOS
:
3793 if (migrate_multifd() &&
3794 migrate_multifd_flush_after_each_section()) {
3795 multifd_recv_sync_main();
3799 error_report("Unknown combination of migration flags: 0x%x"
3800 " (postcopy mode)", flags
);
3805 /* Got the whole host page, wait for decompress before placing. */
3807 ret
|= wait_for_decompress_done();
3810 /* Detect for any possible file errors */
3811 if (!ret
&& qemu_file_get_error(f
)) {
3812 ret
= qemu_file_get_error(f
);
3815 if (!ret
&& place_needed
) {
3816 if (tmp_page
->all_zero
) {
3817 ret
= postcopy_place_page_zero(mis
, tmp_page
->host_addr
, block
);
3819 ret
= postcopy_place_page(mis
, tmp_page
->host_addr
,
3820 place_source
, block
);
3822 place_needed
= false;
3823 postcopy_temp_page_reset(tmp_page
);
static bool postcopy_is_running(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
}
3837 * Flush content of RAM cache into SVM's memory.
3838 * Only flush the pages that be dirtied by PVM or SVM or both.
3840 void colo_flush_ram_cache(void)
3842 RAMBlock
*block
= NULL
;
3845 unsigned long offset
= 0;
3847 memory_global_dirty_log_sync(false);
3848 qemu_mutex_lock(&ram_state
->bitmap_mutex
);
3849 WITH_RCU_READ_LOCK_GUARD() {
3850 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3851 ramblock_sync_dirty_bitmap(ram_state
, block
);
3855 trace_colo_flush_ram_cache_begin(ram_state
->migration_dirty_pages
);
3856 WITH_RCU_READ_LOCK_GUARD() {
3857 block
= QLIST_FIRST_RCU(&ram_list
.blocks
);
3860 unsigned long num
= 0;
3862 offset
= colo_bitmap_find_dirty(ram_state
, block
, offset
, &num
);
3863 if (!offset_in_ramblock(block
,
3864 ((ram_addr_t
)offset
) << TARGET_PAGE_BITS
)) {
3867 block
= QLIST_NEXT_RCU(block
, next
);
3869 unsigned long i
= 0;
3871 for (i
= 0; i
< num
; i
++) {
3872 migration_bitmap_clear_dirty(ram_state
, block
, offset
+ i
);
3874 dst_host
= block
->host
3875 + (((ram_addr_t
)offset
) << TARGET_PAGE_BITS
);
3876 src_host
= block
->colo_cache
3877 + (((ram_addr_t
)offset
) << TARGET_PAGE_BITS
);
3878 memcpy(dst_host
, src_host
, TARGET_PAGE_SIZE
* num
);
3883 qemu_mutex_unlock(&ram_state
->bitmap_mutex
);
3884 trace_colo_flush_ram_cache_end();
3887 static int parse_ramblock(QEMUFile
*f
, RAMBlock
*block
, ram_addr_t length
)
3890 /* ADVISE is earlier, it shows the source has the postcopy capability on */
3891 bool postcopy_advised
= migration_incoming_postcopy_advised();
3895 if (!qemu_ram_is_migratable(block
)) {
3896 error_report("block %s should not be migrated !", block
->idstr
);
3900 if (length
!= block
->used_length
) {
3901 Error
*local_err
= NULL
;
3903 ret
= qemu_ram_resize(block
, length
, &local_err
);
3905 error_report_err(local_err
);
3908 /* For postcopy we need to check hugepage sizes match */
3909 if (postcopy_advised
&& migrate_postcopy_ram() &&
3910 block
->page_size
!= qemu_host_page_size
) {
3911 uint64_t remote_page_size
= qemu_get_be64(f
);
3912 if (remote_page_size
!= block
->page_size
) {
3913 error_report("Mismatched RAM page size %s "
3914 "(local) %zd != %" PRId64
, block
->idstr
,
3915 block
->page_size
, remote_page_size
);
3919 if (migrate_ignore_shared()) {
3920 hwaddr addr
= qemu_get_be64(f
);
3921 if (migrate_ram_is_ignored(block
) &&
3922 block
->mr
->addr
!= addr
) {
3923 error_report("Mismatched GPAs for block %s "
3924 "%" PRId64
"!= %" PRId64
, block
->idstr
,
3925 (uint64_t)addr
, (uint64_t)block
->mr
->addr
);
3929 ret
= rdma_block_notification_handle(f
, block
->idstr
);
3931 qemu_file_set_error(f
, ret
);
static int parse_ramblocks(QEMUFile *f, ram_addr_t total_ram_bytes)
{
    int ret = 0;

    /* Synchronize RAM block list */
    while (!ret && total_ram_bytes) {
        RAMBlock *block;
        char id[256];
        ram_addr_t length;
        int len = qemu_get_byte(f);

        qemu_get_buffer(f, (uint8_t *)id, len);
        id[len] = 0;
        length = qemu_get_be64(f);

        block = qemu_ram_block_by_name(id);
        if (block) {
            ret = parse_ramblock(f, block, length);
        } else {
            error_report("Unknown ramblock \"%s\", cannot accept "
                         "migration", id);
            ret = -EINVAL;
        }
        total_ram_bytes -= length;
    }

    return ret;
}
3969 * Returns 0 for success or -errno in case of error
3971 * Called in precopy mode by ram_load().
3972 * rcu_read_lock is taken prior to this being called.
3974 * @f: QEMUFile where to send the data
3976 static int ram_load_precopy(QEMUFile
*f
)
3978 MigrationIncomingState
*mis
= migration_incoming_get_current();
3979 int flags
= 0, ret
= 0, invalid_flags
= 0, len
= 0, i
= 0;
3981 if (!migrate_compress()) {
3982 invalid_flags
|= RAM_SAVE_FLAG_COMPRESS_PAGE
;
3985 while (!ret
&& !(flags
& RAM_SAVE_FLAG_EOS
)) {
3987 void *host
= NULL
, *host_bak
= NULL
;
3991 * Yield periodically to let main loop run, but an iteration of
3992 * the main loop is expensive, so do it each some iterations
3994 if ((i
& 32767) == 0 && qemu_in_coroutine()) {
3995 aio_co_schedule(qemu_get_current_aio_context(),
3996 qemu_coroutine_self());
3997 qemu_coroutine_yield();
4001 addr
= qemu_get_be64(f
);
4002 flags
= addr
& ~TARGET_PAGE_MASK
;
4003 addr
&= TARGET_PAGE_MASK
;
4005 if (flags
& invalid_flags
) {
4006 if (flags
& invalid_flags
& RAM_SAVE_FLAG_COMPRESS_PAGE
) {
4007 error_report("Received an unexpected compressed page");
4014 if (flags
& (RAM_SAVE_FLAG_ZERO
| RAM_SAVE_FLAG_PAGE
|
4015 RAM_SAVE_FLAG_COMPRESS_PAGE
| RAM_SAVE_FLAG_XBZRLE
)) {
4016 RAMBlock
*block
= ram_block_from_stream(mis
, f
, flags
,
4017 RAM_CHANNEL_PRECOPY
);
4019 host
= host_from_ram_block_offset(block
, addr
);
4021 * After going into COLO stage, we should not load the page
4022 * into SVM's memory directly, we put them into colo_cache firstly.
4023 * NOTE: We need to keep a copy of SVM's ram in colo_cache.
4024 * Previously, we copied all these memory in preparing stage of COLO
4025 * while we need to stop VM, which is a time-consuming process.
4026 * Here we optimize it by a trick, back-up every page while in
4027 * migration process while COLO is enabled, though it affects the
4028 * speed of the migration, but it obviously reduce the downtime of
4029 * back-up all SVM'S memory in COLO preparing stage.
4031 if (migration_incoming_colo_enabled()) {
4032 if (migration_incoming_in_colo_state()) {
4033 /* In COLO stage, put all pages into cache temporarily */
4034 host
= colo_cache_from_block_offset(block
, addr
, true);
4037 * In migration stage but before COLO stage,
4038 * Put all pages into both cache and SVM's memory.
4040 host_bak
= colo_cache_from_block_offset(block
, addr
, false);
4044 error_report("Illegal RAM offset " RAM_ADDR_FMT
, addr
);
4048 if (!migration_incoming_in_colo_state()) {
4049 ramblock_recv_bitmap_set(block
, host
);
4052 trace_ram_load_loop(block
->idstr
, (uint64_t)addr
, flags
, host
);
4055 switch (flags
& ~RAM_SAVE_FLAG_CONTINUE
) {
4056 case RAM_SAVE_FLAG_MEM_SIZE
:
4057 ret
= parse_ramblocks(f
, addr
);
4060 case RAM_SAVE_FLAG_ZERO
:
4061 ch
= qemu_get_byte(f
);
4062 ram_handle_compressed(host
, ch
, TARGET_PAGE_SIZE
);
4065 case RAM_SAVE_FLAG_PAGE
:
4066 qemu_get_buffer(f
, host
, TARGET_PAGE_SIZE
);
4069 case RAM_SAVE_FLAG_COMPRESS_PAGE
:
4070 len
= qemu_get_be32(f
);
4071 if (len
< 0 || len
> compressBound(TARGET_PAGE_SIZE
)) {
4072 error_report("Invalid compressed data length: %d", len
);
4076 decompress_data_with_multi_threads(f
, host
, len
);
4079 case RAM_SAVE_FLAG_XBZRLE
:
4080 if (load_xbzrle(f
, addr
, host
) < 0) {
4081 error_report("Failed to decompress XBZRLE page at "
4082 RAM_ADDR_FMT
, addr
);
4087 case RAM_SAVE_FLAG_MULTIFD_FLUSH
:
4088 multifd_recv_sync_main();
4090 case RAM_SAVE_FLAG_EOS
:
4092 if (migrate_multifd() &&
4093 migrate_multifd_flush_after_each_section()) {
4094 multifd_recv_sync_main();
4097 case RAM_SAVE_FLAG_HOOK
:
4098 ret
= rdma_registration_handle(f
);
4100 qemu_file_set_error(f
, ret
);
4104 error_report("Unknown combination of migration flags: 0x%x", flags
);
4108 ret
= qemu_file_get_error(f
);
4110 if (!ret
&& host_bak
) {
4111 memcpy(host_bak
, host
, TARGET_PAGE_SIZE
);
4115 ret
|= wait_for_decompress_done();
4119 static int ram_load(QEMUFile
*f
, void *opaque
, int version_id
)
4122 static uint64_t seq_iter
;
4124 * If system is running in postcopy mode, page inserts to host memory must
4127 bool postcopy_running
= postcopy_is_running();
4131 if (version_id
!= 4) {
4136 * This RCU critical section can be very long running.
4137 * When RCU reclaims in the code start to become numerous,
4138 * it will be necessary to reduce the granularity of this
4141 WITH_RCU_READ_LOCK_GUARD() {
4142 if (postcopy_running
) {
4144 * Note! Here RAM_CHANNEL_PRECOPY is the precopy channel of
4145 * postcopy migration, we have another RAM_CHANNEL_POSTCOPY to
4146 * service fast page faults.
4148 ret
= ram_load_postcopy(f
, RAM_CHANNEL_PRECOPY
);
4150 ret
= ram_load_precopy(f
);
4153 trace_ram_load_complete(ret
, seq_iter
);
static bool ram_has_postcopy(void *opaque)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        if (ramblock_is_pmem(rb)) {
            info_report("Block: %s, host: %p is a nvdimm memory, postcopy"
                        "is not supported now!", rb->idstr, rb->host);
            return false;
        }
    }

    return migrate_postcopy_ram();
}
/* Sync all the dirty bitmap with destination VM.  */
static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
{
    RAMBlock *block;
    QEMUFile *file = s->to_dst_file;

    trace_ram_dirty_bitmap_sync_start();

    qatomic_set(&rs->postcopy_bmap_sync_requested, 0);
    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        qemu_savevm_send_recv_bitmap(file, block->idstr);
        trace_ram_dirty_bitmap_request(block->idstr);
        qatomic_inc(&rs->postcopy_bmap_sync_requested);
    }

    trace_ram_dirty_bitmap_sync_wait();

    /* Wait until all the ramblocks' dirty bitmap synced */
    while (qatomic_read(&rs->postcopy_bmap_sync_requested)) {
        migration_rp_wait(s);
    }

    trace_ram_dirty_bitmap_sync_complete();

    return 0;
}
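/*
 * Recovery handshake: qemu_savevm_send_recv_bitmap() asks the destination
 * for each block's received bitmap, and the return-path thread decrements
 * postcopy_bmap_sync_requested as each bitmap is reloaded (see
 * ram_dirty_bitmap_reload()); the function above simply blocks until that
 * counter drains back to zero.
 */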
4200 * Read the received bitmap, revert it as the initial dirty bitmap.
4201 * This is only used when the postcopy migration is paused but wants
4202 * to resume from a middle point.
4204 int ram_dirty_bitmap_reload(MigrationState
*s
, RAMBlock
*block
)
4207 /* from_dst_file is always valid because we're within rp_thread */
4208 QEMUFile
*file
= s
->rp_state
.from_dst_file
;
4209 g_autofree
unsigned long *le_bitmap
= NULL
;
4210 unsigned long nbits
= block
->used_length
>> TARGET_PAGE_BITS
;
4211 uint64_t local_size
= DIV_ROUND_UP(nbits
, 8);
4212 uint64_t size
, end_mark
;
4213 RAMState
*rs
= ram_state
;
4215 trace_ram_dirty_bitmap_reload_begin(block
->idstr
);
4217 if (s
->state
!= MIGRATION_STATUS_POSTCOPY_RECOVER
) {
4218 error_report("%s: incorrect state %s", __func__
,
4219 MigrationStatus_str(s
->state
));
4224 * Note: see comments in ramblock_recv_bitmap_send() on why we
4225 * need the endianness conversion, and the paddings.
4227 local_size
= ROUND_UP(local_size
, 8);
4230 le_bitmap
= bitmap_new(nbits
+ BITS_PER_LONG
);
4232 size
= qemu_get_be64(file
);
4234 /* The size of the bitmap should match with our ramblock */
4235 if (size
!= local_size
) {
4236 error_report("%s: ramblock '%s' bitmap size mismatch "
4237 "(0x%"PRIx64
" != 0x%"PRIx64
")", __func__
,
4238 block
->idstr
, size
, local_size
);
4242 size
= qemu_get_buffer(file
, (uint8_t *)le_bitmap
, local_size
);
4243 end_mark
= qemu_get_be64(file
);
4245 ret
= qemu_file_get_error(file
);
4246 if (ret
|| size
!= local_size
) {
4247 error_report("%s: read bitmap failed for ramblock '%s': %d"
4248 " (size 0x%"PRIx64
", got: 0x%"PRIx64
")",
4249 __func__
, block
->idstr
, ret
, local_size
, size
);
4253 if (end_mark
!= RAMBLOCK_RECV_BITMAP_ENDING
) {
4254 error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIx64
,
4255 __func__
, block
->idstr
, end_mark
);
4260 * Endianness conversion. We are during postcopy (though paused).
4261 * The dirty bitmap won't change. We can directly modify it.
4263 bitmap_from_le(block
->bmap
, le_bitmap
, nbits
);
4266 * What we received is "received bitmap". Revert it as the initial
4267 * dirty bitmap for this ramblock.
4269 bitmap_complement(block
->bmap
, block
->bmap
, nbits
);
4271 /* Clear dirty bits of discarded ranges that we don't want to migrate. */
4272 ramblock_dirty_bitmap_clear_discarded_pages(block
);
4274 /* We'll recalculate migration_dirty_pages in ram_state_resume_prepare(). */
4275 trace_ram_dirty_bitmap_reload_complete(block
->idstr
);
4277 qatomic_dec(&rs
->postcopy_bmap_sync_requested
);
4280 * We succeeded to sync bitmap for current ramblock. Always kick the
4281 * migration thread to check whether all requested bitmaps are
4282 * reloaded. NOTE: it's racy to only kick when requested==0, because
4283 * we don't know whether the migration thread may still be increasing
4286 migration_rp_kick(s
);
static int ram_resume_prepare(MigrationState *s, void *opaque)
{
    RAMState *rs = *(RAMState **)opaque;
    int ret;

    ret = ram_dirty_bitmap_sync_all(s, rs);
    if (ret) {
        return ret;
    }

    ram_state_resume_prepare(rs, s->to_dst_file);

    return 0;
}

void postcopy_preempt_shutdown_file(MigrationState *s)
{
    qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS);
    qemu_fflush(s->postcopy_qemufile_src);
}
static SaveVMHandlers savevm_ram_handlers = {
    .save_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .has_postcopy = ram_has_postcopy,
    .state_pending_exact = ram_state_pending_exact,
    .state_pending_estimate = ram_state_pending_estimate,
    .load_state = ram_load,
    .save_cleanup = ram_save_cleanup,
    .load_setup = ram_load_setup,
    .load_cleanup = ram_load_cleanup,
    .resume_prepare = ram_resume_prepare,
};
static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
                                      size_t old_size, size_t new_size)
{
    PostcopyState ps = postcopy_state_get();
    ram_addr_t offset;
    RAMBlock *rb = qemu_ram_block_from_host(host, false, &offset);
    Error *err = NULL;

    if (!rb) {
        error_report("RAM block not found");
        return;
    }

    if (migrate_ram_is_ignored(rb)) {
        return;
    }

    if (!migration_is_idle()) {
        /*
         * Precopy code on the source cannot deal with the size of RAM blocks
         * changing at random points in time - especially after sending the
         * RAM block sizes in the migration stream, they must no longer change.
         * Abort and indicate a proper reason.
         */
        error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr);
        migration_cancel(err);
        error_free(err);
    }

    switch (ps) {
    case POSTCOPY_INCOMING_ADVISE:
        /*
         * Update what ram_postcopy_incoming_init()->init_range() does at the
         * time postcopy was advised. Syncing RAM blocks with the source will
         * result in RAM resizes.
         */
        if (old_size < new_size) {
            if (ram_discard_range(rb->idstr, old_size, new_size - old_size)) {
                error_report("RAM block '%s' discard of resized RAM failed",
                             rb->idstr);
            }
        }
        rb->postcopy_length = new_size;
        break;
    case POSTCOPY_INCOMING_NONE:
    case POSTCOPY_INCOMING_RUNNING:
    case POSTCOPY_INCOMING_END:
        /*
         * Once our guest is running, postcopy no longer cares about
         * resizes. When growing, the new memory was not available on the
         * source, no handler needed.
         */
        break;
    default:
        error_report("RAM block '%s' resized during postcopy state: %d",
                     rb->idstr, ps);
        exit(-1);
    }
}
static RAMBlockNotifier ram_mig_ram_notifier = {
    .ram_block_resized = ram_mig_ram_block_resized,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
    ram_block_notifier_add(&ram_mig_ram_notifier);
}