/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/madvise.h"
#include "qemu/main-loop.h"
#include "ram-compress.h"
#include "migration.h"
#include "migration-stats.h"
#include "migration/register.h"
#include "migration/misc.h"
#include "qemu-file.h"
#include "postcopy-ram.h"
#include "page_cache.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-types-migration.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/qmp/qerror.h"
#include "exec/ram_addr.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "migration/colo.h"
#include "sysemu/cpu-throttle.h"
#include "sysemu/runstate.h"
#include "sysemu/dirtylimit.h"
#include "sysemu/kvm.h"
#include "hw/boards.h" /* for machine_dump_guest_core() */

#if defined(__linux__)
#include "qemu/userfaultfd.h"
#endif /* defined(__linux__) */
/***********************************************************/
/* ram save/restore */

/*
 * RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS; it was set
 * for pages that were filled with the same char.  We switched it to only
 * search for the zero value, and it was renamed to avoid confusion with
 * RAM_SAVE_FLAG_COMPRESS_PAGE.
 *
 * RAM_SAVE_FLAG_FULL was obsoleted in 2009; it can be reused now.
 */
#define RAM_SAVE_FLAG_FULL     0x01
#define RAM_SAVE_FLAG_ZERO     0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in rdma.h for RAM_SAVE_FLAG_HOOK */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
#define RAM_SAVE_FLAG_MULTIFD_FLUSH    0x200
/* We can't use any flag that is bigger than 0x200 */
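/*
 * Note on the wire format: these flags travel in the low bits of the 8-byte
 * offset word written by save_page_header() further down.  Page offsets are
 * TARGET_PAGE_SIZE aligned, so the low bits are free to carry them; e.g. a
 * zero page at offset 0x2000 is announced roughly as
 * qemu_put_be64(f, 0x2000 | RAM_SAVE_FLAG_ZERO).
 */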
XBZRLECacheStats xbzrle_counters;
/* used by the search for pages to send */
struct PageSearchStatus {
    /* The migration channel used for a specific host page */
    QEMUFile *pss_channel;
    /* Last block from where we have sent data */
    RAMBlock *last_sent_block;
    /* Current block being searched */
    RAMBlock *block;
    /* Current page to search from */
    unsigned long page;
    /* Set once we wrap around */
    bool complete_round;
    /* Whether we're sending a host page */
    bool host_page_sending;
    /* The start/end of current host page.  Invalid if host_page_sending==false */
    unsigned long host_page_start;
    unsigned long host_page_end;
};
typedef struct PageSearchStatus PageSearchStatus;
/* struct containing the XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
    /* it will store a page full of zeros */
    uint8_t *zero_target_page;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
} XBZRLE;
static void XBZRLE_cache_lock(void)
{
    if (migrate_xbzrle()) {
        qemu_mutex_lock(&XBZRLE.lock);
    }
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_xbzrle()) {
        qemu_mutex_unlock(&XBZRLE.lock);
    }
}
/**
 * xbzrle_cache_resize: resize the xbzrle cache
 *
 * This function is called from migrate_params_apply in the main
 * thread, possibly while a migration is in progress.  A running
 * migration may be using the cache and might finish during this call,
 * hence changes to the cache are protected by XBZRLE.lock.
 *
 * Returns 0 for success or -1 for error
 *
 * @new_size: new cache size
 * @errp: set *errp if the check failed, with reason
 */
int xbzrle_cache_resize(uint64_t new_size, Error **errp)
{
    PageCache *new_cache;
    int64_t ret = 0;

    /* Check for truncation */
    if (new_size != (size_t)new_size) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return -1;
    }

    if (new_size == migrate_xbzrle_cache_size()) {
        /* nothing to do */
        return 0;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
        if (!new_cache) {
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }
out:
    XBZRLE_cache_unlock();
    return ret;
}
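/*
 * Hedged usage sketch (the real caller lives outside this file): when the
 * user changes the "xbzrle-cache-size" migration parameter, the parameter
 * apply path is expected to do something along the lines of
 *
 *     Error *local_err = NULL;
 *     if (xbzrle_cache_resize(new_size, &local_err) < 0) {
 *         error_propagate(errp, local_err);
 *     }
 */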
static bool postcopy_preempt_active(void)
{
    return migrate_postcopy_preempt() && migration_in_postcopy();
}
bool migrate_ram_is_ignored(RAMBlock *block)
{
    return !qemu_ram_is_migratable(block) ||
           (migrate_ignore_shared() && qemu_ram_is_shared(block)
            && qemu_ram_is_named_file(block));
}
#undef RAMBLOCK_FOREACH

int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        ret = func(block, opaque);
        if (ret) {
            break;
        }
    }
    return ret;
}
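/*
 * Illustrative (hypothetical) callback for foreach_not_ignored_block(),
 * counting the migratable blocks:
 *
 *     static int count_block(RAMBlock *block, void *opaque)
 *     {
 *         (*(unsigned *)opaque)++;
 *         return 0;   // a non-zero return stops the iteration early
 *     }
 *
 *     unsigned nblocks = 0;
 *     foreach_not_ignored_block(count_block, &nblocks);
 */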
static void ramblock_recv_map_init(void)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        assert(!rb->receivedmap);
        rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
    }
}
int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
{
    return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
                    rb->receivedmap);
}
bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
{
    return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
}
void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
{
    set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
}
void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
                                    size_t nr)
{
    bitmap_set_atomic(rb->receivedmap,
                      ramblock_recv_bitmap_offset(host_addr, rb),
                      nr);
}
#define  RAMBLOCK_RECV_BITMAP_ENDING  (0x0123456789abcdefULL)

/*
 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
 *
 * Returns >0 if success with sent bytes, or <0 if error.
 */
int64_t ramblock_recv_bitmap_send(QEMUFile *file,
                                  const char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);
    unsigned long *le_bitmap, nbits;
    uint64_t size;

    if (!block) {
        error_report("%s: invalid block name: %s", __func__, block_name);
        return -1;
    }

    nbits = block->postcopy_length >> TARGET_PAGE_BITS;

    /*
     * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
     * machines we may need 4 more bytes for padding (see below
     * comment). So extend it a bit before hand.
     */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    /*
     * Always use little endian when sending the bitmap.  This is
     * required so that source and destination VMs that do not share the
     * same endianness still agree on the format. (Note: big endian won't work.)
     */
    bitmap_to_le(le_bitmap, block->receivedmap, nbits);

    /* Size of the bitmap, in bytes */
    size = DIV_ROUND_UP(nbits, 8);

    /*
     * size is always aligned to 8 bytes for 64bit machines, but it
     * may not be true for 32bit machines.  We need this padding to
     * make sure the migration can survive even between 32bit and
     * 64bit hosts.
     */
    size = ROUND_UP(size, 8);

    qemu_put_be64(file, size);
    qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
    /*
     * Mark as an end, in case the middle part is screwed up due to
     * some "mysterious" reason.
     */
    qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
    qemu_fflush(file);

    g_free(le_bitmap);

    if (qemu_file_get_error(file)) {
        return qemu_file_get_error(file);
    }

    return size + sizeof(size);
}
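/*
 * On the wire the helper above therefore produces:
 *   [ be64: padded bitmap size in bytes ]
 *   [ size bytes: receivedmap, little endian, zero-padded to 8 bytes ]
 *   [ be64: RAMBLOCK_RECV_BITMAP_ENDING sanity marker ]
 * and reports size + 8 bytes as the amount sent.
 */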
/*
 * An outstanding page request, on the source, having been received
 * and queued
 */
struct RAMSrcPageRequest {
    RAMBlock *rb;
    hwaddr    offset;
    hwaddr    len;

    QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
};
/* State of RAM for migration */
struct RAMState {
    /*
     * PageSearchStatus structures for the channels when sending pages.
     * Protected by the bitmap_mutex.
     */
    PageSearchStatus pss[RAM_CHANNEL_MAX];
    /* UFFD file descriptor, used in 'write-tracking' migration */
    int uffdio_fd;
    /* total ram size in bytes */
    uint64_t ram_bytes_total;
    /* Last block that we have visited searching for dirty pages */
    RAMBlock *last_seen_block;
    /* Last dirty target page we have sent */
    ram_addr_t last_page;
    /* last ram version we have seen */
    uint32_t last_version;
    /* How many times we have dirty too many pages */
    int dirty_rate_high_cnt;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
    /* bytes transferred at start_time */
    uint64_t bytes_xfer_prev;
    /* number of dirty pages since start_time */
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;
    /* Amount of xbzrle pages since the beginning of the period */
    uint64_t xbzrle_pages_prev;
    /* Amount of xbzrle encoded bytes since the beginning of the period */
    uint64_t xbzrle_bytes_prev;
    /* Are we really using XBZRLE (e.g., after the first round)? */
    bool xbzrle_started;
    /* Are we on the last stage of migration? */
    bool last_stage;
    /* total handled target pages at the beginning of period */
    uint64_t target_page_count_prev;
    /* total handled target pages since start */
    uint64_t target_page_count;
    /* number of dirty bits in the bitmap */
    uint64_t migration_dirty_pages;
    /*
     * Protects:
     * - dirty/clear bitmap
     * - migration_dirty_pages
     * - pss structures
     */
    QemuMutex bitmap_mutex;
    /* The RAMBlock used in the last src_page_requests */
    RAMBlock *last_req_rb;
    /* Queue of outstanding page requests from the destination */
    QemuMutex src_page_req_mutex;
    QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;

    /*
     * This is only used when postcopy is in recovery phase, to communicate
     * between the migration thread and the return path thread on dirty
     * bitmap synchronizations.  This field is unused in other stages of
     * the migration process.
     */
    unsigned int postcopy_bmap_sync_requested;
};
typedef struct RAMState RAMState;
static RAMState *ram_state;

static NotifierWithReturnList precopy_notifier_list;
/* Whether postcopy has queued requests? */
static bool postcopy_has_request(RAMState *rs)
{
    return !QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests);
}
void precopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&precopy_notifier_list);
}
void precopy_add_notifier(NotifierWithReturn *n)
{
    notifier_with_return_list_add(&precopy_notifier_list, n);
}
void precopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}
int precopy_notify(PrecopyNotifyReason reason, Error **errp)
{
    PrecopyNotifyData pnd;
    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
}
uint64_t ram_bytes_remaining(void)
{
    return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
                       0;
}
void ram_transferred_add(uint64_t bytes)
{
    if (runstate_is_running()) {
        stat64_add(&mig_stats.precopy_bytes, bytes);
    } else if (migration_in_postcopy()) {
        stat64_add(&mig_stats.postcopy_bytes, bytes);
    } else {
        stat64_add(&mig_stats.downtime_bytes, bytes);
    }
    stat64_add(&mig_stats.transferred, bytes);
}
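/*
 * Accounting note for the helper above: every transferred byte lands in
 * exactly one phase bucket (precopy while vCPUs run, postcopy, or downtime
 * otherwise) and always in the global "transferred" counter as well.
 */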
struct MigrationOps {
    int (*ram_save_target_page)(RAMState *rs, PageSearchStatus *pss);
};
typedef struct MigrationOps MigrationOps;

MigrationOps *migration_ops;
static int ram_save_host_page_urgent(PageSearchStatus *pss);
/* NOTE: page is the PFN not real ram_addr_t. */
static void pss_init(PageSearchStatus *pss, RAMBlock *rb, ram_addr_t page)
{
    pss->block = rb;
    pss->page = page;
    pss->complete_round = false;
}
/*
 * Check whether two PSSs are actively sending the same page.  Return true
 * if it is, false otherwise.
 */
static bool pss_overlap(PageSearchStatus *pss1, PageSearchStatus *pss2)
{
    return pss1->host_page_sending && pss2->host_page_sending &&
           (pss1->host_page_start == pss2->host_page_start);
}
/**
 * save_page_header: write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns the number of bytes written
 *
 * @pss: current PSS channel status
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(PageSearchStatus *pss, QEMUFile *f,
                               RAMBlock *block, ram_addr_t offset)
{
    size_t size, len;
    bool same_block = (block == pss->last_sent_block);

    if (same_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    qemu_put_be64(f, offset);
    size = 8;

    if (!same_block) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
        pss->last_sent_block = block;
    }
    return size;
}
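/*
 * So a page header is normally just the 8-byte offset|flags word; the block
 * idstr (1 length byte + name) is only emitted when the block changes, and
 * RAM_SAVE_FLAG_CONTINUE tells the destination to keep using the previously
 * announced block.
 */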
/**
 * mig_throttle_guest_down: throttle down the guest
 *
 * Reduce amount of guest cpu execution to hopefully slow down memory
 * writes.  If guest dirty memory rate is reduced below the rate at
 * which we can transfer pages to the destination then we should be
 * able to complete migration.  Some workloads dirty memory way too
 * fast and will not effectively converge, even with auto-converge.
 */
static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
                                    uint64_t bytes_dirty_threshold)
{
    uint64_t pct_initial = migrate_cpu_throttle_initial();
    uint64_t pct_increment = migrate_cpu_throttle_increment();
    bool pct_tailslow = migrate_cpu_throttle_tailslow();
    int pct_max = migrate_max_cpu_throttle();

    uint64_t throttle_now = cpu_throttle_get_percentage();
    uint64_t cpu_now, cpu_ideal, throttle_inc;

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        if (!pct_tailslow) {
            throttle_inc = pct_increment;
        } else {
            /* Compute the ideal CPU percentage used by Guest, which may
             * make the dirty rate match the dirty rate threshold. */
            cpu_now = 100 - throttle_now;
            cpu_ideal = cpu_now * (bytes_dirty_threshold * 1.0 /
                                   bytes_dirty_period);
            throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment);
        }
        cpu_throttle_set(MIN(throttle_now + throttle_inc, pct_max));
    }
}
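/*
 * Worked example for the tailslow path above: with the guest currently at
 * throttle_now = 20% (so cpu_now = 80) and dirtying twice as fast as the
 * threshold allows (bytes_dirty_threshold / bytes_dirty_period = 0.5),
 * cpu_ideal = 80 * 0.5 = 40, so the throttle is raised by
 * min(80 - 40, pct_increment), capped at pct_max.
 */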
void mig_throttle_counter_reset(void)
{
    RAMState *rs = ram_state;

    rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    rs->num_dirty_pages_period = 0;
    rs->bytes_xfer_prev = stat64_get(&mig_stats.transferred);
}
/**
 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
 *
 * @current_addr: address for the zero page
 *
 * Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
                 stat64_get(&mig_stats.dirty_sync_count));
}
#define ENCODING_FLAG_XBZRLE 0x1

/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @rs: current RAM state
 * @pss: current PSS channel
 * @current_data: pointer to the address of the page contents
 * @current_addr: addr of the page
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss,
                            uint8_t **current_data, ram_addr_t current_addr,
                            RAMBlock *block, ram_addr_t offset)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;
    QEMUFile *file = pss->pss_channel;
    uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);

    if (!cache_is_cached(XBZRLE.cache, current_addr, generation)) {
        xbzrle_counters.cache_miss++;
        if (!rs->last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             generation) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    /*
     * Reaching here means the page has hit the xbzrle cache, no matter what
     * encoding result it is (normal encoding, overflow or skipping the page),
     * count the page as encoded. This is used to calculate the encoding rate.
     *
     * Example: 2 pages (8KB) being encoded, first page encoding generates 2KB,
     * 2nd page turns out to be skipped (i.e. no new bytes written to the
     * page), the overall encoding rate will be 8KB / 2KB = 4, which has the
     * skipped page included. In this way, the encoding rate can tell if the
     * guest page is good for xbzrle encoding.
     */
    xbzrle_counters.pages++;
    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);

    /*
     * Update the cache contents, so that it corresponds to the data
     * sent, in all cases except where we skip the page.
     */
    if (!rs->last_stage && encoded_len != 0) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
        /*
         * In the case where we couldn't compress, ensure that the caller
         * sends the data from the cache, since the guest might have
         * changed the RAM since we copied it.
         */
        *current_data = prev_cached_page;
    }

    if (encoded_len == 0) {
        trace_save_xbzrle_page_skipping();
        return 0;
    } else if (encoded_len == -1) {
        trace_save_xbzrle_page_overflow();
        xbzrle_counters.overflow++;
        xbzrle_counters.bytes += TARGET_PAGE_SIZE;
        return -1;
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(pss, pss->pss_channel, block,
                                    offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(file, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(file, encoded_len);
    qemu_put_buffer(file, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    /*
     * Like compressed_size (please see update_compress_thread_counts),
     * the xbzrle encoded bytes don't count the 8 byte header with
     * RAM_SAVE_FLAG_CONTINUE.
     */
    xbzrle_counters.bytes += bytes_xbzrle - 8;
    ram_transferred_add(bytes_xbzrle);

    return 1;
}
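/*
 * Byte accounting above: on top of the page header, an XBZRLE page costs
 * 1 byte for ENCODING_FLAG_XBZRLE plus 2 bytes for the encoded length,
 * hence the "+ 1 + 2"; the 8-byte offset|flags word is subtracted again
 * when feeding xbzrle_counters.bytes.
 */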
/**
 * pss_find_next_dirty: find the next dirty page of current ramblock
 *
 * This function updates pss->page to point to the next dirty page index
 * within the ramblock to migrate, or the end of ramblock when nothing
 * found.  Note that when pss->host_page_sending==true it means we're in
 * the middle of sending a host page, so we won't look for dirty pages
 * outside the host page boundary.
 *
 * @pss: the current page search status
 */
static void pss_find_next_dirty(PageSearchStatus *pss)
{
    RAMBlock *rb = pss->block;
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;

    if (migrate_ram_is_ignored(rb)) {
        /* Points directly to the end, so we know no dirty page */
        pss->page = size;
        return;
    }

    /*
     * If during sending a host page, only look for dirty pages within the
     * current host page being sent.
     */
    if (pss->host_page_sending) {
        assert(pss->host_page_end);
        size = MIN(size, pss->host_page_end);
    }

    pss->page = find_next_bit(bitmap, size, pss->page);
}
static void migration_clear_memory_region_dirty_bitmap(RAMBlock *rb,
                                                        unsigned long page)
{
    uint8_t shift;
    hwaddr size, start;

    if (!rb->clear_bmap || !clear_bmap_test_and_clear(rb, page)) {
        return;
    }

    shift = rb->clear_bmap_shift;
    /*
     * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
     * can make things easier sometimes since then start address
     * of the small chunk will always be 64 pages aligned so the
     * bitmap will always be aligned to unsigned long.  We should
     * even be able to remove this restriction but I'm simply
     * keeping it.
     */
    assert(shift >= 6);

    size = 1ULL << (TARGET_PAGE_BITS + shift);
    start = QEMU_ALIGN_DOWN((ram_addr_t)page << TARGET_PAGE_BITS, size);
    trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
    memory_region_clear_dirty_bitmap(rb->mr, start, size);
}
static void
migration_clear_memory_region_dirty_bitmap_range(RAMBlock *rb,
                                                 unsigned long start,
                                                 unsigned long npages)
{
    unsigned long i, chunk_pages = 1UL << rb->clear_bmap_shift;
    unsigned long chunk_start = QEMU_ALIGN_DOWN(start, chunk_pages);
    unsigned long chunk_end = QEMU_ALIGN_UP(start + npages, chunk_pages);

    /*
     * Clear pages from start to start + npages - 1, so the end boundary is
     * exclusive.
     */
    for (i = chunk_start; i < chunk_end; i += chunk_pages) {
        migration_clear_memory_region_dirty_bitmap(rb, i);
    }
}
/**
 * colo_bitmap_find_dirty: find contiguous dirty pages from start
 *
 * Returns the page offset within memory region of the start of the contiguous
 * dirty pages
 *
 * @rs: current RAM state
 * @rb: RAMBlock where to search for dirty pages
 * @start: page where we start the search
 * @num: the number of contiguous dirty pages
 */
static inline
unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                     unsigned long start, unsigned long *num)
{
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;
    unsigned long first, next;

    *num = 0;

    if (migrate_ram_is_ignored(rb)) {
        return size;
    }

    first = find_next_bit(bitmap, size, start);
    if (first >= size) {
        return first;
    }
    next = find_next_zero_bit(bitmap, size, first + 1);
    assert(next >= first);
    *num = next - first;
    return first;
}
static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                RAMBlock *rb,
                                                unsigned long page)
{
    bool ret;

    /*
     * Clear dirty bitmap if needed.  This _must_ be called before we
     * send any of the page in the chunk because we need to make sure
     * we can capture further page content changes when we sync dirty
     * log the next time.  So as long as we are going to send any of
     * the page in the chunk we clear the remote dirty bitmap for all.
     * Clearing it earlier won't be a problem, but too late will.
     */
    migration_clear_memory_region_dirty_bitmap(rb, page);

    ret = test_and_clear_bit(page, rb->bmap);
    if (ret) {
        rs->migration_dirty_pages--;
    }

    return ret;
}
static void dirty_bitmap_clear_section(MemoryRegionSection *section,
                                       void *opaque)
{
    const hwaddr offset = section->offset_within_region;
    const hwaddr size = int128_get64(section->size);
    const unsigned long start = offset >> TARGET_PAGE_BITS;
    const unsigned long npages = size >> TARGET_PAGE_BITS;
    RAMBlock *rb = section->mr->ram_block;
    uint64_t *cleared_bits = opaque;

    /*
     * We don't grab ram_state->bitmap_mutex because we expect to run
     * only when starting migration or during postcopy recovery where
     * we don't have concurrent access.
     */
    if (!migration_in_postcopy() && !migrate_background_snapshot()) {
        migration_clear_memory_region_dirty_bitmap_range(rb, start, npages);
    }
    *cleared_bits += bitmap_count_one_with_offset(rb->bmap, start, npages);
    bitmap_clear(rb->bmap, start, npages);
}
/*
 * Exclude all dirty pages from migration that fall into a discarded range as
 * managed by a RamDiscardManager responsible for the mapped memory region of
 * the RAMBlock.  Clear the corresponding bits in the dirty bitmaps.
 *
 * Discarded pages ("logically unplugged") have undefined content and must
 * not get migrated, because even reading these pages for migration might
 * result in undesired behavior.
 *
 * Returns the number of cleared bits in the RAMBlock dirty bitmap.
 *
 * Note: The result is only stable while migrating (precopy/postcopy).
 */
static uint64_t ramblock_dirty_bitmap_clear_discarded_pages(RAMBlock *rb)
{
    uint64_t cleared_bits = 0;

    if (rb->mr && rb->bmap && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = 0,
            .size = int128_make64(qemu_ram_get_used_length(rb)),
        };

        ram_discard_manager_replay_discarded(rdm, &section,
                                              dirty_bitmap_clear_section,
                                              &cleared_bits);
    }
    return cleared_bits;
}
/*
 * Check if a host-page aligned page falls into a discarded range as managed by
 * a RamDiscardManager responsible for the mapped memory region of the RAMBlock.
 *
 * Note: The result is only stable while migrating (precopy/postcopy).
 */
bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start)
{
    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = start,
            .size = int128_make64(qemu_ram_pagesize(rb)),
        };

        return !ram_discard_manager_is_populated(rdm, &section);
    }
    return false;
}
/* Called with RCU critical section */
static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
{
    uint64_t new_dirty_pages =
        cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length);

    rs->migration_dirty_pages += new_dirty_pages;
    rs->num_dirty_pages_period += new_dirty_pages;
}
/**
 * ram_pagesize_summary: calculate all the pagesizes of a VM
 *
 * Returns a summary bitmap of the page sizes of all RAMBlocks
 *
 * For VMs with just normal pages this is equivalent to the host page
 * size.  If it's got some huge pages then it's the OR of all the
 * different page sizes.
 */
uint64_t ram_pagesize_summary(void)
{
    RAMBlock *block;
    uint64_t summary = 0;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        summary |= block->page_size;
    }

    return summary;
}
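/*
 * Example: a guest backed by normal 4 KiB pages plus a 2 MiB hugetlbfs
 * region yields 0x1000 | 0x200000 = 0x201000 here.
 */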
uint64_t ram_get_total_transferred_pages(void)
{
    return stat64_get(&mig_stats.normal_pages) +
           stat64_get(&mig_stats.zero_pages) +
           compress_ram_pages() + xbzrle_counters.pages;
}
static void migration_update_rates(RAMState *rs, int64_t end_time)
{
    uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;

    /* calculate period counters */
    stat64_set(&mig_stats.dirty_pages_rate,
               rs->num_dirty_pages_period * 1000 /
               (end_time - rs->time_last_bitmap_sync));

    if (!page_count) {
        return;
    }

    if (migrate_xbzrle()) {
        double encoded_size, unencoded_size;

        xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
            rs->xbzrle_cache_miss_prev) / page_count;
        rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
        unencoded_size = (xbzrle_counters.pages - rs->xbzrle_pages_prev) *
                         TARGET_PAGE_SIZE;
        encoded_size = xbzrle_counters.bytes - rs->xbzrle_bytes_prev;
        if (xbzrle_counters.pages == rs->xbzrle_pages_prev || !encoded_size) {
            xbzrle_counters.encoding_rate = 0;
        } else {
            xbzrle_counters.encoding_rate = unencoded_size / encoded_size;
        }
        rs->xbzrle_pages_prev = xbzrle_counters.pages;
        rs->xbzrle_bytes_prev = xbzrle_counters.bytes;
    }
    compress_update_rates(page_count);
}
/*
 * Enable dirty-limit to throttle down the guest
 */
static void migration_dirty_limit_guest(void)
{
    /*
     * dirty page rate quota for all vCPUs fetched from
     * migration parameter 'vcpu_dirty_limit'
     */
    static int64_t quota_dirtyrate;
    MigrationState *s = migrate_get_current();

    /*
     * If the dirty limit is already enabled and the migration parameter
     * vcpu-dirty-limit is untouched, there is nothing to do.
     */
    if (dirtylimit_in_service() &&
        quota_dirtyrate == s->parameters.vcpu_dirty_limit) {
        return;
    }

    quota_dirtyrate = s->parameters.vcpu_dirty_limit;

    /*
     * Set a quota dirty rate for all vCPUs; note that the second
     * parameter is ignored when setting it for the whole VM.
     */
    qmp_set_vcpu_dirty_limit(false, -1, quota_dirtyrate, NULL);
    trace_migration_dirty_limit_guest(quota_dirtyrate);
}
static void migration_trigger_throttle(RAMState *rs)
{
    uint64_t threshold = migrate_throttle_trigger_threshold();
    uint64_t bytes_xfer_period =
        stat64_get(&mig_stats.transferred) - rs->bytes_xfer_prev;
    uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
    uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;

    /*
     * During block migration the auto-converge logic incorrectly detects
     * that ram migration makes no progress.  Avoid this by disabling the
     * throttling logic during the bulk phase of block migration.
     */
    if (blk_mig_bulk_active()) {
        return;
    }

    /*
     * The following detection logic can be refined later.  For now:
     * Check to see if the ratio between dirtied bytes and the approx.
     * amount of bytes that just got transferred since the last time
     * we were in this routine reaches the threshold.  If that happens
     * twice, start or increase throttling.
     */
    if ((bytes_dirty_period > bytes_dirty_threshold) &&
        (++rs->dirty_rate_high_cnt >= 2)) {
        rs->dirty_rate_high_cnt = 0;
        if (migrate_auto_converge()) {
            trace_migration_throttle();
            mig_throttle_guest_down(bytes_dirty_period,
                                    bytes_dirty_threshold);
        } else if (migrate_dirty_limit()) {
            migration_dirty_limit_guest();
        }
    }
}
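/*
 * In other words: throttling (auto-converge or dirty-limit) only kicks in
 * once the guest has dirtied more than `threshold` percent of what was
 * transferred during the sync period, and only after that has happened in
 * two consecutive periods.
 */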
static void migration_bitmap_sync(RAMState *rs, bool last_stage)
{
    RAMBlock *block;
    int64_t end_time;

    stat64_add(&mig_stats.dirty_sync_count, 1);

    if (!rs->time_last_bitmap_sync) {
        rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    memory_global_dirty_log_sync(last_stage);

    qemu_mutex_lock(&rs->bitmap_mutex);
    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            ramblock_sync_dirty_bitmap(rs, block);
        }
        stat64_set(&mig_stats.dirty_bytes_last_sync, ram_bytes_remaining());
    }
    qemu_mutex_unlock(&rs->bitmap_mutex);

    memory_global_after_dirty_log_sync();
    trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);

    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > rs->time_last_bitmap_sync + 1000) {
        migration_trigger_throttle(rs);

        migration_update_rates(rs, end_time);

        rs->target_page_count_prev = rs->target_page_count;

        /* reset period counters */
        rs->time_last_bitmap_sync = end_time;
        rs->num_dirty_pages_period = 0;
        rs->bytes_xfer_prev = stat64_get(&mig_stats.transferred);
    }
    if (migrate_events()) {
        uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);
        qapi_event_send_migration_pass(generation);
    }
}
static void migration_bitmap_sync_precopy(RAMState *rs, bool last_stage)
{
    Error *local_err = NULL;

    /*
     * The current notifier usage is just an optimization to migration, so we
     * don't stop the normal migration process in the error case.
     */
    if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
        error_report_err(local_err);
        local_err = NULL;
    }

    migration_bitmap_sync(rs, last_stage);

    if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
        error_report_err(local_err);
    }
}
void ram_release_page(const char *rbname, uint64_t offset)
{
    if (!migrate_release_ram() || !migration_in_postcopy()) {
        return;
    }

    ram_discard_range(rbname, offset, TARGET_PAGE_SIZE);
}
/**
 * save_zero_page: send the zero page to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @pss: current PSS channel
 * @offset: offset inside the block for the page
 */
static int save_zero_page(RAMState *rs, PageSearchStatus *pss,
                          ram_addr_t offset)
{
    uint8_t *p = pss->block->host + offset;
    QEMUFile *file = pss->pss_channel;
    int len = 0;

    if (!buffer_is_zero(p, TARGET_PAGE_SIZE)) {
        return 0;
    }

    len += save_page_header(pss, file, pss->block, offset | RAM_SAVE_FLAG_ZERO);
    qemu_put_byte(file, 0);
    len += 1;
    ram_release_page(pss->block->idstr, offset);

    stat64_add(&mig_stats.zero_pages, 1);
    ram_transferred_add(len);

    /*
     * Must let xbzrle know, otherwise a previous (now 0'd) cached
     * page would be stale.
     */
    if (rs->xbzrle_started) {
        XBZRLE_cache_lock();
        xbzrle_cache_zero_page(pss->block->offset + offset);
        XBZRLE_cache_unlock();
    }

    return 1;
}
/*
 * @pages: the number of pages written by the control path,
 *         > 0 - number of pages written
 *
 * Return true if the page has been saved, otherwise false is returned.
 */
static bool control_save_page(PageSearchStatus *pss,
                              ram_addr_t offset, int *pages)
{
    int ret;

    ret = rdma_control_save_page(pss->pss_channel, pss->block->offset, offset,
                                 TARGET_PAGE_SIZE);
    if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
        return false;
    }

    if (ret == RAM_SAVE_CONTROL_DELAYED) {
        *pages = 1;
        return true;
    }
    *pages = ret;
    return true;
}
/*
 * directly send the page to the stream
 *
 * Returns the number of pages written.
 *
 * @pss: current PSS channel
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @buf: the page to be sent
 * @async: send the page asynchronously
 */
static int save_normal_page(PageSearchStatus *pss, RAMBlock *block,
                            ram_addr_t offset, uint8_t *buf, bool async)
{
    QEMUFile *file = pss->pss_channel;

    ram_transferred_add(save_page_header(pss, pss->pss_channel, block,
                                         offset | RAM_SAVE_FLAG_PAGE));
    if (async) {
        qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE,
                              migrate_release_ram() &&
                              migration_in_postcopy());
    } else {
        qemu_put_buffer(file, buf, TARGET_PAGE_SIZE);
    }
    ram_transferred_add(TARGET_PAGE_SIZE);
    stat64_add(&mig_stats.normal_pages, 1);
    return 1;
}
/**
 * ram_save_page: send the given page to the stream
 *
 * Returns the number of pages written.
 *          >=0 - Number of pages written - this might legally be 0
 *                if xbzrle noticed the page was the same.
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
{
    int pages = -1;
    uint8_t *p;
    bool send_async = true;
    RAMBlock *block = pss->block;
    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
    ram_addr_t current_addr = block->offset + offset;

    p = block->host + offset;
    trace_ram_save_page(block->idstr, (uint64_t)offset, p);

    XBZRLE_cache_lock();
    if (rs->xbzrle_started && !migration_in_postcopy()) {
        pages = save_xbzrle_page(rs, pss, &p, current_addr,
                                 block, offset);
        if (!rs->last_stage) {
            /* Can't send this cached data async, since the cache page
             * might get updated before it gets to the wire
             */
            send_async = false;
        }
    }

    /* XBZRLE overflow or normal page */
    if (pages == -1) {
        pages = save_normal_page(pss, block, offset, p, send_async);
    }

    XBZRLE_cache_unlock();

    return pages;
}
static int ram_save_multifd_page(QEMUFile *file, RAMBlock *block,
                                 ram_addr_t offset)
{
    if (multifd_queue_page(file, block, offset) < 0) {
        return -1;
    }
    stat64_add(&mig_stats.normal_pages, 1);

    return 1;
}
int compress_send_queued_data(CompressParam *param)
{
    PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_PRECOPY];
    MigrationState *ms = migrate_get_current();
    QEMUFile *file = ms->to_dst_file;
    int len = 0;

    RAMBlock *block = param->block;
    ram_addr_t offset = param->offset;

    if (param->result == RES_NONE) {
        return 0;
    }

    assert(block == pss->last_sent_block);

    if (param->result == RES_ZEROPAGE) {
        assert(qemu_file_buffer_empty(param->file));
        len += save_page_header(pss, file, block, offset | RAM_SAVE_FLAG_ZERO);
        qemu_put_byte(file, 0);
        len += 1;
        ram_release_page(block->idstr, offset);
    } else if (param->result == RES_COMPRESS) {
        assert(!qemu_file_buffer_empty(param->file));
        len += save_page_header(pss, file, block,
                                offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
        len += qemu_put_qemu_file(file, param->file);
    } else {
        abort();
    }

    update_compress_thread_counts(param, len);

    return len;
}
#define PAGE_ALL_CLEAN 0
#define PAGE_TRY_AGAIN 1
#define PAGE_DIRTY_FOUND 2

/**
 * find_dirty_block: find the next dirty page and update any state
 * associated with the search process.
 *
 * Returns:
 *         <0: An error happened
 *         PAGE_ALL_CLEAN: no dirty page found, give up
 *         PAGE_TRY_AGAIN: no dirty page found, retry for next block
 *         PAGE_DIRTY_FOUND: dirty page found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 * @again: set to false if the search has scanned the whole of RAM
 */
static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
{
    /* Update pss->page for the next dirty bit in ramblock */
    pss_find_next_dirty(pss);

    if (pss->complete_round && pss->block == rs->last_seen_block &&
        pss->page >= rs->last_page) {
        /*
         * We've been once around the RAM and haven't found anything.
         * Give up.
         */
        return PAGE_ALL_CLEAN;
    }
    if (!offset_in_ramblock(pss->block,
                            ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)) {
        /* Didn't find anything in this RAM Block */
        pss->page = 0;
        pss->block = QLIST_NEXT_RCU(pss->block, next);
        if (!pss->block) {
            if (migrate_multifd() &&
                !migrate_multifd_flush_after_each_section()) {
                QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
                int ret = multifd_send_sync_main(f);
                if (ret < 0) {
                    return ret;
                }
                qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
                qemu_fflush(f);
            }
            /*
             * If memory migration starts over, we will meet a dirtied page
             * which may still exist in the compression threads' ring, so we
             * should flush the compressed data to make sure the new page
             * is not overwritten by the old one in the destination.
             *
             * Also, if xbzrle is on, stop using the data compression at this
             * point.  In theory, xbzrle can do better than compression.
             */
            compress_flush_data();

            /* Hit the end of the list */
            pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
            /* Flag that we've looped */
            pss->complete_round = true;
            /* After the first round, enable XBZRLE. */
            if (migrate_xbzrle()) {
                rs->xbzrle_started = true;
            }
        }
        /* Didn't find anything this time, but try again on the new block */
        return PAGE_TRY_AGAIN;
    } else {
        /* We've found something */
        return PAGE_DIRTY_FOUND;
    }
}
/**
 * unqueue_page: gets a page of the queue
 *
 * Helper for 'get_queued_page' - gets a page off the queue
 *
 * Returns the block of the page (or NULL if none available)
 *
 * @rs: current RAM state
 * @offset: used to return the offset within the RAMBlock
 */
static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
{
    struct RAMSrcPageRequest *entry;
    RAMBlock *block = NULL;

    if (!postcopy_has_request(rs)) {
        return NULL;
    }

    QEMU_LOCK_GUARD(&rs->src_page_req_mutex);

    /*
     * This should _never_ change even after we take the lock, because no one
     * should be taking anything off the request list other than us.
     */
    assert(postcopy_has_request(rs));

    entry = QSIMPLEQ_FIRST(&rs->src_page_requests);
    block = entry->rb;
    *offset = entry->offset;

    if (entry->len > TARGET_PAGE_SIZE) {
        entry->len -= TARGET_PAGE_SIZE;
        entry->offset += TARGET_PAGE_SIZE;
    } else {
        memory_region_unref(block->mr);
        QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
        g_free(entry);
        migration_consume_urgent_request();
    }

    return block;
}
#if defined(__linux__)
/**
 * poll_fault_page: try to get next UFFD write fault page and, if pending fault
 *   is found, return RAM block pointer and page offset
 *
 * Returns pointer to the RAMBlock containing faulting page,
 *   NULL if no write faults are pending
 *
 * @rs: current RAM state
 * @offset: page offset from the beginning of the block
 */
static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
{
    struct uffd_msg uffd_msg;
    void *page_address;
    RAMBlock *block;
    int res;

    if (!migrate_background_snapshot()) {
        return NULL;
    }

    res = uffd_read_events(rs->uffdio_fd, &uffd_msg, 1);
    if (res <= 0) {
        return NULL;
    }

    page_address = (void *)(uintptr_t) uffd_msg.arg.pagefault.address;
    block = qemu_ram_block_from_host(page_address, false, offset);
    assert(block && (block->flags & RAM_UF_WRITEPROTECT) != 0);
    return block;
}
/*
 * ram_save_release_protection: release UFFD write protection after
 *   a range of pages has been saved
 *
 * @rs: current RAM state
 * @pss: page-search-status structure
 * @start_page: index of the first page in the range relative to pss->block
 *
 * Returns 0 on success, negative value in case of an error
 */
static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
                                       unsigned long start_page)
{
    int res = 0;

    /* Check if page is from UFFD-managed region. */
    if (pss->block->flags & RAM_UF_WRITEPROTECT) {
        void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS);
        uint64_t run_length = (pss->page - start_page) << TARGET_PAGE_BITS;

        /* Flush async buffers before un-protect. */
        qemu_fflush(pss->pss_channel);
        /* Un-protect memory range. */
        res = uffd_change_protection(rs->uffdio_fd, page_address, run_length,
                                     false, false);
    }

    return res;
}
/*
 * ram_write_tracking_available: check if kernel supports required UFFD features
 *
 * Returns true if supports, false otherwise
 */
bool ram_write_tracking_available(void)
{
    uint64_t uffd_features;
    int res;

    res = uffd_query_features(&uffd_features);
    return (res == 0 &&
            (uffd_features & UFFD_FEATURE_PAGEFAULT_FLAG_WP) != 0);
}
/*
 * ram_write_tracking_compatible: check if guest configuration is
 *   compatible with 'write-tracking'
 *
 * Returns true if compatible, false otherwise
 */
bool ram_write_tracking_compatible(void)
{
    const uint64_t uffd_ioctls_mask = BIT(_UFFDIO_WRITEPROTECT);
    int uffd_fd;
    RAMBlock *block;
    bool ret = false;

    /* Open UFFD file descriptor */
    uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, false);
    if (uffd_fd < 0) {
        return false;
    }

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        uint64_t uffd_ioctls;

        /* Nothing to do with read-only and MMIO-writable regions */
        if (block->mr->readonly || block->mr->rom_device) {
            continue;
        }

        /* Try to register block memory via UFFD-IO to track writes */
        if (uffd_register_memory(uffd_fd, block->host, block->max_length,
                                 UFFDIO_REGISTER_MODE_WP, &uffd_ioctls)) {
            goto out;
        }
        if ((uffd_ioctls & uffd_ioctls_mask) != uffd_ioctls_mask) {
            goto out;
        }
    }
    ret = true;

out:
    uffd_close_fd(uffd_fd);
    return ret;
}
static inline void populate_read_range(RAMBlock *block, ram_addr_t offset,
                                       ram_addr_t size)
{
    const ram_addr_t end = offset + size;

    /*
     * We read one byte of each page; this will preallocate page tables if
     * required and populate the shared zeropage on MAP_PRIVATE anonymous memory
     * where no page was populated yet.  This might require adaptation when
     * supporting other mappings, like shmem.
     */
    for (; offset < end; offset += block->page_size) {
        char tmp = *((char *)block->host + offset);

        /* Don't optimize the read out */
        asm volatile("" : "+r" (tmp));
    }
}
static inline int populate_read_section(MemoryRegionSection *section,
                                        void *opaque)
{
    const hwaddr size = int128_get64(section->size);
    hwaddr offset = section->offset_within_region;
    RAMBlock *block = section->mr->ram_block;

    populate_read_range(block, offset, size);
    return 0;
}
1573 * RAM block by reading a byte of each page.
1575 * Since it's solely used for userfault_fd WP feature, here we just
1576 * hardcode page size to qemu_real_host_page_size.
1578 * @block: RAM block to populate
1580 static void ram_block_populate_read(RAMBlock
*rb
)
1583 * Skip populating all pages that fall into a discarded range as managed by
1584 * a RamDiscardManager responsible for the mapped memory region of the
1585 * RAMBlock. Such discarded ("logically unplugged") parts of a RAMBlock
1586 * must not get populated automatically. We don't have to track
1587 * modifications via userfaultfd WP reliably, because these pages will
1588 * not be part of the migration stream either way -- see
1589 * ramblock_dirty_bitmap_exclude_discarded_pages().
1591 * Note: The result is only stable while migrating (precopy/postcopy).
1593 if (rb
->mr
&& memory_region_has_ram_discard_manager(rb
->mr
)) {
1594 RamDiscardManager
*rdm
= memory_region_get_ram_discard_manager(rb
->mr
);
1595 MemoryRegionSection section
= {
1597 .offset_within_region
= 0,
1598 .size
= rb
->mr
->size
,
1601 ram_discard_manager_replay_populated(rdm
, §ion
,
1602 populate_read_section
, NULL
);
1604 populate_read_range(rb
, 0, rb
->used_length
);
/*
 * ram_write_tracking_prepare: prepare for UFFD-WP memory tracking
 */
void ram_write_tracking_prepare(void)
{
    RAMBlock *block;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        /* Nothing to do with read-only and MMIO-writable regions */
        if (block->mr->readonly || block->mr->rom_device) {
            continue;
        }

        /*
         * Populate pages of the RAM block before enabling userfault_fd
         * write protection.
         *
         * This stage is required since ioctl(UFFDIO_WRITEPROTECT) with
         * UFFDIO_WRITEPROTECT_MODE_WP mode setting would silently skip
         * pages with pte_none() entries in page table.
         */
        ram_block_populate_read(block);
    }
}
static inline int uffd_protect_section(MemoryRegionSection *section,
                                       void *opaque)
{
    const hwaddr size = int128_get64(section->size);
    const hwaddr offset = section->offset_within_region;
    RAMBlock *rb = section->mr->ram_block;
    int uffd_fd = (uintptr_t)opaque;

    return uffd_change_protection(uffd_fd, rb->host + offset, size, true,
                                  false);
}
static int ram_block_uffd_protect(RAMBlock *rb, int uffd_fd)
{
    assert(rb->flags & RAM_UF_WRITEPROTECT);

    /* See ram_block_populate_read() */
    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = 0,
            .size = rb->mr->size,
        };

        return ram_discard_manager_replay_populated(rdm, &section,
                                                    uffd_protect_section,
                                                    (void *)(uintptr_t)uffd_fd);
    }
    return uffd_change_protection(uffd_fd, rb->host,
                                  rb->used_length, true, false);
}
/*
 * ram_write_tracking_start: start UFFD-WP memory tracking
 *
 * Returns 0 for success or negative value in case of error
 */
int ram_write_tracking_start(void)
{
    int uffd_fd;
    RAMState *rs = ram_state;
    RAMBlock *block;

    /* Open UFFD file descriptor */
    uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, true);
    if (uffd_fd < 0) {
        return uffd_fd;
    }
    rs->uffdio_fd = uffd_fd;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        /* Nothing to do with read-only and MMIO-writable regions */
        if (block->mr->readonly || block->mr->rom_device) {
            continue;
        }

        /* Register block memory with UFFD to track writes */
        if (uffd_register_memory(rs->uffdio_fd, block->host,
                                 block->max_length, UFFDIO_REGISTER_MODE_WP,
                                 NULL)) {
            goto fail;
        }
        block->flags |= RAM_UF_WRITEPROTECT;
        memory_region_ref(block->mr);

        /* Apply UFFD write protection to the block memory range */
        if (ram_block_uffd_protect(block, uffd_fd)) {
            goto fail;
        }

        trace_ram_write_tracking_ramblock_start(block->idstr, block->page_size,
                                                block->host, block->max_length);
    }

    return 0;

fail:
    error_report("ram_write_tracking_start() failed: restoring initial memory state");

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
            continue;
        }
        uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);
        /* Cleanup flags and remove reference */
        block->flags &= ~RAM_UF_WRITEPROTECT;
        memory_region_unref(block->mr);
    }

    uffd_close_fd(uffd_fd);
    rs->uffdio_fd = -1;
    return -1;
}
/*
 * ram_write_tracking_stop: stop UFFD-WP memory tracking and remove protection
 */
void ram_write_tracking_stop(void)
{
    RAMState *rs = ram_state;
    RAMBlock *block;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
            continue;
        }
        uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);

        trace_ram_write_tracking_ramblock_stop(block->idstr, block->page_size,
                                               block->host, block->max_length);

        /* Cleanup flags and remove reference */
        block->flags &= ~RAM_UF_WRITEPROTECT;
        memory_region_unref(block->mr);
    }

    /* Finally close UFFD file descriptor */
    uffd_close_fd(rs->uffdio_fd);
    rs->uffdio_fd = -1;
}
#else

/* No target OS support, stubs just fail or ignore */

static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
{
    return NULL;
}

static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
                                       unsigned long start_page)
{
    return 0;
}

bool ram_write_tracking_available(void)
{
    return false;
}

bool ram_write_tracking_compatible(void)
{
    return false;
}

int ram_write_tracking_start(void)
{
    return -1;
}

void ram_write_tracking_stop(void)
{
}

#endif /* defined(__linux__) */
/**
 * get_queued_page: unqueue a page from the postcopy requests
 *
 * Skips pages that are already sent (!dirty)
 *
 * Returns true if a queued page is found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 */
static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
{
    RAMBlock *block;
    ram_addr_t offset;
    bool dirty;

    do {
        block = unqueue_page(rs, &offset);
        /*
         * We're sending this page, and since it's postcopy nothing else
         * will dirty it, and we must make sure it doesn't get sent again
         * even if this queue request was received after the background
         * search already sent it.
         */
        if (block) {
            unsigned long page;

            page = offset >> TARGET_PAGE_BITS;
            dirty = test_bit(page, block->bmap);
            if (!dirty) {
                trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
                                                page);
            } else {
                trace_get_queued_page(block->idstr, (uint64_t)offset, page);
            }
        }

    } while (block && !dirty);

    if (!block) {
        /*
         * Poll write faults too if background snapshot is enabled; that's
         * when we have vcpus got blocked by the write protected pages.
         */
        block = poll_fault_page(rs, &offset);
    }

    if (block) {
        /*
         * We want the background search to continue from the queued page
         * since the guest is likely to want other pages near to the page
         * it just requested.
         */
        pss->block = block;
        pss->page = offset >> TARGET_PAGE_BITS;

        /*
         * This unqueued page would break the "one round" check, even if it
         * is really rare.
         */
        pss->complete_round = false;
    }

    return !!block;
}
/**
 * migration_page_queue_free: drop any remaining pages in the ram
 * request queue
 *
 * It should be empty at the end anyway, but in error cases there may
 * be some left.  In case any page is left, we drop it.
 */
static void migration_page_queue_free(RAMState *rs)
{
    struct RAMSrcPageRequest *mspr, *next_mspr;
    /* This queue generally should be empty - but in the case of a failed
     * migration might have some droppings in.
     */
    RCU_READ_LOCK_GUARD();
    QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
        memory_region_unref(mspr->rb->mr);
        QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
        g_free(mspr);
    }
}
/**
 * ram_save_queue_pages: queue the page for transmission
 *
 * A request from postcopy destination for example.
 *
 * Returns zero on success or negative on error
 *
 * @rbname: Name of the RAMBLock of the request. NULL means the
 *          same that last one.
 * @start: starting address from the start of the RAMBlock
 * @len: length (in bytes) to send
 */
int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
{
    RAMBlock *ramblock;
    RAMState *rs = ram_state;

    stat64_add(&mig_stats.postcopy_requests, 1);
    RCU_READ_LOCK_GUARD();

    if (!rbname) {
        /* Reuse last RAMBlock */
        ramblock = rs->last_req_rb;

        if (!ramblock) {
            /*
             * Shouldn't happen, we can't reuse the last RAMBlock if
             * it's the 1st request.
             */
            error_report("ram_save_queue_pages no previous block");
            return -1;
        }
    } else {
        ramblock = qemu_ram_block_by_name(rbname);

        if (!ramblock) {
            /* We shouldn't be asked for a non-existent RAMBlock */
            error_report("ram_save_queue_pages no block '%s'", rbname);
            return -1;
        }
        rs->last_req_rb = ramblock;
    }
    trace_ram_save_queue_pages(ramblock->idstr, start, len);
    if (!offset_in_ramblock(ramblock, start + len - 1)) {
        error_report("%s request overrun start=" RAM_ADDR_FMT " len="
                     RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
                     __func__, start, len, ramblock->used_length);
        return -1;
    }

    /*
     * When with postcopy preempt, we send back the page directly in the
     * rp-return thread.
     */
    if (postcopy_preempt_active()) {
        ram_addr_t page_start = start >> TARGET_PAGE_BITS;
        size_t page_size = qemu_ram_pagesize(ramblock);
        PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_POSTCOPY];
        int ret = 0;

        qemu_mutex_lock(&rs->bitmap_mutex);

        pss_init(pss, ramblock, page_start);
        /*
         * Always use the preempt channel, and make sure it's there.  It's
         * safe to access without lock, because when rp-thread is running
         * we should be the only one who operates on the qemufile
         */
        pss->pss_channel = migrate_get_current()->postcopy_qemufile_src;
        assert(pss->pss_channel);

        /*
         * It must be either one or multiple of host page size.  Just
         * assert; if something wrong we're mostly split brain anyway.
         */
        assert(len % page_size == 0);
        while (len) {
            if (ram_save_host_page_urgent(pss)) {
                error_report("%s: ram_save_host_page_urgent() failed: "
                             "ramblock=%s, start_addr=0x"RAM_ADDR_FMT,
                             __func__, ramblock->idstr, start);
                ret = -1;
                break;
            }
            /*
             * NOTE: after ram_save_host_page_urgent() succeeded, pss->page
             * will automatically be moved and point to the next host page
             * we're going to send, so no need to update here.
             *
             * Normally QEMU never sends >1 host page in requests, so
             * logically we don't even need that as the loop should only
             * run once, but just to be consistent.
             */
            start += page_size;
            len -= page_size;
        }
        qemu_mutex_unlock(&rs->bitmap_mutex);

        return ret;
    }

    struct RAMSrcPageRequest *new_entry =
        g_new0(struct RAMSrcPageRequest, 1);
    new_entry->rb = ramblock;
    new_entry->offset = start;
    new_entry->len = len;

    memory_region_ref(ramblock->mr);
    qemu_mutex_lock(&rs->src_page_req_mutex);
    QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
    migration_make_urgent_request();
    qemu_mutex_unlock(&rs->src_page_req_mutex);

    return 0;
}
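/*
 * Summary of the two paths above: with postcopy preempt active the requested
 * host page(s) are pushed immediately on the preempt channel (under
 * bitmap_mutex); otherwise the request is queued on src_page_requests and
 * later drained by get_queued_page().
 */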
/*
 * try to compress the page before posting it out, return true if the page
 * has been properly handled by compression, otherwise needs other
 * paths to handle it
 */
static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
                               ram_addr_t offset)
{
    if (!migrate_compress()) {
        return false;
    }

    /*
     * When starting the process of a new block, the first page of
     * the block should be sent out before other pages in the same
     * block, and all the pages in last block should have been sent
     * out, keeping this order is important, because the 'cont' flag
     * is used to avoid resending the block name.
     *
     * We post the first page as a normal page as compression will take
     * much CPU resource.
     */
    if (pss->block != pss->last_sent_block) {
        compress_flush_data();
        return false;
    }

    return compress_page_with_multi_thread(pss->block, offset,
                                           compress_send_queued_data);
}
/**
 * ram_save_target_page_legacy: save one target page
 *
 * Returns the number of pages written
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 */
static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
{
    RAMBlock *block = pss->block;
    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
    int res;

    if (control_save_page(pss, offset, &res)) {
        return res;
    }

    if (save_compress_page(rs, pss, offset)) {
        return 1;
    }

    if (save_zero_page(rs, pss, offset)) {
        return 1;
    }

    /*
     * Do not use multifd in postcopy as one whole host page should be
     * placed.  Meanwhile postcopy requires atomic update of pages, so even
     * if host page size == guest page size the dest guest during run may
     * still see partially copied pages which is data corruption.
     */
    if (migrate_multifd() && !migration_in_postcopy()) {
        return ram_save_multifd_page(pss->pss_channel, block, offset);
    }

    return ram_save_page(rs, pss);
}
/* Should be called before sending a host page */
static void pss_host_page_prepare(PageSearchStatus *pss)
{
    /* How many guest pages are there in one host page? */
    size_t guest_pfns = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;

    pss->host_page_sending = true;
    if (guest_pfns <= 1) {
        /*
         * This covers both when guest psize == host psize, or when guest
         * has larger psize than the host (guest_pfns==0).
         *
         * For the latter, we always send one whole guest page per
         * iteration of the host page (example: an Alpha VM on x86 host
         * will have guest psize 8K while host psize 4K).
         */
        pss->host_page_start = pss->page;
        pss->host_page_end = pss->page + 1;
    } else {
        /*
         * The host page spans over multiple guest pages, we send them
         * within the same host page iteration.
         */
        pss->host_page_start = ROUND_DOWN(pss->page, guest_pfns);
        pss->host_page_end = ROUND_UP(pss->page + 1, guest_pfns);
    }
}
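/*
 * Example with the common x86 case of a 2 MiB hugepage-backed block and
 * 4 KiB target pages: guest_pfns = 512, so for pss->page = 1000 the host
 * page window becomes [512, 1024).
 */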
/*
 * Whether the page pointed by PSS is within the host page being sent.
 * Must be called after a previous pss_host_page_prepare().
 */
static bool pss_within_range(PageSearchStatus *pss)
{
    ram_addr_t ram_addr;

    assert(pss->host_page_sending);

    /* Over host-page boundary? */
    if (pss->page >= pss->host_page_end) {
        return false;
    }

    ram_addr = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;

    return offset_in_ramblock(pss->block, ram_addr);
}
static void pss_host_page_finish(PageSearchStatus *pss)
{
    pss->host_page_sending = false;
    /* This is not needed, but just to reset it */
    pss->host_page_start = pss->host_page_end = 0;
}
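/*
 * Typical use of the helpers above: pss_host_page_prepare() computes the
 * [host_page_start, host_page_end) window of target pages covered by the
 * host page containing pss->page; the caller then loops, sending dirty
 * target pages and advancing with pss_find_next_dirty(), for as long as
 * pss_within_range() holds, and finally calls pss_host_page_finish().
 */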
/*
 * Send an urgent host page specified by `pss'.  Need to be called with
 * bitmap_mutex held.
 *
 * Returns 0 if saving the host page succeeded, negative otherwise.
 */
static int ram_save_host_page_urgent(PageSearchStatus *pss)
{
    bool page_dirty, sent = false;
    RAMState *rs = ram_state;
    int ret = 0;

    trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page);
    pss_host_page_prepare(pss);

    /*
     * If precopy is sending the same page, let it be done in precopy, or
     * we could send the same page in two channels and none of them will
     * receive the whole page.
     */
    if (pss_overlap(pss, &ram_state->pss[RAM_CHANNEL_PRECOPY])) {
        trace_postcopy_preempt_hit(pss->block->idstr,
                                   pss->page << TARGET_PAGE_BITS);
        return 0;
    }

    do {
        page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page);

        if (page_dirty) {
            /* Be strict to return code; it must be 1, or what else? */
            if (migration_ops->ram_save_target_page(rs, pss) != 1) {
                error_report_once("%s: ram_save_target_page failed", __func__);
                ret = -1;
                goto out;
            }
            sent = true;
        }
        pss_find_next_dirty(pss);
    } while (pss_within_range(pss));
out:
    pss_host_page_finish(pss);
    /* For urgent requests, flush immediately if sent */
    if (sent) {
        qemu_fflush(pss->pss_channel);
    }

    return ret;
}
/**
 * ram_save_host_page: save a whole host page
 *
 * Starting at *offset send pages up to the end of the current host
 * page. It's valid for the initial offset to point into the middle of
 * a host page in which case the remainder of the hostpage is sent.
 * Only dirty target pages are sent. Note that the host page size may
 * be a huge page for this block.
 *
 * The saving stops at the boundary of the used_length of the block
 * if the RAMBlock isn't a multiple of the host page size.
 *
 * The caller must be with ram_state.bitmap_mutex held to call this
 * function.  Note that this function can temporarily release the lock, but
 * when the function is returned it'll make sure the lock is still held.
 *
 * Returns the number of pages written or negative on error
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 */
static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
{
    bool page_dirty, preempt_active = postcopy_preempt_active();
    int tmppages, pages = 0;
    size_t pagesize_bits =
        qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
    unsigned long start_page = pss->page;
    int res;

    if (migrate_ram_is_ignored(pss->block)) {
        error_report("block %s should not be migrated !", pss->block->idstr);
        return 0;
    }

    /* Update host page boundary information */
    pss_host_page_prepare(pss);

    do {
        page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page);

        /* Check the pages is dirty and if it is send it */
        if (page_dirty) {
            /*
             * Properly yield the lock only in postcopy preempt mode
             * because both migration thread and rp-return thread can
             * operate on the bitmaps.
             */
            if (preempt_active) {
                qemu_mutex_unlock(&rs->bitmap_mutex);
            }
            tmppages = migration_ops->ram_save_target_page(rs, pss);
            if (tmppages >= 0) {
                pages += tmppages;
                /*
                 * Allow rate limiting to happen in the middle of huge pages if
                 * something is sent in the current iteration.
                 */
                if (pagesize_bits > 1 && tmppages > 0) {
                    migration_rate_limit();
                }
            }
            if (preempt_active) {
                qemu_mutex_lock(&rs->bitmap_mutex);
            }
        } else {
            tmppages = 0;
        }

        if (tmppages < 0) {
            pss_host_page_finish(pss);
            return tmppages;
        }

        pss_find_next_dirty(pss);
    } while (pss_within_range(pss));

    pss_host_page_finish(pss);

    res = ram_save_release_protection(rs, pss, start_page);
    return (res < 0 ? res : pages);
}
/**
 * ram_find_and_save_block: finds a dirty page and sends it to f
 *
 * Called within an RCU critical section.
 *
 * Returns the number of pages written where zero means no dirty pages,
 * or negative on error
 *
 * @rs: current RAM state
 *
 * On systems where host-page-size > target-page-size it will send all the
 * pages in a host page that are dirty.
 */
static int ram_find_and_save_block(RAMState *rs)
{
    PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
    int pages = 0;

    /* No dirty page as there is zero RAM */
    if (!rs->ram_bytes_total) {
        return pages;
    }

    /*
     * Always keep last_seen_block/last_page valid during this procedure,
     * because find_dirty_block() relies on these values (e.g., we compare
     * last_seen_block with pss.block to see whether we searched all the
     * ramblocks) to detect the completion of migration.  Having NULL value
     * of last_seen_block can conditionally cause below loop to run forever.
     */
    if (!rs->last_seen_block) {
        rs->last_seen_block = QLIST_FIRST_RCU(&ram_list.blocks);
        rs->last_page = 0;
    }

    pss_init(pss, rs->last_seen_block, rs->last_page);

    while (true) {
        if (!get_queued_page(rs, pss)) {
            /* priority queue empty, so just search for something dirty */
            int res = find_dirty_block(rs, pss);
            if (res != PAGE_DIRTY_FOUND) {
                if (res == PAGE_ALL_CLEAN) {
                    break;
                } else if (res == PAGE_TRY_AGAIN) {
                    continue;
                } else if (res < 0) {
                    pages = res;
                    break;
                }
            }
        }
        pages = ram_save_host_page(rs, pss);
        if (pages) {
            break;
        }
    }

    rs->last_seen_block = pss->block;
    rs->last_page = pss->page;

    return pages;
}
static uint64_t ram_bytes_total_with_ignored(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        total += block->used_length;
    }
    return total;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        total += block->used_length;
    }
    return total;
}
static void xbzrle_load_setup(void)
{
    XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
}

static void xbzrle_load_cleanup(void)
{
    g_free(XBZRLE.decoded_buf);
    XBZRLE.decoded_buf = NULL;
}
static void ram_state_cleanup(RAMState **rsp)
{
    if (*rsp) {
        migration_page_queue_free(*rsp);
        qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
        qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
        g_free(*rsp);
        *rsp = NULL;
    }
}
static void xbzrle_cleanup(void)
{
    XBZRLE_cache_lock();
    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        g_free(XBZRLE.zero_target_page);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
        XBZRLE.zero_target_page = NULL;
    }
    XBZRLE_cache_unlock();
}
static void ram_save_cleanup(void *opaque)
{
    RAMState **rsp = opaque;
    RAMBlock *block;

    /* We don't use dirty log with background snapshots */
    if (!migrate_background_snapshot()) {
        /*
         * The caller holds the iothread lock or is in a bh, so there is
         * no writing race against the migration bitmap.
         */
        if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
            /*
             * do not stop dirty log without starting it, since
             * memory_global_dirty_log_stop will assert that
             * memory_global_dirty_log_start/stop used in pairs
             */
            memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
        }
    }

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        g_free(block->clear_bmap);
        block->clear_bmap = NULL;
        g_free(block->bmap);
        block->bmap = NULL;
    }

    xbzrle_cleanup();
    compress_threads_save_cleanup();
    ram_state_cleanup(rsp);
    g_free(migration_ops);
    migration_ops = NULL;
}
static void ram_state_reset(RAMState *rs)
{
    int i;

    for (i = 0; i < RAM_CHANNEL_MAX; i++) {
        rs->pss[i].last_sent_block = NULL;
    }

    rs->last_seen_block = NULL;
    rs->last_page = 0;
    rs->last_version = ram_list.version;
    rs->xbzrle_started = false;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */
/* **** functions for postcopy ***** */

void ram_postcopy_migrated_memory_release(MigrationState *ms)
{
    struct RAMBlock *block;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        unsigned long *bitmap = block->bmap;
        unsigned long range = block->used_length >> TARGET_PAGE_BITS;
        unsigned long run_start = find_next_zero_bit(bitmap, range, 0);

        while (run_start < range) {
            unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
            ram_discard_range(block->idstr,
                              ((ram_addr_t)run_start) << TARGET_PAGE_BITS,
                              ((ram_addr_t)(run_end - run_start))
                                << TARGET_PAGE_BITS);
            run_start = find_next_zero_bit(bitmap, range, run_end + 1);
        }
    }
}
/**
 * postcopy_send_discard_bm_ram: discard a RAMBlock
 *
 * Callback from postcopy_each_ram_send_discard for each RAMBlock
 *
 * @ms: current migration state
 * @block: RAMBlock to discard
 */
static void postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
{
    unsigned long end = block->used_length >> TARGET_PAGE_BITS;
    unsigned long current;
    unsigned long *bitmap = block->bmap;

    for (current = 0; current < end; ) {
        unsigned long one = find_next_bit(bitmap, end, current);
        unsigned long zero, discard_length;

        if (one >= end) {
            break;
        }

        zero = find_next_zero_bit(bitmap, end, one + 1);

        if (zero >= end) {
            discard_length = end - one;
        } else {
            discard_length = zero - one;
        }
        postcopy_discard_send_range(ms, one, discard_length);
        current = one + discard_length;
    }
}

static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block);
/**
 * postcopy_each_ram_send_discard: discard all RAMBlocks
 *
 * Utility for the outgoing postcopy code.
 *   Calls postcopy_send_discard_bm_ram for each RAMBlock
 *   passing it bitmap indexes and name.
 * (qemu_ram_foreach_block ends up passing unscaled lengths
 *  which would mean postcopy code would have to deal with target page)
 *
 * @ms: current migration state
 */
static void postcopy_each_ram_send_discard(MigrationState *ms)
{
    struct RAMBlock *block;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        postcopy_discard_send_init(ms, block->idstr);

        /*
         * Deal with TPS != HPS and huge pages.  It discards any partially sent
         * host-page size chunks, and marks any partially dirty host-page size
         * chunks as all dirty.  In this case the host-page is the host-page
         * for the particular RAMBlock, i.e. it might be a huge page.
         */
        postcopy_chunk_hostpages_pass(ms, block);

        /*
         * Postcopy sends chunks of bitmap over the wire, but it
         * just needs indexes at this point, avoids it having
         * target page specific code.
         */
        postcopy_send_discard_bm_ram(ms, block);
        postcopy_discard_send_finish(ms);
    }
}
/**
 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
 *
 * Helper for postcopy_chunk_hostpages; it's called twice to
 * canonicalize the two bitmaps, that are similar, but one is
 * inverted.
 *
 * Postcopy requires that all target pages in a hostpage are dirty or
 * clean, not a mix.  This function canonicalizes the bitmaps.
 *
 * @ms: current migration state
 * @block: block that contains the page we want to canonicalize
 */
static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block)
{
    RAMState *rs = ram_state;
    unsigned long *bitmap = block->bmap;
    unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
    unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
    unsigned long run_start;

    if (block->page_size == TARGET_PAGE_SIZE) {
        /* Easy case - TPS==HPS for a non-huge page RAMBlock */
        return;
    }

    /* Find a dirty page */
    run_start = find_next_bit(bitmap, pages, 0);

    while (run_start < pages) {

        /*
         * If the start of this run of pages is in the middle of a host
         * page, then we need to fixup this host page.
         */
        if (QEMU_IS_ALIGNED(run_start, host_ratio)) {
            /* Find the end of this run */
            run_start = find_next_zero_bit(bitmap, pages, run_start + 1);
            /*
             * If the end isn't at the start of a host page, then the
             * run doesn't finish at the end of a host page
             * and we need to discard.
             */
        }

        if (!QEMU_IS_ALIGNED(run_start, host_ratio)) {
            unsigned long page;
            unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start,
                                                             host_ratio);
            run_start = QEMU_ALIGN_UP(run_start, host_ratio);

            /* Clean up the bitmap */
            for (page = fixup_start_addr;
                 page < fixup_start_addr + host_ratio; page++) {
                /*
                 * Remark them as dirty, updating the count for any pages
                 * that weren't previously dirty.
                 */
                rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
            }
        }

        /* Find the next dirty page for the next iteration */
        run_start = find_next_bit(bitmap, pages, run_start);
    }
}
/**
 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
 *
 * Transmit the set of pages to be discarded after precopy to the target
 * these are pages that:
 *     a) Have been previously transmitted but are now dirty again
 *     b) Pages that have never been transmitted, this ensures that
 *        any pages on the destination that have been mapped by background
 *        tasks get discarded (transparent huge pages is the specific concern)
 * Hopefully this is pretty sparse
 *
 * @ms: current migration state
 */
void ram_postcopy_send_discard_bitmap(MigrationState *ms)
{
    RAMState *rs = ram_state;

    RCU_READ_LOCK_GUARD();

    /* This should be our last sync, the src is now paused */
    migration_bitmap_sync(rs, false);

    /* Easiest way to make sure we don't resume in the middle of a host-page */
    rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
    rs->last_seen_block = NULL;
    rs->last_page = 0;

    postcopy_each_ram_send_discard(ms);

    trace_ram_postcopy_send_discard_bitmap();
}
/**
 * ram_discard_range: discard dirtied pages at the beginning of postcopy
 *
 * Returns zero on success
 *
 * @rbname:  name of the RAMBlock of the request. NULL means the
 *           same that last one.
 * @start: RAMBlock starting page
 * @length: RAMBlock size
 */
int ram_discard_range(const char *rbname, uint64_t start, size_t length)
{
    trace_ram_discard_range(rbname, start, length);

    RCU_READ_LOCK_GUARD();
    RAMBlock *rb = qemu_ram_block_by_name(rbname);

    if (!rb) {
        error_report("ram_discard_range: Failed to find block '%s'", rbname);
        return -1;
    }

    /*
     * On source VM, we don't need to update the received bitmap since
     * we don't even have one.
     */
    if (rb->receivedmap) {
        bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
                     length >> qemu_target_page_bits());
    }

    return ram_block_discard_range(rb, start, length);
}
/*
 * For every allocation, we will try not to crash the VM if the
 * allocation failed.
 */
static int xbzrle_init(void)
{
    Error *local_err = NULL;

    if (!migrate_xbzrle()) {
        return 0;
    }

    XBZRLE_cache_lock();

    XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
    if (!XBZRLE.zero_target_page) {
        error_report("%s: Error allocating zero page", __func__);
        goto err_out;
    }

    XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
                              TARGET_PAGE_SIZE, &local_err);
    if (!XBZRLE.cache) {
        error_report_err(local_err);
        goto free_zero_page;
    }

    XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
    if (!XBZRLE.encoded_buf) {
        error_report("%s: Error allocating encoded_buf", __func__);
        goto free_cache;
    }

    XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
    if (!XBZRLE.current_buf) {
        error_report("%s: Error allocating current_buf", __func__);
        goto free_encoded_buf;
    }

    /* We are all good */
    XBZRLE_cache_unlock();
    return 0;

free_encoded_buf:
    g_free(XBZRLE.encoded_buf);
    XBZRLE.encoded_buf = NULL;
free_cache:
    cache_fini(XBZRLE.cache);
    XBZRLE.cache = NULL;
free_zero_page:
    g_free(XBZRLE.zero_target_page);
    XBZRLE.zero_target_page = NULL;
err_out:
    XBZRLE_cache_unlock();
    return -ENOMEM;
}
static int ram_state_init(RAMState **rsp)
{
    *rsp = g_try_new0(RAMState, 1);

    if (!*rsp) {
        error_report("%s: Init ramstate fail", __func__);
        return -1;
    }

    qemu_mutex_init(&(*rsp)->bitmap_mutex);
    qemu_mutex_init(&(*rsp)->src_page_req_mutex);
    QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
    (*rsp)->ram_bytes_total = ram_bytes_total();

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     * This must match with the initial values of dirty bitmap.
     */
    (*rsp)->migration_dirty_pages = (*rsp)->ram_bytes_total >> TARGET_PAGE_BITS;
    ram_state_reset(*rsp);

    return 0;
}
static void ram_list_init_bitmaps(void)
{
    MigrationState *ms = migrate_get_current();
    RAMBlock *block;
    unsigned long pages;
    uint8_t shift;

    /* Skip setting bitmap if there is no RAM */
    if (ram_bytes_total()) {
        shift = ms->clear_bitmap_shift;
        if (shift > CLEAR_BITMAP_SHIFT_MAX) {
            error_report("clear_bitmap_shift (%u) too big, using "
                         "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX);
            shift = CLEAR_BITMAP_SHIFT_MAX;
        } else if (shift < CLEAR_BITMAP_SHIFT_MIN) {
            error_report("clear_bitmap_shift (%u) too small, using "
                         "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN);
            shift = CLEAR_BITMAP_SHIFT_MIN;
        }

        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            pages = block->max_length >> TARGET_PAGE_BITS;
            /*
             * The initial dirty bitmap for migration must be set with all
             * ones to make sure we'll migrate every guest RAM page to
             * the destination.
             * Here we set RAMBlock.bmap all to 1 because when restarting a
             * new migration after a failed migration,
             * ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION] doesn't include
             * the whole guest memory.
             */
            block->bmap = bitmap_new(pages);
            bitmap_set(block->bmap, 0, pages);
            block->clear_bmap_shift = shift;
            block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
        }
    }
}
static void migration_bitmap_clear_discarded_pages(RAMState *rs)
{
    unsigned long pages;
    RAMBlock *rb;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        pages = ramblock_dirty_bitmap_clear_discarded_pages(rb);
        rs->migration_dirty_pages -= pages;
    }
}
static void ram_init_bitmaps(RAMState *rs)
{
    qemu_mutex_lock_ramlist();

    WITH_RCU_READ_LOCK_GUARD() {
        ram_list_init_bitmaps();
        /* We don't use dirty log with background snapshots */
        if (!migrate_background_snapshot()) {
            memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
            migration_bitmap_sync_precopy(rs, false);
        }
    }
    qemu_mutex_unlock_ramlist();

    /*
     * After an eventual first bitmap sync, fixup the initial bitmap
     * containing all 1s to exclude any discarded pages from migration.
     */
    migration_bitmap_clear_discarded_pages(rs);
}
static int ram_init_all(RAMState **rsp)
{
    if (ram_state_init(rsp)) {
        return -1;
    }

    if (xbzrle_init()) {
        ram_state_cleanup(rsp);
        return -1;
    }

    ram_init_bitmaps(*rsp);

    return 0;
}
static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
{
    RAMBlock *block;
    uint64_t pages = 0;

    /*
     * Postcopy is not using xbzrle/compression, so no need for that.
     * Also, since the source is already halted, we don't need to care
     * about dirty page logging as well.
     */
    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        pages += bitmap_count_one(block->bmap,
                                  block->used_length >> TARGET_PAGE_BITS);
    }

    /* This may not be aligned with current bitmaps. Recalculate. */
    rs->migration_dirty_pages = pages;

    ram_state_reset(rs);

    /* Update RAMState cache of output QEMUFile */
    rs->pss[RAM_CHANNEL_PRECOPY].pss_channel = out;

    trace_ram_state_resume_prepare(pages);
}
/*
 * This function clears bits of the free pages reported by the caller from the
 * migration dirty bitmap. @addr is the host address corresponding to the
 * start of the continuous guest free pages, and @len is the total bytes of
 * those pages.
 */
void qemu_guest_free_page_hint(void *addr, size_t len)
{
    RAMBlock *block;
    ram_addr_t offset;
    size_t used_len, start, npages;
    MigrationState *s = migrate_get_current();

    /* This function is currently expected to be used during live migration */
    if (!migration_is_setup_or_active(s->state)) {
        return;
    }

    for (; len > 0; len -= used_len, addr += used_len) {
        block = qemu_ram_block_from_host(addr, false, &offset);
        if (unlikely(!block || offset >= block->used_length)) {
            /*
             * The implementation might not support RAMBlock resize during
             * live migration, but it could happen in theory with future
             * updates. So we add a check here to capture that case.
             */
            error_report_once("%s unexpected error", __func__);
            return;
        }

        if (len <= block->used_length - offset) {
            used_len = len;
        } else {
            used_len = block->used_length - offset;
        }

        start = offset >> TARGET_PAGE_BITS;
        npages = used_len >> TARGET_PAGE_BITS;

        qemu_mutex_lock(&ram_state->bitmap_mutex);
        /*
         * The skipped free pages are equivalent to already-sent pages from
         * clear_bmap's perspective, so clear the bits from the memory region
         * bitmap which are initially set. Otherwise those skipped pages will
         * be sent in the next round after syncing from the memory region
         * bitmap.
         */
        migration_clear_memory_region_dirty_bitmap_range(block, start, npages);
        ram_state->migration_dirty_pages -=
                      bitmap_count_one_with_offset(block->bmap, start, npages);
        bitmap_clear(block->bmap, start, npages);
        qemu_mutex_unlock(&ram_state->bitmap_mutex);
    }
}
/*
 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
 * long-running RCU critical section.  When rcu-reclaims in the code
 * start to become numerous it will be necessary to reduce the
 * granularity of these critical sections.
 */

/**
 * ram_save_setup: Setup RAM for migration
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMState **rsp = opaque;
    RAMBlock *block;
    int ret;

    if (compress_threads_save_setup()) {
        return -1;
    }

    /* migration has already setup the bitmap, reuse it. */
    if (!migration_in_colo_state()) {
        if (ram_init_all(rsp) != 0) {
            compress_threads_save_cleanup();
            return -1;
        }
    }
    (*rsp)->pss[RAM_CHANNEL_PRECOPY].pss_channel = f;

    WITH_RCU_READ_LOCK_GUARD() {
        qemu_put_be64(f, ram_bytes_total_with_ignored()
                         | RAM_SAVE_FLAG_MEM_SIZE);

        RAMBLOCK_FOREACH_MIGRATABLE(block) {
            qemu_put_byte(f, strlen(block->idstr));
            qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
            qemu_put_be64(f, block->used_length);
            if (migrate_postcopy_ram() && block->page_size !=
                                          qemu_host_page_size) {
                qemu_put_be64(f, block->page_size);
            }
            if (migrate_ignore_shared()) {
                qemu_put_be64(f, block->mr->addr);
            }
        }
    }

    ret = rdma_registration_start(f, RAM_CONTROL_SETUP);
    if (ret < 0) {
        qemu_file_set_error(f, ret);
        return ret;
    }

    ret = rdma_registration_stop(f, RAM_CONTROL_SETUP);
    if (ret < 0) {
        qemu_file_set_error(f, ret);
        return ret;
    }

    migration_ops = g_malloc0(sizeof(MigrationOps));
    migration_ops->ram_save_target_page = ram_save_target_page_legacy;

    qemu_mutex_unlock_iothread();
    ret = multifd_send_sync_main(f);
    qemu_mutex_lock_iothread();
    if (ret < 0) {
        return ret;
    }

    if (migrate_multifd() && !migrate_multifd_flush_after_each_section()) {
        qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
    }

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    qemu_fflush(f);

    return 0;
}
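/*
 * Layout of the setup record written above (the RAM_SAVE_FLAG_MEM_SIZE
 * section), as produced by ram_save_setup() and consumed by
 * parse_ramblocks() on the destination:
 *   be64: total ram bytes ORed with RAM_SAVE_FLAG_MEM_SIZE
 *   for each migratable block:
 *     byte:  strlen(idstr)
 *     bytes: idstr (not NUL terminated)
 *     be64:  used_length
 *     be64:  page_size  (only if postcopy-ram and page_size != host page size)
 *     be64:  mr->addr   (only if ignore-shared is enabled)
 */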
/**
 * ram_save_iterate: iterative stage for migration
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;
    int ret = 0;
    int i;
    int64_t t0;
    int done = 0;

    if (blk_mig_bulk_active()) {
        /* Avoid transferring ram during bulk phase of block migration as
         * the bulk phase will usually take a long time and transferring
         * ram updates during that time is pointless. */
        goto out;
    }

    /*
     * We'll take this lock a little bit long, but it's okay for two reasons.
     * Firstly, the only possible other thread to take it is who calls
     * qemu_guest_free_page_hint(), which should be rare; secondly, see
     * MAX_WAIT (if curious, further see commit 4508bd9ed8053ce) below, which
     * guarantees that we'll at least release it on a regular basis.
     */
    qemu_mutex_lock(&rs->bitmap_mutex);
    WITH_RCU_READ_LOCK_GUARD() {
        if (ram_list.version != rs->last_version) {
            ram_state_reset(rs);
        }

        /* Read version before ram_list.blocks */
        smp_rmb();

        ret = rdma_registration_start(f, RAM_CONTROL_ROUND);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            goto out;
        }

        t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        i = 0;
        while ((ret = migration_rate_exceeded(f)) == 0 ||
               postcopy_has_request(rs)) {
            int pages;

            if (qemu_file_get_error(f)) {
                break;
            }

            pages = ram_find_and_save_block(rs);
            /* no more pages to send */
            if (pages == 0) {
                done = 1;
                break;
            }

            if (pages < 0) {
                qemu_file_set_error(f, pages);
                break;
            }

            rs->target_page_count += pages;

            /*
             * During postcopy, it is necessary to make sure one whole host
             * page is sent in one chunk.
             */
            if (migrate_postcopy_ram()) {
                compress_flush_data();
            }

            /*
             * we want to check in the 1st loop, just in case it was the 1st
             * time and we had to sync the dirty bitmap.
             * qemu_clock_get_ns() is a bit expensive, so we only check once
             * every few iterations.
             */
            if ((i & 63) == 0) {
                uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
                              1000000;
                if (t1 > MAX_WAIT) {
                    trace_ram_save_iterate_big_wait(t1, i);
                    break;
                }
            }
            i++;
        }
    }
    qemu_mutex_unlock(&rs->bitmap_mutex);

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ret = rdma_registration_stop(f, RAM_CONTROL_ROUND);
    if (ret < 0) {
        qemu_file_set_error(f, ret);
    }

out:
    if (ret >= 0
        && migration_is_setup_or_active(migrate_get_current()->state)) {
        if (migrate_multifd() && migrate_multifd_flush_after_each_section()) {
            ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel);
            if (ret < 0) {
                return ret;
            }
        }

        qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
        qemu_fflush(f);
        ram_transferred_add(8);

        ret = qemu_file_get_error(f);
    }
    if (ret < 0) {
        return ret;
    }

    return done;
}
/**
 * ram_save_complete: function called to send the remaining amount of ram
 *
 * Returns zero to indicate success or negative on error
 *
 * Called with iothread lock
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;
    int ret = 0;

    rs->last_stage = !migration_in_colo_state();

    WITH_RCU_READ_LOCK_GUARD() {
        if (!migration_in_postcopy()) {
            migration_bitmap_sync_precopy(rs, true);
        }

        ret = rdma_registration_start(f, RAM_CONTROL_FINISH);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            return ret;
        }

        /* try transferring iterative blocks of memory */

        /* flush all remaining blocks regardless of rate limiting */
        qemu_mutex_lock(&rs->bitmap_mutex);
        while (true) {
            int pages;

            pages = ram_find_and_save_block(rs);
            /* no more blocks to send */
            if (pages == 0) {
                break;
            }
            if (pages < 0) {
                qemu_mutex_unlock(&rs->bitmap_mutex);
                return pages;
            }
        }
        qemu_mutex_unlock(&rs->bitmap_mutex);

        compress_flush_data();

        int rdma_reg_ret = rdma_registration_stop(f, RAM_CONTROL_FINISH);
        if (rdma_reg_ret < 0) {
            qemu_file_set_error(f, rdma_reg_ret);
            return rdma_reg_ret;
        }
    }

    ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel);
    if (ret < 0) {
        return ret;
    }

    if (migrate_multifd() && !migrate_multifd_flush_after_each_section()) {
        qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
    }
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    qemu_fflush(f);

    return 0;
}
static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy,
                                       uint64_t *can_postcopy)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;

    uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;

    if (migrate_postcopy_ram()) {
        /* We can do postcopy, and all the data is postcopiable */
        *can_postcopy += remaining_size;
    } else {
        *must_precopy += remaining_size;
    }
}
static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy,
                                    uint64_t *can_postcopy)
{
    MigrationState *s = migrate_get_current();
    RAMState **temp = opaque;
    RAMState *rs = *temp;

    uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;

    if (!migration_in_postcopy() && remaining_size < s->threshold_size) {
        qemu_mutex_lock_iothread();
        WITH_RCU_READ_LOCK_GUARD() {
            migration_bitmap_sync_precopy(rs, false);
        }
        qemu_mutex_unlock_iothread();
        remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
    }

    if (migrate_postcopy_ram()) {
        /* We can do postcopy, and all the data is postcopiable */
        *can_postcopy += remaining_size;
    } else {
        *must_precopy += remaining_size;
    }
}
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    unsigned int xh_len;
    int xh_flags;
    uint8_t *loaded_data;

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        error_report("Failed to load XBZRLE page - wrong compression!");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        error_report("Failed to load XBZRLE page - len overflow!");
        return -1;
    }
    loaded_data = XBZRLE.decoded_buf;
    /* load data and decode */
    /* it can change loaded_data to point to an internal buffer */
    qemu_get_buffer_in_place(f, &loaded_data, xh_len);

    /* decode RLE */
    if (xbzrle_decode_buffer(loaded_data, xh_len, host,
                             TARGET_PAGE_SIZE) == -1) {
        error_report("Failed to load XBZRLE page - decode error!");
        return -1;
    }

    return 0;
}
/**
 * ram_block_from_stream: read a RAMBlock id from the migration stream
 *
 * Must be called from within a rcu critical section.
 *
 * Returns a pointer from within the RCU-protected ram_list.
 *
 * @mis: the migration incoming state pointer
 * @f: QEMUFile where to read the data from
 * @flags: Page flags (mostly to see if it's a continuation of previous block)
 * @channel: the channel we're using
 */
static inline RAMBlock *ram_block_from_stream(MigrationIncomingState *mis,
                                              QEMUFile *f, int flags,
                                              int channel)
{
    RAMBlock *block = mis->last_recv_block[channel];
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            error_report("Ack, bad migration stream!");
            return NULL;
        }
        return block;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    block = qemu_ram_block_by_name(id);
    if (!block) {
        error_report("Can't find block %s", id);
        return NULL;
    }

    if (migrate_ram_is_ignored(block)) {
        error_report("block %s should not be migrated !", id);
        return NULL;
    }

    mis->last_recv_block[channel] = block;

    return block;
}
static inline void *host_from_ram_block_offset(RAMBlock *block,
                                               ram_addr_t offset)
{
    if (!offset_in_ramblock(block, offset)) {
        return NULL;
    }

    return block->host + offset;
}
static void *host_page_from_ram_block_offset(RAMBlock *block,
                                             ram_addr_t offset)
{
    /* Note: Explicitly no check against offset_in_ramblock(). */
    return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block->host + offset),
                                   block->page_size);
}

static ram_addr_t host_page_offset_from_ram_block_offset(RAMBlock *block,
                                                          ram_addr_t offset)
{
    return ((uintptr_t)block->host + offset) & (block->page_size - 1);
}
void colo_record_bitmap(RAMBlock *block, ram_addr_t *normal, uint32_t pages)
{
    qemu_mutex_lock(&ram_state->bitmap_mutex);
    for (int i = 0; i < pages; i++) {
        ram_addr_t offset = normal[i];
        ram_state->migration_dirty_pages += !test_and_set_bit(
                                                offset >> TARGET_PAGE_BITS,
                                                block->bmap);
    }
    qemu_mutex_unlock(&ram_state->bitmap_mutex);
}
static inline void *colo_cache_from_block_offset(RAMBlock *block,
                             ram_addr_t offset, bool record_bitmap)
{
    if (!offset_in_ramblock(block, offset)) {
        return NULL;
    }
    if (!block->colo_cache) {
        error_report("%s: colo_cache is NULL in block :%s",
                     __func__, block->idstr);
        return NULL;
    }

    /*
     * During colo checkpoint, we need bitmap of these migrated pages.
     * It helps us to decide which pages in ram cache should be flushed
     * into VM's RAM later.
     */
    if (record_bitmap) {
        colo_record_bitmap(block, &offset, 1);
    }
    return block->colo_cache + offset;
}
/**
 * ram_handle_zero: handle the zero page case
 *
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 *
 * @host: host address for the zero page
 * @size: size of the zero page
 */
void ram_handle_zero(void *host, uint64_t size)
{
    if (!buffer_is_zero(host, size)) {
        memset(host, 0, size);
    }
}
static void colo_init_ram_state(void)
{
    ram_state_init(&ram_state);
}

/*
 * colo cache: this is for secondary VM, we cache the whole
 * memory of the secondary VM; the global lock must be held
 * to call this helper.
 */
int colo_init_ram_cache(void)
{
    RAMBlock *block;

    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            block->colo_cache = qemu_anon_ram_alloc(block->used_length,
                                                    NULL, false, false);
            if (!block->colo_cache) {
                error_report("%s: Can't alloc memory for COLO cache of block %s,"
                             "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
                             block->used_length);
                RAMBLOCK_FOREACH_NOT_IGNORED(block) {
                    if (block->colo_cache) {
                        qemu_anon_ram_free(block->colo_cache, block->used_length);
                        block->colo_cache = NULL;
                    }
                }
                return -errno;
            }
            if (!machine_dump_guest_core(current_machine)) {
                qemu_madvise(block->colo_cache, block->used_length,
                             QEMU_MADV_DONTDUMP);
            }
        }
    }

    /*
     * Record the dirty pages that are sent by PVM; we use this dirty bitmap
     * to decide which pages in the cache should be flushed into the SVM's
     * RAM. Here we use the same name 'ram_bitmap' as for migration.
     */
    if (ram_bytes_total()) {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
            block->bmap = bitmap_new(pages);
        }
    }

    colo_init_ram_state();
    return 0;
}
/* TODO: duplicated with ram_init_bitmaps */
void colo_incoming_start_dirty_log(void)
{
    RAMBlock *block = NULL;
    /* For memory_global_dirty_log_start below. */
    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();

    memory_global_dirty_log_sync(false);
    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            ramblock_sync_dirty_bitmap(ram_state, block);
            /* Discard this dirty bitmap record */
            bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS);
        }
        memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
    }
    ram_state->migration_dirty_pages = 0;
    qemu_mutex_unlock_ramlist();
    qemu_mutex_unlock_iothread();
}
/* The global lock must be held to call this helper */
void colo_release_ram_cache(void)
{
    RAMBlock *block;

    memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        g_free(block->bmap);
        block->bmap = NULL;
    }

    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            if (block->colo_cache) {
                qemu_anon_ram_free(block->colo_cache, block->used_length);
                block->colo_cache = NULL;
            }
        }
    }
    ram_state_cleanup(&ram_state);
}
/**
 * ram_load_setup: Setup RAM for migration incoming side
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to receive the data
 * @opaque: RAMState pointer
 */
static int ram_load_setup(QEMUFile *f, void *opaque)
{
    xbzrle_load_setup();
    ramblock_recv_map_init();

    return 0;
}

static int ram_load_cleanup(void *opaque)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        qemu_ram_block_writeback(rb);
    }

    xbzrle_load_cleanup();

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        g_free(rb->receivedmap);
        rb->receivedmap = NULL;
    }

    return 0;
}
/**
 * ram_postcopy_incoming_init: allocate postcopy data structures
 *
 * Returns 0 for success and negative if there was one error
 *
 * @mis: current migration incoming state
 *
 * Allocate data structures etc needed by incoming migration with
 * postcopy-ram. postcopy-ram's similarly named
 * postcopy_ram_incoming_init does the work.
 */
int ram_postcopy_incoming_init(MigrationIncomingState *mis)
{
    return postcopy_ram_incoming_init(mis);
}
/**
 * ram_load_postcopy: load a page in postcopy case
 *
 * Returns 0 for success or -errno in case of error
 *
 * Called in postcopy mode by ram_load().
 * rcu_read_lock is taken prior to this being called.
 *
 * @f: QEMUFile where to send the data
 * @channel: the channel to use for loading
 */
int ram_load_postcopy(QEMUFile *f, int channel)
{
    int flags = 0, ret = 0;
    bool place_needed = false;
    bool matches_target_page_size = false;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyTmpPage *tmp_page = &mis->postcopy_tmp_pages[channel];

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr;
        void *page_buffer = NULL;
        void *place_source = NULL;
        RAMBlock *block = NULL;
        uint8_t ch;
        int len;

        addr = qemu_get_be64(f);

        /*
         * If qemu file error, we should stop here, and then "addr"
         * may be invalid.
         */
        ret = qemu_file_get_error(f);
        if (ret) {
            break;
        }

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        trace_ram_load_postcopy_loop(channel, (uint64_t)addr, flags);
        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
                     RAM_SAVE_FLAG_COMPRESS_PAGE)) {
            block = ram_block_from_stream(mis, f, flags, channel);
            if (!block) {
                ret = -EINVAL;
                break;
            }

            /*
             * Relying on used_length is racy and can result in false positives.
             * We might place pages beyond used_length in case RAM was shrunk
             * while in postcopy, which is fine - trying to place via
             * UFFDIO_COPY/UFFDIO_ZEROPAGE will never segfault.
             */
            if (!block->host || addr >= block->postcopy_length) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            tmp_page->target_pages++;
            matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
            /*
             * Postcopy requires that we place whole host pages atomically;
             * these may be huge pages for RAMBlocks that are backed by
             * hugetlbfs.
             * To make it atomic, the data is read into a temporary page
             * that's moved into place later.
             * The migration protocol uses, possibly smaller, target pages;
             * however, the source ensures it always sends all the components
             * of a host page in one chunk.
             */
            page_buffer = tmp_page->tmp_huge_page +
                          host_page_offset_from_ram_block_offset(block, addr);
            /* If all TP are zero then we can optimise the place */
            if (tmp_page->target_pages == 1) {
                tmp_page->host_addr =
                    host_page_from_ram_block_offset(block, addr);
            } else if (tmp_page->host_addr !=
                       host_page_from_ram_block_offset(block, addr)) {
                /* not the 1st TP within the HP */
                error_report("Non-same host page detected on channel %d: "
                             "Target host page %p, received host page %p "
                             "(rb %s offset 0x"RAM_ADDR_FMT" target_pages %d)",
                             channel, tmp_page->host_addr,
                             host_page_from_ram_block_offset(block, addr),
                             block->idstr, addr, tmp_page->target_pages);
                ret = -EINVAL;
                break;
            }

            /*
             * If it's the last part of a host page then we place the host
             * page
             */
            if (tmp_page->target_pages ==
                (block->page_size / TARGET_PAGE_SIZE)) {
                place_needed = true;
            }
            place_source = tmp_page->tmp_huge_page;
        }

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_ZERO:
            ch = qemu_get_byte(f);
            if (ch != 0) {
                error_report("Found a zero page with value %d", ch);
                ret = -EINVAL;
                break;
            }
            /*
             * Can skip to set page_buffer when
             * this is a zero page and (block->page_size == TARGET_PAGE_SIZE).
             */
            if (!matches_target_page_size) {
                memset(page_buffer, ch, TARGET_PAGE_SIZE);
            }
            break;

        case RAM_SAVE_FLAG_PAGE:
            tmp_page->all_zero = false;
            if (!matches_target_page_size) {
                /* For huge pages, we always use temporary buffer */
                qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
            } else {
                /*
                 * For small pages that matches target page size, we
                 * avoid the qemu_file copy.  Instead we directly use
                 * the buffer of QEMUFile to place the page.  Note: we
                 * cannot do any QEMUFile operation before using that
                 * buffer to make sure the buffer is valid when
                 * placing the page.
                 */
                qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
                                         TARGET_PAGE_SIZE);
            }
            break;
        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            tmp_page->all_zero = false;
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
            decompress_data_with_multi_threads(f, page_buffer, len);
            break;
        case RAM_SAVE_FLAG_MULTIFD_FLUSH:
            multifd_recv_sync_main();
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            if (migrate_multifd() &&
                migrate_multifd_flush_after_each_section()) {
                multifd_recv_sync_main();
            }
            break;
        default:
            error_report("Unknown combination of migration flags: 0x%x"
                         " (postcopy mode)", flags);
            ret = -EINVAL;
            break;
        }

        /* Got the whole host page, wait for decompress before placing. */
        if (place_needed) {
            ret |= wait_for_decompress_done();
        }

        /* Detect for any possible file errors */
        if (!ret && qemu_file_get_error(f)) {
            ret = qemu_file_get_error(f);
        }

        if (!ret && place_needed) {
            if (tmp_page->all_zero) {
                ret = postcopy_place_page_zero(mis, tmp_page->host_addr, block);
            } else {
                ret = postcopy_place_page(mis, tmp_page->host_addr,
                                          place_source, block);
            }
            place_needed = false;
            postcopy_temp_page_reset(tmp_page);
        }
    }

    return ret;
}
static bool postcopy_is_running(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
}
/*
 * Flush content of RAM cache into SVM's memory.
 * Only flush the pages that have been dirtied by PVM or SVM or both.
 */
void colo_flush_ram_cache(void)
{
    RAMBlock *block = NULL;
    void *dst_host;
    void *src_host;
    unsigned long offset = 0;

    memory_global_dirty_log_sync(false);
    qemu_mutex_lock(&ram_state->bitmap_mutex);
    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            ramblock_sync_dirty_bitmap(ram_state, block);
        }
    }

    trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
    WITH_RCU_READ_LOCK_GUARD() {
        block = QLIST_FIRST_RCU(&ram_list.blocks);

        while (block) {
            unsigned long num = 0;

            offset = colo_bitmap_find_dirty(ram_state, block, offset, &num);
            if (!offset_in_ramblock(block,
                                    ((ram_addr_t)offset) << TARGET_PAGE_BITS)) {
                offset = 0;
                num = 0;
                block = QLIST_NEXT_RCU(block, next);
            } else {
                unsigned long i = 0;

                for (i = 0; i < num; i++) {
                    migration_bitmap_clear_dirty(ram_state, block, offset + i);
                }
                dst_host = block->host
                         + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
                src_host = block->colo_cache
                         + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
                memcpy(dst_host, src_host, TARGET_PAGE_SIZE * num);
                offset += num;
            }
        }
    }
    qemu_mutex_unlock(&ram_state->bitmap_mutex);
    trace_colo_flush_ram_cache_end();
}
static int parse_ramblock(QEMUFile *f, RAMBlock *block, ram_addr_t length)
{
    int ret = 0;
    /* ADVISE is earlier, it shows the source has the postcopy capability on */
    bool postcopy_advised = migration_incoming_postcopy_advised();

    assert(block);

    if (!qemu_ram_is_migratable(block)) {
        error_report("block %s should not be migrated !", block->idstr);
        return -EINVAL;
    }

    if (length != block->used_length) {
        Error *local_err = NULL;

        ret = qemu_ram_resize(block, length, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return ret;
        }
    }
    /* For postcopy we need to check hugepage sizes match */
    if (postcopy_advised && migrate_postcopy_ram() &&
        block->page_size != qemu_host_page_size) {
        uint64_t remote_page_size = qemu_get_be64(f);
        if (remote_page_size != block->page_size) {
            error_report("Mismatched RAM page size %s "
                         "(local) %zd != %" PRId64, block->idstr,
                         block->page_size, remote_page_size);
            return -EINVAL;
        }
    }
    if (migrate_ignore_shared()) {
        hwaddr addr = qemu_get_be64(f);
        if (migrate_ram_is_ignored(block) &&
            block->mr->addr != addr) {
            error_report("Mismatched GPAs for block %s "
                         "%" PRId64 "!= %" PRId64, block->idstr,
                         (uint64_t)addr, (uint64_t)block->mr->addr);
            return -EINVAL;
        }
    }
    ret = rdma_block_notification_handle(f, block->idstr);
    if (ret < 0) {
        qemu_file_set_error(f, ret);
    }

    return ret;
}
static int parse_ramblocks(QEMUFile *f, ram_addr_t total_ram_bytes)
{
    int ret = 0;

    /* Synchronize RAM block list */
    while (!ret && total_ram_bytes) {
        RAMBlock *block;
        char id[256];
        ram_addr_t length;
        int len = qemu_get_byte(f);

        qemu_get_buffer(f, (uint8_t *)id, len);
        id[len] = 0;
        length = qemu_get_be64(f);

        block = qemu_ram_block_by_name(id);
        if (block) {
            ret = parse_ramblock(f, block, length);
        } else {
            error_report("Unknown ramblock \"%s\", cannot accept "
                         "migration", id);
            ret = -EINVAL;
        }
        total_ram_bytes -= length;
    }

    return ret;
}
/**
 * ram_load_precopy: load pages in precopy case
 *
 * Returns 0 for success or -errno in case of error
 *
 * Called in precopy mode by ram_load().
 * rcu_read_lock is taken prior to this being called.
 *
 * @f: QEMUFile where to send the data
 */
static int ram_load_precopy(QEMUFile *f)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    int flags = 0, ret = 0, invalid_flags = 0, len = 0, i = 0;

    if (!migrate_compress()) {
        invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
    }

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr;
        void *host = NULL, *host_bak = NULL;
        uint8_t ch;

        /*
         * Yield periodically to let main loop run, but an iteration of
         * the main loop is expensive, so only do it every so many
         * iterations.
         */
        if ((i & 32767) == 0 && qemu_in_coroutine()) {
            aio_co_schedule(qemu_get_current_aio_context(),
                            qemu_coroutine_self());
            qemu_coroutine_yield();
        }
        i++;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & invalid_flags) {
            if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
                error_report("Received an unexpected compressed page");
            }

            ret = -EINVAL;
            break;
        }

        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
            RAMBlock *block = ram_block_from_stream(mis, f, flags,
                                                    RAM_CHANNEL_PRECOPY);

            host = host_from_ram_block_offset(block, addr);
            /*
             * After going into COLO stage, we should not load the page
             * into SVM's memory directly; we put it into colo_cache first.
             * NOTE: We need to keep a copy of SVM's ram in colo_cache.
             * Previously, we copied all this memory in the COLO preparing
             * stage while the VM was stopped, which is a time-consuming
             * process. Here we optimize it with a trick: back up every page
             * during the migration process while COLO is enabled. Though it
             * affects the speed of the migration, it clearly reduces the
             * downtime of backing up all of the SVM's memory in the COLO
             * preparing stage.
             */
            if (migration_incoming_colo_enabled()) {
                if (migration_incoming_in_colo_state()) {
                    /* In COLO stage, put all pages into cache temporarily */
                    host = colo_cache_from_block_offset(block, addr, true);
                } else {
                   /*
                    * In migration stage but before COLO stage,
                    * Put all pages into both cache and SVM's memory.
                    */
                    host_bak = colo_cache_from_block_offset(block, addr, false);
                }
            }
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            if (!migration_incoming_in_colo_state()) {
                ramblock_recv_bitmap_set(block, host);
            }

            trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
        }

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            ret = parse_ramblocks(f, addr);
            break;

        case RAM_SAVE_FLAG_ZERO:
            ch = qemu_get_byte(f);
            if (ch != 0) {
                error_report("Found a zero page with value %d", ch);
                ret = -EINVAL;
                break;
            }
            ram_handle_zero(host, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_PAGE:
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
            decompress_data_with_multi_threads(f, host, len);
            break;

        case RAM_SAVE_FLAG_XBZRLE:
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            break;
        case RAM_SAVE_FLAG_MULTIFD_FLUSH:
            multifd_recv_sync_main();
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            if (migrate_multifd() &&
                migrate_multifd_flush_after_each_section()) {
                multifd_recv_sync_main();
            }
            break;
        case RAM_SAVE_FLAG_HOOK:
            ret = rdma_registration_handle(f);
            if (ret < 0) {
                qemu_file_set_error(f, ret);
            }
            break;
        default:
            error_report("Unknown combination of migration flags: 0x%x", flags);
            ret = -EINVAL;
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
        if (!ret && host_bak) {
            memcpy(host_bak, host, TARGET_PAGE_SIZE);
        }
    }

    ret |= wait_for_decompress_done();
    return ret;
}
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int ret = 0;
    static uint64_t seq_iter;
    /*
     * If system is running in postcopy mode, page inserts to host memory must
     * be atomic
     */
    bool postcopy_running = postcopy_is_running();

    seq_iter++;

    if (version_id != 4) {
        return -EINVAL;
    }

    /*
     * This RCU critical section can be very long running.
     * When RCU reclaims in the code start to become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
    WITH_RCU_READ_LOCK_GUARD() {
        if (postcopy_running) {
            /*
             * Note!  Here RAM_CHANNEL_PRECOPY is the precopy channel of
             * postcopy migration, we have another RAM_CHANNEL_POSTCOPY to
             * service fast page faults.
             */
            ret = ram_load_postcopy(f, RAM_CHANNEL_PRECOPY);
        } else {
            ret = ram_load_precopy(f);
        }
    }
    trace_ram_load_complete(ret, seq_iter);

    return ret;
}
static bool ram_has_postcopy(void *opaque)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        if (ramblock_is_pmem(rb)) {
            info_report("Block: %s, host: %p is a nvdimm memory, postcopy "
                        "is not supported now!", rb->idstr, rb->host);
            return false;
        }
    }

    return migrate_postcopy_ram();
}
/* Sync all the dirty bitmap with destination VM.  */
static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
{
    RAMBlock *block;
    QEMUFile *file = s->to_dst_file;

    trace_ram_dirty_bitmap_sync_start();

    qatomic_set(&rs->postcopy_bmap_sync_requested, 0);
    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        qemu_savevm_send_recv_bitmap(file, block->idstr);
        trace_ram_dirty_bitmap_request(block->idstr);
        qatomic_inc(&rs->postcopy_bmap_sync_requested);
    }

    trace_ram_dirty_bitmap_sync_wait();

    /* Wait until all the ramblocks' dirty bitmap synced */
    while (qatomic_read(&rs->postcopy_bmap_sync_requested)) {
        migration_rp_wait(s);
    }

    trace_ram_dirty_bitmap_sync_complete();

    return 0;
}
/*
 * Read the received bitmap, revert it as the initial dirty bitmap.
 * This is only used when the postcopy migration is paused but wants
 * to resume from a middle point.
 */
int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
{
    int ret = -EINVAL;
    /* from_dst_file is always valid because we're within rp_thread */
    QEMUFile *file = s->rp_state.from_dst_file;
    g_autofree unsigned long *le_bitmap = NULL;
    unsigned long nbits = block->used_length >> TARGET_PAGE_BITS;
    uint64_t local_size = DIV_ROUND_UP(nbits, 8);
    uint64_t size, end_mark;
    RAMState *rs = ram_state;

    trace_ram_dirty_bitmap_reload_begin(block->idstr);

    if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: incorrect state %s", __func__,
                     MigrationStatus_str(s->state));
        return -EINVAL;
    }

    /*
     * Note: see comments in ramblock_recv_bitmap_send() on why we
     * need the endianness conversion, and the paddings.
     */
    local_size = ROUND_UP(local_size, 8);

    /* Add paddings */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    size = qemu_get_be64(file);

    /* The size of the bitmap should match with our ramblock */
    if (size != local_size) {
        error_report("%s: ramblock '%s' bitmap size mismatch "
                     "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
                     block->idstr, size, local_size);
        return -EINVAL;
    }

    size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
    end_mark = qemu_get_be64(file);

    ret = qemu_file_get_error(file);
    if (ret || size != local_size) {
        error_report("%s: read bitmap failed for ramblock '%s': %d"
                     " (size 0x%"PRIx64", got: 0x%"PRIx64")",
                     __func__, block->idstr, ret, local_size, size);
        return -EIO;
    }

    if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
        error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIx64,
                     __func__, block->idstr, end_mark);
        return -EINVAL;
    }

    /*
     * Endianness conversion. We are during postcopy (though paused).
     * The dirty bitmap won't change. We can directly modify it.
     */
    bitmap_from_le(block->bmap, le_bitmap, nbits);

    /*
     * What we received is "received bitmap". Revert it as the initial
     * dirty bitmap for this ramblock.
     */
    bitmap_complement(block->bmap, block->bmap, nbits);

    /* Clear dirty bits of discarded ranges that we don't want to migrate. */
    ramblock_dirty_bitmap_clear_discarded_pages(block);

    /* We'll recalculate migration_dirty_pages in ram_state_resume_prepare(). */
    trace_ram_dirty_bitmap_reload_complete(block->idstr);

    qatomic_dec(&rs->postcopy_bmap_sync_requested);

    /*
     * We succeeded to sync bitmap for current ramblock. Always kick the
     * migration thread to check whether all requested bitmaps are
     * reloaded.  NOTE: it's racy to only kick when requested==0, because
     * we don't know whether the migration thread may still be increasing
     * it.
     */
    migration_rp_kick(s);

    return 0;
}
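/*
 * The recv-bitmap message parsed above is laid out as:
 *   be64: bitmap size in bytes (local size rounded up to 8 bytes)
 *   bytes: little-endian bitmap, one bit per target page
 *   be64: RAMBLOCK_RECV_BITMAP_ENDING marker
 * The destination-side writer is ramblock_recv_bitmap_send().
 */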
static int ram_resume_prepare(MigrationState *s, void *opaque)
{
    RAMState *rs = *(RAMState **)opaque;
    int ret;

    ret = ram_dirty_bitmap_sync_all(s, rs);
    if (ret) {
        return ret;
    }

    ram_state_resume_prepare(rs, s->to_dst_file);

    return 0;
}
void postcopy_preempt_shutdown_file(MigrationState *s)
{
    qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS);
    qemu_fflush(s->postcopy_qemufile_src);
}
static SaveVMHandlers savevm_ram_handlers = {
    .save_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .has_postcopy = ram_has_postcopy,
    .state_pending_exact = ram_state_pending_exact,
    .state_pending_estimate = ram_state_pending_estimate,
    .load_state = ram_load,
    .save_cleanup = ram_save_cleanup,
    .load_setup = ram_load_setup,
    .load_cleanup = ram_load_cleanup,
    .resume_prepare = ram_resume_prepare,
};
static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
                                      size_t old_size, size_t new_size)
{
    PostcopyState ps = postcopy_state_get();
    ram_addr_t offset;
    RAMBlock *rb = qemu_ram_block_from_host(host, false, &offset);
    Error *err = NULL;

    if (!rb) {
        error_report("RAM block not found");
        return;
    }

    if (migrate_ram_is_ignored(rb)) {
        return;
    }

    if (!migration_is_idle()) {
        /*
         * Precopy code on the source cannot deal with the size of RAM blocks
         * changing at random points in time - especially after sending the
         * RAM block sizes in the migration stream, they must no longer change.
         * Abort and indicate a proper reason.
         */
        error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr);
        migration_cancel(err);
        error_free(err);
    }

    switch (ps) {
    case POSTCOPY_INCOMING_ADVISE:
        /*
         * Update what ram_postcopy_incoming_init()->init_range() does at the
         * time postcopy was advised. Syncing RAM blocks with the source will
         * result in RAM resizes.
         */
        if (old_size < new_size) {
            if (ram_discard_range(rb->idstr, old_size, new_size - old_size)) {
                error_report("RAM block '%s' discard of resized RAM failed",
                             rb->idstr);
            }
        }
        rb->postcopy_length = new_size;
        break;
    case POSTCOPY_INCOMING_NONE:
    case POSTCOPY_INCOMING_RUNNING:
    case POSTCOPY_INCOMING_END:
        /*
         * Once our guest is running, postcopy does no longer care about
         * resizes. When growing, the new memory was not available on the
         * source, no handler needed.
         */
        break;
    default:
        error_report("RAM block '%s' resized during postcopy state: %d",
                     rb->idstr, ps);
        exit(-1);
    }
}
static RAMBlockNotifier ram_mig_ram_notifier = {
    .ram_block_resized = ram_mig_ram_block_resized,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
    ram_block_notifier_add(&ram_mig_ram_notifier);
}
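/*
 * ram_mig_init() is called once during startup so that the "ram" section
 * handlers above take part in every migration and snapshot, and so that a
 * RAM block resize during an active migration cancels it via the notifier
 * registered above.
 */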